/**************************************************************************
 *
 * Copyright (C) 2000-2008 Alacritech, Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of Alacritech, Inc.
 *
 * Parts developed by LinSysSoft Sahara team
 *
 **************************************************************************/

/*
 * FILENAME: sxg.c
 *
 * The SXG driver for Alacritech's 10Gbe products.
 *
 * NOTE: This is the standard, non-accelerated version of Alacritech's
 *       IS-NIC driver.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>

#define SLIC_GET_STATS_ENABLED		0
#define LINUX_FREES_ADAPTER_RESOURCES	1
#define SXG_OFFLOAD_IP_CHECKSUM		0
#define SXG_POWER_MANAGEMENT_ENABLED	0
#define VPCI				0
#define ATK_DEBUG			1
#define SXG_UCODE_DEBUG			0

#include "sxg_os.h"
#include "sxghw.h"
#include "sxghif.h"
#include "sxg.h"
#include "sxgdbg.h"
#include "sxgphycode-1.2.h"

static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
				      enum sxg_buffer_type BufferType);
static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
					  void *RcvBlock,
					  dma_addr_t PhysicalAddress,
					  u32 Length);
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
					     struct sxg_scatter_gather *SxgSgl,
					     dma_addr_t PhysicalAddress,
					     u32 Length);

static void sxg_mcast_init_crc32(void);
static int sxg_entry_open(struct net_device *dev);
static int sxg_second_open(struct net_device *dev);
static int sxg_entry_halt(struct net_device *dev);
static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
			struct sxg_scatter_gather *SxgSgl);

static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
				 int budget);
static void sxg_interrupt(struct adapter_t *adapter);
static int sxg_poll(struct napi_struct *napi, int budget);
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
				   int *sxg_napi_continue, int *work_done,
				   int budget);
static void sxg_complete_slow_send(struct adapter_t *adapter);
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
					struct sxg_event *Event);
static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
static bool sxg_mac_filter(struct adapter_t *adapter,
			   struct ether_header *EtherHdr, ushort length);
static struct net_device_stats *sxg_get_stats(struct net_device *dev);
void sxg_free_resources(struct adapter_t *adapter);
void sxg_free_rcvblocks(struct adapter_t *adapter);
void sxg_free_sgl_buffers(struct adapter_t *adapter);
void sxg_unmap_resources(struct adapter_t *adapter);
void sxg_free_mcast_addrs(struct adapter_t *adapter);
void sxg_collect_statistics(struct adapter_t *adapter);
static int sxg_register_interrupt(struct adapter_t *adapter);
static void sxg_remove_isr(struct adapter_t *adapter);
static irqreturn_t sxg_isr(int irq, void *dev_id);

static void sxg_watchdog(unsigned long data);
static void sxg_update_link_status(struct work_struct *work);

#define XXXTODO 0

#if XXXTODO
static int sxg_mac_set_address(struct net_device *dev, void *ptr);
#endif
static void sxg_mcast_set_list(struct net_device *dev);

static int sxg_adapter_set_hwaddr(struct adapter_t *adapter);

static int sxg_initialize_adapter(struct adapter_t *adapter);
static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
					   unsigned char Index);
int sxg_change_mtu(struct net_device *netdev, int new_mtu);
static int sxg_initialize_link(struct adapter_t *adapter);
static int sxg_phy_init(struct adapter_t *adapter);
static void sxg_link_event(struct adapter_t *adapter);
static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
static void sxg_link_state(struct adapter_t *adapter,
			   enum SXG_LINK_STATE LinkState);
static int sxg_write_mdio_reg(struct adapter_t *adapter,
			      u32 DevAddr, u32 RegAddr, u32 Value);
static int sxg_read_mdio_reg(struct adapter_t *adapter,
			     u32 DevAddr, u32 RegAddr, u32 *pValue);
static void sxg_set_mcast_addr(struct adapter_t *adapter);

static unsigned int sxg_first_init = 1;
static char *sxg_banner =
    "Alacritech SLIC Technology(tm) Server and Storage "
    "10Gbe Accelerator (Non-Accelerated)\n";

static int sxg_debug = 1;
static int debug = -1;
static struct net_device *head_netdevice = NULL;

static struct sxgbase_driver sxg_global = {
	.dynamic_intagg = 1,
};
static int intagg_delay = 100;
static u32 dynamic_intagg = 0;

char sxg_driver_name[] = "sxg_nic";
#define DRV_AUTHOR "Alacritech, Inc. Engineering"
#define DRV_DESCRIPTION \
	"Alacritech SLIC Technology(tm) Non-Accelerated 10Gbe Driver"
#define DRV_COPYRIGHT \
	"Copyright 2000-2008 Alacritech, Inc. All rights reserved."

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

module_param(dynamic_intagg, int, 0);
MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
module_param(intagg_delay, int, 0);
MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");

static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
	{0,}
};

MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);

static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
{
	writel(value, reg);
	if (flush)
		mb();
}

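/*
 * A note on sxg_reg64_write() below: the high dword is staged in the
 * per-CPU "Upper" register, and (as far as the hardware interface
 * suggests) the adapter combines it with the following 32-bit write to
 * the target register to form one 64-bit update.  Both writes therefore
 * happen under Bit64RegLock so concurrent 64-bit writes cannot
 * interleave their halves.
 */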
static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
				   u64 value, u32 cpu)
{
	u32 value_high = (u32) (value >> 32);
	u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
	unsigned long flags;

	spin_lock_irqsave(&adapter->Bit64RegLock, flags);
	writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
	writel(value_low, reg);
	spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
}

static void sxg_init_driver(void)
{
	if (sxg_first_init) {
		DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
			  __func__, jiffies);
		sxg_first_init = 0;
		spin_lock_init(&sxg_global.driver_lock);
	}
}

static void sxg_dbg_macaddrs(struct adapter_t *adapter)
{
	DBG_ERROR("  (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->currmacaddr[0],
		  adapter->currmacaddr[1], adapter->currmacaddr[2],
		  adapter->currmacaddr[3], adapter->currmacaddr[4],
		  adapter->currmacaddr[5]);
	DBG_ERROR("  (%s) mac  %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->macaddr[0],
		  adapter->macaddr[1], adapter->macaddr[2],
		  adapter->macaddr[3], adapter->macaddr[4],
		  adapter->macaddr[5]);
	return;
}

/* SXG Globals */
static struct sxg_driver SxgDriver;

#ifdef ATKDBG
static struct sxg_trace_buffer LSxgTraceBuffer;
#endif /* ATKDBG */
static struct sxg_trace_buffer *SxgTraceBuffer = NULL;

/*
 * MSI Related API's
 */
int sxg_register_intr(struct adapter_t *adapter);
int sxg_enable_msi_x(struct adapter_t *adapter);
int sxg_add_msi_isr(struct adapter_t *adapter);
void sxg_remove_msix_isr(struct adapter_t *adapter);
int sxg_set_interrupt_capability(struct adapter_t *adapter);

int sxg_set_interrupt_capability(struct adapter_t *adapter)
{
	int ret;

	ret = sxg_enable_msi_x(adapter);
	if (ret != STATUS_SUCCESS) {
		adapter->msi_enabled = FALSE;
		DBG_ERROR("sxg_set_interrupt_capability MSI-X Disable\n");
	} else {
		adapter->msi_enabled = TRUE;
		DBG_ERROR("sxg_set_interrupt_capability MSI-X Enable\n");
	}
	return ret;
}

int sxg_register_intr(struct adapter_t *adapter)
{
	int ret = 0;

	if (adapter->msi_enabled) {
		ret = sxg_add_msi_isr(adapter);
	} else {
		DBG_ERROR("MSI-X Enable Failed. Using Pin INT\n");
		ret = sxg_register_interrupt(adapter);
		if (ret != STATUS_SUCCESS) {
			DBG_ERROR("sxg_register_interrupt Failed\n");
		}
	}
	return ret;
}

int sxg_enable_msi_x(struct adapter_t *adapter)
{
	int ret;

	adapter->nr_msix_entries = 1;
	adapter->msi_entries = kzalloc(adapter->nr_msix_entries *
				       sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msi_entries) {
		DBG_ERROR("%s:MSI Entries memory allocation Failed\n",
			  __func__);
		return -ENOMEM;
	}

	ret = pci_enable_msix(adapter->pcidev, adapter->msi_entries,
			      adapter->nr_msix_entries);
	if (ret) {
		DBG_ERROR("Enabling MSI-X with %d vectors failed\n",
			  adapter->nr_msix_entries);
		/*
		 * A positive return is the number of vectors actually
		 * available; we could retry with that smaller count.
		 */
		kfree(adapter->msi_entries);
		return STATUS_FAILURE;	/* MSI-X enable failed. */
	}
	return (STATUS_SUCCESS);
}

int sxg_add_msi_isr(struct adapter_t *adapter)
{
	int ret, i;

	if (!adapter->intrregistered) {
		for (i = 0; i < adapter->nr_msix_entries; i++) {
			ret = request_irq(adapter->msi_entries[i].vector,
					  sxg_isr,
					  IRQF_SHARED,
					  adapter->netdev->name,
					  adapter->netdev);
			if (ret) {
				DBG_ERROR("sxg: MSI-X request_irq (%s) "
					  "FAILED [%x]\n",
					  adapter->netdev->name, ret);
				return (ret);
			}
		}
	}
	adapter->msi_enabled = TRUE;
	adapter->intrregistered = 1;
	adapter->IntRegistered = TRUE;
	return (STATUS_SUCCESS);
}

void sxg_remove_msix_isr(struct adapter_t *adapter)
{
	int i, vector;
	struct net_device *netdev = adapter->netdev;

	for (i = 0; i < adapter->nr_msix_entries; i++) {
		vector = adapter->msi_entries[i].vector;
		DBG_ERROR("%s : Freeing IRQ vector#%d\n", __func__, vector);
		free_irq(vector, netdev);
	}
}

static void sxg_remove_isr(struct adapter_t *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msi_enabled)
		sxg_remove_msix_isr(adapter);
	else
		free_irq(adapter->netdev->irq, netdev);
}

void sxg_reset_interrupt_capability(struct adapter_t *adapter)
{
	if (adapter->msi_enabled) {
		pci_disable_msix(adapter->pcidev);
		kfree(adapter->msi_entries);
		adapter->msi_entries = NULL;
	}
	return;
}

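/*
 * Interrupt setup flow, for reference: sxg_entry_probe() calls
 * sxg_set_interrupt_capability() to try MSI-X first; later (presumably at
 * device open) sxg_register_intr() hooks sxg_isr() through either the
 * MSI-X vector table or the legacy pin interrupt.  Teardown mirrors this:
 * sxg_remove_isr(), then sxg_reset_interrupt_capability().
 */
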
/*
 * sxg_download_microcode
 *
 * Download microcode to the Sahara adapter, using the Linux
 * firmware module to get the ucode.sys file.
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	UcodeSel	- microcode file selection
 *
 * Return
 *	bool - TRUE on success
 */
static bool sxg_download_microcode(struct adapter_t *adapter,
				   enum SXG_UCODE_SEL UcodeSel)
{
	const struct firmware *fw;
	const char *file = "";
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	int ret;
	int ucode_start;
	u32 Section;
	u32 ThisSectionSize;
	u32 instruction = 0;
	u32 BaseAddress, AddressOffset, Address;
	/* u32 Failure; */
	u32 ValueRead;
	u32 i;
	u32 index = 0;
	u32 num_sections = 0;
	u32 sectionSize[16];
	u32 sectionStart[16];

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
		  adapter, 0, 0, 0);

	/*
	 * This routine is only implemented to download the microcode
	 * for the Revision B Sahara chip.  Rev A and diagnostic
	 * microcode are not supported at this time.  If Rev A or
	 * diagnostic ucode is required, this routine will obviously
	 * need to change.  Also, eventually need to add support for
	 * the Rev B checked version of ucode.  That's easy enough once
	 * the free version of Rev B works.
	 */
	ASSERT(UcodeSel == SXG_UCODE_SYSTEM);
	ASSERT(adapter->asictype == SAHARA_REV_B);
#if SXG_UCODE_DEBUG
	file = "sxg/saharadbgdownloadB.sys";
#else
	file = "sxg/saharadownloadB.sys";
#endif
	ret = request_firmware(&fw, file, &adapter->pcidev->dev);
	if (ret) {
		DBG_ERROR("%s SXG_NIC: Failed to load firmware %s\n",
			  __func__, file);
		/* This function returns bool; don't leak the errno as TRUE */
		return FALSE;
	}

	/*
	 * The microcode .sys file starts with a 4-byte word containing
	 * the number of sections.  That is followed by "num_sections"
	 * 4-byte words containing each "section" size, and then by
	 * num_sections 4-byte words containing each section "start"
	 * address.
	 *
	 * Following the above header, the .sys file contains
	 * num_sections runs of newline-delineated, 12-byte microcode
	 * instructions, each run as long as the corresponding section
	 * size specifies.
	 */
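	/*
	 * Illustrative layout (made-up values): a two-section image
	 * would look like
	 *
	 *	offset  0: 2		    - num_sections
	 *	offset  4: sectionSize[0]   - byte count of section 0
	 *	offset  8: sectionSize[1]   - byte count of section 1
	 *	offset 12: sectionStart[0]  - load address of section 0
	 *	offset 16: sectionStart[1]  - load address of section 1
	 *	offset 20: first 12-byte instruction of section 0 ...
	 */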
	num_sections = *(u32 *)(fw->data + index);
	index += 4;
	ASSERT(num_sections <= 3);
	for (i = 0; i < num_sections; i++) {
		sectionSize[i] = *(u32 *)(fw->data + index);
		index += 4;
	}
	for (i = 0; i < num_sections; i++) {
		sectionStart[i] = *(u32 *)(fw->data + index);
		index += 4;
	}

	/* First, reset the card */
	WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
	udelay(50);
	HwRegs = adapter->HwRegs;

	/*
	 * Download each section of the microcode as specified in
	 * sectionSize[index] to sectionStart[index] address.  As
	 * described above, the .sys file contains 12 byte word
	 * microcode instructions.  The *download.sys file is generated
	 * using the objtosys.exe utility that was built for Sahara
	 * microcode.
	 */
	/* See usage of this below when we read back for parity */
	ucode_start = index;
	instruction = *(u32 *)(fw->data + index);
	index += 4;

	for (Section = 0; Section < num_sections; Section++) {
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			u32 first_instr = 0;	/* See comment below */

			Address = BaseAddress + AddressOffset;
			ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
			/* Write instruction bits 31 - 0 (low) */
			first_instr = instruction;
			WRITE_REG(HwRegs->UcodeDataLow, instruction, FLUSH);
			instruction = *(u32 *)(fw->data + index);
			index += 4;	/* Advance to the "next" instruction */

			/* Write instruction bits 63-32 (middle) */
			WRITE_REG(HwRegs->UcodeDataMiddle, instruction, FLUSH);
			instruction = *(u32 *)(fw->data + index);
			index += 4;	/* Advance to the "next" instruction */

			/* Write instruction bits 95-64 (high) */
			WRITE_REG(HwRegs->UcodeDataHigh, instruction, FLUSH);
			instruction = *(u32 *)(fw->data + index);
			index += 4;	/* Advance to the "next" instruction */

			/* Write instruction address with the WRITE bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
			/*
			 * Sahara bug in the ucode download logic - the write
			 * to DataLow for the next instruction could get
			 * corrupted.  To avoid this, write to DataLow again
			 * for this instruction (which may get corrupted, but
			 * it doesn't matter), then increment the address and
			 * write the data for the next instruction to DataLow.
			 * That write should succeed.
			 */
			WRITE_REG(HwRegs->UcodeDataLow, first_instr, FLUSH);
		}
	}
	/*
	 * Now repeat the entire operation reading the instruction back and
	 * checking for parity errors
	 */
	index = ucode_start;

	for (Section = 0; Section < num_sections; Section++) {
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			Address = BaseAddress + AddressOffset;
			/* Write the address with the READ bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_READ), FLUSH);
			/* Read it back and check the parity bit. */
			READ_REG(HwRegs->UcodeAddr, ValueRead);
			if (ValueRead & MICROCODE_ADDRESS_PARITY) {
				DBG_ERROR("sxg: %s PARITY ERROR\n",
					  __func__);
				/* Don't leak the firmware on error paths */
				release_firmware(fw);
				return FALSE;	/* Parity error */
			}
			ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
			/* Read the instruction back and compare */
			/* First instruction */
			instruction = *(u32 *)(fw->data + index);
			index += 4;
			READ_REG(HwRegs->UcodeDataLow, ValueRead);
			if (ValueRead != instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
					  __func__);
				release_firmware(fw);
				return FALSE;	/* Miscompare */
			}
			instruction = *(u32 *)(fw->data + index);
			index += 4;
			READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
			if (ValueRead != instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
					  __func__);
				release_firmware(fw);
				return FALSE;	/* Miscompare */
			}
			instruction = *(u32 *)(fw->data + index);
			index += 4;
			READ_REG(HwRegs->UcodeDataHigh, ValueRead);
			if (ValueRead != instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
					  __func__);
				release_firmware(fw);
				return FALSE;	/* Miscompare */
			}
		}
	}

	/* Download finished */
	release_firmware(fw);
	/* Everything OK, Go. */
	WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);

	/*
	 * Poll the CardUp register to wait for microcode to initialize.
	 * Give up after 10,000 attempts (500ms).
	 */
	for (i = 0; i < 10000; i++) {
		udelay(50);
		READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
		if (ValueRead == 0xCAFE) {
			break;
		}
	}
	if (i == 10000) {
		DBG_ERROR("sxg: %s TIMEOUT bringing up card - verify MICROCODE\n",
			  __func__);

		return FALSE;	/* Timeout */
	}
	/*
	 * Now write the LoadSync register.  This is used to
	 * synchronize with the card so it can scribble on the memory
	 * that contained 0xCAFE from the "CardUp" step above
	 */
	if (UcodeSel == SXG_UCODE_SYSTEM) {
		WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
	}

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
		  adapter, 0, 0, 0);
	return (TRUE);
}

/*
 * sxg_allocate_resources - Allocate memory and locks
 *
 * Arguments -
 *	adapter	- A pointer to our adapter structure
 *
 * Return - int
 */
static int sxg_allocate_resources(struct adapter_t *adapter)
{
	int status = STATUS_SUCCESS;
	u32 RssIds, IsrCount;
	/* struct sxg_xmt_ring *XmtRing; */
	/* struct sxg_rcv_ring *RcvRing; */

	DBG_ERROR("%s ENTER\n", __func__);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
		  adapter, 0, 0, 0);

	/* Windows tells us how many CPUs it plans to use for RSS */
	RssIds = SXG_RSS_CPU_COUNT(adapter);
	IsrCount = adapter->msi_enabled ? RssIds : 1;

	DBG_ERROR("%s Setup the spinlocks\n", __func__);

	/* Allocate spinlocks and initialize listheads first. */
	spin_lock_init(&adapter->RcvQLock);
	spin_lock_init(&adapter->SglQLock);
	spin_lock_init(&adapter->XmtZeroLock);
	spin_lock_init(&adapter->Bit64RegLock);
	spin_lock_init(&adapter->AdapterLock);
	atomic_set(&adapter->pending_allocations, 0);

	DBG_ERROR("%s Setup the lists\n", __func__);

	InitializeListHead(&adapter->FreeRcvBuffers);
	InitializeListHead(&adapter->FreeRcvBlocks);
	InitializeListHead(&adapter->AllRcvBlocks);
	InitializeListHead(&adapter->FreeSglBuffers);
	InitializeListHead(&adapter->AllSglBuffers);

	/*
	 * Mark these basic allocations done.  This flag essentially
	 * tells the SxgFreeResources routine that it can grab spinlocks
	 * and reference listheads.
	 */
	adapter->BasicAllocations = TRUE;
	/*
	 * Main allocation loop.  Start with the maximum supported by
	 * the microcode and back off if memory allocation
	 * fails.  If we hit a minimum, fail.
	 */

	for (;;) {
		DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
			  (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));

		/*
		 * Start with big items first - receive and transmit rings.
		 * At the moment I'm going to keep the ring size fixed and
		 * adjust the TCBs if we fail.  Later we might
		 * consider reducing the ring size as well..
		 */
		adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
							 sizeof(struct sxg_xmt_ring) *
							 1,
							 &adapter->PXmtRings);
		DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);

		if (!adapter->XmtRings) {
			goto per_tcb_allocation_failed;
		}
		memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);

		DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
			  (unsigned int)(sizeof(struct sxg_rcv_ring) * 1));
		adapter->RcvRings =
		    pci_alloc_consistent(adapter->pcidev,
					 sizeof(struct sxg_rcv_ring) * 1,
					 &adapter->PRcvRings);
		DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
		if (!adapter->RcvRings) {
			goto per_tcb_allocation_failed;
		}
		memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
		adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats),
					       GFP_ATOMIC);
		adapter->pucode_stats = pci_map_single(adapter->pcidev,
					       adapter->ucode_stats,
					       sizeof(struct sxg_ucode_stats),
					       PCI_DMA_FROMDEVICE);
		/* memset(adapter->ucode_stats, 0,
		   sizeof(struct sxg_ucode_stats)); */
		break;

	      per_tcb_allocation_failed:
		/* An allocation failed.  Free any successful allocations. */
		if (adapter->XmtRings) {
			pci_free_consistent(adapter->pcidev,
					    sizeof(struct sxg_xmt_ring) * 1,
					    adapter->XmtRings,
					    adapter->PXmtRings);
			adapter->XmtRings = NULL;
		}
		if (adapter->RcvRings) {
			pci_free_consistent(adapter->pcidev,
					    sizeof(struct sxg_rcv_ring) * 1,
					    adapter->RcvRings,
					    adapter->PRcvRings);
			adapter->RcvRings = NULL;
		}
		/* Loop around and try again.... */
		if (adapter->ucode_stats) {
			/*
			 * pci_unmap_single() takes the DMA address before
			 * the size; also free the kzalloc'd buffer itself.
			 */
			pci_unmap_single(adapter->pcidev,
					 adapter->pucode_stats,
					 sizeof(struct sxg_ucode_stats),
					 PCI_DMA_FROMDEVICE);
			kfree(adapter->ucode_stats);
			adapter->ucode_stats = NULL;
		}

	}

	DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
	/* Initialize rcv zero and xmt zero rings */
	SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
	SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);

	/* Sanity check receive data structure format */
	/* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
	   (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
	ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
	       SXG_RCV_DESCRIPTOR_BLOCK_SIZE);

	DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
		  (unsigned int)(sizeof(struct sxg_event_ring) * RssIds));

	/* Allocate event queues. */
	adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
						   sizeof(struct sxg_event_ring) *
						   RssIds,
						   &adapter->PEventRings);

	if (!adapter->EventRings) {
		/*
		 * Caller will call SxgFreeAdapter to clean up above
		 * allocations
		 */
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);

	DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
	/*
	 * Allocate ISR.  Isr is an array of one u32 per vector, so size
	 * the allocation to match the memset below.
	 */
	adapter->Isr = pci_alloc_consistent(adapter->pcidev,
					    sizeof(u32) * IsrCount,
					    &adapter->PIsr);
	if (!adapter->Isr) {
		/*
		 * Caller will call SxgFreeAdapter to clean up above
		 * allocations
		 */
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->Isr, 0, sizeof(u32) * IsrCount);

	DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
		  __func__, (unsigned int)sizeof(u32));

	/* Allocate shared XMT ring zero index location */
	adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
							 sizeof(u32),
							 &adapter->
							 PXmtRingZeroIndex);
	if (!adapter->XmtRingZeroIndex) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
		  adapter, SXG_MAX_ENTRIES, 0, 0);

	return status;
}

/*
 * sxg_config_pci -
 *
 * Set up PCI Configuration space
 *
 * Arguments -
 *	pcidev	- A pointer to the PCI device structure
 */
static void sxg_config_pci(struct pci_dev *pcidev)
{
	u16 pci_command;
	u16 new_command;

	pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
	DBG_ERROR("sxg: %s  PCI command[%4.4x]\n", __func__, pci_command);
	/* Set the command register */
	new_command = pci_command | (
				     /* Memory Space Enable */
				     PCI_COMMAND_MEMORY |
				     /* Bus master enable */
				     PCI_COMMAND_MASTER |
				     /* Memory write and invalidate */
				     PCI_COMMAND_INVALIDATE |
				     /* Parity error response */
				     PCI_COMMAND_PARITY |
				     /* System ERR */
				     PCI_COMMAND_SERR |
				     /* Fast back-to-back */
				     PCI_COMMAND_FAST_BACK);
	if (pci_command != new_command) {
		DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
			  __func__, pci_command, new_command);
		pci_write_config_word(pcidev, PCI_COMMAND, new_command);
	}
}

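/*
 * Config fetch handshake, in brief: seed ConfigStat with SXG_CFG_TIMEOUT,
 * post the DMA address of a struct sxg_config buffer through the 64-bit
 * Config register, then poll ConfigStat until the microcode replaces the
 * seed value with a load status (EEPROM, flash, or an error code).
 */
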
/*
 * sxg_read_config
 * @adapter : Pointer to the adapter structure for the card
 * This function will read the configuration data from EEPROM/FLASH
 */
static inline int sxg_read_config(struct adapter_t *adapter)
{
	/* struct sxg_config data; */
	struct sxg_config *config;
	struct sw_cfg_data *data;
	dma_addr_t p_addr;
	unsigned long status;
	unsigned long i;

	config = pci_alloc_consistent(adapter->pcidev,
				      sizeof(struct sxg_config), &p_addr);

	if (!config) {
		/*
		 * We can't even get this much memory.  Raise hell and
		 * get out of here.
		 */
		printk(KERN_ERR "%s : Could not allocate memory for reading EEPROM\n",
		       __FUNCTION__);
		return -ENOMEM;
	}

	data = &config->SwCfg;

	/* Initialize (reflective memory) status register */
	WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);

	/* Send request to fetch configuration data */
	WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
	for (i = 0; i < 1000; i++) {
		READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
		if (status != SXG_CFG_TIMEOUT)
			break;
		mdelay(1);	/* Do we really need this? */
	}

	switch (status) {
	/* Config read from EEPROM succeeded */
	case SXG_CFG_LOAD_EEPROM:
	/* Config read from Flash succeeded */
	case SXG_CFG_LOAD_FLASH:
		/*
		 * Copy the MAC address to the adapter structure.
		 * TODO: We are not doing the remaining part: FRU, etc.
		 */
		memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
		       sizeof(struct sxg_config_mac));
		break;
	case SXG_CFG_TIMEOUT:
	case SXG_CFG_LOAD_INVALID:
	case SXG_CFG_LOAD_ERROR:
	default:		/* Fix default handler later */
		printk(KERN_WARNING "%s : We could not read the config word. Status = %ld\n",
		       __FUNCTION__, status);
		break;
	}
	/* Free what was allocated: a struct sxg_config at 'config' */
	pci_free_consistent(adapter->pcidev, sizeof(struct sxg_config), config,
			    p_addr);
	if (adapter->netdev) {
		memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
		memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
	}
	sxg_dbg_macaddrs(adapter);

	return status;
}

static int sxg_entry_probe(struct pci_dev *pcidev,
			   const struct pci_device_id *pci_tbl_entry)
{
	static int did_version = 0;
	int err;
	struct net_device *netdev;
	struct adapter_t *adapter;
	void __iomem *memmapped_ioaddr;
	u32 status = 0;
	ulong mmio_start = 0;
	ulong mmio_len = 0;
	unsigned char revision_id;

	DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
		  __func__, jiffies, smp_processor_id());

	/* Initialize trace buffer */
#ifdef ATKDBG
	SxgTraceBuffer = &LSxgTraceBuffer;
	SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
#endif

	sxg_global.dynamic_intagg = dynamic_intagg;

	err = pci_enable_device(pcidev);

	DBG_ERROR("Call pci_enable_device(%p)  status[%x]\n", pcidev, err);
	if (err) {
		return err;
	}

	if (sxg_debug > 0 && did_version++ == 0) {
		printk(KERN_INFO "%s\n", sxg_banner);
		printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
	}

	pci_read_config_byte(pcidev, PCI_REVISION_ID, &revision_id);

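	/*
	 * Request 64-bit DMA addressing first and fall back to a 32-bit
	 * mask if the platform can't support it.  (DMA_32BIT_MASK below
	 * is the legacy spelling of DMA_BIT_MASK(32).)
	 */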
	if (!(err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)))) {
		DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(64)) successful\n");
	} else {
		if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
			DBG_ERROR
			    ("No usable DMA configuration, aborting  err[%x]\n",
			     err);
			return err;
		}
		DBG_ERROR("pci_set_dma_mask(DMA_32BIT_MASK) successful\n");
	}

	DBG_ERROR("Call pci_request_regions\n");

	err = pci_request_regions(pcidev, sxg_driver_name);
	if (err) {
		DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
		return err;
	}

	DBG_ERROR("call pci_set_master\n");
	pci_set_master(pcidev);

	DBG_ERROR("call alloc_etherdev\n");
	netdev = alloc_etherdev(sizeof(struct adapter_t));
	if (!netdev) {
		err = -ENOMEM;
		goto err_out_exit_sxg_probe;
	}
	DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);

	SET_NETDEV_DEV(netdev, &pcidev->dev);

	pci_set_drvdata(pcidev, netdev);
	adapter = netdev_priv(netdev);
	if (revision_id == 1) {
		adapter->asictype = SAHARA_REV_A;
	} else if (revision_id == 2) {
		adapter->asictype = SAHARA_REV_B;
	} else {
		ASSERT(0);
		DBG_ERROR("%s Unexpected revision ID %x\n",
			  __FUNCTION__, revision_id);
		goto err_out_exit_sxg_probe;
	}
	adapter->netdev = netdev;
	adapter->pcidev = pcidev;

	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region_0;
	}

	DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] "
		  "len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr,
		  mmio_start, mmio_len, pcidev->irq);

	adapter->HwRegs = (void *)memmapped_ioaddr;
	adapter->base_addr = memmapped_ioaddr;

	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region_2;
	}

	DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
		  "start[%lx] len[%lx], IRQ %d.\n", __func__,
		  memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);

	adapter->UcodeRegs = (void *)memmapped_ioaddr;

	adapter->State = SXG_STATE_INITIALIZING;
	/*
	 * Maintain a list of all adapters anchored by
	 * the global SxgDriver structure.
	 */
	adapter->Next = SxgDriver.Adapters;
	SxgDriver.Adapters = adapter;
	adapter->AdapterID = ++SxgDriver.AdapterID;

	/* Initialize CRC table used to determine multicast hash */
	sxg_mcast_init_crc32();

	adapter->JumboEnabled = FALSE;
	adapter->RssEnabled = FALSE;
	if (adapter->JumboEnabled) {
		adapter->FrameSize = JUMBOMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
	} else {
		adapter->FrameSize = ETHERMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
	}

	/*
	 * status = SXG_READ_EEPROM(adapter);
	 * if (!status) {
	 *	goto sxg_init_bad;
	 * }
	 */

	DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
	sxg_config_pci(pcidev);
	DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);

	DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
	sxg_init_driver();
	DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);

	adapter->vendid = pci_tbl_entry->vendor;
	adapter->devid = pci_tbl_entry->device;
	adapter->subsysid = pci_tbl_entry->subdevice;
	adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
	adapter->functionnumber = (pcidev->devfn & 0x7);
	adapter->memorylength = pci_resource_len(pcidev, 0);
	adapter->irq = pcidev->irq;
	adapter->next_netdevice = head_netdevice;
	head_netdevice = netdev;
	adapter->port = 0;	/* adapter->functionnumber; */

	/* Allocate memory and other resources */
	DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
	status = sxg_allocate_resources(adapter);
	DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
		  __func__, status);
	if (status != STATUS_SUCCESS) {
		goto err_out_unmap;
	}

	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
	if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) {
		DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
			  __func__);
		sxg_read_config(adapter);
		status = sxg_adapter_set_hwaddr(adapter);
	} else {
		adapter->state = ADAPT_FAIL;
		adapter->linkstate = LINK_DOWN;
		DBG_ERROR("sxg_download_microcode FAILED status[%x]\n",
			  status);
	}

	netdev->base_addr = (unsigned long)adapter->base_addr;
	netdev->irq = adapter->irq;
	netdev->open = sxg_entry_open;
	netdev->stop = sxg_entry_halt;
	netdev->hard_start_xmit = sxg_send_packets;
	netdev->do_ioctl = sxg_ioctl;
	netdev->change_mtu = sxg_change_mtu;
#if XXXTODO
	netdev->set_mac_address = sxg_mac_set_address;
#endif
	netdev->get_stats = sxg_get_stats;
	netdev->set_multicast_list = sxg_mcast_set_list;
	SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	err = sxg_set_interrupt_capability(adapter);
	if (err != STATUS_SUCCESS)
		DBG_ERROR("Cannot enable MSI-X capability\n");

	strcpy(netdev->name, "eth%d");
	/* strcpy(netdev->name, pci_name(pcidev)); */
	if ((err = register_netdev(netdev))) {
		DBG_ERROR("Cannot register net device, aborting. %s\n",
			  netdev->name);
		goto err_out_unmap;
	}

	netif_napi_add(netdev, &adapter->napi,
		       sxg_poll, SXG_NETDEV_WEIGHT);
	netdev->watchdog_timeo = 2 * HZ;
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &sxg_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;
	INIT_WORK(&adapter->update_link_status, sxg_update_link_status);

	DBG_ERROR("sxg: %s addr 0x%lx, irq %d, MAC addr "
		  "%02X:%02X:%02X:%02X:%02X:%02X\n",
		  netdev->name, netdev->base_addr, pcidev->irq,
		  netdev->dev_addr[0], netdev->dev_addr[1],
		  netdev->dev_addr[2], netdev->dev_addr[3],
		  netdev->dev_addr[4], netdev->dev_addr[5]);

	/* sxg_init_bad: */
	ASSERT(status == FALSE);
	/* sxg_free_adapter(adapter); */

	DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
		  status, jiffies, smp_processor_id());
	return status;

 err_out_unmap:
	sxg_free_resources(adapter);

 err_out_free_mmio_region_2:

	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);
	release_mem_region(mmio_start, mmio_len);

 err_out_free_mmio_region_0:

	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);

	release_mem_region(mmio_start, mmio_len);

 err_out_exit_sxg_probe:

	DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
		  smp_processor_id());

	pci_disable_device(pcidev);
	DBG_ERROR("sxg: %s deallocate device\n", __FUNCTION__);
	/* A netdev from alloc_etherdev() must be freed with free_netdev() */
	if (netdev)
		free_netdev(netdev);
	printk(KERN_ERR "Exit %s, Sxg driver loading failed..\n",
	       __FUNCTION__);

	return -ENODEV;
}

/*
 * LINE BASED Interrupt routines..
 *
 * sxg_disable_interrupt
 *
 * DisableInterrupt Handler
 *
 * Arguments:
 *
 *	adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_disable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	/* Turn off interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);

	adapter->InterruptsEnabled = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
}

/*
 * sxg_enable_interrupt
 *
 * EnableInterrupt Handler
 *
 * Arguments:
 *
 *	adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_enable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	/* Turn on interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);

	adapter->InterruptsEnabled = 1;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
		  adapter, 0, 0, 0);
}

/*
 * sxg_isr - Process a line-based interrupt
 *
 * Arguments:
 *	irq	- Interrupt number
 *	dev_id	- Pointer to our net_device
 *
 * Return Value:
 *	IRQ_HANDLED if this was our interrupt, IRQ_NONE otherwise
 */
static irqreturn_t sxg_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);

	if (adapter->state != ADAPT_UP)
		return IRQ_NONE;
	adapter->Stats.NumInts++;
	if (adapter->Isr[0] == 0) {
		/*
		 * The SLIC driver used to experience a number of spurious
		 * interrupts due to the delay associated with the masking of
		 * the interrupt (we'd bounce back in here).  If we see that
		 * again with Sahara, add a READ_REG of the Icr register after
		 * the WRITE_REG below.
		 */
		adapter->Stats.FalseInts++;
		return IRQ_NONE;
	}
	/*
	 * Move the Isr contents and clear the value in
	 * shared memory, and mask interrupts
	 */
	/* ASSERT(adapter->IsrDpcsPending == 0); */
#if XXXTODO			/* RSS Stuff */
	/*
	 * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
	 * schedule DPC's based on event queues.
	 */
	if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
		for (i = 0;
		     i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
		     i++) {
			struct sxg_event_ring *EventRing =
			    &adapter->EventRings[i];
			struct sxg_event *Event =
			    &EventRing->Ring[adapter->NextEvent[i]];
			unsigned char Cpu =
			    adapter->RssSystemInfo->RssIdToCpu[i];
			if (Event->Status & EVENT_STATUS_VALID) {
				adapter->IsrDpcsPending++;
				CpuMask |= (1 << Cpu);
			}
		}
	}
	/*
	 * Now, either schedule the CPUs specified by the CpuMask,
	 * or queue default
	 */
	if (CpuMask) {
		*QueueDefault = FALSE;
	} else {
		adapter->IsrDpcsPending = 1;
		*QueueDefault = TRUE;
	}
	*TargetCpus = CpuMask;
#endif
	sxg_interrupt(adapter);

	return IRQ_HANDLED;
}

static void sxg_interrupt(struct adapter_t *adapter)
{
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);

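	/*
	 * Interrupts stay masked (SXG_ICR_MASK above) while the NAPI poll
	 * runs; sxg_poll() clears the shared Isr location to re-arm the
	 * card once it completes under budget.
	 */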
	if (napi_schedule_prep(&adapter->napi)) {
		__napi_schedule(&adapter->napi);
	}
}

static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
				 int budget)
{
	/* unsigned char RssId = 0; */
	u32 NewIsr;
	int sxg_napi_continue = 1;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
		  adapter, adapter->IsrCopy[0], 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);

	adapter->IsrCopy[0] = adapter->Isr[0];
	adapter->Isr[0] = 0;

	/* Always process the event queue. */
	while (sxg_napi_continue) {
		sxg_process_event_queue(adapter,
					(adapter->RssEnabled ? /*RssId */ 0 : 0),
					&sxg_napi_continue, work_done, budget);
	}

#if XXXTODO			/* RSS stuff */
	if (--adapter->IsrDpcsPending) {
		/* We're done. */
		ASSERT(adapter->RssEnabled);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
			  adapter, 0, 0, 0);
		return;
	}
#endif
	/* Last (or only) DPC processes the ISR and clears the interrupt. */
	NewIsr = sxg_process_isr(adapter, 0);
	/* Reenable interrupts */
	adapter->IsrCopy[0] = 0;
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
		  adapter, NewIsr, 0, 0);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
		  adapter, 0, 0, 0);
}
static int sxg_poll(struct napi_struct *napi, int budget)
{
	struct adapter_t *adapter = container_of(napi, struct adapter_t, napi);
	int work_done = 0;

	sxg_handle_interrupt(adapter, &work_done, budget);

	if (work_done < budget) {
		napi_complete(napi);
		WRITE_REG(adapter->UcodeRegs[0].Isr, 0, TRUE);
	}
	return work_done;
}

/*
 * sxg_process_isr - Process an interrupt.  Called from the line-based and
 *		     message-based interrupt DPC routines
 *
 * Arguments:
 *	adapter		- Our adapter structure
 *	MessageId	- The ISR that needs processing
 *
 * Return Value:
 *	The new ISR value (currently always zero)
 */
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
{
	u32 Isr = adapter->IsrCopy[MessageId];
	u32 NewIsr = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
		  adapter, Isr, 0, 0);

	/* Error */
	if (Isr & SXG_ISR_ERR) {
		if (Isr & SXG_ISR_PDQF) {
			adapter->Stats.PdqFull++;
			DBG_ERROR("%s: SXG_ISR_ERR  PDQF!!\n", __func__);
		}
		/* No host buffer */
		if (Isr & SXG_ISR_RMISS) {
			/*
			 * There is a bunch of code in the SLIC driver which
			 * attempts to process more receive events per DPC
			 * if we start to fall behind.  We'll probably
			 * need to do something similar here, but hold
			 * off for now.  I don't want to make the code more
			 * complicated than strictly needed.
			 */
			adapter->stats.rx_missed_errors++;
			if (adapter->stats.rx_missed_errors < 5) {
				DBG_ERROR("%s: SXG_ISR_ERR  RMISS!!\n",
					  __func__);
			}
		}
		/* Card crash */
		if (Isr & SXG_ISR_DEAD) {
			/*
			 * Set aside the crash info and set the adapter state
			 * to RESET
			 */
			adapter->CrashCpu = (unsigned char)
			    ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
			adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
			adapter->Dead = TRUE;
			DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
				  adapter->CrashLocation, adapter->CrashCpu);
		}
		/* Event ring full */
		if (Isr & SXG_ISR_ERFULL) {
			/*
			 * Same issue as RMISS, really.  This means the
			 * host is falling behind the card.  Need to increase
			 * event ring size, process more events per interrupt,
			 * and/or reduce/remove interrupt aggregation.
			 */
			adapter->Stats.EventRingFull++;
			DBG_ERROR("%s: SXG_ISR_ERR  EVENT RING FULL!!\n",
				  __func__);
		}
		/* Transmit drop - no DRAM buffers or XMT error */
		if (Isr & SXG_ISR_XDROP) {
			DBG_ERROR("%s: SXG_ISR_ERR  XDROP!!\n", __func__);
		}
	}
	/* Slowpath send completions */
	if (Isr & SXG_ISR_SPSEND) {
		sxg_complete_slow_send(adapter);
	}
	/* Dump */
	if (Isr & SXG_ISR_UPC) {
		/* Maybe change when debug is added.. */
		/* ASSERT(adapter->DumpCmdRunning); */
		adapter->DumpCmdRunning = FALSE;
	}
	/* Link event */
	if (Isr & SXG_ISR_LINK) {
		if (adapter->state != ADAPT_DOWN) {
			adapter->link_status_changed = 1;
			schedule_work(&adapter->update_link_status);
		}
	}
	/* Debug - breakpoint hit */
	if (Isr & SXG_ISR_BREAK) {
		/*
		 * At the moment AGDB isn't written to support interactive
		 * debug sessions.  When it is, this interrupt will be used
		 * to signal AGDB that it has hit a breakpoint.  For now,
		 * ASSERT.
		 */
		ASSERT(0);
	}
	/* Heartbeat response */
	if (Isr & SXG_ISR_PING) {
		adapter->PingOutstanding = FALSE;
	}
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
		  adapter, Isr, NewIsr, 0);

	return (NewIsr);
}

/*
 * sxg_rcv_checksum - Set the checksum for a received packet
 *
 * Arguments:
 *	@adapter	- Adapter structure on which packet is received
 *	@skb		- Packet which is received
 *	@Event		- Event read from hardware
 */

void sxg_rcv_checksum(struct adapter_t *adapter, struct sk_buff *skb,
		      struct sxg_event *Event)
{
	skb->ip_summed = CHECKSUM_NONE;
	if (likely(adapter->flags & SXG_RCV_IP_CSUM_ENABLED)) {
		if (likely(adapter->flags & SXG_RCV_TCP_CSUM_ENABLED)
		    && (Event->Status & EVENT_STATUS_TCPIP)) {
			if (!(Event->Status & EVENT_STATUS_TCPBAD))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (!(Event->Status & EVENT_STATUS_IPBAD))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if (Event->Status & EVENT_STATUS_IPONLY) {
			if (!(Event->Status & EVENT_STATUS_IPBAD))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
}

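/*
 * For context: CHECKSUM_UNNECESSARY tells the network stack the hardware
 * already verified the checksum, while CHECKSUM_NONE forces software
 * verification.  The hardware reports TCP and IP status independently,
 * which is why the status bits are tested separately above.
 */
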
/*
 * sxg_process_event_queue - Process our event queue
 *
 * Arguments:
 *	- adapter	- Adapter structure
 *	- RssId		- The event queue requiring processing
 *
 * Return Value:
 *	SXG_ISR_EVENT if the batch limit was hit, otherwise 0.
 */
static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
				   int *sxg_napi_continue, int *work_done,
				   int budget)
{
	struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
	struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
	u32 EventsProcessed = 0, Batches = 0;
	struct sk_buff *skb;
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
	struct sk_buff *prev_skb = NULL;
	struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
	u32 Index;
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
#endif
	u32 ReturnStatus = 0;
	int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;

	ASSERT((adapter->State == SXG_STATE_RUNNING) ||
	       (adapter->State == SXG_STATE_PAUSING) ||
	       (adapter->State == SXG_STATE_PAUSED) ||
	       (adapter->State == SXG_STATE_HALTING));
	/*
	 * We may still have unprocessed events on the queue if
	 * the card crashed.  Don't process them.
	 */
	if (adapter->Dead) {
		return (0);
	}
	/*
	 * In theory there should only be a single processor that
	 * accesses this queue, and only at interrupt-DPC time.  So
	 * we shouldn't need a lock for any of this.
	 */
5db6b777 1542 while (Event->Status & EVENT_STATUS_VALID) {
b62a294f 1543 (*sxg_napi_continue) = 1;
5db6b777
GKH
1544 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1545 Event, Event->Code, Event->Status,
1546 adapter->NextEvent);
1547 switch (Event->Code) {
1548 case EVENT_CODE_BUFFERS:
cb636fe3
MT
1549 /* struct sxg_ring_info Head & Tail == unsigned char */
1550 ASSERT(!(Event->CommandIndex & 0xFF00));
5db6b777
GKH
1551 sxg_complete_descriptor_blocks(adapter,
1552 Event->CommandIndex);
5db6b777
GKH
1553 break;
1554 case EVENT_CODE_SLOWRCV:
b62a294f 1555 (*work_done)++;
5db6b777
GKH
1556 --adapter->RcvBuffersOnCard;
1557 if ((skb = sxg_slow_receive(adapter, Event))) {
1558 u32 rx_bytes;
1559#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
b243c4aa 1560 /* Add it to our indication list */
5db6b777
GKH
1561 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1562 IndicationList, num_skbs);
ddd6f0a8
MT
1563 /*
1564 * On Linux we just pass each skb up to the
1565 * protocol layer at this point; there is no
1566 * indication-list capability.
1567 */
5db6b777 1568#else
cb636fe3
MT
1569 /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1570 /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1571 rx_bytes = Event->Length;
5db6b777
GKH
1572 adapter->stats.rx_packets++;
1573 adapter->stats.rx_bytes += rx_bytes;
bbb18b97 1574 sxg_rcv_checksum(adapter, skb, Event);
5db6b777 1575 skb->dev = adapter->netdev;
b62a294f 1576 netif_receive_skb(skb);
5db6b777
GKH
1577#endif
1578 }
1579 break;
1580 default:
1581 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
e88bd231 1582 __func__, Event->Code);
cb636fe3 1583 /* ASSERT(0); */
5db6b777 1584 }
ddd6f0a8
MT
1585 /*
1586 * See if we need to restock card receive buffers.
1587 * There are two things to note here:
1588 * First - This test is not SMP safe. The
1589 * adapter->RcvBuffersOnCard field is protected via atomic
1590 * interlocked calls, but we do not protect it with respect
1591 * to these tests. The only way to do that is with a lock,
1592 * and I don't want to grab a lock every time we adjust the
1593 * BuffersOnCard count. Instead, we allow the buffer
1594 * replenishment to be off once in a while. The worst that
1595 * can happen is the card is given one more or fewer descriptor
1596 * blocks than the arbitrary value we've chosen. No big deal.
1597 * In short, DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
1598 * is adjusted.
1599 * Second - We expect this test to rarely
1600 * evaluate to true. We attempt to refill descriptor blocks
1601 * as they are returned to us (sxg_complete_descriptor_blocks)
1602 * so the only time this should evaluate to true is when
1603 * sxg_complete_descriptor_blocks failed to allocate
1604 * receive buffers.
1605 */
7c66b14b
MT
1606 if (adapter->JumboEnabled)
1607 sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
1608
1609 if (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
5db6b777
GKH
1610 sxg_stock_rcv_buffers(adapter);
1611 }
ddd6f0a8
MT
1612 /*
1613 * It's more efficient to just set this to zero.
1614 * But clearing the top bit saves potential debug info...
1615 */
5db6b777 1616 Event->Status &= ~EVENT_STATUS_VALID;
ddd6f0a8 1617 /* Advance to the next event */
5db6b777
GKH
1618 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1619 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1620 EventsProcessed++;
1621 if (EventsProcessed == EVENT_RING_BATCH) {
b243c4aa 1622 /* Release a batch of events back to the card */
5db6b777
GKH
1623 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1624 EVENT_RING_BATCH, FALSE);
1625 EventsProcessed = 0;
ddd6f0a8
MT
1626 /*
1627 * If we've processed our batch limit, break out of the
1628 * loop and return SXG_ISR_EVENT to arrange for us to
1629 * be called again
1630 */
5db6b777
GKH
1631 if (Batches++ == EVENT_BATCH_LIMIT) {
1632 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1633 TRACE_NOISY, "EvtLimit", Batches,
1634 adapter->NextEvent, 0, 0);
1635 ReturnStatus = SXG_ISR_EVENT;
1636 break;
1637 }
1638 }
b62a294f
MT
1639 if (*work_done >= budget) {
1640 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1641 EventsProcessed, FALSE);
1642 EventsProcessed = 0;
1643 (*sxg_napi_continue) = 0;
1644 break;
1645 }
5db6b777 1646 }
b62a294f
MT
1647 if (!(Event->Status & EVENT_STATUS_VALID))
1648 (*sxg_napi_continue) = 0;
1649
5db6b777 1650#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
b243c4aa 1651 /* Indicate any received dumb-nic frames */
5db6b777
GKH
1652 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1653#endif
b243c4aa 1654 /* Release events back to the card. */
5db6b777
GKH
1655 if (EventsProcessed) {
1656 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1657 EventsProcessed, FALSE);
1658 }
1659 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
1660 Batches, EventsProcessed, adapter->NextEvent, num_skbs);
1661
1662 return (ReturnStatus);
1663}
1664
1665/*
1666 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
1667 *
1668 * Arguments -
1669 * adapter - A pointer to our adapter structure
5db6b777
GKH
1670 * Return
1671 * None
1672 */
c5e5cf5a 1673static void sxg_complete_slow_send(struct adapter_t *adapter)
5db6b777 1674{
942798b4
MT
1675 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1676 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
5c7514e0 1677 u32 *ContextType;
942798b4 1678 struct sxg_cmd *XmtCmd;
54aed113
MT
1679 unsigned long flags = 0;
1680 unsigned long sgl_flags = 0;
d9d578bf 1681 unsigned int processed_count = 0;
5db6b777 1682
ddd6f0a8
MT
1683 /*
1684 * NOTE - This lock is dropped and regrabbed in this loop.
1685 * This means two different processors can both be running
1686 * through this loop. Be *very* careful.
1687 */
c5e5cf5a 1688 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
d9d578bf 1689
5db6b777
GKH
1690 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1691 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1692
d9d578bf
MT
1693 while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
1694 && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
ddd6f0a8
MT
1695 /*
1696 * Locate the current Cmd (ring descriptor entry), and
1697 * associated SGL, and advance the tail
1698 */
5db6b777
GKH
1699 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1700 ASSERT(ContextType);
1701 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1702 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
b243c4aa 1703 /* Clear the SGL field. */
5db6b777
GKH
1704 XmtCmd->Sgl = 0;
1705
1706 switch (*ContextType) {
1707 case SXG_SGL_DUMB:
1708 {
1709 struct sk_buff *skb;
cb636fe3
MT
1710 struct sxg_scatter_gather *SxgSgl =
1711 (struct sxg_scatter_gather *)ContextType;
d9d578bf
MT
1712 dma64_addr_t FirstSgeAddress;
1713 u32 FirstSgeLength;
1323e5f1 1714
b243c4aa 1715 /* Dumb-nic send. Command context is the dumb-nic SGL */
5db6b777 1716 skb = (struct sk_buff *)ContextType;
1323e5f1 1717 skb = SxgSgl->DumbPacket;
d9d578bf
MT
1718 FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
1719 FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
b243c4aa 1720 /* Complete the send */
5db6b777
GKH
1721 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1722 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1723 0, 0);
1724 ASSERT(adapter->Stats.XmtQLen);
ddd6f0a8 1725 /*
cb636fe3
MT
1726 * Now drop the lock and complete the send
1727 * back to the stack. We need to drop the lock
1728 * because the completion can trigger another
1729 * (chimney) send, which results in a double trip
1730 * in SxgTcpOutput
ddd6f0a8 1731 */
c5e5cf5a
MT
1732 spin_unlock_irqrestore(
1733 &adapter->XmtZeroLock, flags);
d9d578bf
MT
1734
1735 SxgSgl->DumbPacket = NULL;
1736 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1737 FirstSgeAddress,
1738 FirstSgeLength);
c5e5cf5a 1739 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
b243c4aa 1740 /* and reacquire.. */
c5e5cf5a 1741 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
5db6b777
GKH
1742 }
1743 break;
1744 default:
1745 ASSERT(0);
1746 }
1747 }
c5e5cf5a 1748 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
5db6b777
GKH
1749 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1750 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1751}
1752
1753/*
1754 * sxg_slow_receive
1755 *
1756 * Arguments -
1757 * adapter - A pointer to our adapter structure
1758 * Event - Receive event
1759 *
cb636fe3 1760 * Return - skb
5db6b777 1761 */
cb636fe3
MT
1762static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
1763 struct sxg_event *Event)
5db6b777 1764{
d0128aa9 1765 u32 BufferSize = adapter->ReceiveBufferSize;
942798b4 1766 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
5db6b777 1767 struct sk_buff *Packet;
d9d578bf 1768 static int read_counter = 0;
5db6b777 1769
942798b4 1770 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
d9d578bf
MT
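	/*
	 * Throttle the (relatively expensive) statistics collection
	 * to roughly once every 256 received packets.
	 */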
1771 if(read_counter++ & 0x100)
1772 {
1773 sxg_collect_statistics(adapter);
1774 read_counter = 0;
1775 }
5db6b777
GKH
1776 ASSERT(RcvDataBufferHdr);
1777 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
5db6b777
GKH
1778 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1779 RcvDataBufferHdr, RcvDataBufferHdr->State,
d0128aa9 1780 /*RcvDataBufferHdr->VirtualAddress*/ 0);
b243c4aa 1781 /* Drop rcv frames in non-running state */
5db6b777
GKH
1782 switch (adapter->State) {
1783 case SXG_STATE_RUNNING:
1784 break;
1785 case SXG_STATE_PAUSING:
1786 case SXG_STATE_PAUSED:
1787 case SXG_STATE_HALTING:
1788 goto drop;
1789 default:
1790 ASSERT(0);
1791 goto drop;
1792 }
1793
cb636fe3
MT
1794 /*
1795 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1796 * RcvDataBufferHdr->VirtualAddress, Event->Length);
1797 */
1323e5f1 1798
b243c4aa 1799 /* Change buffer state to UPSTREAM */
5db6b777
GKH
1800 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1801 if (Event->Status & EVENT_STATUS_RCVERR) {
1802 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1803 Event, Event->Status, Event->HostHandle, 0);
5c7514e0 1804 sxg_process_rcv_error(adapter, *(u32 *)
5db6b777
GKH
1805 SXG_RECEIVE_DATA_LOCATION
1806 (RcvDataBufferHdr));
1807 goto drop;
1808 }
b243c4aa
M
1809#if XXXTODO /* VLAN stuff */
1810 /* If there's a VLAN tag, extract it and validate it */
cb636fe3
MT
1811 if (((struct ether_header *)
1812 (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
1813 == ETHERTYPE_VLAN) {
5db6b777
GKH
1814 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
1815 STATUS_SUCCESS) {
1816 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
1817 "BadVlan", Event,
1818 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1819 Event->Length, 0);
1820 goto drop;
1821 }
1822 }
1823#endif
b243c4aa 1824 /* Dumb-nic frame. See if it passes our mac filter and update stats */
ddd6f0a8 1825
b040b07b
MT
1826 if (!sxg_mac_filter(adapter,
1827 (struct ether_header *)(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)),
1828 Event->Length)) {
1829 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
1830 Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1831 Event->Length, 0);
1832 goto drop;
1833 }
5db6b777
GKH
1834
1835 Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
1323e5f1
MT
1836 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1837 Packet->protocol = eth_type_trans(Packet, adapter->netdev);
5db6b777
GKH
1838
1839 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1840 RcvDataBufferHdr, Packet, Event->Length, 0);
b243c4aa 1841 /* Lastly adjust the receive packet length. */
1323e5f1 1842 RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
54aed113 1843 RcvDataBufferHdr->PhysicalAddress = (dma_addr_t)NULL;
d0128aa9
MT
1844 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
1845 if (RcvDataBufferHdr->skb)
1846 {
1847 spin_lock(&adapter->RcvQLock);
1848 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
d9d578bf 1849 // adapter->RcvBuffersOnCard ++;
d0128aa9
MT
1850 spin_unlock(&adapter->RcvQLock);
1851 }
5db6b777
GKH
1852 return (Packet);
1853
1854 drop:
1855 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
1856 RcvDataBufferHdr, Event->Length, 0, 0);
54aed113
MT
1857 adapter->stats.rx_dropped++;
1858// adapter->Stats.RcvDiscards++;
5db6b777
GKH
1859 spin_lock(&adapter->RcvQLock);
1860 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1861 spin_unlock(&adapter->RcvQLock);
1862 return (NULL);
1863}
1864
1865/*
1866 * sxg_process_rcv_error - process receive error and update
1867 * stats
1868 *
1869 * Arguments:
1870 * adapter - Adapter structure
1871 * ErrorStatus - 4-byte receive error status
1872 *
cb636fe3 1873 * Return Value : None
5db6b777 1874 */
73b07065 1875static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
5db6b777
GKH
1876{
1877 u32 Error;
1878
54aed113 1879 adapter->stats.rx_errors++;
5db6b777
GKH
1880
1881 if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
1882 Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
1883 switch (Error) {
1884 case SXG_RCV_STATUS_TRANSPORT_CSUM:
1885 adapter->Stats.TransportCsum++;
1886 break;
1887 case SXG_RCV_STATUS_TRANSPORT_UFLOW:
1888 adapter->Stats.TransportUflow++;
1889 break;
1890 case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
1891 adapter->Stats.TransportHdrLen++;
1892 break;
1893 }
1894 }
1895 if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
1896 Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
1897 switch (Error) {
1898 case SXG_RCV_STATUS_NETWORK_CSUM:
1899 adapter->Stats.NetworkCsum++;
1900 break;
1901 case SXG_RCV_STATUS_NETWORK_UFLOW:
1902 adapter->Stats.NetworkUflow++;
1903 break;
1904 case SXG_RCV_STATUS_NETWORK_HDRLEN:
1905 adapter->Stats.NetworkHdrLen++;
1906 break;
1907 }
1908 }
1909 if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
1910 adapter->Stats.Parity++;
1911 }
1912 if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
1913 Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
1914 switch (Error) {
1915 case SXG_RCV_STATUS_LINK_PARITY:
1916 adapter->Stats.LinkParity++;
1917 break;
1918 case SXG_RCV_STATUS_LINK_EARLY:
1919 adapter->Stats.LinkEarly++;
1920 break;
1921 case SXG_RCV_STATUS_LINK_BUFOFLOW:
1922 adapter->Stats.LinkBufOflow++;
1923 break;
1924 case SXG_RCV_STATUS_LINK_CODE:
1925 adapter->Stats.LinkCode++;
1926 break;
1927 case SXG_RCV_STATUS_LINK_DRIBBLE:
1928 adapter->Stats.LinkDribble++;
1929 break;
1930 case SXG_RCV_STATUS_LINK_CRC:
1931 adapter->Stats.LinkCrc++;
1932 break;
1933 case SXG_RCV_STATUS_LINK_OFLOW:
1934 adapter->Stats.LinkOflow++;
1935 break;
1936 case SXG_RCV_STATUS_LINK_UFLOW:
1937 adapter->Stats.LinkUflow++;
1938 break;
1939 }
1940 }
1941}
1942
1943/*
1944 * sxg_mac_filter
1945 *
1946 * Arguments:
1947 * adapter - Adapter structure
1948 * EtherHdr - Ethernet header
1949 * length - Frame length
1950 *
cb636fe3 1951 * Return Value : TRUE if the frame is to be allowed
5db6b777 1952 */
cb636fe3
MT
1953static bool sxg_mac_filter(struct adapter_t *adapter,
1954 struct ether_header *EtherHdr, ushort length)
5db6b777
GKH
1955{
1956 bool EqualAddr;
b040b07b 1957 struct net_device *dev = adapter->netdev;
5db6b777
GKH
1958
1959 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1960 if (SXG_BROADCAST_PACKET(EtherHdr)) {
b243c4aa 1961 /* broadcast */
5db6b777
GKH
1962 if (adapter->MacFilter & MAC_BCAST) {
1963 adapter->Stats.DumbRcvBcastPkts++;
1964 adapter->Stats.DumbRcvBcastBytes += length;
5db6b777
GKH
1965 return (TRUE);
1966 }
1967 } else {
b243c4aa 1968 /* multicast */
5db6b777
GKH
1969 if (adapter->MacFilter & MAC_ALLMCAST) {
1970 adapter->Stats.DumbRcvMcastPkts++;
1971 adapter->Stats.DumbRcvMcastBytes += length;
5db6b777
GKH
1972 return (TRUE);
1973 }
1974 if (adapter->MacFilter & MAC_MCAST) {
b040b07b
MT
1975 struct dev_mc_list *mclist = dev->mc_list;
1976 while (mclist) {
1977 ETHER_EQ_ADDR(mclist->da_addr,
5db6b777
GKH
1978 EtherHdr->ether_dhost,
1979 EqualAddr);
1980 if (EqualAddr) {
1981 adapter->Stats.
1982 DumbRcvMcastPkts++;
1983 adapter->Stats.
1984 DumbRcvMcastBytes += length;
5db6b777
GKH
1985 return (TRUE);
1986 }
b040b07b 1987 mclist = mclist->next;
5db6b777
GKH
1988 }
1989 }
1990 }
1991 } else if (adapter->MacFilter & MAC_DIRECTED) {
ddd6f0a8
MT
1992 /*
1993 * Not broadcast or multicast. Must be directed at us or
1994 * the card is in promiscuous mode. Either way, consider it
1995 * ours if MAC_DIRECTED is set
1996 */
5db6b777
GKH
1997 adapter->Stats.DumbRcvUcastPkts++;
1998 adapter->Stats.DumbRcvUcastBytes += length;
5db6b777
GKH
1999 return (TRUE);
2000 }
2001 if (adapter->MacFilter & MAC_PROMISC) {
b243c4aa 2002 /* Whatever it is, keep it. */
5db6b777
GKH
2003 return (TRUE);
2004 }
5db6b777
GKH
2005 return (FALSE);
2006}
b040b07b 2007
73b07065 2008static int sxg_register_interrupt(struct adapter_t *adapter)
5db6b777
GKH
2009{
2010 if (!adapter->intrregistered) {
2011 int retval;
2012
2013 DBG_ERROR
2014 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
e88bd231 2015 __func__, adapter, adapter->netdev->irq, NR_IRQS);
5db6b777 2016
5c7514e0
M
2017 spin_unlock_irqrestore(&sxg_global.driver_lock,
2018 sxg_global.flags);
5db6b777
GKH
2019
2020 retval = request_irq(adapter->netdev->irq,
2021 &sxg_isr,
2022 IRQF_SHARED,
2023 adapter->netdev->name, adapter->netdev);
2024
2025 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2026
2027 if (retval) {
2028 DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
2029 adapter->netdev->name, retval);
2030 return (retval);
2031 }
2032 adapter->intrregistered = 1;
2033 adapter->IntRegistered = TRUE;
b243c4aa 2034 /* Disable RSS with line-based interrupts */
5db6b777
GKH
2035 adapter->RssEnabled = FALSE;
2036 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
e88bd231 2037 __func__, adapter, adapter->netdev->irq);
5db6b777
GKH
2038 }
2039 return (STATUS_SUCCESS);
2040}
2041
73b07065 2042static void sxg_deregister_interrupt(struct adapter_t *adapter)
5db6b777 2043{
e88bd231 2044 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
5db6b777
GKH
2045#if XXXTODO
2046 slic_init_cleanup(adapter);
2047#endif
2048 memset(&adapter->stats, 0, sizeof(struct net_device_stats));
2049 adapter->error_interrupts = 0;
2050 adapter->rcv_interrupts = 0;
2051 adapter->xmit_interrupts = 0;
2052 adapter->linkevent_interrupts = 0;
2053 adapter->upr_interrupts = 0;
2054 adapter->num_isrs = 0;
2055 adapter->xmit_completes = 0;
2056 adapter->rcv_broadcasts = 0;
2057 adapter->rcv_multicasts = 0;
2058 adapter->rcv_unicasts = 0;
e88bd231 2059 DBG_ERROR("sxg: %s EXIT\n", __func__);
5db6b777
GKH
2060}
2061
2062/*
2063 * sxg_if_init
2064 *
2065 * Perform initialization of our slic interface.
2066 *
2067 */
73b07065 2068static int sxg_if_init(struct adapter_t *adapter)
5db6b777 2069{
942798b4 2070 struct net_device *dev = adapter->netdev;
5db6b777
GKH
2071 int status = 0;
2072
1323e5f1 2073 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
e88bd231 2074 __func__, adapter->netdev->name,
1323e5f1 2075 adapter->state,
5db6b777
GKH
2076 adapter->linkstate, dev->flags);
2077
2078 /* adapter should be down at this point */
2079 if (adapter->state != ADAPT_DOWN) {
2080 DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
2081 return (-EIO);
2082 }
2083 ASSERT(adapter->linkstate == LINK_DOWN);
2084
2085 adapter->devflags_prev = dev->flags;
b040b07b 2086 adapter->MacFilter = MAC_DIRECTED;
5db6b777 2087 if (dev->flags) {
e88bd231 2088 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
5db6b777
GKH
2089 adapter->netdev->name);
2090 if (dev->flags & IFF_BROADCAST) {
b040b07b 2091 adapter->MacFilter |= MAC_BCAST;
5db6b777
GKH
2092 DBG_ERROR("BCAST ");
2093 }
2094 if (dev->flags & IFF_PROMISC) {
b040b07b 2095 adapter->MacFilter |= MAC_PROMISC;
5db6b777
GKH
2096 DBG_ERROR("PROMISC ");
2097 }
2098 if (dev->flags & IFF_ALLMULTI) {
b040b07b 2099 adapter->MacFilter |= MAC_ALLMCAST;
5db6b777
GKH
2100 DBG_ERROR("ALL_MCAST ");
2101 }
2102 if (dev->flags & IFF_MULTICAST) {
b040b07b 2103 adapter->MacFilter |= MAC_MCAST;
5db6b777
GKH
2104 DBG_ERROR("MCAST ");
2105 }
2106 DBG_ERROR("\n");
2107 }
1782199f 2108 status = sxg_register_intr(adapter);
5db6b777 2109 if (status != STATUS_SUCCESS) {
1782199f 2110 DBG_ERROR("sxg_if_init: sxg_register_intr FAILED %x\n",
5db6b777
GKH
2111 status);
2112 sxg_deregister_interrupt(adapter);
2113 return (status);
2114 }
2115
2116 adapter->state = ADAPT_UP;
2117
ddd6f0a8 2118 /* clear any pending events, then enable interrupts */
e88bd231 2119 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
5db6b777
GKH
2120
2121 return (STATUS_SUCCESS);
2122}
2123
b62a294f
MT
2124void sxg_set_interrupt_aggregation(struct adapter_t *adapter)
2125{
2126 /*
2127 * Top bit disables aggregation on xmt (SXG_AGG_XMT_DISABLE).
2128 * Make sure Max is less than 0x8000.
2129 */
2130 adapter->max_aggregation = SXG_MAX_AGG_DEFAULT;
2131 adapter->min_aggregation = SXG_MIN_AGG_DEFAULT;
2132 WRITE_REG(adapter->UcodeRegs[0].Aggregation,
2133 ((adapter->max_aggregation << SXG_MAX_AGG_SHIFT) |
2134 adapter->min_aggregation),
2135 TRUE);
2136}
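
/*
 * Illustration with hypothetical values: assuming SXG_MAX_AGG_SHIFT == 16
 * (consistent with the 0x8000 limit noted above), max_aggregation == 0x0200
 * and min_aggregation == 0x0010 would be written as 0x02000010. Keeping max
 * below 0x8000 leaves bit 31 (SXG_AGG_XMT_DISABLE) clear, so transmit
 * aggregation remains enabled.
 */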
2137
942798b4 2138static int sxg_entry_open(struct net_device *dev)
5db6b777 2139{
73b07065 2140 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
5db6b777 2141 int status;
0d414727 2142 static int turn;
7c66b14b
MT
2143 int sxg_initial_rcv_data_buffers = SXG_INITIAL_RCV_DATA_BUFFERS;
2144 int i;
2145
2146 if (adapter->JumboEnabled == TRUE) {
2147 sxg_initial_rcv_data_buffers =
2148 SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS;
2149 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo,
2150 SXG_JUMBO_RCV_RING_SIZE);
2151 }
2152
2153 /*
2154 * Allocate receive data buffers. We allocate a block of buffers and
2155 * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
2156 */
2157
2158 for (i = 0; i < sxg_initial_rcv_data_buffers;
2159 i += SXG_RCV_DESCRIPTORS_PER_BLOCK)
2160 {
2161 status = sxg_allocate_buffer_memory(adapter,
2162 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
2163 SXG_BUFFER_TYPE_RCV);
2164 if (status != STATUS_SUCCESS)
2165 return status;
2166 }
2167 /*
2168 * NBL resource allocation can fail in the 'AllocateComplete' routine,
2169 * which doesn't return status. Make sure we got the number of buffers
2170 * we requested
2171 */
2172
2173 if (adapter->FreeRcvBufferCount < sxg_initial_rcv_data_buffers) {
2174 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
2175 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
2176 0);
2177 return (STATUS_RESOURCES);
2178 }
2179 /*
2180 * The hardware expects the microcode to be downloaded on every open.
2181 */
2182 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __FUNCTION__);
a536efcc 2183 if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) {
7c66b14b
MT
2184 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
2185 __func__);
2186 sxg_read_config(adapter);
2187 } else {
2188 adapter->state = ADAPT_FAIL;
2189 adapter->linkstate = LINK_DOWN;
2190 DBG_ERROR("sxg_download_microcode FAILED status[%x]\n",
2191 status);
2192 }
2193 msleep(5);
0d414727
MT
2194
2195 if (turn) {
2196 sxg_second_open(adapter->netdev);
2197
2198 return STATUS_SUCCESS;
2199 }
2200
2201 turn++;
5db6b777
GKH
2202
2203 ASSERT(adapter);
e88bd231 2204 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
5db6b777
GKH
2205 adapter->activated);
2206 DBG_ERROR
2207 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
e88bd231 2208 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
5db6b777
GKH
2209 adapter->netdev, adapter, adapter->port);
2210
2211 netif_stop_queue(adapter->netdev);
2212
2213 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2214 if (!adapter->activated) {
2215 sxg_global.num_sxg_ports_active++;
2216 adapter->activated = 1;
2217 }
b243c4aa 2218 /* Initialize the adapter */
e88bd231 2219 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
5db6b777
GKH
2220 status = sxg_initialize_adapter(adapter);
2221 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
e88bd231 2222 __func__, status);
5db6b777
GKH
2223
2224 if (status == STATUS_SUCCESS) {
e88bd231 2225 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
5db6b777 2226 status = sxg_if_init(adapter);
e88bd231 2227 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
5db6b777
GKH
2228 status);
2229 }
2230
2231 if (status != STATUS_SUCCESS) {
2232 if (adapter->activated) {
2233 sxg_global.num_sxg_ports_active--;
2234 adapter->activated = 0;
2235 }
2236 spin_unlock_irqrestore(&sxg_global.driver_lock,
2237 sxg_global.flags);
2238 return (status);
2239 }
e88bd231 2240 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
b62a294f
MT
2241 sxg_set_interrupt_aggregation(adapter);
2242 napi_enable(&adapter->napi);
5db6b777 2243
b243c4aa 2244 /* Enable interrupts */
5db6b777
GKH
2245 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2246
e88bd231 2247 DBG_ERROR("sxg: %s EXIT\n", __func__);
5db6b777
GKH
2248
2249 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2250 return STATUS_SUCCESS;
2251}
2252
0d414727
MT
2253int sxg_second_open(struct net_device * dev)
2254{
2255 struct adapter_t *adapter = (struct adapter_t*) netdev_priv(dev);
b62a294f 2256 int status = 0;
0d414727
MT
2257
2258 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2259 netif_start_queue(adapter->netdev);
2260 adapter->state = ADAPT_UP;
2261 adapter->linkstate = LINK_UP;
2262
b62a294f
MT
2263 status = sxg_initialize_adapter(adapter);
2264 sxg_set_interrupt_aggregation(adapter);
2265 napi_enable(&adapter->napi);
0d414727
MT
2266 /* Re-enable interrupts */
2267 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2268
544ed364 2269 sxg_register_intr(adapter);
1782199f 2270 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
e5ea8da0 2271 mod_timer(&adapter->watchdog_timer, jiffies);
0d414727
MT
2272 return (STATUS_SUCCESS);
2273
2274}
2275
5db6b777
GKH
2276static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
2277{
0d414727
MT
2278 u32 mmio_start = 0;
2279 u32 mmio_len = 0;
2280
942798b4 2281 struct net_device *dev = pci_get_drvdata(pcidev);
73b07065 2282 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
6a2946ba
MT
2283
2284 flush_scheduled_work();
d0128aa9
MT
2285
2286 /* Deallocate Resources */
d9d578bf 2287 unregister_netdev(dev);
1782199f 2288 sxg_reset_interrupt_capability(adapter);
d9d578bf 2289 sxg_free_resources(adapter);
d0128aa9 2290
5db6b777 2291 ASSERT(adapter);
5db6b777 2292
0d414727
MT
2293 mmio_start = pci_resource_start(pcidev, 0);
2294 mmio_len = pci_resource_len(pcidev, 0);
5db6b777 2295
0d414727
MT
2296 DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__,
2297 mmio_start, mmio_len);
2298 release_mem_region(mmio_start, mmio_len);
5db6b777 2299
d0128aa9
MT
2300 mmio_start = pci_resource_start(pcidev, 2);
2301 mmio_len = pci_resource_len(pcidev, 2);
2302
2303 DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __FUNCTION__,
2304 mmio_start, mmio_len);
2305 release_mem_region(mmio_start, mmio_len);
2306
d0128aa9 2307 pci_disable_device(pcidev);
5db6b777 2308
e88bd231 2309 DBG_ERROR("sxg: %s deallocate device\n", __func__);
5db6b777 2310 kfree(dev);
e88bd231 2311 DBG_ERROR("sxg: %s EXIT\n", __func__);
5db6b777
GKH
2312}
2313
942798b4 2314static int sxg_entry_halt(struct net_device *dev)
5db6b777 2315{
73b07065 2316 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
7c66b14b
MT
2317 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2318 int i;
2319 u32 RssIds, IsrCount;
2320 unsigned long flags;
2321
2322 RssIds = SXG_RSS_CPU_COUNT(adapter);
1782199f 2323 IsrCount = adapter->msi_enabled ? RssIds : 1;
e5ea8da0 2324 /* Disable interrupts */
5db6b777 2325 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
e5ea8da0 2326 SXG_DISABLE_ALL_INTERRUPTS(adapter);
5db6b777
GKH
2327 adapter->state = ADAPT_DOWN;
2328 adapter->linkstate = LINK_DOWN;
d0128aa9 2329
5db6b777 2330 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
d9d578bf 2331 sxg_deregister_interrupt(adapter);
7c66b14b
MT
2332 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
2333 mdelay(5000); /* give the card 5 seconds to complete the reset */
e5ea8da0
MT
2334
2335 del_timer_sync(&adapter->watchdog_timer);
2336 netif_stop_queue(dev);
2337 netif_carrier_off(dev);
2338
2339 napi_disable(&adapter->napi);
2340
2341 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 0, true);
2342 adapter->devflags_prev = 0;
2343 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
2344 __func__, dev->name, adapter, adapter->state);
2345
7c66b14b
MT
2346 spin_lock(&adapter->RcvQLock);
2347 /* Free all the blocks and the buffers, moved from remove() routine */
2348 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
2349 sxg_free_rcvblocks(adapter);
2350 }
2351
2352
2353 InitializeListHead(&adapter->FreeRcvBuffers);
2354 InitializeListHead(&adapter->FreeRcvBlocks);
2355 InitializeListHead(&adapter->AllRcvBlocks);
2356 InitializeListHead(&adapter->FreeSglBuffers);
2357 InitializeListHead(&adapter->AllSglBuffers);
2358
2359 adapter->FreeRcvBufferCount = 0;
2360 adapter->FreeRcvBlockCount = 0;
2361 adapter->AllRcvBlockCount = 0;
2362 adapter->RcvBuffersOnCard = 0;
2363 adapter->PendingRcvCount = 0;
2364
2365 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
2366 memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
2367 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
2368 for (i = 0; i < SXG_MAX_RING_SIZE; i++)
2369 adapter->RcvRingZeroInfo.Context[i] = NULL;
2370 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
2371 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
2372
2373 spin_unlock(&adapter->RcvQLock);
2374
2375 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2376 adapter->AllSglBufferCount = 0;
2377 adapter->FreeSglBufferCount = 0;
2378 adapter->PendingXmtCount = 0;
2379 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
2380 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
2381 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2382
7c66b14b
MT
2383 for (i = 0; i < SXG_MAX_RSS; i++) {
2384 adapter->NextEvent[i] = 0;
2385 }
2386 atomic_set(&adapter->pending_allocations, 0);
1782199f
MT
2387 adapter->intrregistered = 0;
2388 sxg_remove_isr(adapter);
2389 DBG_ERROR("sxg: %s (%s) EXIT\n", __FUNCTION__, dev->name);
5db6b777
GKH
2390 return (STATUS_SUCCESS);
2391}
2392
942798b4 2393static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5db6b777
GKH
2394{
2395 ASSERT(rq);
cb636fe3 2396/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
5db6b777
GKH
2397 switch (cmd) {
2398 case SIOCSLICSETINTAGG:
2399 {
cb636fe3
MT
2400 /* struct adapter_t *adapter = (struct adapter_t *)
2401 * netdev_priv(dev);
2402 */
5db6b777
GKH
2403 u32 data[7];
2404 u32 intagg;
2405
2406 if (copy_from_user(data, rq->ifr_data, 28)) {
cb636fe3
MT
2407 DBG_ERROR("copy_from_user FAILED getting \
2408 initial params\n");
5db6b777
GKH
2409 return -EFAULT;
2410 }
2411 intagg = data[0];
2412 printk(KERN_EMERG
2413 "%s: set interrupt aggregation to %d\n",
e88bd231 2414 __func__, intagg);
5db6b777
GKH
2415 return 0;
2416 }
2417
2418 default:
cb636fe3 2419 /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
5db6b777
GKH
2420 return -EOPNOTSUPP;
2421 }
2422 return 0;
2423}
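
/*
 * Userspace usage sketch for SIOCSLICSETINTAGG (hypothetical snippet;
 * the handler above copies in seven u32 words and reads the aggregation
 * value from word 0):
 *
 *	u32 data[7] = { 250 };		// desired aggregation value
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth2", IFNAMSIZ);
 *	ifr.ifr_data = (char *)data;
 *	ioctl(fd, SIOCSLICSETINTAGG, &ifr);	// fd: any open socket
 */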
2424
2425#define NORMAL_ETHFRAME 0
2426
2427/*
5db6b777
GKH
2428 * sxg_send_packets - Send a skb packet
2429 *
2430 * Arguments:
cb636fe3
MT
2431 * skb - The packet to send
2432 * dev - Our linux net device that refs our adapter
5db6b777
GKH
2433 *
2434 * Return:
2435 * 0 regardless of outcome XXXTODO refer to e1000 driver
2436 */
942798b4 2437static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
5db6b777 2438{
73b07065 2439 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
5db6b777
GKH
2440 u32 status = STATUS_SUCCESS;
2441
ddd6f0a8
MT
2442 /*
2443 * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
2444 * skb);
2445 */
1323e5f1 2446
b243c4aa 2447 /* Check the adapter state */
5db6b777
GKH
2448 switch (adapter->State) {
2449 case SXG_STATE_INITIALIZING:
2450 case SXG_STATE_HALTED:
2451 case SXG_STATE_SHUTDOWN:
b243c4aa
M
2452 ASSERT(0); /* unexpected */
2453 /* fall through */
5db6b777
GKH
2454 case SXG_STATE_RESETTING:
2455 case SXG_STATE_SLEEP:
2456 case SXG_STATE_BOOTDIAG:
2457 case SXG_STATE_DIAG:
2458 case SXG_STATE_HALTING:
2459 status = STATUS_FAILURE;
2460 break;
2461 case SXG_STATE_RUNNING:
2462 if (adapter->LinkState != SXG_LINK_UP) {
2463 status = STATUS_FAILURE;
2464 }
2465 break;
2466 default:
2467 ASSERT(0);
2468 status = STATUS_FAILURE;
2469 }
2470 if (status != STATUS_SUCCESS) {
2471 goto xmit_fail;
2472 }
b243c4aa 2473 /* send a packet */
5db6b777
GKH
2474 status = sxg_transmit_packet(adapter, skb);
2475 if (status == STATUS_SUCCESS) {
2476 goto xmit_done;
2477 }
2478
2479 xmit_fail:
b243c4aa 2480 /* reject & complete all the packets if they cant be sent */
5db6b777
GKH
2481 if (status != STATUS_SUCCESS) {
2482#if XXXTODO
cb636fe3 2483 /* sxg_send_packets_fail(adapter, skb, status); */
5db6b777
GKH
2484#else
2485 SXG_DROP_DUMB_SEND(adapter, skb);
2486 adapter->stats.tx_dropped++;
d9d578bf 2487 return NETDEV_TX_BUSY;
5db6b777
GKH
2488#endif
2489 }
e88bd231 2490 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
5db6b777
GKH
2491 status);
2492
2493 xmit_done:
d9d578bf 2494 return NETDEV_TX_OK;
5db6b777
GKH
2495}
2496
2497/*
2498 * sxg_transmit_packet
2499 *
2500 * This function transmits a single packet.
2501 *
2502 * Arguments -
2503 * adapter - Pointer to our adapter structure
2504 * skb - The packet to be sent
2505 *
cb636fe3 2506 * Return - STATUS of send
5db6b777 2507 */
73b07065 2508static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
5db6b777 2509{
942798b4
MT
2510 struct sxg_x64_sgl *pSgl;
2511 struct sxg_scatter_gather *SxgSgl;
d9d578bf 2512 unsigned long sgl_flags;
d0128aa9
MT
2513 /* void *SglBuffer; */
2514 /* u32 SglBufferLength; */
5db6b777 2515
ddd6f0a8
MT
2516 /*
2517 * The vast majority of work is done in the shared
2518 * sxg_dumb_sgl routine.
2519 */
5db6b777
GKH
2520 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
2521 adapter, skb, 0, 0);
2522
b243c4aa 2523 /* Allocate a SGL buffer */
d9d578bf 2524 SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
5db6b777
GKH
2525 if (!SxgSgl) {
2526 adapter->Stats.NoSglBuf++;
54aed113 2527 adapter->stats.tx_errors++;
5db6b777
GKH
2528 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
2529 adapter, skb, 0, 0);
2530 return (STATUS_RESOURCES);
2531 }
2532 ASSERT(SxgSgl->adapter == adapter);
d0128aa9
MT
2533 /*SglBuffer = SXG_SGL_BUFFER(SxgSgl);
2534 SglBufferLength = SXG_SGL_BUF_SIZE; */
5db6b777
GKH
2535 SxgSgl->VlanTag.VlanTci = 0;
2536 SxgSgl->VlanTag.VlanTpid = 0;
2537 SxgSgl->Type = SXG_SGL_DUMB;
2538 SxgSgl->DumbPacket = skb;
2539 pSgl = NULL;
2540
b243c4aa 2541 /* Call the common sxg_dumb_sgl routine to complete the send. */
d9d578bf 2542 return (sxg_dumb_sgl(pSgl, SxgSgl));
5db6b777
GKH
2543}
2544
2545/*
2546 * sxg_dumb_sgl
2547 *
2548 * Arguments:
2549 * pSgl -
942798b4 2550 * SxgSgl - struct sxg_scatter_gather
5db6b777
GKH
2551 *
2552 * Return Value:
d9d578bf 2553 * Status of send operation.
5db6b777 2554 */
d9d578bf 2555static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
cb636fe3 2556 struct sxg_scatter_gather *SxgSgl)
5db6b777 2557{
73b07065 2558 struct adapter_t *adapter = SxgSgl->adapter;
5db6b777 2559 struct sk_buff *skb = SxgSgl->DumbPacket;
b243c4aa 2560 /* For now, all dumb-nic sends go on RSS queue zero */
942798b4
MT
2561 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
2562 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
2563 struct sxg_cmd *XmtCmd = NULL;
cb636fe3 2564 /* u32 Index = 0; */
5db6b777 2565 u32 DataLength = skb->len;
cb636fe3
MT
2566 /* unsigned int BufLen; */
2567 /* u32 SglOffset; */
5db6b777 2568 u64 phys_addr;
d9d578bf 2569 unsigned long flags;
0d414727 2570 unsigned long queue_id=0;
5db6b777
GKH
2571
2572 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2573 pSgl, SxgSgl, 0, 0);
2574
b243c4aa 2575 /* Set aside a pointer to the sgl */
5db6b777
GKH
2576 SxgSgl->pSgl = pSgl;
2577
b243c4aa 2578 /* Sanity check that our SGL format is as we expect. */
942798b4 2579 ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
b243c4aa 2580 /* Shouldn't be a vlan tag on this frame */
5db6b777
GKH
2581 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2582 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2583
ddd6f0a8
MT
2584 /*
2585 * From here below we work with the SGL placed in our
2586 * buffer.
2587 */
5db6b777
GKH
2588
2589 SxgSgl->Sgl.NumberOfElements = 1;
0d414727
MT
2590 /*
2591 * Set ucode Queue ID based on bottom bits of destination TCP port.
2592 * This Queue ID splits slowpath/dumb-nic packet processing across
2593 * multiple threads on the card to improve performance. It is split
2594 * using the TCP port to avoid out-of-order packets that can result
2595 * from multithreaded processing. We use the destination port because
2596 * we expect to be run on a server, so in nearly all cases the local
2597 * port is likely to be constant (well-known server port) and the
2598 * remote port is likely to be random. The exception to this is iSCSI,
2599 * in which case we use the source port instead. Note that an
2600 * original attempt at XOR'ing the source and dest ports resulted in
2601 * poor balance on NTTTCP/iometer applications since they tend to
2602 * line up (even-even, odd-odd..).
2603 */
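	/*
	 * Worked example (assuming SXG_LARGE_SEND_QUEUE_MASK == 0x3,
	 * matching the four queues noted further below): a server reply
	 * to a client at TCP port 33445 (0x82A5) hashes on the
	 * destination port, giving queue_id = 0x82A5 & 0x3 = 1.
	 */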
2604
2605 if (skb->protocol == htons(ETH_P_IP)) {
2606 struct iphdr *ip;
2607
2608 ip = ip_hdr(skb);
2609 if ((ip->protocol == IPPROTO_TCP)&&(DataLength >= sizeof(
2610 struct tcphdr))){
2611 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2612 (ntohs (tcp_hdr(skb)->source) &
2613 SXG_LARGE_SEND_QUEUE_MASK):
2614 (ntohs(tcp_hdr(skb)->dest) &
2615 SXG_LARGE_SEND_QUEUE_MASK));
2616 }
2617 } else if (skb->protocol == htons(ETH_P_IPV6)) {
9914f053 2618 if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && (DataLength >=
0d414727
MT
2619 sizeof(struct tcphdr)) ) {
2620 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2621 (ntohs (tcp_hdr(skb)->source) &
2622 SXG_LARGE_SEND_QUEUE_MASK):
2623 (ntohs(tcp_hdr(skb)->dest) &
2624 SXG_LARGE_SEND_QUEUE_MASK));
2625 }
2626 }
5db6b777 2627
b243c4aa 2628 /* Grab the spinlock and acquire a command */
d9d578bf 2629 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
5db6b777
GKH
2630 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2631 if (XmtCmd == NULL) {
ddd6f0a8
MT
2632 /*
2633 * Call sxg_complete_slow_send to see if we can
2634 * free up any XmtRingZero entries and then try again
2635 */
d9d578bf
MT
2636
2637 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
c5e5cf5a 2638 sxg_complete_slow_send(adapter);
d9d578bf 2639 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
5db6b777
GKH
2640 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2641 if (XmtCmd == NULL) {
2642 adapter->Stats.XmtZeroFull++;
2643 goto abortcmd;
2644 }
2645 }
2646 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2647 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
b243c4aa 2648 /* Update stats */
6a2946ba 2649 adapter->stats.tx_packets++;
6a2946ba 2650 adapter->stats.tx_bytes += DataLength;
b243c4aa 2651#if XXXTODO /* Stats stuff */
5db6b777
GKH
2652 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2653 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2654 adapter->Stats.DumbXmtBcastPkts++;
2655 adapter->Stats.DumbXmtBcastBytes += DataLength;
2656 } else {
2657 adapter->Stats.DumbXmtMcastPkts++;
2658 adapter->Stats.DumbXmtMcastBytes += DataLength;
2659 }
2660 } else {
2661 adapter->Stats.DumbXmtUcastPkts++;
2662 adapter->Stats.DumbXmtUcastBytes += DataLength;
2663 }
2664#endif
ddd6f0a8
MT
2665 /*
2666 * Fill in the command
2667 * Copy out the first SGE to the command and adjust for offset
2668 */
cb636fe3 2669 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
5c7514e0 2670 PCI_DMA_TODEVICE);
7c66b14b
MT
2671
2672 /*
2673 * SAHARA SGL WORKAROUND
2674 * See if the SGL straddles a 64k boundary. If it does on Rev A
2675 * hardware, silently drop the packet rather than DMA across it.
2676 */
2677
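	/*
	 * A minimal sketch of the straddle test (hypothetical helper;
	 * the actual check is the SXG_INVALID_SGL macro):
	 *
	 *	static inline bool sxg_crosses_64k(u64 addr, u32 len)
	 *	{
	 *		return (addr >> 16) != ((addr + len - 1) >> 16);
	 *	}
	 */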
a536efcc
MT
2678 if ((adapter->asictype == SAHARA_REV_A) &&
2679 (SXG_INVALID_SGL(phys_addr, skb->len)))
7c66b14b
MT
2680 {
2681 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2682 /* Silently drop this packet */
2683 printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n");
2684 return STATUS_SUCCESS;
2685 }
1323e5f1
MT
2686 memset(XmtCmd, '\0', sizeof(*XmtCmd));
2687 XmtCmd->Buffer.FirstSgeAddress = phys_addr;
5db6b777 2688 XmtCmd->Buffer.FirstSgeLength = DataLength;
5db6b777 2689 XmtCmd->Buffer.SgeOffset = 0;
5db6b777 2690 XmtCmd->Buffer.TotalLength = DataLength;
1323e5f1 2691 XmtCmd->SgEntries = 1;
5db6b777 2692 XmtCmd->Flags = 0;
9914f053
MT
2693
2694 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2695 /*
2696 * We need to set the checksum in the IP header to 0. This
2697 * is required by the hardware.
2698 */
2699 ip_hdr(skb)->check = 0x0;
2700 XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP;
2701 XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP;
2702 /* Don't know if the length will require a change in case of VLAN */
2703 XmtCmd->CsumFlags.MacLen = ETH_HLEN;
2704 XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >>
2705 SXG_NW_HDR_LEN_SHIFT;
2706 }
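	/*
	 * Example (assuming SXG_NW_HDR_LEN_SHIFT == 2): a standard
	 * 20-byte IPv4 header gives IpHl = 20 >> 2 = 5, the same
	 * 32-bit-word count carried in the IP IHL field.
	 */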
ddd6f0a8
MT
2707 /*
2708 * Advance transmit cmd descriptor by 1.
2709 * NOTE - See comments in SxgTcpOutput where we write
2710 * to the XmtCmd register regarding CPU ID values and/or
2711 * multiple commands.
0d414727 2712 * Top 16 bits specify queue_id. See comments about queue_id above
ddd6f0a8 2713 */
0d414727
MT
2714 /* Four queues at the moment */
2715 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
2716 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
b243c4aa 2717 adapter->Stats.XmtQLen++; /* Stats within lock */
d9d578bf 2718 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
5db6b777
GKH
2719 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2720 XmtCmd, pSgl, SxgSgl, 0);
d9d578bf 2721 return STATUS_SUCCESS;
5db6b777
GKH
2722
2723 abortcmd:
ddd6f0a8
MT
2724 /*
2725 * NOTE - Only jump to this label AFTER grabbing the
2726 * XmtZeroLock, and DO NOT DROP IT between the
2727 * command allocation and the following abort.
2728 */
5db6b777
GKH
2729 if (XmtCmd) {
2730 SXG_ABORT_CMD(XmtRingInfo);
2731 }
d9d578bf 2732 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
5db6b777 2733
ddd6f0a8
MT
2734/*
2735 * failsgl:
2736 * Jump to this label if failure occurs before the
2737 * XmtZeroLock is grabbed
2738 */
6a2946ba 2739 adapter->stats.tx_errors++;
5db6b777
GKH
2740 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2741 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
cb636fe3 2742 /* SxgSgl->DumbPacket is the skb */
d9d578bf 2743 // SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);
54aed113
MT
2744
2745 return STATUS_FAILURE;
5db6b777
GKH
2746}
2747
5db6b777 2748/*
ddd6f0a8
MT
2749 * Link management functions
2750 *
5db6b777
GKH
2751 * sxg_initialize_link - Initialize the link stuff
2752 *
2753 * Arguments -
2754 * adapter - A pointer to our adapter structure
2755 *
2756 * Return
2757 * status
2758 */
73b07065 2759static int sxg_initialize_link(struct adapter_t *adapter)
5db6b777 2760{
942798b4 2761 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
5db6b777
GKH
2762 u32 Value;
2763 u32 ConfigData;
2764 u32 MaxFrame;
a536efcc 2765 u32 AxgMacReg1;
5db6b777
GKH
2766 int status;
2767
2768 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2769 adapter, 0, 0, 0);
2770
b243c4aa 2771 /* Reset PHY and XGXS module */
5db6b777
GKH
2772 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2773
b243c4aa 2774 /* Reset transmit configuration register */
5db6b777
GKH
2775 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2776
b243c4aa 2777 /* Reset receive configuration register */
5db6b777
GKH
2778 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2779
b243c4aa 2780 /* Reset all MAC modules */
5db6b777
GKH
2781 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2782
ddd6f0a8
MT
2783 /*
2784 * Link address 0
2785 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
2786 * is stored with the first byte (0a) in byte 0
2787 * of the MAC address. Possibly reversed?
2788 */
1323e5f1 2789 Value = *(u32 *) adapter->macaddr;
5db6b777 2790 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
b243c4aa 2791 /* also write the MAC address to the MAC. Endian is reversed. */
5db6b777 2792 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
1323e5f1 2793 Value = (*(u16 *) & adapter->macaddr[4] & 0x0000FFFF);
5db6b777 2794 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
b243c4aa 2795 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
5db6b777
GKH
2796 Value = ntohl(Value);
2797 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
b243c4aa 2798 /* Link address 1 */
5db6b777
GKH
2799 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2800 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
b243c4aa 2801 /* Link address 2 */
5db6b777
GKH
2802 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2803 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
b243c4aa 2804 /* Link address 3 */
5db6b777
GKH
2805 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2806 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2807
b243c4aa 2808 /* Enable MAC modules */
5db6b777
GKH
2809 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2810
b243c4aa 2811 /* Configure MAC */
a536efcc
MT
2812 AxgMacReg1 = ( /* Enable XMT */
2813 AXGMAC_CFG1_XMT_EN |
2814 /* Enable receive */
2815 AXGMAC_CFG1_RCV_EN |
2816 /* short frame detection */
2817 AXGMAC_CFG1_SHORT_ASSERT |
2818 /* Verify frame length */
2819 AXGMAC_CFG1_CHECK_LEN |
2820 /* Generate FCS */
2821 AXGMAC_CFG1_GEN_FCS |
2822 /* Pad frames to 64 bytes */
2823 AXGMAC_CFG1_PAD_64);
2824
2825 if (adapter->XmtFcEnabled) {
2826 AxgMacReg1 |= AXGMAC_CFG1_XMT_PAUSE; /* Allow sending of pause */
2827 }
2828 if (adapter->RcvFcEnabled) {
2829 AxgMacReg1 |= AXGMAC_CFG1_RCV_PAUSE; /* Enable detection of pause */
2830 }
2831
2832 WRITE_REG(HwRegs->MacConfig1, AxgMacReg1, TRUE);
5db6b777 2833
b243c4aa 2834 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
5db6b777
GKH
2835 if (adapter->JumboEnabled) {
2836 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2837 }
ddd6f0a8
MT
2838 /*
2839 * AMIIM Configuration Register -
2840 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
2841 * (bottom bits) of this register is used to determine the MDC frequency
2842 * as specified in the A-XGMAC Design Document. This value must not be
2843 * zero. The following value (62 or 0x3E) is based on our MAC transmit
2844 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
2845 * frequency of 2.5 MHz (see the PHY spec), we get:
2846 * 312.5/(2*(X+1)) < 2.5 ==> X = 62.
2847 * This value happens to be the default value for this register, so we
2848 * really don't have to do this.
2849 */
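	/*
	 * Derivation, for reference: 312.5 / (2 * (X + 1)) < 2.5
	 * requires 2 * (X + 1) > 125, i.e. X > 61.5, so the smallest
	 * integer solution is X = 62 (0x3E). That yields an MDC of
	 * 312.5 / 126, roughly 2.48 MHz.
	 */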
a536efcc
MT
2850 if (adapter->asictype == SAHARA_REV_B) {
2851 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000001F, TRUE);
2852 } else {
2853 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2854 }
5db6b777 2855
b243c4aa 2856 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
5db6b777 2857 WRITE_REG(HwRegs->LinkStatus,
a536efcc
MT
2858 (LS_PHY_CLR_RESET |
2859 LS_XGXS_ENABLE |
2860 LS_XGXS_CTL |
2861 LS_PHY_CLK_EN |
2862 LS_ATTN_ALARM),
2863 TRUE);
5db6b777
GKH
2864 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2865
ddd6f0a8
MT
2866 /*
2867 * Per information given by Aeluros, wait 100 ms after removing reset.
cb636fe3
MT
2868 * It's not enough to wait for the self-clearing reset bit in reg 0 to
2869 * clear.
ddd6f0a8 2870 */
5db6b777
GKH
2871 mdelay(100);
2872
cb636fe3
MT
2873 /* Verify the PHY has come up by checking that the Reset bit has
2874 * cleared.
2875 */
2876 status = sxg_read_mdio_reg(adapter,
2877 MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2878 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2879 &Value);
2880 DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
2881 (Value & PMA_CONTROL1_RESET));
5db6b777
GKH
2882 if (status != STATUS_SUCCESS)
2883 return (STATUS_FAILURE);
b243c4aa 2884 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
5db6b777
GKH
2885 return (STATUS_FAILURE);
2886
b243c4aa 2887 /* The SERDES should be initialized by now - confirm */
5db6b777 2888 READ_REG(HwRegs->LinkStatus, Value);
b243c4aa 2889 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
5db6b777
GKH
2890 return (STATUS_FAILURE);
2891
b243c4aa
M
2892 /* The XAUI link should also be up - confirm */
2893 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
5db6b777
GKH
2894 return (STATUS_FAILURE);
2895
b243c4aa 2896 /* Initialize the PHY */
5db6b777
GKH
2897 status = sxg_phy_init(adapter);
2898 if (status != STATUS_SUCCESS)
2899 return (STATUS_FAILURE);
2900
b243c4aa 2901 /* Enable the Link Alarm */
cb636fe3
MT
2902
2903 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2904 * LASI_CONTROL - LASI control register
2905 * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit
2906 */
2907 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2908 LASI_CONTROL,
2909 LASI_CTL_LS_ALARM_ENABLE);
5db6b777
GKH
2910 if (status != STATUS_SUCCESS)
2911 return (STATUS_FAILURE);
2912
b243c4aa 2913 /* XXXTODO - temporary - verify bit is set */
cb636fe3
MT
2914
2915 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2916 * LASI_CONTROL - LASI control register
2917 */
2918 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2919 LASI_CONTROL,
5db6b777 2920 &Value);
cb636fe3 2921
5db6b777
GKH
2922 if (status != STATUS_SUCCESS)
2923 return (STATUS_FAILURE);
2924 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2925 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2926 }
b243c4aa 2927 /* Enable receive */
5db6b777
GKH
2928 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2929 ConfigData = (RCV_CONFIG_ENABLE |
2930 RCV_CONFIG_ENPARSE |
2931 RCV_CONFIG_RCVBAD |
2932 RCV_CONFIG_RCVPAUSE |
2933 RCV_CONFIG_TZIPV6 |
2934 RCV_CONFIG_TZIPV4 |
2935 RCV_CONFIG_HASH_16 |
2936 RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
a536efcc
MT
2937
2938 if (adapter->asictype == SAHARA_REV_B) {
2939 ConfigData |= (RCV_CONFIG_HIPRICTL |
2940 RCV_CONFIG_NEWSTATUSFMT);
2941 }
5db6b777
GKH
2942 WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
2943
2944 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2945
b243c4aa 2946 /* Mark the link as down. We'll get a link event when it comes up. */
5db6b777
GKH
2947 sxg_link_state(adapter, SXG_LINK_DOWN);
2948
2949 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
2950 adapter, 0, 0, 0);
2951 return (STATUS_SUCCESS);
2952}
2953
2954/*
2955 * sxg_phy_init - Initialize the PHY
2956 *
2957 * Arguments -
2958 * adapter - A pointer to our adapter structure
2959 *
2960 * Return
2961 * status
2962 */
73b07065 2963static int sxg_phy_init(struct adapter_t *adapter)
5db6b777
GKH
2964{
2965 u32 Value;
942798b4 2966 struct phy_ucode *p;
5db6b777
GKH
2967 int status;
2968
e88bd231 2969 DBG_ERROR("ENTER %s\n", __func__);
5db6b777 2970
cb636fe3
MT
2971 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2972 * 0xC205 - PHY ID register (?)
2973 * &Value - XXXTODO - add def
2974 */
2975 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2976 0xC205,
2977 &Value);
5db6b777
GKH
2978 if (status != STATUS_SUCCESS)
2979 return (STATUS_FAILURE);
2980
cb636fe3
MT
2981 if (Value == 0x0012) {
2982 /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2983 DBG_ERROR("AEL2005C PHY detected. Downloading PHY \
2984 microcode.\n");
5db6b777 2985
b243c4aa 2986 /* Initialize AEL2005C PHY and download PHY microcode */
5db6b777
GKH
2987 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2988 if (p->Addr == 0) {
b243c4aa 2989 /* if address == 0, data == sleep time in ms */
5db6b777
GKH
2990 mdelay(p->Data);
2991 } else {
cb636fe3
MT
2992 /* write the given data to the specified address */
2993 status = sxg_write_mdio_reg(adapter,
2994 MIIM_DEV_PHY_PMA,
2995 /* PHY address */
2996 p->Addr,
2997 /* PHY data */
2998 p->Data);
5db6b777
GKH
2999 if (status != STATUS_SUCCESS)
3000 return (STATUS_FAILURE);
3001 }
3002 }
3003 }
e88bd231 3004 DBG_ERROR("EXIT %s\n", __func__);
5db6b777
GKH
3005
3006 return (STATUS_SUCCESS);
3007}
3008
3009/*
3010 * sxg_link_event - Process a link event notification from the card
3011 *
3012 * Arguments -
3013 * adapter - A pointer to our adapter structure
3014 *
3015 * Return
3016 * None
3017 */
73b07065 3018static void sxg_link_event(struct adapter_t *adapter)
5db6b777 3019{
942798b4 3020 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
0d414727 3021 struct net_device *netdev = adapter->netdev;
73b07065 3022 enum SXG_LINK_STATE LinkState;
5db6b777
GKH
3023 int status;
3024 u32 Value;
3025
e5ea8da0
MT
3026 if (adapter->state == ADAPT_DOWN)
3027 return;
5db6b777
GKH
3028 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
3029 adapter, 0, 0, 0);
e88bd231 3030 DBG_ERROR("ENTER %s\n", __func__);
5db6b777 3031
b243c4aa 3032 /* Check the Link Status register. We should have a Link Alarm. */
5db6b777
GKH
3033 READ_REG(HwRegs->LinkStatus, Value);
3034 if (Value & LS_LINK_ALARM) {
ddd6f0a8
MT
3035 /*
3036 * We got a Link Status alarm. First, pause to let the
3037 * link state settle (it can bounce a number of times)
3038 */
5db6b777
GKH
3039 mdelay(10);
3040
b243c4aa 3041 /* Now clear the alarm by reading the LASI status register. */
cb636fe3
MT
3042 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
3043 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
3044 /* LASI status register */
3045 LASI_STATUS,
5db6b777
GKH
3046 &Value);
3047 if (status != STATUS_SUCCESS) {
3048 DBG_ERROR("Error reading LASI Status MDIO register!\n");
3049 sxg_link_state(adapter, SXG_LINK_DOWN);
cb636fe3 3050 /* ASSERT(0); */
5db6b777 3051 }
a536efcc
MT
3052 /*
3053 * We used to assert that the LASI_LS_ALARM bit was set, as
3054 * it should be. But there appear to be cases during
3055 * initialization (when the PHY is reset and re-initialized)
3056 * when we get a link alarm, but the status bit is 0 when we
3057 * read it. Rather than trying to ensure this never happens
3058 * (and never being certain), just ignore it.
3059 *
3060 * ASSERT(Value & LASI_STATUS_LS_ALARM);
3061 */
5db6b777 3062
b243c4aa 3063 /* Now get and set the link state */
5db6b777
GKH
3064 LinkState = sxg_get_link_state(adapter);
3065 sxg_link_state(adapter, LinkState);
3066 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
3067 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
e5ea8da0 3068 if (LinkState == SXG_LINK_UP) {
0d414727 3069 netif_carrier_on(netdev);
e5ea8da0
MT
3070 netif_tx_start_all_queues(netdev);
3071 } else {
3072 netif_tx_stop_all_queues(netdev);
0d414727 3073 netif_carrier_off(netdev);
e5ea8da0 3074 }
5db6b777 3075 } else {
ddd6f0a8
MT
3076 /*
3077 * XXXTODO - Assuming Link Attention is only being generated
3078 * for the Link Alarm pin (and not for a XAUI Link Status
3079 * change), then it's impossible to get here. Yet we've gotten here
3080 * twice (under extreme conditions - bouncing the link up and
3081 * down many times a second). Needs further investigation.
3082 */
5db6b777
GKH
3083 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
3084 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
cb636fe3 3085 /* ASSERT(0); */
5db6b777 3086 }
e88bd231 3087 DBG_ERROR("EXIT %s\n", __func__);
5db6b777
GKH
3088
3089}
3090
3091/*
3092 * sxg_get_link_state - Determine if the link is up or down
3093 *
3094 * Arguments -
3095 * adapter - A pointer to our adapter structure
3096 *
3097 * Return
3098 * Link State
3099 */
73b07065 3100static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
5db6b777
GKH
3101{
3102 int status;
3103 u32 Value;
3104
e88bd231 3105 DBG_ERROR("ENTER %s\n", __func__);
5db6b777
GKH
3106
3107 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
3108 adapter, 0, 0, 0);
3109
ddd6f0a8
MT
3110 /*
3111 * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
3112 * the following 3 bits (from 3 different MDIO registers) are all true.
3113 */
cb636fe3
MT
3114
3115 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
3116 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
3117 /* PMA/PMD Receive Signal Detect register */
3118 PHY_PMA_RCV_DET,
5db6b777
GKH
3119 &Value);
3120 if (status != STATUS_SUCCESS)
3121 goto bad;
3122
b243c4aa 3123 /* If PMA/PMD receive signal detect is 0, then the link is down */
5db6b777
GKH
3124 if (!(Value & PMA_RCV_DETECT))
3125 return (SXG_LINK_DOWN);
3126
cb636fe3
MT
3127 /* MIIM_DEV_PHY_PCS - PHY PCS module */
3128 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
3129 /* PCS 10GBASE-R Status 1 register */
3130 PHY_PCS_10G_STATUS1,
5db6b777
GKH
3131 &Value);
3132 if (status != STATUS_SUCCESS)
3133 goto bad;
3134
b243c4aa 3135 /* If PCS is not locked to receive blocks, then the link is down */
5db6b777
GKH
3136 if (!(Value & PCS_10B_BLOCK_LOCK))
3137 return (SXG_LINK_DOWN);
3138
cb636fe3
MT
3139 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
3140 /* XS Lane Status register */
3141 PHY_XS_LANE_STATUS,
5db6b777
GKH
3142 &Value);
3143 if (status != STATUS_SUCCESS)
3144 goto bad;
3145
b243c4aa 3146 /* If XS transmit lanes are not aligned, then the link is down */
5db6b777
GKH
3147 if (!(Value & XS_LANE_ALIGN))
3148 return (SXG_LINK_DOWN);
3149
b243c4aa 3150 /* All 3 bits are true, so the link is up */
e88bd231 3151 DBG_ERROR("EXIT %s\n", __func__);
5db6b777
GKH
3152
3153 return (SXG_LINK_UP);
3154
3155 bad:
cb636fe3 3156 /* An error occurred reading an MDIO register. This shouldn't happen. */
5db6b777
GKH
3157 DBG_ERROR("Error reading an MDIO register!\n");
3158 ASSERT(0);
3159 return (SXG_LINK_DOWN);
3160}

static void sxg_indicate_link_state(struct adapter_t *adapter,
				    enum SXG_LINK_STATE LinkState)
{
	if (adapter->LinkState == SXG_LINK_UP) {
		DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
			  __func__);
		netif_start_queue(adapter->netdev);
	} else {
		DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
			  __func__);
		netif_stop_queue(adapter->netdev);
	}
}

/*
 * sxg_change_mtu - Change the Maximum Transmission Unit
 * @returns 0 on success, negative on failure
 */
int sxg_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(netdev);

	if (!((new_mtu == SXG_DEFAULT_MTU) || (new_mtu == SXG_JUMBO_MTU)))
		return -EINVAL;

	if (new_mtu == netdev->mtu)
		return 0;

	netdev->mtu = new_mtu;

	if (new_mtu == SXG_JUMBO_MTU) {
		adapter->JumboEnabled = TRUE;
		adapter->FrameSize = JUMBOMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
	} else {
		adapter->JumboEnabled = FALSE;
		adapter->FrameSize = ETHERMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
	}

	sxg_entry_halt(netdev);
	sxg_entry_open(netdev);
	return 0;
}
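
/*
 * Usage note: only the two fixed sizes SXG_DEFAULT_MTU and SXG_JUMBO_MTU
 * are accepted (anything else, e.g. an "ip link set dev ethX mtu <n>"
 * with some other value, gets -EINVAL), and a successful change bounces
 * the interface via sxg_entry_halt() and sxg_entry_open() so that
 * receive buffers are reposted at the new size.
 */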

/*
 * sxg_link_state - Set the link state and if necessary, indicate.
 *	This routine is the central point of processing for all link state
 *	changes.  Nothing else in the driver should alter the link state or
 *	perform link state indications.
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	LinkState	- The link state
 *
 * Return
 *	None
 */
static void sxg_link_state(struct adapter_t *adapter,
			   enum SXG_LINK_STATE LinkState)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
		  adapter, LinkState, adapter->LinkState, adapter->State);

	DBG_ERROR("ENTER %s\n", __func__);

	/*
	 * Hold the adapter lock during this routine. Maybe move
	 * the lock to the caller.
	 */
	/* IMP TODO : Check if we can survive without taking this lock */
//	spin_lock(&adapter->AdapterLock);
	if (LinkState == adapter->LinkState) {
		/* Nothing changed.. */
//		spin_unlock(&adapter->AdapterLock);
		DBG_ERROR("EXIT #0 %s. Link status = %d\n",
			  __func__, LinkState);
		return;
	}
	/* Save the adapter state */
	adapter->LinkState = LinkState;

	/* Drop the lock and indicate link state */
//	spin_unlock(&adapter->AdapterLock);
	DBG_ERROR("EXIT #1 %s\n", __func__);

	sxg_indicate_link_state(adapter, LinkState);
}

/*
 * sxg_write_mdio_reg - Write to a register on the MDIO bus
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	DevAddr		- MDIO device number being addressed
 *	RegAddr		- register address for the specified MDIO device
 *	Value		- value to write to the MDIO register
 *
 * Return
 *	status
 */
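/*
 * Implementation note (as reflected in the register writes below): each
 * MDIO access is a two-phase, clause-45-style transaction on the AMIIM
 * interface.  An address cycle (MIIM_OP_ADDR) first latches RegAddr in
 * the target MDIO device, then a separate data cycle (MIIM_OP_WRITE or
 * MIIM_OP_READ) transfers the 16-bit value.  Each cycle is started
 * through MacAmiimCmd and polled to completion via the BUSY bit of the
 * MacAmiimIndicator register, with a bounded udelay() loop.
 */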
static int sxg_write_mdio_reg(struct adapter_t *adapter,
			      u32 DevAddr, u32 RegAddr, u32 Value)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	/* Address operation (written to MIIM field reg) */
	u32 AddrOp;
	/* Write operation (written to MIIM field reg) */
	u32 WriteOp;
	u32 Cmd;	/* Command (written to MIIM command reg) */
	u32 ValueRead;
	u32 Timeout;

	/* DBG_ERROR("ENTER %s\n", __func__); */

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
		  adapter, 0, 0, 0);

	/* Ensure values don't exceed field width */
	DevAddr &= 0x001F;	/* 5-bit field */
	RegAddr &= 0xFFFF;	/* 16-bit field */
	Value &= 0xFFFF;	/* 16-bit field */

	/* Set MIIM field register bits for an MIIM address operation */
	AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	    (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;

	/* Set MIIM field register bits for an MIIM write operation */
	WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	    (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;

	/* Set MIIM command register bits to execute an MIIM command */
	Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;

	/* Reset the command register command bit (in case it's not 0) */
	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);

	/* MIIM write to set the address of the specified MDIO register */
	WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);

	/* Write to MIIM Command Register to execute the address operation */
	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);

	/* Poll AMIIM Indicator register to wait for completion */
	Timeout = SXG_LINK_TIMEOUT;
	do {
		udelay(100);	/* Timeout in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0) {
			return (STATUS_FAILURE);
		}
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);

	/* Reset the command register command bit */
	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);

	/* MIIM write to set up an MDIO write operation */
	WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);

	/* Write to MIIM Command Register to execute the write operation */
	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);

	/* Poll AMIIM Indicator register to wait for completion */
	Timeout = SXG_LINK_TIMEOUT;
	do {
		udelay(100);	/* Timeout in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0) {
			return (STATUS_FAILURE);
		}
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);

	/* DBG_ERROR("EXIT %s\n", __func__); */

	return (STATUS_SUCCESS);
}

/*
 * sxg_read_mdio_reg - Read a register on the MDIO bus
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	DevAddr		- MDIO device number being addressed
 *	RegAddr		- register address for the specified MDIO device
 *	pValue		- pointer to where to put data read from the MDIO register
 *
 * Return
 *	status
 */
static int sxg_read_mdio_reg(struct adapter_t *adapter,
			     u32 DevAddr, u32 RegAddr, u32 *pValue)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	u32 AddrOp;	/* Address operation (written to MIIM field reg) */
	u32 ReadOp;	/* Read operation (written to MIIM field reg) */
	u32 Cmd;	/* Command (written to MIIM command reg) */
	u32 ValueRead;
	u32 Timeout;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RdMDIO",
		  adapter, 0, 0, 0);
	DBG_ERROR("ENTER %s\n", __func__);

	/* Ensure values don't exceed field width */
	DevAddr &= 0x001F;	/* 5-bit field */
	RegAddr &= 0xFFFF;	/* 16-bit field */

	/* Set MIIM field register bits for an MIIM address operation */
	AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	    (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;

	/* Set MIIM field register bits for an MIIM read operation */
	ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	    (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	    (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	    (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);

	/* Set MIIM command register bits to execute an MIIM command */
	Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;

	/* Reset the command register command bit (in case it's not 0) */
	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);

	/* MIIM write to set the address of the specified MDIO register */
	WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);

	/* Write to MIIM Command Register to execute the address operation */
	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);

	/* Poll AMIIM Indicator register to wait for completion */
	Timeout = SXG_LINK_TIMEOUT;
	do {
		udelay(100);	/* Timeout in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0) {
			DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __func__);
			return (STATUS_FAILURE);
		}
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);

	/* Reset the command register command bit */
	WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);

	/* MIIM write to set up an MDIO register read operation */
	WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);

	/* Write to MIIM Command Register to execute the read operation */
	WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);

	/* Poll AMIIM Indicator register to wait for completion */
	Timeout = SXG_LINK_TIMEOUT;
	do {
		udelay(100);	/* Timeout in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0) {
			DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __func__);
			return (STATUS_FAILURE);
		}
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);

	/* Read the MDIO register data back from the field register */
	READ_REG(HwRegs->MacAmiimField, *pValue);
	*pValue &= 0xFFFF;	/* data is in the lower 16 bits */

	DBG_ERROR("EXIT %s\n", __func__);

	return (STATUS_SUCCESS);
}

/*
 * Functions to obtain the CRC corresponding to the destination mac address.
 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
 * the polynomial:
 *   x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
 *   + x^4 + x^2 + x^1.
 *
 * After the CRC for the 6 bytes is generated (but before the value is
 * complemented), we must then transpose the value and return bits 30-23.
 */
static u32 sxg_crc_table[256];	/* Table of CRCs for all possible byte values */
static u32 sxg_crc_init;	/* Is table initialized? */

/* Construct the CRC32 table */
static void sxg_mcast_init_crc32(void)
{
	u32 c;		/* CRC shift reg */
	u32 e = 0;	/* Poly X-or pattern */
	int i;		/* counter */
	int k;		/* byte being shifted into crc */

	static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };

	for (i = 0; i < sizeof(p) / sizeof(int); i++) {
		e |= 1L << (31 - p[i]);
	}

	for (i = 1; i < 256; i++) {
		c = i;
		for (k = 8; k; k--) {
			c = c & 1 ? (c >> 1) ^ e : c >> 1;
		}
		sxg_crc_table[i] = c;
	}
}

/*
 * Return the MAC hash as described above.
 */
static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
{
	u32 crc;
	char *p;
	int i;
	unsigned char machash = 0;

	if (!sxg_crc_init) {
		sxg_mcast_init_crc32();
		sxg_crc_init = 1;
	}

	crc = 0xFFFFFFFF;	/* Preload shift register, per crc-32 spec */
	for (i = 0, p = macaddr; i < 6; ++p, ++i) {
		crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
	}

	/* Return bits 1-8, transposed */
	for (i = 1; i < 9; i++) {
		machash |= (((crc >> i) & 1) << (8 - i));
	}

	return (machash);
}
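
/*
 * For reference: the exponent list above builds the standard reflected
 * CRC-32 polynomial constant 0xEDB88320 - the same CRC-32 used for the
 * Ethernet FCS - computed here with a local byte-wise table rather than
 * the kernel's crc32_le() helper.
 */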

static void sxg_mcast_set_mask(struct adapter_t *adapter)
{
	struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;

	DBG_ERROR("%s ENTER (%s) MacFilter[%x] mask[%llx]\n", __func__,
		  adapter->netdev->name, (unsigned int)adapter->MacFilter,
		  adapter->MulticastMask);

	if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
		/*
		 * Turn on all multicast addresses. We have to do this for
		 * promiscuous mode as well as ALLMCAST mode. It saves the
		 * Microcode from having to keep state about the MAC
		 * configuration.
		 */
		/* DBG_ERROR("sxg: %s MacFilter = MAC_ALLMCAST | MAC_PROMISC\n \
		 * SLUT MODE!!!\n",__func__);
		 */
		WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
		WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
		/* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
		 * 0xFFFFFFFF\n",__func__, adapter->netdev->name);
		 */

	} else {
		/*
		 * Commit our multicast mask to the SLIC by writing to the
		 * multicast address mask registers
		 */
		DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
			  __func__, adapter->netdev->name,
			  ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
			  ((ulong)
			   ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));

		WRITE_REG(sxg_regs->McastLow,
			  (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
		WRITE_REG(sxg_regs->McastHigh,
			  (u32) ((adapter->
				  MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
	}
}

static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
{
	unsigned char crcpoly;

	/* Get the CRC hash of the mac address */
	crcpoly = sxg_mcast_get_mac_hash(address);

	/*
	 * We only have space on the SLIC for 64 entries. Lop
	 * off the top two bits. (2^6 = 64)
	 */
	crcpoly &= 0x3F;

	/* OR the new bit into our 64 bit mask. */
	adapter->MulticastMask |= (u64) 1 << crcpoly;
}
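
/*
 * Worked example (illustrative only; the address below is made up and
 * this helper is never built or called): how the two routines above fold
 * a destination MAC into the 64-bit hardware multicast mask.  If
 * sxg_mcast_get_mac_hash() returned, say, 0x9A, then 0x9A & 0x3F == 0x1A
 * == 26, so bit 26 of adapter->MulticastMask would be set.
 */
#if 0
static void sxg_mcast_example(struct adapter_t *adapter)
{
	/* A typical IPv4 multicast MAC (hypothetical example value) */
	char mac[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };

	/* Equivalent to sxg_mcast_set_bit(adapter, mac) */
	unsigned char hash = sxg_mcast_get_mac_hash(mac);

	adapter->MulticastMask |= (u64) 1 << (hash & 0x3F);
}
#endif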

/*
 * Function takes MAC addresses from dev_mc_list and generates the Mask
 */
static void sxg_set_mcast_addr(struct adapter_t *adapter)
{
	struct dev_mc_list *mclist;
	struct net_device *dev = adapter->netdev;
	int i;

	if (adapter->MacFilter & (MAC_ALLMCAST | MAC_MCAST)) {
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			sxg_mcast_set_bit(adapter, mclist->da_addr);
		}
	}
	sxg_mcast_set_mask(adapter);
}

static void sxg_mcast_set_list(struct net_device *dev)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);

	ASSERT(adapter);
	if (dev->flags & IFF_PROMISC)
		adapter->MacFilter |= MAC_PROMISC;
	if (dev->flags & IFF_MULTICAST)
		adapter->MacFilter |= MAC_MCAST;
	if (dev->flags & IFF_ALLMULTI)
		adapter->MacFilter |= MAC_ALLMCAST;

	// XXX handle other flags as well
	sxg_set_mcast_addr(adapter);
}

void sxg_free_sgl_buffers(struct adapter_t *adapter)
{
	struct list_entry *ple;
	struct sxg_scatter_gather *Sgl;

	while (!(IsListEmpty(&adapter->AllSglBuffers))) {
		ple = RemoveHeadList(&adapter->AllSglBuffers);
		Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
		kfree(Sgl);
		adapter->AllSglBufferCount--;
	}
}

void sxg_free_rcvblocks(struct adapter_t *adapter)
{
	u32 i;
	void *temp_RcvBlock;
	struct list_entry *ple;
	struct sxg_rcv_block_hdr *RcvBlockHdr;
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;

	ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
	       (adapter->state == SXG_STATE_HALTING));

	while (!(IsListEmpty(&adapter->AllRcvBlocks))) {
		ple = RemoveHeadList(&adapter->AllRcvBlocks);
		RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr,
					   AllList);

		if (RcvBlockHdr->VirtualAddress) {
			temp_RcvBlock = RcvBlockHdr->VirtualAddress;

			for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
			     i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
				RcvDataBufferHdr =
				    (struct sxg_rcv_data_buffer_hdr *)
				    temp_RcvBlock;
				SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
			}
		}

		pci_free_consistent(adapter->pcidev,
				    SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
				    RcvBlockHdr->VirtualAddress,
				    RcvBlockHdr->PhysicalAddress);
		adapter->AllRcvBlockCount--;
	}
	ASSERT(adapter->AllRcvBlockCount == 0);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
		  adapter, 0, 0, 0);
}

void sxg_free_mcast_addrs(struct adapter_t *adapter)
{
	struct sxg_multicast_address *address;

	while (adapter->MulticastAddrs) {
		address = adapter->MulticastAddrs;
		adapter->MulticastAddrs = address->Next;
		kfree(address);
	}

	adapter->MulticastMask = 0;
}

void sxg_unmap_resources(struct adapter_t *adapter)
{
	if (adapter->HwRegs)
		iounmap((void *)adapter->HwRegs);
	if (adapter->UcodeRegs)
		iounmap((void *)adapter->UcodeRegs);

	ASSERT(adapter->AllRcvBlockCount == 0);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
		  adapter, 0, 0, 0);
}

/*
 * sxg_free_resources - Free everything allocated in SxgAllocateResources
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return
 *	none
 */
void sxg_free_resources(struct adapter_t *adapter)
{
	u32 RssIds, IsrCount;

	RssIds = SXG_RSS_CPU_COUNT(adapter);
	IsrCount = adapter->msi_enabled ? RssIds : 1;

	if (adapter->BasicAllocations == FALSE) {
		/*
		 * No allocations have been made, including spinlocks,
		 * or listhead initializations. Return.
		 */
		return;
	}

	if (!(IsListEmpty(&adapter->AllRcvBlocks)))
		sxg_free_rcvblocks(adapter);
	if (!(IsListEmpty(&adapter->AllSglBuffers)))
		sxg_free_sgl_buffers(adapter);

	if (adapter->XmtRingZeroIndex) {
		pci_free_consistent(adapter->pcidev,
				    sizeof(u32),
				    adapter->XmtRingZeroIndex,
				    adapter->PXmtRingZeroIndex);
	}
	if (adapter->Isr) {
		pci_free_consistent(adapter->pcidev,
				    sizeof(u32) * IsrCount,
				    adapter->Isr, adapter->PIsr);
	}

	if (adapter->EventRings) {
		pci_free_consistent(adapter->pcidev,
				    sizeof(struct sxg_event_ring) * RssIds,
				    adapter->EventRings, adapter->PEventRings);
	}
	if (adapter->RcvRings) {
		pci_free_consistent(adapter->pcidev,
				    sizeof(struct sxg_rcv_ring) * 1,
				    adapter->RcvRings,
				    adapter->PRcvRings);
		adapter->RcvRings = NULL;
	}

	if (adapter->XmtRings) {
		pci_free_consistent(adapter->pcidev,
				    sizeof(struct sxg_xmt_ring) * 1,
				    adapter->XmtRings,
				    adapter->PXmtRings);
		adapter->XmtRings = NULL;
	}

	if (adapter->ucode_stats) {
		pci_unmap_single(adapter->pcidev,
				 adapter->pucode_stats,
				 sizeof(struct sxg_ucode_stats),
				 PCI_DMA_FROMDEVICE);
		adapter->ucode_stats = NULL;
	}

	/* Unmap register spaces */
	sxg_unmap_resources(adapter);

	sxg_free_mcast_addrs(adapter);

	adapter->BasicAllocations = FALSE;
}

/*
 * sxg_allocate_complete -
 *
 * This routine is called when a memory allocation has completed.
 *
 * Arguments -
 *	struct adapter_t *	- Our adapter structure
 *	VirtualAddress		- Memory virtual address
 *	PhysicalAddress		- Memory physical address
 *	Length			- Length of memory allocated (or 0)
 *	Context			- The type of buffer allocated
 *
 * Return
 *	status
 */
static int sxg_allocate_complete(struct adapter_t *adapter,
				 void *VirtualAddress,
				 dma_addr_t PhysicalAddress,
				 u32 Length, enum sxg_buffer_type Context)
{
	int status = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
		  adapter, VirtualAddress, Length, Context);
	ASSERT(atomic_read(&adapter->pending_allocations));
	atomic_dec(&adapter->pending_allocations);

	switch (Context) {
	case SXG_BUFFER_TYPE_RCV:
		status = sxg_allocate_rcvblock_complete(adapter,
							VirtualAddress,
							PhysicalAddress,
							Length);
		break;
	case SXG_BUFFER_TYPE_SGL:
		sxg_allocate_sgl_buffer_complete(adapter,
						 (struct sxg_scatter_gather *)
						 VirtualAddress,
						 PhysicalAddress, Length);
		break;
	}
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
		  adapter, VirtualAddress, Length, Context);

	return status;
}

/*
 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
 *		synchronous and asynchronous buffer allocations
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	Size		- block size to allocate
 *	BufferType	- Type of buffer to allocate
 *
 * Return
 *	int
 */
static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
				      u32 Size, enum sxg_buffer_type BufferType)
{
	int status;
	void *Buffer;
	dma_addr_t pBuffer;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
		  adapter, Size, BufferType, 0);

	/*
	 * Bump the pending-allocation count.  Pause processing relies on
	 * this count; sxg_allocate_complete() (or the failure path below)
	 * drops it again.
	 */
	atomic_inc(&adapter->pending_allocations);

	if (BufferType != SXG_BUFFER_TYPE_SGL)
		Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
	else {
		Buffer = kzalloc(Size, GFP_ATOMIC);
		pBuffer = (dma_addr_t)NULL;
	}
	if (Buffer == NULL) {
		/* Allocation failed; undo the pending-allocation count */
		atomic_dec(&adapter->pending_allocations);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
			  adapter, Size, BufferType, 0);
		return (STATUS_RESOURCES);
	}
	status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size,
				       BufferType);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
		  adapter, Size, BufferType, status);
	return status;
}
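
/*
 * Allocation call flow, for reference:
 *
 *   sxg_allocate_buffer_memory()
 *     -> pci_alloc_consistent()	(receive blocks: DMA-coherent memory)
 *     -> kzalloc()			(SGL buffers: host memory only)
 *     -> sxg_allocate_complete()
 *          -> sxg_allocate_rcvblock_complete()   (SXG_BUFFER_TYPE_RCV)
 *          -> sxg_allocate_sgl_buffer_complete() (SXG_BUFFER_TYPE_SGL)
 */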

/*
 * sxg_allocate_rcvblock_complete - Complete a receive descriptor
 *	block allocation
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	RcvBlock	- receive block virtual address
 *	PhysicalAddress	- Physical address
 *	Length		- Memory length
 *
 * Return
 *	status
 */
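
/*
 * Layout note: a receive "block" is a single contiguous DMA allocation
 * that is carved up, via the SXG_RCV_*_OFFSET macros used below, into
 * SXG_RCV_DESCRIPTORS_PER_BLOCK data-buffer headers (each
 * SXG_RCV_DATA_HDR_SIZE bytes), one struct sxg_rcv_descriptor_block
 * that is handed to the card, one struct sxg_rcv_descriptor_block_hdr,
 * and one struct sxg_rcv_block_hdr that links the block onto the
 * adapter's AllRcvBlocks list.  The exact offsets live in the macros.
 */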
static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
					  void *RcvBlock,
					  dma_addr_t PhysicalAddress,
					  u32 Length)
{
	u32 i;
	u32 BufferSize = adapter->ReceiveBufferSize;
	u64 Paddr;
	void *temp_RcvBlock;
	struct sxg_rcv_block_hdr *RcvBlockHdr;
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
	struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
	struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
		  adapter, RcvBlock, Length, 0);
	if (RcvBlock == NULL) {
		goto fail;
	}
	memset(RcvBlock, 0, Length);
	ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
	       (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
	ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
	/*
	 * First, initialize the contained pool of receive data buffers.
	 * This initialization requires NBL/NB/MDL allocations; if any of
	 * them fail, free the block and return without queueing the shared
	 * memory.
	 */
	temp_RcvBlock = RcvBlock;
	for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
	     i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
		RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
		    temp_RcvBlock;
		/* For FREE macro assertion */
		RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
		SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
		if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
			goto fail;
	}

	/*
	 * Place this entire block of memory on the AllRcvBlocks queue so it
	 * can be freed later
	 */
	RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
		      SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
	RcvBlockHdr->VirtualAddress = RcvBlock;
	RcvBlockHdr->PhysicalAddress = PhysicalAddress;
	spin_lock(&adapter->RcvQLock);
	adapter->AllRcvBlockCount++;
	InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
	spin_unlock(&adapter->RcvQLock);

	/*
	 * Now free the contained receive data buffers that we
	 * initialized above
	 */
	temp_RcvBlock = RcvBlock;
	for (i = 0, Paddr = PhysicalAddress;
	     i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
	     i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
	     temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
		RcvDataBufferHdr =
		    (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
		spin_lock(&adapter->RcvQLock);
		SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
		spin_unlock(&adapter->RcvQLock);
	}

	/* Locate the descriptor block and put it on a separate free queue */
	RcvDescriptorBlock =
	    (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
	    SXG_RCV_DESCRIPTOR_BLOCK_OFFSET(SXG_RCV_DATA_HDR_SIZE));
	RcvDescriptorBlockHdr =
	    (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
	    SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
	RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
	RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
	spin_lock(&adapter->RcvQLock);
	SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
	spin_unlock(&adapter->RcvQLock);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
		  adapter, RcvBlock, Length, 0);
	return STATUS_SUCCESS;
fail:
	/* Free any allocated resources */
	if (RcvBlock) {
		temp_RcvBlock = RcvBlock;
		for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
		     i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
			RcvDataBufferHdr =
			    (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
			SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
		}
		pci_free_consistent(adapter->pcidev,
				    Length, RcvBlock, PhysicalAddress);
	}
	DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
		  adapter, adapter->FreeRcvBufferCount,
		  adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
	adapter->Stats.NoMem++;
	/* As the allocation failed, free all previously allocated blocks.. */
//	sxg_free_rcvblocks(adapter);

	return STATUS_RESOURCES;
}

/*
 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	SxgSgl		- struct sxg_scatter_gather buffer
 *	PhysicalAddress	- Physical address
 *	Length		- Memory length
 *
 * Return
 *	None
 */
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
					     struct sxg_scatter_gather *SxgSgl,
					     dma_addr_t PhysicalAddress,
					     u32 Length)
{
	unsigned long sgl_flags;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
		  adapter, SxgSgl, Length, 0);
	spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
	adapter->AllSglBufferCount++;
	SxgSgl->PhysicalAddress = PhysicalAddress;
	/* Initialize backpointer once */
	SxgSgl->adapter = adapter;
	InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
	spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
	SxgSgl->State = SXG_BUFFER_BUSY;
	SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
		  adapter, SxgSgl, Length, 0);
}


static int sxg_adapter_set_hwaddr(struct adapter_t *adapter)
{
	/*
	 * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
	 * funct#[%d]\n", __func__, card->config_set,
	 * adapter->port, adapter->physport, adapter->functionnumber);
	 *
	 * sxg_dbg_macaddrs(adapter);
	 */
	/* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
	 * __func__);
	 */
	/* sxg_dbg_macaddrs(adapter); */

	struct net_device *dev;

	if (!adapter)
		return -EBUSY;
	dev = adapter->netdev;
	if (!dev) {
		printk(KERN_ERR "sxg: Dev is Null\n");
		return -ENODEV;
	}

	DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);

	if (netif_running(dev))
		return -EBUSY;

	if (!(adapter->currmacaddr[0] ||
	      adapter->currmacaddr[1] ||
	      adapter->currmacaddr[2] ||
	      adapter->currmacaddr[3] ||
	      adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
		memcpy(adapter->currmacaddr, adapter->macaddr, 6);
	}
	if (adapter->netdev) {
		memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
		memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
	}
	/* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
	sxg_dbg_macaddrs(adapter);

	return 0;
}

#if XXXTODO
static int sxg_mac_set_address(struct net_device *dev, void *ptr)
{
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
	struct sockaddr *addr = ptr;

	DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);

	if (netif_running(dev)) {
		return -EBUSY;
	}
	if (!adapter) {
		return -EBUSY;
	}
	DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  __func__, adapter->netdev->name, adapter->currmacaddr[0],
		  adapter->currmacaddr[1], adapter->currmacaddr[2],
		  adapter->currmacaddr[3], adapter->currmacaddr[4],
		  adapter->currmacaddr[5]);
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
	DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  __func__, adapter->netdev->name, adapter->currmacaddr[0],
		  adapter->currmacaddr[1], adapter->currmacaddr[2],
		  adapter->currmacaddr[3], adapter->currmacaddr[4],
		  adapter->currmacaddr[5]);

	sxg_config_set(adapter, TRUE);
	return 0;
}
#endif

/*
 * SXG DRIVER FUNCTIONS (below)
 *
 * sxg_initialize_adapter - Initialize adapter
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return - int
 */
static int sxg_initialize_adapter(struct adapter_t *adapter)
{
	u32 RssIds, IsrCount;
	u32 i;
	int status;
	int sxg_rcv_ring_size = SXG_RCV_RING_SIZE;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
		  adapter, 0, 0, 0);

	RssIds = 1;	/* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
	IsrCount = adapter->msi_enabled ? RssIds : 1;

	/*
	 * Sanity check SXG_UCODE_REGS structure definition to
	 * make sure the length is correct
	 */
	ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);

	/* Disable interrupts */
	SXG_DISABLE_ALL_INTERRUPTS(adapter);

	/* Set MTU */
	ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
	       (adapter->FrameSize == JUMBOMAXFRAME));
	WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);

	/* Set event ring base address and size */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
	WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);

	/* Per-ISR initialization */
	for (i = 0; i < IsrCount; i++) {
		u64 Addr;
		/* Set interrupt status pointer */
		Addr = adapter->PIsr + (i * sizeof(u32));
		WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
	}

	/* XMT ring zero index */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].SPSendIndex,
		    adapter->PXmtRingZeroIndex, 0);

	/* Per-RSS initialization */
	for (i = 0; i < RssIds; i++) {
		/* Release all event ring entries to the Microcode */
		WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
			  TRUE);
	}

	/* Transmit ring base and size */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
	WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);

	/* Receive ring base and size */
	WRITE_REG64(adapter,
		    adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
	if (adapter->JumboEnabled == TRUE)
		sxg_rcv_ring_size = SXG_JUMBO_RCV_RING_SIZE;
	WRITE_REG(adapter->UcodeRegs[0].RcvSize, sxg_rcv_ring_size, TRUE);

	/* Populate the card with receive buffers */
	sxg_stock_rcv_buffers(adapter);

	/*
	 * Initialize checksum offload capabilities. At the moment we always
	 * enable IP and TCP receive checksums on the card. Depending on the
	 * checksum configuration specified by the user, we can choose to
	 * report or ignore the checksum information provided by the card.
	 */
	WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
		  SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);

	adapter->flags |= (SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED);

	/* Initialize the MAC, XAUI */
	DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
	status = sxg_initialize_link(adapter);
	DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
		  status);
	if (status != STATUS_SUCCESS) {
		return (status);
	}
	/*
	 * Initialize Dead to FALSE.
	 * SlicCheckForHang or SlicDumpThread will take it from here.
	 */
	adapter->Dead = FALSE;
	adapter->PingOutstanding = FALSE;
	adapter->XmtFcEnabled = TRUE;
	adapter->RcvFcEnabled = TRUE;

	adapter->State = SXG_STATE_RUNNING;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
		  adapter, 0, 0, 0);
	return (STATUS_SUCCESS);
}
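
/*
 * Bring-up order used above, for quick reference: disable interrupts,
 * program the MTU, the event ring, the per-ISR status pointers and the
 * transmit/receive rings, pre-post receive buffers, enable receive
 * checksum offload, and finally bring up the MAC/XAUI link before
 * marking the adapter SXG_STATE_RUNNING.
 */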

/*
 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
 * the card. The caller should hold the RcvQLock
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	RcvDescriptorBlockHdr	- Descriptor block to fill
 *
 * Return
 *	status
 */
static int sxg_fill_descriptor_block(struct adapter_t *adapter,
		struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
{
	int i;	/* signed: the no_memory unwind below counts down through 0 */
	struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
	struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
	struct sxg_cmd *RingDescriptorCmd;
	struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
		  adapter, adapter->RcvBuffersOnCard,
		  adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);

	ASSERT(RcvDescriptorBlockHdr);

	/*
	 * If we don't have the resources to fill the descriptor block,
	 * return failure
	 */
	if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
	    SXG_RING_FULL(RcvRingInfo)) {
		adapter->Stats.NoMem++;
		return (STATUS_FAILURE);
	}
	/* Get a ring descriptor command */
	SXG_GET_CMD(RingZero,
		    RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
	ASSERT(RingDescriptorCmd);
	RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
	RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
	    RcvDescriptorBlockHdr->VirtualAddress;

	/* Fill in the descriptor block */
	for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
		SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
		ASSERT(RcvDataBufferHdr);
//		ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket);
		if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
			SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
						adapter->ReceiveBufferSize);
			if (RcvDataBufferHdr->skb)
				RcvDataBufferHdr->SxgDumbRcvPacket =
				    RcvDataBufferHdr->skb;
			else
				goto no_memory;
		}
		SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
		RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
		RcvDescriptorBlock->Descriptors[i].VirtualAddress =
		    (void *)RcvDataBufferHdr;

		RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
		    RcvDataBufferHdr->PhysicalAddress;
	}
	/* Add the descriptor block to receive descriptor ring 0 */
	RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;

	/*
	 * RcvBuffersOnCard is not protected via the receive lock (see
	 * sxg_process_event_queue). We don't want to grab a lock every time a
	 * buffer is returned to us, so we use atomic interlocked functions
	 * instead.
	 */
	adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
		  RcvDescriptorBlockHdr,
		  RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);

	WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
		  adapter, adapter->RcvBuffersOnCard,
		  adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
	return (STATUS_SUCCESS);
no_memory:
	for (; i >= 0; i--) {
		if (RcvDescriptorBlock->Descriptors[i].VirtualAddress) {
			RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
			    RcvDescriptorBlock->Descriptors[i].VirtualAddress;
			RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
			    (dma_addr_t)NULL;
			RcvDescriptorBlock->Descriptors[i].VirtualAddress =
			    NULL;
		}
		SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
	}
	RcvDescriptorBlockHdr->State = SXG_BUFFER_FREE;
	SXG_RETURN_CMD(RingZero, RcvRingInfo, RingDescriptorCmd,
		       RcvDescriptorBlockHdr);

	return (-ENOMEM);
}

/*
 * sxg_stock_rcv_buffers - Stock the card with receive buffers
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *
 * Return
 *	None
 */
static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
{
	struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
	int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
	int sxg_min_rcv_data_buffers = SXG_MIN_RCV_DATA_BUFFERS;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
		  adapter, adapter->RcvBuffersOnCard,
		  adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
	/*
	 * First, see if we have less than our minimum threshold of receive
	 * buffers, no allocation is already in progress, and we haven't
	 * exceeded our maximum; if so, get another block of buffers.
	 * None of this needs to be SMP safe.  It's round numbers.
	 */
	if (adapter->JumboEnabled == TRUE)
		sxg_min_rcv_data_buffers = SXG_MIN_JUMBO_RCV_DATA_BUFFERS;
	if ((adapter->FreeRcvBufferCount < sxg_min_rcv_data_buffers) &&
	    (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
	    (atomic_read(&adapter->pending_allocations) == 0)) {
		sxg_allocate_buffer_memory(adapter,
					   SXG_RCV_BLOCK_SIZE
					   (SXG_RCV_DATA_HDR_SIZE),
					   SXG_BUFFER_TYPE_RCV);
	}
	/* Now grab the RcvQLock lock and proceed */
	spin_lock(&adapter->RcvQLock);
	if (adapter->JumboEnabled)
		sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
	while (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
		struct list_entry *_ple;

		/* Get a descriptor block */
		RcvDescriptorBlockHdr = NULL;
		if (adapter->FreeRcvBlockCount) {
			_ple = RemoveHeadList(&adapter->FreeRcvBlocks);
			RcvDescriptorBlockHdr =
			    container_of(_ple,
					 struct sxg_rcv_descriptor_block_hdr,
					 FreeList);
			adapter->FreeRcvBlockCount--;
			RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
		}

		if (RcvDescriptorBlockHdr == NULL) {
			/* Bail out.. */
			adapter->Stats.NoMem++;
			break;
		}
		/* Fill in the descriptor block and give it to the card */
		if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr)
		    == STATUS_FAILURE) {
			/* Free the descriptor block */
			SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
						      RcvDescriptorBlockHdr);
			break;
		}
	}
	spin_unlock(&adapter->RcvQLock);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
		  adapter, adapter->RcvBuffersOnCard,
		  adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
}

/*
 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
 * completed by the microcode
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	Index		- Where the microcode is up to
 *
 * Return
 *	None
 */
static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
					   unsigned char Index)
{
	struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
	struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
	struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
	struct sxg_cmd *RingDescriptorCmd;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
		  adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);

	/* Now grab the RcvQLock lock and proceed */
	spin_lock(&adapter->RcvQLock);
	ASSERT(Index != RcvRingInfo->Tail);
	while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
					 RcvRingInfo->Tail) > 3) {
		/*
		 * Locate the current Cmd (ring descriptor entry), and
		 * associated receive descriptor block, and advance
		 * the tail
		 */
		SXG_RETURN_CMD(RingZero,
			       RcvRingInfo,
			       RingDescriptorCmd, RcvDescriptorBlockHdr);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
			  RcvRingInfo->Head, RcvRingInfo->Tail,
			  RingDescriptorCmd, RcvDescriptorBlockHdr);

		/* Clear the SGL field */
		RingDescriptorCmd->Sgl = 0;
		/*
		 * Attempt to refill it and hand it right back to the
		 * card. If we fail to refill it, free the descriptor block
		 * header. The card will be restocked later via the
		 * RcvBuffersOnCard test
		 */
		if (sxg_fill_descriptor_block(adapter,
			RcvDescriptorBlockHdr) == STATUS_FAILURE)
			SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
						      RcvDescriptorBlockHdr);
	}
	spin_unlock(&adapter->RcvQLock);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
		  adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
}
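
/*
 * Receive-buffer lifecycle, pieced together from the routines above:
 * sxg_stock_rcv_buffers() tops the card up, kicking off an asynchronous
 * block allocation when the free count drops below the minimum;
 * sxg_fill_descriptor_block() binds data buffers into a descriptor block
 * and posts it to the card via the RcvCmd register; and
 * sxg_complete_descriptor_blocks() reclaims blocks the microcode has
 * consumed, refilling and re-posting each one immediately when resources
 * allow.
 */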

/*
 * Read the statistics which the card has been maintaining.
 */
void sxg_collect_statistics(struct adapter_t *adapter)
{
	if (adapter->ucode_stats) {
		WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats,
			    adapter->pucode_stats, 0);
		adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
		adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
		adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
	}
}

static struct net_device_stats *sxg_get_stats(struct net_device *dev)
{
	struct adapter_t *adapter = netdev_priv(dev);

	sxg_collect_statistics(adapter);
	return (&adapter->stats);
}

static void sxg_watchdog(unsigned long data)
{
	struct adapter_t *adapter = (struct adapter_t *) data;

	if (adapter->state != ADAPT_DOWN) {
		sxg_link_event(adapter);
		/* Reset the timer */
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
	}
}

static void sxg_update_link_status(struct work_struct *work)
{
	struct adapter_t *adapter = (struct adapter_t *)container_of
					(work, struct adapter_t,
					 update_link_status);

	if (likely(adapter->link_status_changed)) {
		sxg_link_event(adapter);
		adapter->link_status_changed = 0;
	}
}

static struct pci_driver sxg_driver = {
	.name = sxg_driver_name,
	.id_table = sxg_pci_tbl,
	.probe = sxg_entry_probe,
	.remove = sxg_entry_remove,
#if SXG_POWER_MANAGEMENT_ENABLED
	.suspend = sxgpm_suspend,
	.resume = sxgpm_resume,
#endif
	/* .shutdown = slic_shutdown,  MOOK_INVESTIGATE */
};

static int __init sxg_module_init(void)
{
	sxg_init_driver();

	if (debug >= 0)
		sxg_debug = debug;

	return pci_register_driver(&sxg_driver);
}

static void __exit sxg_module_cleanup(void)
{
	pci_unregister_driver(&sxg_driver);
}

module_init(sxg_module_init);
module_exit(sxg_module_cleanup);