sfc: Removed unnecessary UL suffixes on 0 literals
[linux-2.6-block.git] / drivers/net/sfc/efx.c
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/delay.h>
16#include <linux/notifier.h>
17#include <linux/ip.h>
18#include <linux/tcp.h>
19#include <linux/in.h>
20#include <linux/crc32.h>
21#include <linux/ethtool.h>
22#include "net_driver.h"
23#include "gmii.h"
24#include "ethtool.h"
25#include "tx.h"
26#include "rx.h"
27#include "efx.h"
28#include "mdio_10g.h"
29#include "falcon.h"
30#include "workarounds.h"
31#include "mac.h"
32
33#define EFX_MAX_MTU (9 * 1024)
34
35/* RX slow fill workqueue. If memory allocation fails in the fast path,
36 * a work item is pushed onto this work queue to retry the allocation later,
37 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
38 * workqueue, there is nothing to be gained in making it per-NIC.
39 */
40static struct workqueue_struct *refill_workqueue;
41
42/**************************************************************************
43 *
44 * Configurable values
45 *
46 *************************************************************************/
47
48/*
49 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
50 *
51 * This sets the default for new devices. It can be controlled later
52 * using ethtool.
53 */
54static int lro = 1;
55module_param(lro, int, 0644);
56MODULE_PARM_DESC(lro, "Large receive offload acceleration");
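/* Illustrative example (not in the original source): LRO can be turned
 * off for all new devices at module load time, e.g.
 *     modprobe sfc lro=0
 */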
57
58/*
59 * Use separate channels for TX and RX events
60 *
61 * Set this to 1 to use separate channels for TX and RX. It allows us to
62 * apply a higher level of interrupt moderation to TX events.
63 *
64 * This is forced to 0 for MSI interrupt mode as the interrupt vector
65 * is not written
66 */
67static unsigned int separate_tx_and_rx_channels = 1;
68
69/* This is the weight assigned to each of the (per-channel) virtual
70 * NAPI devices.
71 */
72static int napi_weight = 64;
73
74/* This is the time (in jiffies) between invocations of the hardware
75 * monitor, which checks for known hardware bugs and resets the
76 * hardware and driver as necessary.
77 */
78unsigned int efx_monitor_interval = 1 * HZ;
79
80/* This controls whether or not the hardware monitor will trigger a
81 * reset when it detects an error condition.
82 */
83static unsigned int monitor_reset = 1;
84
85/* This controls whether or not the driver will initialise devices
86 * with invalid MAC addresses stored in the EEPROM or flash. If true,
87 * such devices will be initialised with a random locally-generated
88 * MAC address. This allows for loading the sfc_mtd driver to
89 * reprogram the flash, even if the flash contents (including the MAC
90 * address) have previously been erased.
91 */
92static unsigned int allow_bad_hwaddr;
93
94/* Initial interrupt moderation settings. They can be modified after
95 * module load with ethtool.
96 *
97 * The default for RX should strike a balance between increasing the
98 * round-trip latency and reducing overhead.
99 */
100static unsigned int rx_irq_mod_usec = 60;
101
102/* Initial interrupt moderation settings. They can be modified after
103 * module load with ethtool.
104 *
105 * This default is chosen to ensure that a 10G link does not go idle
106 * while a TX queue is stopped after it has become full. A queue is
107 * restarted when it drops below half full. The time this takes (assuming
108 * worst case 3 descriptors per packet and 1024 descriptors) is
109 * 512 / 3 * 1.2 = 205 usec.
110 */
111static unsigned int tx_irq_mod_usec = 150;
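/* Illustrative example (not in the original source): after module load,
 * both settings can be changed through ethtool's interrupt coalescing
 * interface (the interface name below is a placeholder), e.g.
 *     ethtool -C eth0 rx-usecs 60 tx-usecs 150
 */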
112
113/* This is the first interrupt mode to try out of:
114 * 0 => MSI-X
115 * 1 => MSI
116 * 2 => legacy
117 */
118static unsigned int interrupt_mode;
119
120/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
121 * i.e. the number of CPUs among which we may distribute simultaneous
122 * interrupt handling.
123 *
124 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
125 * The default (0) means to assign an interrupt to each package (level II cache)
126 */
127static unsigned int rss_cpus;
128module_param(rss_cpus, uint, 0444);
129MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
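/* Illustrative example (not in the original source): to spread RX
 * interrupt handling across four CPUs, load with
 *     modprobe sfc rss_cpus=4
 */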
130
131/**************************************************************************
132 *
133 * Utility functions and prototypes
134 *
135 *************************************************************************/
136static void efx_remove_channel(struct efx_channel *channel);
137static void efx_remove_port(struct efx_nic *efx);
138static void efx_fini_napi(struct efx_nic *efx);
139static void efx_fini_channels(struct efx_nic *efx);
140
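/* Descriptive note (added): assert that the RTNL lock is held whenever
 * the device is live (STATE_RUNNING or STATE_RESETTING); reset and
 * reconfiguration paths rely on the RTNL for serialisation.
 */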
141#define EFX_ASSERT_RESET_SERIALISED(efx) \
142 do { \
143 if ((efx->state == STATE_RUNNING) || \
144 (efx->state == STATE_RESETTING)) \
145 ASSERT_RTNL(); \
146 } while (0)
147
148/**************************************************************************
149 *
150 * Event queue processing
151 *
152 *************************************************************************/
153
154/* Process channel's event queue
155 *
156 * This function is responsible for processing the event queue of a
157 * single channel. The caller must guarantee that this function will
158 * never be concurrently called more than once on the same channel,
159 * though different channels may be being processed concurrently.
160 */
161static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
162{
163 int rxdmaqs;
164 struct efx_rx_queue *rx_queue;
165
166 if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
167 !channel->enabled))
168 return rx_quota;
169
170 rxdmaqs = falcon_process_eventq(channel, &rx_quota);
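	/* Descriptive note (added): falcon_process_eventq() consumes part of
	 * rx_quota as it handles RX events and returns a bitmask of the RX
	 * DMA queues that saw completions; those queues are refilled below.
	 */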
171
172 /* Deliver last RX packet. */
173 if (channel->rx_pkt) {
174 __efx_rx_packet(channel, channel->rx_pkt,
175 channel->rx_pkt_csummed);
176 channel->rx_pkt = NULL;
177 }
178
179 efx_flush_lro(channel);
180 efx_rx_strategy(channel);
181
182 /* Refill descriptor rings as necessary */
183 rx_queue = &channel->efx->rx_queue[0];
184 while (rxdmaqs) {
185 if (rxdmaqs & 0x01)
186 efx_fast_push_rx_descriptors(rx_queue);
187 rx_queue++;
188 rxdmaqs >>= 1;
189 }
190
191 return rx_quota;
192}
193
194/* Mark channel as finished processing
195 *
196 * Note that since we will not receive further interrupts for this
197 * channel before we finish processing and call the eventq_read_ack()
198 * method, there is no need to use the interrupt hold-off timers.
199 */
200static inline void efx_channel_processed(struct efx_channel *channel)
201{
202 /* Write to EVQ_RPTR_REG. If a new event arrived in a race
203 * with finishing processing, a new interrupt will be raised.
204 */
205 channel->work_pending = 0;
206 smp_wmb(); /* Ensure channel updated before any new interrupt. */
207 falcon_eventq_read_ack(channel);
208}
209
210/* NAPI poll handler
211 *
212 * NAPI guarantees serialisation of polls of the same device, which
213 * provides the guarantee required by efx_process_channel().
214 */
215static int efx_poll(struct napi_struct *napi, int budget)
216{
217 struct efx_channel *channel =
218 container_of(napi, struct efx_channel, napi_str);
219 struct net_device *napi_dev = channel->napi_dev;
220 int unused;
221 int rx_packets;
222
223 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
224 channel->channel, raw_smp_processor_id());
225
226 unused = efx_process_channel(channel, budget);
227 rx_packets = (budget - unused);
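	/* Descriptive note (added): efx_process_channel() returns the unused
	 * part of the quota, so budget - unused is the number of packets
	 * handled in this poll.  Returning less than budget lets NAPI
	 * complete, after which efx_channel_processed() re-arms the event
	 * queue interrupt.
	 */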
228
229 if (rx_packets < budget) {
230 /* There is no race here; although napi_disable() will
231 * only wait for netif_rx_complete(), this isn't a problem
232 * since efx_channel_processed() will have no effect if
233 * interrupts have already been disabled.
234 */
235 netif_rx_complete(napi_dev, napi);
236 efx_channel_processed(channel);
237 }
238
239 return rx_packets;
240}
241
242/* Process the eventq of the specified channel immediately on this CPU
243 *
244 * Disable hardware generated interrupts, wait for any existing
 245 * processing to finish, then directly poll (and ack) the eventq.
246 * Finally reenable NAPI and interrupts.
247 *
248 * Since we are touching interrupts the caller should hold the suspend lock
249 */
250void efx_process_channel_now(struct efx_channel *channel)
251{
252 struct efx_nic *efx = channel->efx;
253
254 BUG_ON(!channel->used_flags);
255 BUG_ON(!channel->enabled);
256
257 /* Disable interrupts and wait for ISRs to complete */
258 falcon_disable_interrupts(efx);
259 if (efx->legacy_irq)
260 synchronize_irq(efx->legacy_irq);
261 if (channel->has_interrupt && channel->irq)
262 synchronize_irq(channel->irq);
263
264 /* Wait for any NAPI processing to complete */
265 napi_disable(&channel->napi_str);
266
267 /* Poll the channel */
 268	efx_process_channel(channel, efx->type->evq_size);
269
270 /* Ack the eventq. This may cause an interrupt to be generated
271 * when they are reenabled */
272 efx_channel_processed(channel);
273
274 napi_enable(&channel->napi_str);
275 falcon_enable_interrupts(efx);
276}
277
278/* Create event queue
279 * Event queue memory allocations are done only once. If the channel
280 * is reset, the memory buffer will be reused; this guards against
281 * errors during channel reset and also simplifies interrupt handling.
282 */
283static int efx_probe_eventq(struct efx_channel *channel)
284{
285 EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
286
287 return falcon_probe_eventq(channel);
288}
289
290/* Prepare channel's event queue */
291static int efx_init_eventq(struct efx_channel *channel)
292{
293 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
294
295 channel->eventq_read_ptr = 0;
296
297 return falcon_init_eventq(channel);
298}
299
300static void efx_fini_eventq(struct efx_channel *channel)
301{
302 EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
303
304 falcon_fini_eventq(channel);
305}
306
307static void efx_remove_eventq(struct efx_channel *channel)
308{
309 EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
310
311 falcon_remove_eventq(channel);
312}
313
314/**************************************************************************
315 *
316 * Channel handling
317 *
318 *************************************************************************/
319
320static int efx_probe_channel(struct efx_channel *channel)
321{
322 struct efx_tx_queue *tx_queue;
323 struct efx_rx_queue *rx_queue;
324 int rc;
325
326 EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);
327
328 rc = efx_probe_eventq(channel);
329 if (rc)
330 goto fail1;
331
332 efx_for_each_channel_tx_queue(tx_queue, channel) {
333 rc = efx_probe_tx_queue(tx_queue);
334 if (rc)
335 goto fail2;
336 }
337
338 efx_for_each_channel_rx_queue(rx_queue, channel) {
339 rc = efx_probe_rx_queue(rx_queue);
340 if (rc)
341 goto fail3;
342 }
343
344 channel->n_rx_frm_trunc = 0;
345
346 return 0;
347
348 fail3:
349 efx_for_each_channel_rx_queue(rx_queue, channel)
350 efx_remove_rx_queue(rx_queue);
351 fail2:
352 efx_for_each_channel_tx_queue(tx_queue, channel)
353 efx_remove_tx_queue(tx_queue);
354 fail1:
355 return rc;
356}
357
358
359/* Channels are shutdown and reinitialised whilst the NIC is running
360 * to propagate configuration changes (mtu, checksum offload), or
361 * to clear hardware error conditions
362 */
363static int efx_init_channels(struct efx_nic *efx)
364{
365 struct efx_tx_queue *tx_queue;
366 struct efx_rx_queue *rx_queue;
367 struct efx_channel *channel;
368 int rc = 0;
369
370 /* Calculate the rx buffer allocation parameters required to
371 * support the current MTU, including padding for header
372 * alignment and overruns.
373 */
374 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
375 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
376 efx->type->rx_buffer_padding);
377 efx->rx_buffer_order = get_order(efx->rx_buffer_len);
378
379 /* Initialise the channels */
380 efx_for_each_channel(channel, efx) {
381 EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
382
383 rc = efx_init_eventq(channel);
384 if (rc)
385 goto err;
386
387 efx_for_each_channel_tx_queue(tx_queue, channel) {
388 rc = efx_init_tx_queue(tx_queue);
389 if (rc)
390 goto err;
391 }
392
393 /* The rx buffer allocation strategy is MTU dependent */
394 efx_rx_strategy(channel);
395
396 efx_for_each_channel_rx_queue(rx_queue, channel) {
397 rc = efx_init_rx_queue(rx_queue);
398 if (rc)
399 goto err;
400 }
401
402 WARN_ON(channel->rx_pkt != NULL);
403 efx_rx_strategy(channel);
404 }
405
406 return 0;
407
408 err:
409 EFX_ERR(efx, "failed to initialise channel %d\n",
410 channel ? channel->channel : -1);
411 efx_fini_channels(efx);
412 return rc;
413}
414
415/* This enables event queue processing and packet transmission.
416 *
417 * Note that this function is not allowed to fail, since that would
418 * introduce too much complexity into the suspend/resume path.
419 */
420static void efx_start_channel(struct efx_channel *channel)
421{
422 struct efx_rx_queue *rx_queue;
423
424 EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
425
426 if (!(channel->efx->net_dev->flags & IFF_UP))
427 netif_napi_add(channel->napi_dev, &channel->napi_str,
428 efx_poll, napi_weight);
429
430 channel->work_pending = 0;
431 channel->enabled = 1;
432 smp_wmb(); /* ensure channel updated before first interrupt */
433
434 napi_enable(&channel->napi_str);
435
436 /* Load up RX descriptors */
437 efx_for_each_channel_rx_queue(rx_queue, channel)
438 efx_fast_push_rx_descriptors(rx_queue);
439}
440
441/* This disables event queue processing and packet transmission.
442 * This function does not guarantee that all queue processing
443 * (e.g. RX refill) is complete.
444 */
445static void efx_stop_channel(struct efx_channel *channel)
446{
447 struct efx_rx_queue *rx_queue;
448
449 if (!channel->enabled)
450 return;
451
452 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
453
454 channel->enabled = 0;
455 napi_disable(&channel->napi_str);
456
457 /* Ensure that any worker threads have exited or will be no-ops */
458 efx_for_each_channel_rx_queue(rx_queue, channel) {
459 spin_lock_bh(&rx_queue->add_lock);
460 spin_unlock_bh(&rx_queue->add_lock);
461 }
462}
463
464static void efx_fini_channels(struct efx_nic *efx)
465{
466 struct efx_channel *channel;
467 struct efx_tx_queue *tx_queue;
468 struct efx_rx_queue *rx_queue;
469
470 EFX_ASSERT_RESET_SERIALISED(efx);
471 BUG_ON(efx->port_enabled);
472
473 efx_for_each_channel(channel, efx) {
474 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
475
476 efx_for_each_channel_rx_queue(rx_queue, channel)
477 efx_fini_rx_queue(rx_queue);
478 efx_for_each_channel_tx_queue(tx_queue, channel)
479 efx_fini_tx_queue(tx_queue);
480 }
481
482 /* Do the event queues last so that we can handle flush events
483 * for all DMA queues. */
484 efx_for_each_channel(channel, efx) {
485 EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
486
487 efx_fini_eventq(channel);
488 }
489}
490
491static void efx_remove_channel(struct efx_channel *channel)
492{
493 struct efx_tx_queue *tx_queue;
494 struct efx_rx_queue *rx_queue;
495
496 EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);
497
498 efx_for_each_channel_rx_queue(rx_queue, channel)
499 efx_remove_rx_queue(rx_queue);
500 efx_for_each_channel_tx_queue(tx_queue, channel)
501 efx_remove_tx_queue(tx_queue);
502 efx_remove_eventq(channel);
503
504 channel->used_flags = 0;
505}
506
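/* Descriptive note (added): schedule a retry of RX buffer allocation on
 * the shared refill workqueue; 'delay' is expressed in jiffies.
 */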
507void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
508{
509 queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
510}
511
512/**************************************************************************
513 *
514 * Port handling
515 *
516 **************************************************************************/
517
518/* This ensures that the kernel is kept informed (via
519 * netif_carrier_on/off) of the link status, and also maintains the
520 * link status's stop on the port's TX queue.
521 */
522static void efx_link_status_changed(struct efx_nic *efx)
523{
524 int carrier_ok;
525
526 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
527 * that no events are triggered between unregister_netdev() and the
528 * driver unloading. A more general condition is that NETDEV_CHANGE
529 * can only be generated between NETDEV_UP and NETDEV_DOWN */
530 if (!netif_running(efx->net_dev))
531 return;
532
533 carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
534 if (efx->link_up != carrier_ok) {
535 efx->n_link_state_changes++;
536
537 if (efx->link_up)
538 netif_carrier_on(efx->net_dev);
539 else
540 netif_carrier_off(efx->net_dev);
541 }
542
543 /* Status message for kernel log */
544 if (efx->link_up) {
545 struct mii_if_info *gmii = &efx->mii;
546 unsigned adv, lpa;
547 /* NONE here means direct XAUI from the controller, with no
548 * MDIO-attached device we can query. */
549 if (efx->phy_type != PHY_TYPE_NONE) {
550 adv = gmii_advertised(gmii);
551 lpa = gmii_lpa(gmii);
552 } else {
553 lpa = GM_LPA_10000 | LPA_DUPLEX;
554 adv = lpa;
555 }
556 EFX_INFO(efx, "link up at %dMbps %s-duplex "
557 "(adv %04x lpa %04x) (MTU %d)%s\n",
558 (efx->link_options & GM_LPA_10000 ? 10000 :
559 (efx->link_options & GM_LPA_1000 ? 1000 :
560 (efx->link_options & GM_LPA_100 ? 100 :
561 10))),
562 (efx->link_options & GM_LPA_DUPLEX ?
563 "full" : "half"),
564 adv, lpa,
565 efx->net_dev->mtu,
566 (efx->promiscuous ? " [PROMISC]" : ""));
567 } else {
568 EFX_INFO(efx, "link down\n");
569 }
570
571}
572
573/* This call reinitialises the MAC to pick up new PHY settings. The
574 * caller must hold the mac_lock */
575static void __efx_reconfigure_port(struct efx_nic *efx)
576{
577 WARN_ON(!mutex_is_locked(&efx->mac_lock));
578
579 EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
580 raw_smp_processor_id());
581
582 falcon_reconfigure_xmac(efx);
583
584 /* Inform kernel of loss/gain of carrier */
585 efx_link_status_changed(efx);
586}
587
588/* Reinitialise the MAC to pick up new PHY settings, even if the port is
589 * disabled. */
590void efx_reconfigure_port(struct efx_nic *efx)
591{
592 EFX_ASSERT_RESET_SERIALISED(efx);
593
594 mutex_lock(&efx->mac_lock);
595 __efx_reconfigure_port(efx);
596 mutex_unlock(&efx->mac_lock);
597}
598
599/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
600 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
601 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
602static void efx_reconfigure_work(struct work_struct *data)
603{
604 struct efx_nic *efx = container_of(data, struct efx_nic,
605 reconfigure_work);
606
607 mutex_lock(&efx->mac_lock);
608 if (efx->port_enabled)
609 __efx_reconfigure_port(efx);
610 mutex_unlock(&efx->mac_lock);
611}
612
613static int efx_probe_port(struct efx_nic *efx)
614{
615 int rc;
616
617 EFX_LOG(efx, "create port\n");
618
619 /* Connect up MAC/PHY operations table and read MAC address */
620 rc = falcon_probe_port(efx);
621 if (rc)
622 goto err;
623
624 /* Sanity check MAC address */
625 if (is_valid_ether_addr(efx->mac_address)) {
626 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
627 } else {
628 DECLARE_MAC_BUF(mac);
629
630 EFX_ERR(efx, "invalid MAC address %s\n",
631 print_mac(mac, efx->mac_address));
632 if (!allow_bad_hwaddr) {
633 rc = -EINVAL;
634 goto err;
635 }
636 random_ether_addr(efx->net_dev->dev_addr);
637 EFX_INFO(efx, "using locally-generated MAC %s\n",
638 print_mac(mac, efx->net_dev->dev_addr));
639 }
640
641 return 0;
642
643 err:
644 efx_remove_port(efx);
645 return rc;
646}
647
648static int efx_init_port(struct efx_nic *efx)
649{
650 int rc;
651
652 EFX_LOG(efx, "init port\n");
653
654 /* Initialise the MAC and PHY */
655 rc = falcon_init_xmac(efx);
656 if (rc)
657 return rc;
658
659 efx->port_initialized = 1;
660
661 /* Reconfigure port to program MAC registers */
662 falcon_reconfigure_xmac(efx);
663
664 return 0;
665}
666
667/* Allow efx_reconfigure_port() to be scheduled, and close the window
668 * between efx_stop_port and efx_flush_all whereby a previously scheduled
669 * efx_reconfigure_port() may have been cancelled */
670static void efx_start_port(struct efx_nic *efx)
671{
672 EFX_LOG(efx, "start port\n");
673 BUG_ON(efx->port_enabled);
674
675 mutex_lock(&efx->mac_lock);
676 efx->port_enabled = 1;
677 __efx_reconfigure_port(efx);
678 mutex_unlock(&efx->mac_lock);
679}
680
681/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
682 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
683 * efx_reconfigure_work can still be scheduled via NAPI processing
684 * until efx_flush_all() is called */
685static void efx_stop_port(struct efx_nic *efx)
686{
687 EFX_LOG(efx, "stop port\n");
688
689 mutex_lock(&efx->mac_lock);
690 efx->port_enabled = 0;
691 mutex_unlock(&efx->mac_lock);
692
693 /* Serialise against efx_set_multicast_list() */
694 if (NET_DEV_REGISTERED(efx)) {
695 netif_tx_lock_bh(efx->net_dev);
696 netif_tx_unlock_bh(efx->net_dev);
697 }
698}
699
700static void efx_fini_port(struct efx_nic *efx)
701{
702 EFX_LOG(efx, "shut down port\n");
703
704 if (!efx->port_initialized)
705 return;
706
707 falcon_fini_xmac(efx);
708 efx->port_initialized = 0;
709
710 efx->link_up = 0;
711 efx_link_status_changed(efx);
712}
713
714static void efx_remove_port(struct efx_nic *efx)
715{
716 EFX_LOG(efx, "destroying port\n");
717
718 falcon_remove_port(efx);
719}
720
721/**************************************************************************
722 *
723 * NIC handling
724 *
725 **************************************************************************/
726
727/* This configures the PCI device to enable I/O and DMA. */
728static int efx_init_io(struct efx_nic *efx)
729{
730 struct pci_dev *pci_dev = efx->pci_dev;
731 dma_addr_t dma_mask = efx->type->max_dma_mask;
732 int rc;
733
734 EFX_LOG(efx, "initialising I/O\n");
735
736 rc = pci_enable_device(pci_dev);
737 if (rc) {
738 EFX_ERR(efx, "failed to enable PCI device\n");
739 goto fail1;
740 }
741
742 pci_set_master(pci_dev);
743
744 /* Set the PCI DMA mask. Try all possibilities from our
745 * genuine mask down to 32 bits, because some architectures
 746 * (e.g. x86_64 with iommu_sac_force set) will allow 40-bit
 747 * masks even though they reject 46-bit masks.
748 */
749 while (dma_mask > 0x7fffffffUL) {
750 if (pci_dma_supported(pci_dev, dma_mask) &&
751 ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
752 break;
753 dma_mask >>= 1;
754 }
755 if (rc) {
756 EFX_ERR(efx, "could not find a suitable DMA mask\n");
757 goto fail2;
758 }
759 EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
760 rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
761 if (rc) {
762 /* pci_set_consistent_dma_mask() is not *allowed* to
763 * fail with a mask that pci_set_dma_mask() accepted,
764 * but just in case...
765 */
766 EFX_ERR(efx, "failed to set consistent DMA mask\n");
767 goto fail2;
768 }
769
770 efx->membase_phys = pci_resource_start(efx->pci_dev,
771 efx->type->mem_bar);
772 rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
773 if (rc) {
774 EFX_ERR(efx, "request for memory BAR failed\n");
775 rc = -EIO;
776 goto fail3;
777 }
778 efx->membase = ioremap_nocache(efx->membase_phys,
779 efx->type->mem_map_size);
780 if (!efx->membase) {
781 EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
782 efx->type->mem_bar, efx->membase_phys,
783 efx->type->mem_map_size);
784 rc = -ENOMEM;
785 goto fail4;
786 }
787 EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
788 efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
789 efx->membase);
790
791 return 0;
792
793 fail4:
794 release_mem_region(efx->membase_phys, efx->type->mem_map_size);
795 fail3:
 796	efx->membase_phys = 0;
797 fail2:
798 pci_disable_device(efx->pci_dev);
799 fail1:
800 return rc;
801}
802
803static void efx_fini_io(struct efx_nic *efx)
804{
805 EFX_LOG(efx, "shutting down I/O\n");
806
807 if (efx->membase) {
808 iounmap(efx->membase);
809 efx->membase = NULL;
810 }
811
812 if (efx->membase_phys) {
813 pci_release_region(efx->pci_dev, efx->type->mem_bar);
 814	efx->membase_phys = 0;
815 }
816
817 pci_disable_device(efx->pci_dev);
818}
819
820/* Probe the number and type of interrupts we are able to obtain. */
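/* Descriptive note (added): the logic below tries MSI-X first (one vector
 * per RSS queue), falls back to single-vector MSI, and finally to a
 * legacy INTx interrupt shared by all channels.
 */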
821static void efx_probe_interrupts(struct efx_nic *efx)
822{
823 int max_channel = efx->type->phys_addr_channels - 1;
824 struct msix_entry xentries[EFX_MAX_CHANNELS];
825 int rc, i;
826
827 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
828 BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
829
830 efx->rss_queues = rss_cpus ? rss_cpus : num_online_cpus();
831 efx->rss_queues = min(efx->rss_queues, max_channel + 1);
832 efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
833
 834		/* Request the maximum number of MSI-X interrupts, and fill out
 835		 * the channel interrupt information from the allowed allocation */
836 for (i = 0; i < efx->rss_queues; i++)
837 xentries[i].entry = i;
838 rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
839 if (rc > 0) {
840 EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
841 efx->rss_queues = rc;
842 rc = pci_enable_msix(efx->pci_dev, xentries,
843 efx->rss_queues);
844 }
845
846 if (rc == 0) {
847 for (i = 0; i < efx->rss_queues; i++) {
848 efx->channel[i].has_interrupt = 1;
849 efx->channel[i].irq = xentries[i].vector;
850 }
851 } else {
852 /* Fall back to single channel MSI */
853 efx->interrupt_mode = EFX_INT_MODE_MSI;
854 EFX_ERR(efx, "could not enable MSI-X\n");
855 }
856 }
857
858 /* Try single interrupt MSI */
859 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
860 efx->rss_queues = 1;
861 rc = pci_enable_msi(efx->pci_dev);
862 if (rc == 0) {
863 efx->channel[0].irq = efx->pci_dev->irq;
864 efx->channel[0].has_interrupt = 1;
865 } else {
866 EFX_ERR(efx, "could not enable MSI\n");
867 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
868 }
869 }
870
871 /* Assume legacy interrupts */
872 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
873 efx->rss_queues = 1;
874 /* Every channel is interruptible */
875 for (i = 0; i < EFX_MAX_CHANNELS; i++)
876 efx->channel[i].has_interrupt = 1;
877 efx->legacy_irq = efx->pci_dev->irq;
878 }
879}
880
881static void efx_remove_interrupts(struct efx_nic *efx)
882{
883 struct efx_channel *channel;
884
885 /* Remove MSI/MSI-X interrupts */
886 efx_for_each_channel_with_interrupt(channel, efx)
887 channel->irq = 0;
888 pci_disable_msi(efx->pci_dev);
889 pci_disable_msix(efx->pci_dev);
890
891 /* Remove legacy interrupt */
892 efx->legacy_irq = 0;
893}
894
895/* Select number of used resources
896 * Should be called after probe_interrupts()
897 */
898static void efx_select_used(struct efx_nic *efx)
899{
900 struct efx_tx_queue *tx_queue;
901 struct efx_rx_queue *rx_queue;
902 int i;
903
904 /* TX queues. One per port per channel with TX capability
905 * (more than one per port won't work on Linux, due to out
906 * of order issues... but will be fine on Solaris)
907 */
908 tx_queue = &efx->tx_queue[0];
909
910 /* Perform this for each channel with TX capabilities.
911 * At the moment, we only support a single TX queue
912 */
913 tx_queue->used = 1;
914 if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
915 tx_queue->channel = &efx->channel[1];
916 else
917 tx_queue->channel = &efx->channel[0];
918 tx_queue->channel->used_flags |= EFX_USED_BY_TX;
919 tx_queue++;
920
921 /* RX queues. Each has a dedicated channel. */
922 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
923 rx_queue = &efx->rx_queue[i];
924
925 if (i < efx->rss_queues) {
926 rx_queue->used = 1;
927 /* If we allow multiple RX queues per channel
928 * we need to decide that here
929 */
930 rx_queue->channel = &efx->channel[rx_queue->queue];
931 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
932 rx_queue++;
933 }
934 }
935}
936
937static int efx_probe_nic(struct efx_nic *efx)
938{
939 int rc;
940
941 EFX_LOG(efx, "creating NIC\n");
942
943 /* Carry out hardware-type specific initialisation */
944 rc = falcon_probe_nic(efx);
945 if (rc)
946 return rc;
947
948 /* Determine the number of channels and RX queues by trying to hook
949 * in MSI-X interrupts. */
950 efx_probe_interrupts(efx);
951
952 /* Determine number of RX queues and TX queues */
953 efx_select_used(efx);
954
955 /* Initialise the interrupt moderation settings */
956 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
957
958 return 0;
959}
960
961static void efx_remove_nic(struct efx_nic *efx)
962{
963 EFX_LOG(efx, "destroying NIC\n");
964
965 efx_remove_interrupts(efx);
966 falcon_remove_nic(efx);
967}
968
969/**************************************************************************
970 *
971 * NIC startup/shutdown
972 *
973 *************************************************************************/
974
975static int efx_probe_all(struct efx_nic *efx)
976{
977 struct efx_channel *channel;
978 int rc;
979
980 /* Create NIC */
981 rc = efx_probe_nic(efx);
982 if (rc) {
983 EFX_ERR(efx, "failed to create NIC\n");
984 goto fail1;
985 }
986
987 /* Create port */
988 rc = efx_probe_port(efx);
989 if (rc) {
990 EFX_ERR(efx, "failed to create port\n");
991 goto fail2;
992 }
993
994 /* Create channels */
995 efx_for_each_channel(channel, efx) {
996 rc = efx_probe_channel(channel);
997 if (rc) {
998 EFX_ERR(efx, "failed to create channel %d\n",
999 channel->channel);
1000 goto fail3;
1001 }
1002 }
1003
1004 return 0;
1005
1006 fail3:
1007 efx_for_each_channel(channel, efx)
1008 efx_remove_channel(channel);
1009 efx_remove_port(efx);
1010 fail2:
1011 efx_remove_nic(efx);
1012 fail1:
1013 return rc;
1014}
1015
1016/* Called after previous invocation(s) of efx_stop_all, restarts the
1017 * port, kernel transmit queue, NAPI processing and hardware interrupts,
1018 * and ensures that the port is scheduled to be reconfigured.
1019 * This function is safe to call multiple times when the NIC is in any
1020 * state. */
1021static void efx_start_all(struct efx_nic *efx)
1022{
1023 struct efx_channel *channel;
1024
1025 EFX_ASSERT_RESET_SERIALISED(efx);
1026
1027 /* Check that it is appropriate to restart the interface. All
1028 * of these flags are safe to read under just the rtnl lock */
1029 if (efx->port_enabled)
1030 return;
1031 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1032 return;
1033 if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
1034 return;
1035
1036 /* Mark the port as enabled so port reconfigurations can start, then
1037 * restart the transmit interface early so the watchdog timer stops */
1038 efx_start_port(efx);
1039 efx_wake_queue(efx);
1040
1041 efx_for_each_channel(channel, efx)
1042 efx_start_channel(channel);
1043
1044 falcon_enable_interrupts(efx);
1045
1046 /* Start hardware monitor if we're in RUNNING */
1047 if (efx->state == STATE_RUNNING)
1048 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1049 efx_monitor_interval);
1050}
1051
1052/* Flush all delayed work. Should only be called when no more delayed work
1053 * will be scheduled. This doesn't flush pending online resets (efx_reset),
1054 * since we're holding the rtnl_lock at this point. */
1055static void efx_flush_all(struct efx_nic *efx)
1056{
1057 struct efx_rx_queue *rx_queue;
1058
1059 /* Make sure the hardware monitor is stopped */
1060 cancel_delayed_work_sync(&efx->monitor_work);
1061
1062 /* Ensure that all RX slow refills are complete. */
1063 efx_for_each_rx_queue(rx_queue, efx) {
1064 cancel_delayed_work_sync(&rx_queue->work);
1065 }
1066
1067 /* Stop scheduled port reconfigurations */
1068 cancel_work_sync(&efx->reconfigure_work);
1069
1070}
1071
1072/* Quiesce hardware and software without bringing the link down.
 1073 * Safe to call multiple times, when the NIC and interface are in any
1074 * state. The caller is guaranteed to subsequently be in a position
1075 * to modify any hardware and software state they see fit without
1076 * taking locks. */
1077static void efx_stop_all(struct efx_nic *efx)
1078{
1079 struct efx_channel *channel;
1080
1081 EFX_ASSERT_RESET_SERIALISED(efx);
1082
1083 /* port_enabled can be read safely under the rtnl lock */
1084 if (!efx->port_enabled)
1085 return;
1086
1087 /* Disable interrupts and wait for ISR to complete */
1088 falcon_disable_interrupts(efx);
1089 if (efx->legacy_irq)
1090 synchronize_irq(efx->legacy_irq);
1091 efx_for_each_channel_with_interrupt(channel, efx)
1092 if (channel->irq)
1093 synchronize_irq(channel->irq);
1094
1095 /* Stop all NAPI processing and synchronous rx refills */
1096 efx_for_each_channel(channel, efx)
1097 efx_stop_channel(channel);
1098
1099 /* Stop all asynchronous port reconfigurations. Since all
1100 * event processing has already been stopped, there is no
 1101	 * window to lose PHY events */
1102 efx_stop_port(efx);
1103
1104 /* Flush reconfigure_work, refill_workqueue, monitor_work */
1105 efx_flush_all(efx);
1106
1107 /* Isolate the MAC from the TX and RX engines, so that queue
1108 * flushes will complete in a timely fashion. */
1109 falcon_deconfigure_mac_wrapper(efx);
1110 falcon_drain_tx_fifo(efx);
1111
1112 /* Stop the kernel transmit interface late, so the watchdog
1113 * timer isn't ticking over the flush */
1114 efx_stop_queue(efx);
1115 if (NET_DEV_REGISTERED(efx)) {
1116 netif_tx_lock_bh(efx->net_dev);
1117 netif_tx_unlock_bh(efx->net_dev);
1118 }
1119}
1120
1121static void efx_remove_all(struct efx_nic *efx)
1122{
1123 struct efx_channel *channel;
1124
1125 efx_for_each_channel(channel, efx)
1126 efx_remove_channel(channel);
1127 efx_remove_port(efx);
1128 efx_remove_nic(efx);
1129}
1130
1131/* A convenience function to safely flush all the queues */
1132int efx_flush_queues(struct efx_nic *efx)
1133{
1134 int rc;
1135
1136 EFX_ASSERT_RESET_SERIALISED(efx);
1137
1138 efx_stop_all(efx);
1139
1140 efx_fini_channels(efx);
1141 rc = efx_init_channels(efx);
1142 if (rc) {
1143 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1144 return rc;
1145 }
1146
1147 efx_start_all(efx);
1148
1149 return 0;
1150}
1151
1152/**************************************************************************
1153 *
1154 * Interrupt moderation
1155 *
1156 **************************************************************************/
1157
1158/* Set interrupt moderation parameters */
1159void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
1160{
1161 struct efx_tx_queue *tx_queue;
1162 struct efx_rx_queue *rx_queue;
1163
1164 EFX_ASSERT_RESET_SERIALISED(efx);
1165
1166 efx_for_each_tx_queue(tx_queue, efx)
1167 tx_queue->channel->irq_moderation = tx_usecs;
1168
1169 efx_for_each_rx_queue(rx_queue, efx)
1170 rx_queue->channel->irq_moderation = rx_usecs;
1171}
1172
1173/**************************************************************************
1174 *
1175 * Hardware monitor
1176 *
1177 **************************************************************************/
1178
1179/* Run periodically off the general workqueue. Serialised against
1180 * efx_reconfigure_port via the mac_lock */
1181static void efx_monitor(struct work_struct *data)
1182{
1183 struct efx_nic *efx = container_of(data, struct efx_nic,
1184 monitor_work.work);
1185 int rc = 0;
1186
1187 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
1188 raw_smp_processor_id());
1189
1190
1191 /* If the mac_lock is already held then it is likely a port
1192 * reconfiguration is already in place, which will likely do
1193 * most of the work of check_hw() anyway. */
1194 if (!mutex_trylock(&efx->mac_lock)) {
1195 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1196 efx_monitor_interval);
1197 return;
1198 }
1199
1200 if (efx->port_enabled)
1201 rc = falcon_check_xmac(efx);
1202 mutex_unlock(&efx->mac_lock);
1203
1204 if (rc) {
1205 if (monitor_reset) {
1206 EFX_ERR(efx, "hardware monitor detected a fault: "
1207 "triggering reset\n");
1208 efx_schedule_reset(efx, RESET_TYPE_MONITOR);
1209 } else {
1210 EFX_ERR(efx, "hardware monitor detected a fault, "
1211 "skipping reset\n");
1212 }
1213 }
1214
1215 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1216 efx_monitor_interval);
1217}
1218
1219/**************************************************************************
1220 *
1221 * ioctls
1222 *
1223 *************************************************************************/
1224
1225/* Net device ioctl
1226 * Context: process, rtnl_lock() held.
1227 */
1228static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1229{
1230 struct efx_nic *efx = net_dev->priv;
1231
1232 EFX_ASSERT_RESET_SERIALISED(efx);
1233
1234 return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
1235}
1236
1237/**************************************************************************
1238 *
1239 * NAPI interface
1240 *
1241 **************************************************************************/
1242
1243static int efx_init_napi(struct efx_nic *efx)
1244{
1245 struct efx_channel *channel;
1246 int rc;
1247
1248 efx_for_each_channel(channel, efx) {
1249 channel->napi_dev = efx->net_dev;
1250 rc = efx_lro_init(&channel->lro_mgr, efx);
1251 if (rc)
1252 goto err;
1253 }
1254 return 0;
1255 err:
1256 efx_fini_napi(efx);
1257 return rc;
1258}
1259
1260static void efx_fini_napi(struct efx_nic *efx)
1261{
1262 struct efx_channel *channel;
1263
1264 efx_for_each_channel(channel, efx) {
1265 efx_lro_fini(&channel->lro_mgr);
1266 channel->napi_dev = NULL;
1267 }
1268}
1269
1270/**************************************************************************
1271 *
1272 * Kernel netpoll interface
1273 *
1274 *************************************************************************/
1275
1276#ifdef CONFIG_NET_POLL_CONTROLLER
1277
1278/* Although in the common case interrupts will be disabled, this is not
1279 * guaranteed. However, all our work happens inside the NAPI callback,
1280 * so no locking is required.
1281 */
1282static void efx_netpoll(struct net_device *net_dev)
1283{
1284 struct efx_nic *efx = net_dev->priv;
1285 struct efx_channel *channel;
1286
1287 efx_for_each_channel_with_interrupt(channel, efx)
1288 efx_schedule_channel(channel);
1289}
1290
1291#endif
1292
1293/**************************************************************************
1294 *
1295 * Kernel net device interface
1296 *
1297 *************************************************************************/
1298
1299/* Context: process, rtnl_lock() held. */
1300static int efx_net_open(struct net_device *net_dev)
1301{
1302 struct efx_nic *efx = net_dev->priv;
1303 EFX_ASSERT_RESET_SERIALISED(efx);
1304
1305 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
1306 raw_smp_processor_id());
1307
1308 efx_start_all(efx);
1309 return 0;
1310}
1311
1312/* Context: process, rtnl_lock() held.
1313 * Note that the kernel will ignore our return code; this method
1314 * should really be a void.
1315 */
1316static int efx_net_stop(struct net_device *net_dev)
1317{
1318 struct efx_nic *efx = net_dev->priv;
1319 int rc;
1320
1321 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
1322 raw_smp_processor_id());
1323
1324 /* Stop the device and flush all the channels */
1325 efx_stop_all(efx);
1326 efx_fini_channels(efx);
1327 rc = efx_init_channels(efx);
1328 if (rc)
1329 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1330
1331 return 0;
1332}
1333
1334/* Context: process, dev_base_lock held, non-blocking. */
1335static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1336{
1337 struct efx_nic *efx = net_dev->priv;
1338 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1339 struct net_device_stats *stats = &net_dev->stats;
1340
1341 if (!spin_trylock(&efx->stats_lock))
1342 return stats;
1343 if (efx->state == STATE_RUNNING) {
1344 falcon_update_stats_xmac(efx);
1345 falcon_update_nic_stats(efx);
1346 }
1347 spin_unlock(&efx->stats_lock);
1348
1349 stats->rx_packets = mac_stats->rx_packets;
1350 stats->tx_packets = mac_stats->tx_packets;
1351 stats->rx_bytes = mac_stats->rx_bytes;
1352 stats->tx_bytes = mac_stats->tx_bytes;
1353 stats->multicast = mac_stats->rx_multicast;
1354 stats->collisions = mac_stats->tx_collision;
1355 stats->rx_length_errors = (mac_stats->rx_gtjumbo +
1356 mac_stats->rx_length_error);
1357 stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
1358 stats->rx_crc_errors = mac_stats->rx_bad;
1359 stats->rx_frame_errors = mac_stats->rx_align_error;
1360 stats->rx_fifo_errors = mac_stats->rx_overflow;
1361 stats->rx_missed_errors = mac_stats->rx_missed;
1362 stats->tx_window_errors = mac_stats->tx_late_collision;
1363
1364 stats->rx_errors = (stats->rx_length_errors +
1365 stats->rx_over_errors +
1366 stats->rx_crc_errors +
1367 stats->rx_frame_errors +
1368 stats->rx_fifo_errors +
1369 stats->rx_missed_errors +
1370 mac_stats->rx_symbol_error);
1371 stats->tx_errors = (stats->tx_window_errors +
1372 mac_stats->tx_bad);
1373
1374 return stats;
1375}
1376
1377/* Context: netif_tx_lock held, BHs disabled. */
1378static void efx_watchdog(struct net_device *net_dev)
1379{
1380 struct efx_nic *efx = net_dev->priv;
1381
1382 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
1383 atomic_read(&efx->netif_stop_count), efx->port_enabled,
1384 monitor_reset ? "resetting channels" : "skipping reset");
1385
1386 if (monitor_reset)
1387 efx_schedule_reset(efx, RESET_TYPE_MONITOR);
1388}
1389
1390
1391/* Context: process, rtnl_lock() held. */
1392static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1393{
1394 struct efx_nic *efx = net_dev->priv;
1395 int rc = 0;
1396
1397 EFX_ASSERT_RESET_SERIALISED(efx);
1398
1399 if (new_mtu > EFX_MAX_MTU)
1400 return -EINVAL;
1401
1402 efx_stop_all(efx);
1403
1404 EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
1405
1406 efx_fini_channels(efx);
1407 net_dev->mtu = new_mtu;
1408 rc = efx_init_channels(efx);
1409 if (rc)
1410 goto fail;
1411
1412 efx_start_all(efx);
1413 return rc;
1414
1415 fail:
1416 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1417 return rc;
1418}
1419
1420static int efx_set_mac_address(struct net_device *net_dev, void *data)
1421{
1422 struct efx_nic *efx = net_dev->priv;
1423 struct sockaddr *addr = data;
1424 char *new_addr = addr->sa_data;
1425
1426 EFX_ASSERT_RESET_SERIALISED(efx);
1427
1428 if (!is_valid_ether_addr(new_addr)) {
1429 DECLARE_MAC_BUF(mac);
1430 EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
1431 print_mac(mac, new_addr));
1432 return -EINVAL;
1433 }
1434
1435 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
1436
1437 /* Reconfigure the MAC */
1438 efx_reconfigure_port(efx);
1439
1440 return 0;
1441}
1442
1443/* Context: netif_tx_lock held, BHs disabled. */
1444static void efx_set_multicast_list(struct net_device *net_dev)
1445{
1446 struct efx_nic *efx = net_dev->priv;
1447 struct dev_mc_list *mc_list = net_dev->mc_list;
1448 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1449 int promiscuous;
1450 u32 crc;
1451 int bit;
1452 int i;
1453
1454 /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
1455 promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
1456 if (efx->promiscuous != promiscuous) {
1457 efx->promiscuous = promiscuous;
1458 /* Close the window between efx_stop_port() and efx_flush_all()
1459 * by only queuing work when the port is enabled. */
1460 if (efx->port_enabled)
1461 queue_work(efx->workqueue, &efx->reconfigure_work);
1462 }
1463
1464 /* Build multicast hash table */
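	/* Descriptive note (added): each multicast address is hashed with a
	 * little-endian CRC32 (ether_crc_le()); the low-order bits of the
	 * CRC select a single bit to set in the hardware hash filter.
	 */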
1465 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
1466 memset(mc_hash, 0xff, sizeof(*mc_hash));
1467 } else {
1468 memset(mc_hash, 0x00, sizeof(*mc_hash));
1469 for (i = 0; i < net_dev->mc_count; i++) {
1470 crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
1471 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
1472 set_bit_le(bit, mc_hash->byte);
1473 mc_list = mc_list->next;
1474 }
1475 }
1476
1477 /* Create and activate new global multicast hash table */
1478 falcon_set_multicast_hash(efx);
1479}
1480
1481static int efx_netdev_event(struct notifier_block *this,
1482 unsigned long event, void *ptr)
1483{
1484 struct net_device *net_dev = (struct net_device *)ptr;
1485
1486 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
1487 struct efx_nic *efx = net_dev->priv;
1488
1489 strcpy(efx->name, net_dev->name);
1490 }
1491
1492 return NOTIFY_DONE;
1493}
1494
1495static struct notifier_block efx_netdev_notifier = {
1496 .notifier_call = efx_netdev_event,
1497};
1498
1499static int efx_register_netdev(struct efx_nic *efx)
1500{
1501 struct net_device *net_dev = efx->net_dev;
1502 int rc;
1503
1504 net_dev->watchdog_timeo = 5 * HZ;
1505 net_dev->irq = efx->pci_dev->irq;
1506 net_dev->open = efx_net_open;
1507 net_dev->stop = efx_net_stop;
1508 net_dev->get_stats = efx_net_stats;
1509 net_dev->tx_timeout = &efx_watchdog;
1510 net_dev->hard_start_xmit = efx_hard_start_xmit;
1511 net_dev->do_ioctl = efx_ioctl;
1512 net_dev->change_mtu = efx_change_mtu;
1513 net_dev->set_mac_address = efx_set_mac_address;
1514 net_dev->set_multicast_list = efx_set_multicast_list;
1515#ifdef CONFIG_NET_POLL_CONTROLLER
1516 net_dev->poll_controller = efx_netpoll;
1517#endif
1518 SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
1519 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
1520
1521 /* Always start with carrier off; PHY events will detect the link */
1522 netif_carrier_off(efx->net_dev);
1523
1524 /* Clear MAC statistics */
1525 falcon_update_stats_xmac(efx);
1526 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
1527
1528 rc = register_netdev(net_dev);
1529 if (rc) {
1530 EFX_ERR(efx, "could not register net dev\n");
1531 return rc;
1532 }
1533 strcpy(efx->name, net_dev->name);
1534
1535 return 0;
1536}
1537
1538static void efx_unregister_netdev(struct efx_nic *efx)
1539{
1540 struct efx_tx_queue *tx_queue;
1541
1542 if (!efx->net_dev)
1543 return;
1544
1545 BUG_ON(efx->net_dev->priv != efx);
1546
1547 /* Free up any skbs still remaining. This has to happen before
1548 * we try to unregister the netdev as running their destructors
1549 * may be needed to get the device ref. count to 0. */
1550 efx_for_each_tx_queue(tx_queue, efx)
1551 efx_release_tx_buffers(tx_queue);
1552
1553 if (NET_DEV_REGISTERED(efx)) {
1554 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
1555 unregister_netdev(efx->net_dev);
1556 }
1557}
1558
1559/**************************************************************************
1560 *
1561 * Device reset and suspend
1562 *
1563 **************************************************************************/
1564
1565/* The final hardware and software finalisation before reset. */
1566static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1567{
1568 int rc;
1569
1570 EFX_ASSERT_RESET_SERIALISED(efx);
1571
1572 rc = falcon_xmac_get_settings(efx, ecmd);
1573 if (rc) {
1574 EFX_ERR(efx, "could not back up PHY settings\n");
1575 goto fail;
1576 }
1577
1578 efx_fini_channels(efx);
1579 return 0;
1580
1581 fail:
1582 return rc;
1583}
1584
1585/* The first part of software initialisation after a hardware reset
1586 * This function does not handle serialisation with the kernel, it
1587 * assumes the caller has done this */
1588static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1589{
1590 int rc;
1591
1592 rc = efx_init_channels(efx);
1593 if (rc)
1594 goto fail1;
1595
1596 /* Restore MAC and PHY settings. */
1597 rc = falcon_xmac_set_settings(efx, ecmd);
1598 if (rc) {
1599 EFX_ERR(efx, "could not restore PHY settings\n");
1600 goto fail2;
1601 }
1602
1603 return 0;
1604
1605 fail2:
1606 efx_fini_channels(efx);
1607 fail1:
1608 return rc;
1609}
1610
1611/* Reset the NIC as transparently as possible. Do not reset the PHY
1612 * Note that the reset may fail, in which case the card will be left
1613 * in a most-probably-unusable state.
1614 *
1615 * This function will sleep. You cannot reset from within an atomic
1616 * state; use efx_schedule_reset() instead.
1617 *
1618 * Grabs the rtnl_lock.
1619 */
1620static int efx_reset(struct efx_nic *efx)
1621{
1622 struct ethtool_cmd ecmd;
1623 enum reset_type method = efx->reset_pending;
1624 int rc;
1625
1626 /* Serialise with kernel interfaces */
1627 rtnl_lock();
1628
1629 /* If we're not RUNNING then don't reset. Leave the reset_pending
1630 * flag set so that efx_pci_probe_main will be retried */
1631 if (efx->state != STATE_RUNNING) {
1632 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
1633 goto unlock_rtnl;
1634 }
1635
1636 efx->state = STATE_RESETTING;
1637 EFX_INFO(efx, "resetting (%d)\n", method);
1638
1639 /* The net_dev->get_stats handler is quite slow, and will fail
1640 * if a fetch is pending over reset. Serialise against it. */
1641 spin_lock(&efx->stats_lock);
1642 spin_unlock(&efx->stats_lock);
1643
1644 efx_stop_all(efx);
1645 mutex_lock(&efx->mac_lock);
1646
1647 rc = efx_reset_down(efx, &ecmd);
1648 if (rc)
1649 goto fail1;
1650
1651 rc = falcon_reset_hw(efx, method);
1652 if (rc) {
1653 EFX_ERR(efx, "failed to reset hardware\n");
1654 goto fail2;
1655 }
1656
1657 /* Allow resets to be rescheduled. */
1658 efx->reset_pending = RESET_TYPE_NONE;
1659
1660 /* Reinitialise bus-mastering, which may have been turned off before
1661 * the reset was scheduled. This is still appropriate, even in the
1662 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
1663 * can respond to requests. */
1664 pci_set_master(efx->pci_dev);
1665
1666 /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
1667 * case so the driver can talk to external SRAM */
1668 rc = falcon_init_nic(efx);
1669 if (rc) {
1670 EFX_ERR(efx, "failed to initialise NIC\n");
1671 goto fail3;
1672 }
1673
1674 /* Leave device stopped if necessary */
1675 if (method == RESET_TYPE_DISABLE) {
1676 /* Reinitialise the device anyway so the driver unload sequence
1677 * can talk to the external SRAM */
 1678		falcon_init_nic(efx);
1679 rc = -EIO;
1680 goto fail4;
1681 }
1682
1683 rc = efx_reset_up(efx, &ecmd);
1684 if (rc)
1685 goto fail5;
1686
1687 mutex_unlock(&efx->mac_lock);
1688 EFX_LOG(efx, "reset complete\n");
1689
1690 efx->state = STATE_RUNNING;
1691 efx_start_all(efx);
1692
1693 unlock_rtnl:
1694 rtnl_unlock();
1695 return 0;
1696
1697 fail5:
1698 fail4:
1699 fail3:
1700 fail2:
1701 fail1:
1702 EFX_ERR(efx, "has been disabled\n");
1703 efx->state = STATE_DISABLED;
1704
1705 mutex_unlock(&efx->mac_lock);
1706 rtnl_unlock();
1707 efx_unregister_netdev(efx);
1708 efx_fini_port(efx);
1709 return rc;
1710}
1711
1712/* The worker thread exists so that code that cannot sleep can
1713 * schedule a reset for later.
1714 */
1715static void efx_reset_work(struct work_struct *data)
1716{
1717 struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);
1718
1719 efx_reset(nic);
1720}
1721
1722void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1723{
1724 enum reset_type method;
1725
1726 if (efx->reset_pending != RESET_TYPE_NONE) {
1727 EFX_INFO(efx, "quenching already scheduled reset\n");
1728 return;
1729 }
1730
1731 switch (type) {
1732 case RESET_TYPE_INVISIBLE:
1733 case RESET_TYPE_ALL:
1734 case RESET_TYPE_WORLD:
1735 case RESET_TYPE_DISABLE:
1736 method = type;
1737 break;
1738 case RESET_TYPE_RX_RECOVERY:
1739 case RESET_TYPE_RX_DESC_FETCH:
1740 case RESET_TYPE_TX_DESC_FETCH:
1741 case RESET_TYPE_TX_SKIP:
1742 method = RESET_TYPE_INVISIBLE;
1743 break;
1744 default:
1745 method = RESET_TYPE_ALL;
1746 break;
1747 }
1748
1749 if (method != type)
1750 EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
1751 else
1752 EFX_LOG(efx, "scheduling reset (%d)\n", method);
1753
1754 efx->reset_pending = method;
1755
1756 queue_work(efx->workqueue, &efx->reset_work);
1757}
1758
1759/**************************************************************************
1760 *
1761 * List of NICs we support
1762 *
1763 **************************************************************************/
1764
1765/* PCI device ID table */
1766static struct pci_device_id efx_pci_table[] __devinitdata = {
1767 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1768 .driver_data = (unsigned long) &falcon_a_nic_type},
1769 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
1770 .driver_data = (unsigned long) &falcon_b_nic_type},
1771 {0} /* end of list */
1772};
1773
1774/**************************************************************************
1775 *
1776 * Dummy PHY/MAC/Board operations
1777 *
1778 * Can be used where the MAC does not implement this operation
1779 * Needed so all function pointers are valid and do not have to be tested
1780 * before use
1781 *
1782 **************************************************************************/
1783int efx_port_dummy_op_int(struct efx_nic *efx)
1784{
1785 return 0;
1786}
1787void efx_port_dummy_op_void(struct efx_nic *efx) {}
1788void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {}
1789
1790static struct efx_phy_operations efx_dummy_phy_operations = {
1791 .init = efx_port_dummy_op_int,
1792 .reconfigure = efx_port_dummy_op_void,
1793 .check_hw = efx_port_dummy_op_int,
1794 .fini = efx_port_dummy_op_void,
1795 .clear_interrupt = efx_port_dummy_op_void,
1796 .reset_xaui = efx_port_dummy_op_void,
1797};
1798
1799/* Dummy board operations */
1800static int efx_nic_dummy_op_int(struct efx_nic *nic)
1801{
1802 return 0;
1803}
1804
1805static struct efx_board efx_dummy_board_info = {
1806 .init = efx_nic_dummy_op_int,
1807 .init_leds = efx_port_dummy_op_int,
1808 .set_fault_led = efx_port_dummy_op_blink,
1809};
1810
1811/**************************************************************************
1812 *
1813 * Data housekeeping
1814 *
1815 **************************************************************************/
1816
1817/* This zeroes out and then fills in the invariants in a struct
1818 * efx_nic (including all sub-structures).
1819 */
1820static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1821 struct pci_dev *pci_dev, struct net_device *net_dev)
1822{
1823 struct efx_channel *channel;
1824 struct efx_tx_queue *tx_queue;
1825 struct efx_rx_queue *rx_queue;
1826 int i, rc;
1827
1828 /* Initialise common structures */
1829 memset(efx, 0, sizeof(*efx));
1830 spin_lock_init(&efx->biu_lock);
1831 spin_lock_init(&efx->phy_lock);
1832 INIT_WORK(&efx->reset_work, efx_reset_work);
1833 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
1834 efx->pci_dev = pci_dev;
1835 efx->state = STATE_INIT;
1836 efx->reset_pending = RESET_TYPE_NONE;
1837 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
1838 efx->board_info = efx_dummy_board_info;
1839
1840 efx->net_dev = net_dev;
1841 efx->rx_checksum_enabled = 1;
1842 spin_lock_init(&efx->netif_stop_lock);
1843 spin_lock_init(&efx->stats_lock);
1844 mutex_init(&efx->mac_lock);
1845 efx->phy_op = &efx_dummy_phy_operations;
1846 efx->mii.dev = net_dev;
1847 INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
1848 atomic_set(&efx->netif_stop_count, 1);
1849
1850 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
1851 channel = &efx->channel[i];
1852 channel->efx = efx;
1853 channel->channel = i;
1854 channel->evqnum = i;
1855 channel->work_pending = 0;
1856 }
1857 for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
1858 tx_queue = &efx->tx_queue[i];
1859 tx_queue->efx = efx;
1860 tx_queue->queue = i;
1861 tx_queue->buffer = NULL;
1862 tx_queue->channel = &efx->channel[0]; /* for safety */
 1863		tx_queue->tso_headers_free = NULL;
1864 }
1865 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
1866 rx_queue = &efx->rx_queue[i];
1867 rx_queue->efx = efx;
1868 rx_queue->queue = i;
1869 rx_queue->channel = &efx->channel[0]; /* for safety */
1870 rx_queue->buffer = NULL;
1871 spin_lock_init(&rx_queue->add_lock);
1872 INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
1873 }
1874
1875 efx->type = type;
1876
1877 /* Sanity-check NIC type */
1878 EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
1879 (efx->type->txd_ring_mask + 1));
1880 EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
1881 (efx->type->rxd_ring_mask + 1));
1882 EFX_BUG_ON_PARANOID(efx->type->evq_size &
1883 (efx->type->evq_size - 1));
1884 /* As close as we can get to guaranteeing that we don't overflow */
1885 EFX_BUG_ON_PARANOID(efx->type->evq_size <
1886 (efx->type->txd_ring_mask + 1 +
1887 efx->type->rxd_ring_mask + 1));
1888 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
1889
1890 /* Higher numbered interrupt modes are less capable! */
1891 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
1892 interrupt_mode);
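	/* For example, if the NIC type supports at most MSI
	 * (max_interrupt_mode == 1) but MSI-X (interrupt_mode == 0) was
	 * requested, max() falls back to MSI, the most capable mode the
	 * hardware allows. */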
1893
1894 efx->workqueue = create_singlethread_workqueue("sfc_work");
1895 if (!efx->workqueue) {
1896 rc = -ENOMEM;
1897 goto fail1;
1898 }
1899
1900 return 0;
1901
1902 fail1:
1903 return rc;
1904}
1905
1906static void efx_fini_struct(struct efx_nic *efx)
1907{
1908 if (efx->workqueue) {
1909 destroy_workqueue(efx->workqueue);
1910 efx->workqueue = NULL;
1911 }
1912}
1913
1914/**************************************************************************
1915 *
1916 * PCI interface
1917 *
1918 **************************************************************************/
1919
1920/* Main body of final NIC shutdown code
1921 * This is called only at module unload (or hotplug removal).
1922 */
1923static void efx_pci_remove_main(struct efx_nic *efx)
1924{
1925 EFX_ASSERT_RESET_SERIALISED(efx);
1926
1927 /* Skip everything if we never obtained a valid membase */
1928 if (!efx->membase)
1929 return;
1930
1931 efx_fini_channels(efx);
1932 efx_fini_port(efx);
1933
 1934 /* Shut down the board, then the NIC and board state */
1935 falcon_fini_interrupt(efx);
1936
1937 efx_fini_napi(efx);
1938 efx_remove_all(efx);
1939}
1940
1941/* Final NIC shutdown
1942 * This is called only at module unload (or hotplug removal).
1943 */
1944static void efx_pci_remove(struct pci_dev *pci_dev)
1945{
1946 struct efx_nic *efx;
1947
1948 efx = pci_get_drvdata(pci_dev);
1949 if (!efx)
1950 return;
1951
1952 /* Mark the NIC as fini, then stop the interface */
1953 rtnl_lock();
1954 efx->state = STATE_FINI;
1955 dev_close(efx->net_dev);
1956
1957 /* Allow any queued efx_resets() to complete */
1958 rtnl_unlock();
1959
1960 if (efx->membase == NULL)
1961 goto out;
1962
1963 efx_unregister_netdev(efx);
1964
1965 /* Wait for any scheduled resets to complete. No more will be
1966 * scheduled from this point because efx_stop_all() has been
1967 * called, we are no longer registered with driverlink, and
 1968 * the net_devices have been removed. */
1969 flush_workqueue(efx->workqueue);
1970
1971 efx_pci_remove_main(efx);
1972
1973out:
1974 efx_fini_io(efx);
1975 EFX_LOG(efx, "shutdown successful\n");
1976
1977 pci_set_drvdata(pci_dev, NULL);
1978 efx_fini_struct(efx);
1979 free_netdev(efx->net_dev);
 1980}
1981
1982/* Main body of NIC initialisation
1983 * This is called at module load (or hotplug insertion, theoretically).
1984 */
1985static int efx_pci_probe_main(struct efx_nic *efx)
1986{
1987 int rc;
1988
1989 /* Do start-of-day initialisation */
1990 rc = efx_probe_all(efx);
1991 if (rc)
1992 goto fail1;
1993
1994 rc = efx_init_napi(efx);
1995 if (rc)
1996 goto fail2;
1997
1998 /* Initialise the board */
1999 rc = efx->board_info.init(efx);
2000 if (rc) {
2001 EFX_ERR(efx, "failed to initialise board\n");
2002 goto fail3;
2003 }
2004
2005 rc = falcon_init_nic(efx);
2006 if (rc) {
2007 EFX_ERR(efx, "failed to initialise NIC\n");
2008 goto fail4;
2009 }
2010
2011 rc = efx_init_port(efx);
2012 if (rc) {
2013 EFX_ERR(efx, "failed to initialise port\n");
2014 goto fail5;
2015 }
2016
2017 rc = efx_init_channels(efx);
2018 if (rc)
2019 goto fail6;
2020
2021 rc = falcon_init_interrupt(efx);
2022 if (rc)
2023 goto fail7;
2024
2025 return 0;
2026
2027 fail7:
2028 efx_fini_channels(efx);
2029 fail6:
2030 efx_fini_port(efx);
2031 fail5:
2032 fail4:
2033 fail3:
2034 efx_fini_napi(efx);
2035 fail2:
2036 efx_remove_all(efx);
2037 fail1:
2038 return rc;
2039}
2040
2041/* NIC initialisation
2042 *
2043 * This is called at module load (or hotplug insertion,
2044 * theoretically). It sets up PCI mappings, tests and resets the NIC,
2045 * sets up and registers the network devices with the kernel and hooks
2046 * the interrupt service routine. It does not prepare the device for
2047 * transmission; this is left to the first time one of the network
2048 * interfaces is brought up (i.e. efx_net_open).
2049 */
2050static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2051 const struct pci_device_id *entry)
2052{
2053 struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
2054 struct net_device *net_dev;
2055 struct efx_nic *efx;
2056 int i, rc;
2057
2058 /* Allocate and initialise a struct net_device and struct efx_nic */
2059 net_dev = alloc_etherdev(sizeof(*efx));
2060 if (!net_dev)
2061 return -ENOMEM;
2062 net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
2063 NETIF_F_HIGHDMA | NETIF_F_TSO);
2064 if (lro)
2065 net_dev->features |= NETIF_F_LRO;
2066 efx = net_dev->priv;
2067 pci_set_drvdata(pci_dev, efx);
2068 rc = efx_init_struct(efx, type, pci_dev, net_dev);
2069 if (rc)
2070 goto fail1;
2071
2072 EFX_INFO(efx, "Solarflare Communications NIC detected\n");
2073
2074 /* Set up basic I/O (BAR mappings etc) */
2075 rc = efx_init_io(efx);
2076 if (rc)
2077 goto fail2;
2078
2079 /* No serialisation is required with the reset path because
2080 * we're in STATE_INIT. */
2081 for (i = 0; i < 5; i++) {
2082 rc = efx_pci_probe_main(efx);
2083 if (rc == 0)
2084 break;
2085
2086 /* Serialise against efx_reset(). No more resets will be
2087 * scheduled since efx_stop_all() has been called, and we
 2088 * have never been registered with either
2089 * the rtnetlink or driverlink layers. */
2090 cancel_work_sync(&efx->reset_work);
2091
 2092 /* Retry if a recoverable reset event has been scheduled */
2093 if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
2094 (efx->reset_pending != RESET_TYPE_ALL))
2095 goto fail3;
2096
2097 efx->reset_pending = RESET_TYPE_NONE;
2098 }
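	/* rc is zero here if one of the attempts above succeeded; otherwise
	 * every attempt failed with only a recoverable reset pending and the
	 * retry budget has been exhausted. */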
2099
2100 if (rc) {
2101 EFX_ERR(efx, "Could not reset NIC\n");
2102 goto fail4;
2103 }
2104
2105 /* Switch to the running state before we expose the device to
2106 * the OS. This is to ensure that the initial gathering of
2107 * MAC stats succeeds. */
2108 rtnl_lock();
2109 efx->state = STATE_RUNNING;
2110 rtnl_unlock();
2111
2112 rc = efx_register_netdev(efx);
2113 if (rc)
2114 goto fail5;
2115
2116 EFX_LOG(efx, "initialisation successful\n");
2117
2118 return 0;
2119
2120 fail5:
2121 efx_pci_remove_main(efx);
2122 fail4:
2123 fail3:
2124 efx_fini_io(efx);
2125 fail2:
2126 efx_fini_struct(efx);
2127 fail1:
2128 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
2129 free_netdev(net_dev);
2130 return rc;
2131}
2132
2133static struct pci_driver efx_pci_driver = {
2134 .name = EFX_DRIVER_NAME,
2135 .id_table = efx_pci_table,
2136 .probe = efx_pci_probe,
2137 .remove = efx_pci_remove,
2138};
2139
2140/**************************************************************************
2141 *
2142 * Kernel module interface
2143 *
2144 *************************************************************************/
2145
2146module_param(interrupt_mode, uint, 0444);
2147MODULE_PARM_DESC(interrupt_mode,
2148 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
2149
2150static int __init efx_init_module(void)
2151{
2152 int rc;
2153
2154 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
2155
2156 rc = register_netdevice_notifier(&efx_netdev_notifier);
2157 if (rc)
2158 goto err_notifier;
2159
2160 refill_workqueue = create_workqueue("sfc_refill");
2161 if (!refill_workqueue) {
2162 rc = -ENOMEM;
2163 goto err_refill;
2164 }
2165
2166 rc = pci_register_driver(&efx_pci_driver);
2167 if (rc < 0)
2168 goto err_pci;
2169
2170 return 0;
2171
2172 err_pci:
2173 destroy_workqueue(refill_workqueue);
2174 err_refill:
2175 unregister_netdevice_notifier(&efx_netdev_notifier);
2176 err_notifier:
2177 return rc;
2178}
2179
2180static void __exit efx_exit_module(void)
2181{
2182 printk(KERN_INFO "Solarflare NET driver unloading\n");
2183
2184 pci_unregister_driver(&efx_pci_driver);
2185 destroy_workqueue(refill_workqueue);
2186 unregister_netdevice_notifier(&efx_netdev_notifier);
2187
2188}
2189
2190module_init(efx_init_module);
2191module_exit(efx_exit_module);
2192
2193MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
2194 "Solarflare Communications");
2195MODULE_DESCRIPTION("Solarflare Communications network driver");
2196MODULE_LICENSE("GPL");
2197MODULE_DEVICE_TABLE(pci, efx_pci_table);