/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/tcp.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include "net_driver.h"
#include "workarounds.h"
#define EFX_MAX_MTU (9 * 1024)
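/* (Worked example of the value above: 9 * 1024 = 9216 bytes, enough for
 * a standard 9000-byte jumbo frame payload plus Ethernet/VLAN headers.) */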
/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
 * workqueue, there is nothing to be gained in making it per NIC.
 */
static struct workqueue_struct *refill_workqueue;
/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/
/*
 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
 *
 * This sets the default for new devices. It can be controlled later
 * using ethtool.
 */
static int lro = 1;
module_param(lro, int, 0644);
MODULE_PARM_DESC(lro, "Large receive offload acceleration");
/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us to
 * apply a higher level of interrupt moderation to TX events.
 *
 * This is forced to 0 for MSI interrupt mode as the interrupt vector
 * is not written
 */
static unsigned int separate_tx_and_rx_channels = 1;
/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;
/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;
/* This controls whether or not the hardware monitor will trigger a
 * reset when it detects an error condition.
 */
static unsigned int monitor_reset = 1;
/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;
/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;
/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
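/* (Worked example of the figure above: with a 1024-entry TX ring the
 * queue restarts once it drains to half full, i.e. after 512
 * descriptors. At worst-case 3 descriptors per packet that is ~170
 * packets, and at 10Gbps a full-sized frame takes ~1.2us on the wire,
 * giving 512 / 3 * 1.2 = ~205us. A TX moderation of 150us therefore
 * guarantees an interrupt, and hence a queue restart, before the link
 * can go idle.) */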
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;
/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 * in the system.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_RUNNING) ||	\
		    (efx->state == STATE_RESETTING))	\
			ASSERT_RTNL();			\
	} while (0)
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/
/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	int rxdmaqs;
	struct efx_rx_queue *rx_queue;

	if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return rx_quota;

	rxdmaqs = falcon_process_eventq(channel, &rx_quota);

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_flush_lro(channel);
	efx_rx_strategy(channel);

	/* Refill descriptor rings as necessary */
	rx_queue = &channel->efx->rx_queue[0];
	while (rxdmaqs) {
		if (rxdmaqs & 0x01)
			efx_fast_push_rx_descriptors(rx_queue);
		rx_queue++;
		rxdmaqs >>= 1;
	}

	return rx_quota;
}
/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = 0;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct net_device *napi_dev = channel->napi_dev;
	int unused;
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	unused = efx_process_channel(channel, budget);
	rx_packets = (budget - unused);

	if (rx_packets < budget) {
		/* There is no race here; although napi_disable() will
		 * only wait for netif_rx_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		netif_rx_complete(napi_dev, napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}
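/* (Note on the standard NAPI contract assumed above: returning a value
 * equal to the budget tells the core to poll this channel again without
 * re-enabling interrupts; returning fewer packets, as in the branch
 * above, completes the poll and re-arms the event queue via
 * efx_channel_processed().) */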
/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->has_interrupt && channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, efx->type->evq_size);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	return falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}
/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static int efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		rc = efx_init_eventq(channel);
		if (rc)
			goto err;

		efx_for_each_channel_tx_queue(tx_queue, channel) {
			rc = efx_init_tx_queue(tx_queue);
			if (rc)
				goto err;
		}

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			rc = efx_init_rx_queue(rx_queue);
			if (rc)
				goto err;
		}

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}

	return 0;

 err:
	EFX_ERR(efx, "failed to initialise channel %d\n",
		channel ? channel->channel : -1);
	efx_fini_channels(efx);
	return rc;
}
/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	if (!(channel->efx->net_dev->flags & IFF_UP))
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = 0;
	channel->enabled = 1;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}
/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = 0;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}
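/* (The empty lock/unlock pair above is a deliberate synchronisation
 * idiom: taking add_lock waits out any slow-fill work item currently
 * holding it, and any work item scheduled afterwards will observe
 * channel->enabled == 0 and do nothing.) */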
static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}

	/* Do the event queues last so that we can handle flush events
	 * for all DMA queues. */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);

		efx_fini_eventq(channel);
	}
}
static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}
/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/
/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status; while carrier is off the
 * kernel keeps the port's TX queue stopped. */
static void efx_link_status_changed(struct efx_nic *efx)
{
	int carrier_ok;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
	if (efx->link_up != carrier_ok) {
		efx->n_link_state_changes++;

		if (efx->link_up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (efx->link_up) {
		struct mii_if_info *gmii = &efx->mii;
		unsigned adv, lpa;

		/* NONE here means direct XAUI from the controller, with no
		 * MDIO-attached device we can query. */
		if (efx->phy_type != PHY_TYPE_NONE) {
			adv = gmii_advertised(gmii);
			lpa = gmii_lpa(gmii);
		} else {
			lpa = GM_LPA_10000 | LPA_DUPLEX;
			adv = lpa;
		}

		EFX_INFO(efx, "link up at %dMbps %s-duplex "
			 "(adv %04x lpa %04x) (MTU %d)%s\n",
			 (efx->link_options & GM_LPA_10000 ? 10000 :
			  (efx->link_options & GM_LPA_1000 ? 1000 :
			   (efx->link_options & GM_LPA_100 ? 100 :
			    10))),
			 (efx->link_options & GM_LPA_DUPLEX ?
			  "full" : "half"),
			 adv, lpa,
			 efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		EFX_INFO(efx, "link down\n");
	}
}
/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
static void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	falcon_reconfigure_xmac(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);
}
/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}
/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_reconfigure_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   reconfigure_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		DECLARE_MAC_BUF(mac);

		EFX_ERR(efx, "invalid MAC address %s\n",
			print_mac(mac, efx->mac_address));
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %s\n",
			 print_mac(mac, efx->net_dev->dev_addr));
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}
static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	/* Initialise the MAC and PHY */
	rc = falcon_init_xmac(efx);
	if (rc)
		return rc;

	efx->port_initialized = 1;

	/* Reconfigure port to program MAC registers */
	falcon_reconfigure_xmac(efx);

	return 0;
}
/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_reconfigure_port() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = 1;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}
/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
 * efx_reconfigure_work can still be scheduled via NAPI processing
 * until efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = 0;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_tx_lock_bh(efx->net_dev);
		netif_addr_lock(efx->net_dev);
		netif_addr_unlock(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}
static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	falcon_fini_xmac(efx);
	efx->port_initialized = 0;

	efx->link_up = 0;
	efx_link_status_changed(efx);
}
static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}
/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);
	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
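	/* (Illustrative walk-through of the loop above, assuming a NIC that
	 * advertises a 46-bit mask, 0x3fffffffffff: on a platform that
	 * rejects it, the mask is halved each iteration - 45 bits, 44 bits,
	 * and so on - until one is accepted, stopping at the 32-bit floor
	 * of 0x7fffffff.) */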
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev,
					       efx->type->mem_bar);
	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
			efx->type->mem_bar,
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	release_mem_region(efx->membase_phys, efx->type->mem_map_size);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}
static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->type->mem_bar);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}
/* Probe the number and type of interrupts we are able to obtain. */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channel = efx->type->phys_addr_channels - 1;
	struct msix_entry xentries[EFX_MAX_CHANNELS];
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));

		efx->rss_queues = rss_cpus ? rss_cpus : num_online_cpus();
		efx->rss_queues = min(efx->rss_queues, max_channel + 1);
		efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);

		/* Request maximum number of MSI interrupts, and fill out
		 * the channel interrupt information from the allowed
		 * allocation */
		for (i = 0; i < efx->rss_queues; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
		if (rc > 0) {
			EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
			efx->rss_queues = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     efx->rss_queues);
		}

		if (rc == 0) {
			for (i = 0; i < efx->rss_queues; i++) {
				efx->channel[i].has_interrupt = 1;
				efx->channel[i].irq = xentries[i].vector;
			}
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->rss_queues = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
			efx->channel[0].has_interrupt = 1;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->rss_queues = 1;
		/* Every channel is interruptible */
		for (i = 0; i < EFX_MAX_CHANNELS; i++)
			efx->channel[i].has_interrupt = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}
}
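/* (Summary of the fallback ladder above: MSI-X, with one vector per RSS
 * queue, falls back to single-vector MSI, which falls back to a legacy
 * line interrupt shared by all channels. Each failed step downgrades
 * efx->interrupt_mode, so the later if-blocks pick up exactly where the
 * earlier ones left off.) */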
static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel_with_interrupt(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
/* Select number of used resources
 * Should be called after probe_interrupts()
 */
static void efx_select_used(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	/* TX queues. One per port per channel with TX capability
	 * (more than one per port won't work on Linux, due to out
	 * of order issues... but will be fine on Solaris)
	 */
	tx_queue = &efx->tx_queue[0];

	/* Perform this for each channel with TX capabilities.
	 * At the moment, we only support a single TX queue
	 */
	tx_queue->used = 1;
	if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
		tx_queue->channel = &efx->channel[1];
	else
		tx_queue->channel = &efx->channel[0];
	tx_queue->channel->used_flags |= EFX_USED_BY_TX;

	/* RX queues. Each has a dedicated channel. */
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];

		if (i < efx->rss_queues) {
			rx_queue->used = 1;
			/* If we allow multiple RX queues per channel
			 * we need to decide that here
			 */
			rx_queue->channel = &efx->channel[rx_queue->queue];
			rx_queue->channel->used_flags |= EFX_USED_BY_RX;
		}
	}
}
static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	/* Determine number of RX queues and TX queues */
	efx_select_used(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);

	return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}
/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/
static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}
/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}
/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->reconfigure_work);
}
/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel_with_interrupt(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose PHY events */
	efx_stop_port(efx);

	/* Flush reconfigure_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_deconfigure_mac_wrapper(efx);
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	efx_stop_queue(efx);
	if (efx_dev_registered(efx)) {
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}
static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}
/* A convenience function to safely flush all the queues */
int efx_flush_queues(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);

	efx_fini_channels(efx);
	rc = efx_init_channels(efx);
	if (rc) {
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
		return rc;
	}

	efx_start_all(efx);

	return 0;
}
/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_usecs;

	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_usecs;
}
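/* (These per-channel values can be inspected and changed at runtime via
 * the driver's ethtool coalescing hooks, e.g. with a hypothetical
 * interface name:
 *
 *	ethtool -C eth0 rx-usecs 60 tx-usecs 150
 * ) */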
/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc = 0;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock)) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
		return;
	}

	if (efx->port_enabled)
		rc = falcon_check_xmac(efx);
	mutex_unlock(&efx->mac_lock);

	if (rc) {
		if (monitor_reset) {
			EFX_ERR(efx, "hardware monitor detected a fault: "
				"triggering reset\n");
			efx_schedule_reset(efx, RESET_TYPE_MONITOR);
		} else {
			EFX_ERR(efx, "hardware monitor detected a fault, "
				"skipping reset\n");
		}
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}
/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = net_dev->priv;

	EFX_ASSERT_RESET_SERIALISED(efx);

	return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
}
/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		rc = efx_lro_init(&channel->lro_mgr, efx);
		if (rc)
			goto err;
	}
	return 0;

 err:
	efx_fini_napi(efx);
	return rc;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_lro_fini(&channel->lro_mgr);
		channel->napi_dev = NULL;
	}
}
/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = net_dev->priv;
	struct efx_channel *channel;

	efx_for_each_channel_with_interrupt(channel, efx)
		efx_schedule_channel(channel);
}

#endif
/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = net_dev->priv;
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	efx_start_all(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = net_dev->priv;
	int rc;

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);
	efx_fini_channels(efx);
	rc = efx_init_channels(efx);
	if (rc)
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);

	return 0;
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = net_dev->priv;
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them (or resetting the NIC); slightly stale
	 * stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (efx->state == STATE_RUNNING) {
		falcon_update_stats_xmac(efx);
		falcon_update_nic_stats(efx);
	}
	spin_unlock(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = net_dev->priv;

	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
		atomic_read(&efx->netif_stop_count), efx->port_enabled,
		monitor_reset ? "resetting channels" : "skipping reset");

	if (monitor_reset)
		efx_schedule_reset(efx, RESET_TYPE_MONITOR);
}
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = net_dev->priv;
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	rc = efx_init_channels(efx);
	if (rc)
		goto fail;

	efx_start_all(efx);
	return rc;

 fail:
	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	return rc;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = net_dev->priv;
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		DECLARE_MAC_BUF(mac);
		EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
			print_mac(mac, new_addr));
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = net_dev->priv;
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	int promiscuous;
	u32 crc;
	int bit;
	int i;

	/* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
	promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
	if (efx->promiscuous != promiscuous) {
		efx->promiscuous = promiscuous;
		/* Close the window between efx_stop_port() and efx_flush_all()
		 * by only queuing work when the port is enabled. */
		if (efx->port_enabled)
			queue_work(efx->workqueue, &efx->reconfigure_work);
	}

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}
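/* (Worked example of the hash computation above: ether_crc_le() yields
 * a 32-bit CRC of the multicast address, and because
 * EFX_MCAST_HASH_ENTRIES is a power of two, crc &
 * (EFX_MCAST_HASH_ENTRIES - 1) keeps just the low-order bits of that
 * CRC as the bit index. The resulting filter is an over-approximation:
 * the MAC accepts any frame whose destination hashes to a set bit, and
 * the stack filters out the rest.) */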
static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
		struct efx_nic *efx = net_dev->priv;

		strcpy(efx->name, net_dev->name);
	}

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->open = efx_net_open;
	net_dev->stop = efx_net_stop;
	net_dev->get_stats = efx_net_stats;
	net_dev->tx_timeout = &efx_watchdog;
	net_dev->hard_start_xmit = efx_hard_start_xmit;
	net_dev->do_ioctl = efx_ioctl;
	net_dev->change_mtu = efx_change_mtu;
	net_dev->set_mac_address = efx_set_mac_address;
	net_dev->set_multicast_list = efx_set_multicast_list;
#ifdef CONFIG_NET_POLL_CONTROLLER
	net_dev->poll_controller = efx_netpoll;
#endif
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	/* Clear MAC statistics */
	falcon_update_stats_xmac(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rc = register_netdev(net_dev);
	if (rc) {
		EFX_ERR(efx, "could not register net dev\n");
		return rc;
	}
	strcpy(efx->name, net_dev->name);

	return 0;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(efx->net_dev->priv != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		unregister_netdev(efx->net_dev);
	}
}
/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* The final hardware and software finalisation before reset. */
static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = falcon_xmac_get_settings(efx, ecmd);
	if (rc) {
		EFX_ERR(efx, "could not back up PHY settings\n");
		return rc;
	}

	efx_fini_channels(efx);
	return 0;
}

/* The first part of software initialisation after a hardware reset
 * This function does not handle serialisation with the kernel, it
 * assumes the caller has done this */
static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	int rc;

	rc = efx_init_channels(efx);
	if (rc)
		goto fail1;

	/* Restore MAC and PHY settings. */
	rc = falcon_xmac_set_settings(efx, ecmd);
	if (rc) {
		EFX_ERR(efx, "could not restore PHY settings\n");
		goto fail2;
	}

	return 0;

 fail2:
	efx_fini_channels(efx);
 fail1:
	return rc;
}
/* Reset the NIC as transparently as possible. Do not reset the PHY
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep. You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
	int rc;

	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto unlock_rtnl;
	}

	efx->state = STATE_RESETTING;
	EFX_INFO(efx, "resetting (%d)\n", method);

	/* The net_dev->get_stats handler is quite slow, and will fail
	 * if a fetch is pending over reset. Serialise against it. */
	spin_lock(&efx->stats_lock);
	spin_unlock(&efx->stats_lock);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);

	rc = efx_reset_down(efx, &ecmd);
	if (rc)
		goto fail;

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto fail;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
	 * case so the driver can talk to external SRAM */
	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail;
	}

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		/* Reinitialise the device anyway so the driver unload sequence
		 * can talk to the external SRAM */
		falcon_init_nic(efx);
		rc = -EIO;
		goto fail;
	}

	rc = efx_reset_up(efx, &ecmd);
	if (rc)
		goto fail;

	mutex_unlock(&efx->mac_lock);
	EFX_LOG(efx, "reset complete\n");

	efx->state = STATE_RUNNING;
	efx_start_all(efx);

 unlock_rtnl:
	rtnl_unlock();
	return 0;

 fail:
	EFX_ERR(efx, "has been disabled\n");
	efx->state = STATE_DISABLED;
	mutex_unlock(&efx->mac_lock);

	/* Remove the net_dev */
	efx_unregister_netdev(efx);
	efx_fini_port(efx);
	rtnl_unlock();
	return rc;
}
/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		method = RESET_TYPE_INVISIBLE;
		break;
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
	else
		EFX_LOG(efx, "scheduling reset (%d)\n", method);

	efx->reset_pending = method;

	queue_work(efx->workqueue, &efx->reset_work);
}
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0}			/* end of list */
};
/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
 * Can be used where the MAC does not implement this operation
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {}

static struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_void,
	.check_hw	 = efx_port_dummy_op_int,
	.fini		 = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
	.reset_xaui	 = efx_port_dummy_op_void,
};

/* Dummy board operations */
static int efx_nic_dummy_op_int(struct efx_nic *nic)
{
	return 0;
}

static struct efx_board efx_dummy_board_info = {
	.init		= efx_nic_dummy_op_int,
	.init_leds	= efx_port_dummy_op_int,
	.set_fault_led	= efx_port_dummy_op_blink,
	.fini		= efx_port_dummy_op_void,
};
/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
	efx->board_info = efx_dummy_board_info;

	efx->net_dev = net_dev;
	efx->rx_checksum_enabled = 1;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mii.dev = net_dev;
	INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
	atomic_set(&efx->netif_stop_count, 1);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
		channel->evqnum = i;
		channel->work_pending = 0;
	}
	for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
		tx_queue->tso_headers_free = NULL;
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}

	efx->type = type;

	/* Sanity-check NIC type */
	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
			    (efx->type->txd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
			    (efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->evq_size &
			    (efx->type->evq_size - 1));
	/* As close as we can get to guaranteeing that we don't overflow */
	EFX_BUG_ON_PARANOID(efx->type->evq_size <
			    (efx->type->txd_ring_mask + 1 +
			     efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
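	/* (The mask checks above rely on the power-of-two identity
	 * x & (x + 1) == 0 for an all-ones mask: e.g. a ring mask of 0xfff
	 * implies a 4096-entry ring, and 0xfff & 0x1000 == 0, so it passes;
	 * a corrupt mask such as 0xffe would trip the assertion. The evq_size
	 * check uses the complementary form x & (x - 1) == 0 for a
	 * power-of-two size.) */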
	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	efx->workqueue = create_singlethread_workqueue("sfc_work");
	if (!efx->workqueue)
		return -ENOMEM;

	return 0;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}
/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Skip everything if we never obtained a valid membase */
	if (!efx->membase)
		return;

	efx_fini_channels(efx);
	efx_fini_port(efx);

	/* Shutdown the board, then the NIC and board state */
	efx->board_info.fini(efx);
	falcon_fini_interrupt(efx);

	efx_fini_napi(efx);
	efx_remove_all(efx);
}
/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx->state = STATE_FINI;
	dev_close(efx->net_dev);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	if (efx->membase == NULL)
		goto out;

	efx_unregister_netdev(efx);

	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net_device's have been removed. */
	flush_workqueue(efx->workqueue);

	efx_pci_remove_main(efx);

 out:
	efx_fini_io(efx);
	EFX_LOG(efx, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	free_netdev(efx->net_dev);
}
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	rc = efx_init_napi(efx);
	if (rc)
		goto fail2;

	/* Initialise the board */
	rc = efx->board_info.init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail3;
	}

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise port\n");
		goto fail4;
	}

	rc = efx_init_channels(efx);
	if (rc)
		goto fail5;

	rc = falcon_init_interrupt(efx);
	if (rc)
		goto fail6;

	return 0;

 fail6:
	efx_fini_channels(efx);
 fail5:
	efx_fini_port(efx);
 fail4:
	efx->board_info.fini(efx);
 fail3:
	efx_fini_napi(efx);
 fail2:
	efx_remove_all(efx);
 fail1:
	return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int i, rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev(sizeof(*efx));
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO);
	if (lro)
		net_dev->features |= NETIF_F_LRO;
	efx = net_dev->priv;
	pci_set_drvdata(pci_dev, efx);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	EFX_INFO(efx, "Solarflare Communications NIC detected\n");

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	/* No serialisation is required with the reset path because
	 * we're in STATE_INIT. */
	for (i = 0; i < 5; i++) {
		rc = efx_pci_probe_main(efx);
		if (rc == 0)
			break;

		/* Serialise against efx_reset(). No more resets will be
		 * scheduled since efx_stop_all() has been called, and we
		 * have not and never have been registered with either
		 * the rtnetlink or driverlink layers. */
		cancel_work_sync(&efx->reset_work);

		/* Retry if a recoverable reset event has been scheduled */
		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
		    (efx->reset_pending != RESET_TYPE_ALL))
			goto fail3;

		efx->reset_pending = RESET_TYPE_NONE;
	}

	if (rc) {
		EFX_ERR(efx, "Could not reset NIC\n");
		goto fail3;
	}

	/* Switch to the running state before we expose the device to
	 * the OS. This is to ensure that the initial gathering of
	 * MAC stats succeeds. */
	rtnl_lock();
	efx->state = STATE_RUNNING;
	rtnl_unlock();

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail4;

	EFX_LOG(efx, "initialisation successful\n");

	return 0;

 fail4:
	efx_pci_remove_main(efx);
 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
static struct pci_driver efx_pci_driver = {
	.name		= EFX_DRIVER_NAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
};
/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	refill_workqueue = create_workqueue("sfc_refill");
	if (!refill_workqueue) {
		rc = -ENOMEM;
		goto err_refill;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(refill_workqueue);
 err_refill:
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(refill_workqueue);
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);
MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
	      "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);