/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
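
/*
 * Worked example of the mapping above (derived from the code, not from the
 * original source): routing Rx queue 3 (direction 0) to MSI-X vector 2 uses
 * register VTIVAR(3 >> 1) = VTIVAR(1) and byte offset
 * index = (16 * (3 & 1)) + (8 * 0) = 16, so bits 23:16 of that register are
 * programmed with (2 | IXGBE_IVAR_ALLOC_VAL).
 */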
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
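
/*
 * Sizing example (illustrative, not from the original source): a 20000-byte
 * fragment costs TXD_USE_COUNT(20000) = DIV_ROUND_UP(20000, 16384) = 2
 * descriptors, while anything up to 16384 bytes costs exactly 1.
 * DESC_NEEDED budgets the worst case of one descriptor per fragment plus
 * slack for the linear data and a context descriptor.
 */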
static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
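				/* e.g. (illustrative numbers): a TSO skb
				 * with gso_segs = 3, 66 bytes of headers in
				 * skb_headlen() and skb->len = 3066 counts
				 * (2 * 66) + 3066 = 3198 bytes, since every
				 * segment after the first re-sends the
				 * headers.
				 */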
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	return count < tx_ring->count;
}
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&q_vector->napi, skb);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @ring: ring on which the packet was received
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);

		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;

			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    !(compare_ether_addr(adapter->netdev->dev_addr,
					 eth_hdr(skb)->h_source))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return !!budget;
}
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
						       per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}
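
/*
 * Budget-split example for ixgbevf_poll (derived from the code above): with
 * the default NAPI budget of 64 and two Rx rings on one vector, each ring
 * is cleaned with per_ring_budget = max(64 / 2, 1) = 32; a single ring
 * keeps the full budget.
 */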
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
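
/*
 * Worked example (a sketch; assumes the conventional ITR register values
 * where IXGBE_20K_ITR == 200): at 20K ints/s the last timeslice is
 * 200 >> 2 = 50 usecs, so an interrupt that moved 3000 bytes yields
 * bytes_perint = 60 and promotes a low_latency ring to bulk_latency, while
 * 400 bytes yields bytes_perint = 8 and demotes it to lowest_latency.
 */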
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
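
/*
 * The smoothing above steps toward the target instead of jumping: e.g.
 * moving from q_vector->itr = 500 toward new_itr = 200 gives
 * (10 * 200 * 500) / ((9 * 200) + 500) = 1000000 / 2300 = 434, so the
 * programmed rate converges over several interrupts.
 */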
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
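
/*
 * Grouping example (derived from the loops above): 3 Rx queues shared by
 * 2 vectors gives rqpv = DIV_ROUND_UP(3, 2) = 2 queues on vector 0, then
 * DIV_ROUND_UP(1, 1) = 1 queue on vector 1.
 */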
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt Error: %d\n",
			       err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Make best use of allocation by using all but 1K of a
	 * power of 2 allocation that will be used for skb->head.
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_3K)
		rx_buf_len = IXGBEVF_RXBUFFER_3K;
	else if (max_frame <= IXGBEVF_RXBUFFER_7K)
		rx_buf_len = IXGBEVF_RXBUFFER_7K;
	else if (max_frame <= IXGBEVF_RXBUFFER_15K)
		rx_buf_len = IXGBEVF_RXBUFFER_15K;
	else
		rx_buf_len = IXGBEVF_MAX_RXBUFFER;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}
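
/*
 * Sizing example (illustrative; assumes the usual header lengths
 * ETH_HLEN = 14, ETH_FCS_LEN = 4, VLAN_HLEN = 4): a 1500-byte MTU gives
 * max_frame = 1522, which fits MAXIMUM_ETHERNET_VLAN_SIZE on X540 VF and
 * the 3K bucket elsewhere, while a 9000-byte MTU (max_frame = 9022)
 * overflows the 7K bucket and lands in the 15K one.
 */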
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 rdlen;

	/* PSRTYPE must be initialized in 82599 */
	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);

		ixgbevf_configure_srrctl(adapter, j);
	}
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	if (!hw->mac.ops.set_vfta)
		return -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}
#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL)
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n",
		       rxr);

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}
static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;

		/* allocate resources on the ring */
		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
		if (err) {
			while (i) {
				i--;
				ixgbevf_free_rx_resources(adapter,
							  &rx_ring[i]);
			}
			kfree(rx_ring);
			return err;
		}
	}

	/* free the existing rings and queues */
	ixgbevf_free_all_rx_resources(adapter);
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	/* reset ring to vector mapping */
	ixgbevf_reset_q_vectors(adapter);
	ixgbevf_map_rings_to_vectors(adapter);

	return 0;
}
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_negotiate_api(adapter);

	ixgbevf_reset_queues(adapter);

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	spin_lock_bh(&adapter->mbx_lock);

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	spin_unlock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		/* reg_idx may be remapped later by DCB config */
		adapter->tx_ring[i].reg_idx = i;
		adapter->tx_ring[i].dev = &adapter->pdev->dev;
		adapter->tx_ring[i].netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
		adapter->rx_ring[i].dev = &adapter->pdev->dev;
		adapter->rx_ring[i].netdev = adapter->netdev;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;
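	/* e.g. with the single Rx/Tx queue pair set up by
	 * ixgbevf_set_num_queues() and assuming NON_Q_VECTORS is 1 (the
	 * mailbox/other vector), v_budget works out to 2 on any
	 * multi-CPU system: one queue vector plus the other vector.
	 */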

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbevf_acquire_msix_vectors(adapter, v_budget);

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
	       adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state, assigning new address\n");
		eth_hw_addr_random(adapter->netdev);
		memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
		       adapter->netdev->addr_len);
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_hw failed: %d\n", err);
			goto out;
		}
		memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
		       adapter->netdev->addr_len);
	}

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
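
/*
 * Wrap-around example for the 36-bit case (derived from the macro above):
 * if last_counter was 0xFFFFFFFF0 and the register pair now reads
 * 0x000000010, current_counter < last_counter, so 0x1000000000 (2^36) is
 * added to the running 64-bit total before its low 36 bits are replaced
 * with the fresh hardware value.
 */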
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);
}
/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	if (hw->mac.ops.check_link) {
		s32 need_reset;

		spin_lock_bh(&adapter->mbx_lock);

		need_reset = hw->mac.ops.check_link(hw, &link_speed,
						    &link_up, false);

		spin_unlock_bh(&adapter->mbx_lock);

		if (need_reset) {
			adapter->link_up = link_up;
			adapter->link_speed = link_speed;
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
			schedule_work(&adapter->reset_task);
			goto pf_has_reset;
		}
	} else {
		/* always assume link is up, if no check link
		 * function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
			       10 : 1);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
	       "descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto alloc_failed;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for "
		       "the receive descriptor ring\n");
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
}
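/**
 * ixgbevf_setup_queues - fetch queue configuration from the PF
 * @adapter: board private structure
 *
 * Queries the PF (under the mailbox lock) for the number of traffic
 * classes and the default Tx queue, then reallocates the Rx ring array
 * so there is one Rx queue per traffic class.
 **/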
static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;
	}

	/* free the existing ring and queues */
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	return 0;
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't "
			       "up yet\n");
			goto err_setup_reset;
		}
	}

	ixgbevf_negotiate_api(adapter);

	/* setup queue reg_idx and Rx queue count */
	err = ixgbevf_setup_queues(adapter);
	if (err)
		goto err_setup_queues;

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
err_setup_queues:
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
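/*
 * Write an advanced context descriptor at the current next_to_use slot.
 * A context descriptor carries offload parameters (VLAN tag, header
 * lengths, MSS) rather than a buffer address, but it still consumes one
 * slot in the descriptor ring, which is why next_to_use is advanced here.
 */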
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}
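/*
 * Set up Tx checksum offload for a frame.  Note that the context
 * descriptor is written even when no checksum offload is requested,
 * apparently so the VLAN tag and MAC header length still reach the
 * hardware.  Returns true if the frame needs checksum insertion.
 */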
static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 skb->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return (skb->ip_summed == CHECKSUM_PARTIAL);
}
static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  unsigned int first)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)skb_frag_size(frag), total);
		offset = 0;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma =
				skb_frag_dma_map(tx_ring->dev, frag,
						 offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(tx_ring->dev,
					      tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->mapped_as_page = true;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;
	tx_ring->tx_buffer_info[first].time_stamp = jiffies;

	return count;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->next_to_watch = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return count;
}
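/*
 * Fill in the data descriptors for every buffer mapped by ixgbevf_tx_map.
 * All descriptors of the frame share the same cmd_type/olinfo bits; the
 * EOP/RS/IFCS command bits in txd_cmd are OR'd into the last descriptor
 * only, so the hardware reports status once for the whole frame.
 */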
static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
	}

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= IXGBE_ADVTXD_CC;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	tx_ring->next_to_use = i;
}
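/*
 * Stop the queue when the ring is too full for the next frame.  The
 * smp_mb() orders the queue stop against the re-read of the free
 * descriptor count, closing the race with a concurrent Tx cleanup that
 * may have just made room; if it did, the queue is restarted right away.
 */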
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);

	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif

	tx_ring = &adapter->tx_ring[r_idx];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(tx_ring, tx_flags,
			 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
			 skb->len, hdr_len);
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	if (hw->mac.ops.set_rar)
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	/* probe stores the netdev, not the adapter, as driver data */
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	ixgbevf_reset(adapter);

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
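/*
 * ndo_get_stats64 callback.  Per-ring byte/packet totals are read under
 * the u64_stats syncp sequence counter and retried if a writer updated
 * the ring stats mid-read, keeping the 64-bit counters consistent on
 * 32-bit systems.
 */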
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = &adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = &adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open = ixgbevf_open,
	.ndo_stop = ixgbevf_close,
	.ndo_start_xmit = ixgbevf_xmit_frame,
	.ndo_set_rx_mode = ixgbevf_set_rx_mode,
	.ndo_get_stats64 = ixgbevf_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = ixgbevf_set_mac,
	.ndo_change_mtu = ixgbevf_change_mtu,
	.ndo_tx_timeout = ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbevf_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* pick up the PCI bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};
static struct pci_driver ixgbevf_driver = {
	.name = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe = ixgbevf_probe,
	.remove = __devexit_p(ixgbevf_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = ixgbevf_suspend,
	.resume = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */