1 /*******************************************************************************
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2018 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, see <http://www.gnu.org/licenses/>.
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
21 Contact Information:
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *******************************************************************************/
27 /******************************************************************************
28 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
29 ******************************************************************************/
31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
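/* With pr_fmt defined before any includes, every pr_*() call in this file is
 * prefixed with the module name, so e.g. the Tx hang report below prints as
 * "ixgbevf: Detected Tx Unit Hang ...".
 */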
33 #include <linux/types.h>
34 #include <linux/bitops.h>
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/netdevice.h>
38 #include <linux/vmalloc.h>
39 #include <linux/string.h>
42 #include <linux/tcp.h>
43 #include <linux/sctp.h>
44 #include <linux/ipv6.h>
45 #include <linux/slab.h>
46 #include <net/checksum.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/ethtool.h>
50 #include <linux/if_vlan.h>
51 #include <linux/prefetch.h>
53 #include <linux/bpf.h>
54 #include <linux/bpf_trace.h>
55 #include <linux/atomic.h>
59 const char ixgbevf_driver_name[] = "ixgbevf";
60 static const char ixgbevf_driver_string[] =
61 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
63 #define DRV_VERSION "4.1.0-k"
64 const char ixgbevf_driver_version[] = DRV_VERSION;
65 static char ixgbevf_copyright[] =
66 "Copyright (c) 2009 - 2015 Intel Corporation.";
68 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
69 [board_82599_vf] = &ixgbevf_82599_vf_info,
70 [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info,
71 [board_X540_vf] = &ixgbevf_X540_vf_info,
72 [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info,
73 [board_X550_vf] = &ixgbevf_X550_vf_info,
74 [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
75 [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
76 [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
77 [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
80 /* ixgbevf_pci_tbl - PCI Device ID Table
82 * Wildcard entries (PCI_ANY_ID) should come last
83 * Last entry must be all 0s
85 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
86 * Class, Class Mask, private data (not used) }
88 static const struct pci_device_id ixgbevf_pci_tbl[] = {
89 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
94 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
95 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
96 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
97 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
98 /* required last entry */
101 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
103 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
104 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
105 MODULE_LICENSE("GPL");
106 MODULE_VERSION(DRV_VERSION);
108 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
109 static int debug = -1;
110 module_param(debug, int, 0);
111 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
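/* The -1 default is deliberately out of range: the value is normally handed
 * to netif_msg_init(), which falls back to DEFAULT_MSG_ENABLE (driver, probe
 * and link messages) for any value outside 0..16.
 */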
113 static struct workqueue_struct *ixgbevf_wq;
115 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
117 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
118 !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
119 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
120 queue_work(ixgbevf_wq, &adapter->service_task);
123 static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
125 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
127 /* flush memory to make sure state is correct before next watchdog */
128 smp_mb__before_atomic();
129 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
133 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
134 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
135 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
136 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
137 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
138 struct ixgbevf_rx_buffer *old_buff);
140 static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
142 struct ixgbevf_adapter *adapter = hw->back;
147 dev_err(&adapter->pdev->dev, "Adapter removed\n");
148 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
149 ixgbevf_service_event_schedule(adapter);
152 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
156 /* The following check not only avoids a redundant read of the
157 * status register when the register just read was itself the
158 * status register and it returned IXGBE_FAILED_READ_REG, it
159 * also blocks any potential recursion.
160 */
162 if (reg == IXGBE_VFSTATUS) {
163 ixgbevf_remove_adapter(hw);
166 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
167 if (value == IXGBE_FAILED_READ_REG)
168 ixgbevf_remove_adapter(hw);
171 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
173 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
176 if (IXGBE_REMOVED(reg_addr))
177 return IXGBE_FAILED_READ_REG;
178 value = readl(reg_addr + reg);
179 if (unlikely(value == IXGBE_FAILED_READ_REG))
180 ixgbevf_check_remove(hw, reg);
185 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
186 * @adapter: pointer to adapter struct
187 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
188 * @queue: queue to map the corresponding interrupt to
189 * @msix_vector: the vector to map to the corresponding queue
191 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
192 u8 queue, u8 msix_vector)
195 struct ixgbe_hw *hw = &adapter->hw;
197 if (direction == -1) {
199 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
200 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
203 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
205 /* Tx or Rx causes */
206 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
207 index = ((16 * (queue & 1)) + (8 * direction));
208 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
209 ivar &= ~(0xFF << index);
210 ivar |= (msix_vector << index);
211 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
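/* Each VTIVAR register covers two queues with an 8-bit allocation field per
 * cause: bit offset = 16 * (queue & 1) + 8 * direction.  Worked example:
 * mapping Tx queue 3 (direction 1) to MSI-X vector 2 writes
 * (2 | IXGBE_IVAR_ALLOC_VAL) into bits 31:24 of VTIVAR(1).
 */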
215 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
217 return ring->stats.packets;
220 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
222 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
223 struct ixgbe_hw *hw = &adapter->hw;
225 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
226 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
229 return (head < tail) ?
230 tail - head : (tail + ring->count - head);
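/* The head/tail math above handles ring wrap: on a 256 descriptor ring,
 * head = 250 and tail = 10 yields 10 + 256 - 250 = 16 descriptors pending.
 */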
235 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
237 u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
238 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
239 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
241 clear_check_for_tx_hang(tx_ring);
243 /* Check for a hung queue, but be thorough. This verifies
244 * that no transmit has completed since the previous
245 * check AND there is at least one packet pending. The
246 * ARMED bit is set to indicate a potential hang.
248 if ((tx_done_old == tx_done) && tx_pending) {
249 /* make sure it is true for two checks in a row */
250 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
253 /* reset the countdown */
254 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
256 /* update completed stats and continue */
257 tx_ring->tx_stats.tx_done_old = tx_done;
262 static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
264 /* Do the reset outside of interrupt context */
265 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
266 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
267 ixgbevf_service_event_schedule(adapter);
272 * ixgbevf_tx_timeout - Respond to a Tx Hang
273 * @netdev: network interface device structure
275 static void ixgbevf_tx_timeout(struct net_device *netdev)
277 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
279 ixgbevf_tx_timeout_reset(adapter);
283 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
284 * @q_vector: structure containing interrupt and ring information
285 * @tx_ring: tx ring to clean
286 * @napi_budget: Used to determine if we are in netpoll
288 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
289 struct ixgbevf_ring *tx_ring, int napi_budget)
291 struct ixgbevf_adapter *adapter = q_vector->adapter;
292 struct ixgbevf_tx_buffer *tx_buffer;
293 union ixgbe_adv_tx_desc *tx_desc;
294 unsigned int total_bytes = 0, total_packets = 0;
295 unsigned int budget = tx_ring->count / 2;
296 unsigned int i = tx_ring->next_to_clean;
298 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
301 tx_buffer = &tx_ring->tx_buffer_info[i];
302 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
306 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
308 /* if next_to_watch is not set then there is no work pending */
312 /* prevent any other reads prior to eop_desc */
315 /* if DD is not set pending work has not been completed */
316 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
319 /* clear next_to_watch to prevent false hangs */
320 tx_buffer->next_to_watch = NULL;
322 /* update the statistics for this packet */
323 total_bytes += tx_buffer->bytecount;
324 total_packets += tx_buffer->gso_segs;
327 if (ring_is_xdp(tx_ring))
328 page_frag_free(tx_buffer->data);
330 napi_consume_skb(tx_buffer->skb, napi_budget);
332 /* unmap skb header data */
333 dma_unmap_single(tx_ring->dev,
334 dma_unmap_addr(tx_buffer, dma),
335 dma_unmap_len(tx_buffer, len),
338 /* clear tx_buffer data */
339 dma_unmap_len_set(tx_buffer, len, 0);
341 /* unmap remaining buffers */
342 while (tx_desc != eop_desc) {
348 tx_buffer = tx_ring->tx_buffer_info;
349 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
352 /* unmap any remaining paged data */
353 if (dma_unmap_len(tx_buffer, len)) {
354 dma_unmap_page(tx_ring->dev,
355 dma_unmap_addr(tx_buffer, dma),
356 dma_unmap_len(tx_buffer, len),
358 dma_unmap_len_set(tx_buffer, len, 0);
362 /* move us one more past the eop_desc for start of next pkt */
368 tx_buffer = tx_ring->tx_buffer_info;
369 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
372 /* issue prefetch for next Tx descriptor */
375 /* update budget accounting */
377 } while (likely(budget));
380 tx_ring->next_to_clean = i;
381 u64_stats_update_begin(&tx_ring->syncp);
382 tx_ring->stats.bytes += total_bytes;
383 tx_ring->stats.packets += total_packets;
384 u64_stats_update_end(&tx_ring->syncp);
385 q_vector->tx.total_bytes += total_bytes;
386 q_vector->tx.total_packets += total_packets;
388 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
389 struct ixgbe_hw *hw = &adapter->hw;
390 union ixgbe_adv_tx_desc *eop_desc;
392 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
394 pr_err("Detected Tx Unit Hang%s\n"
396 " TDH, TDT <%x>, <%x>\n"
397 " next_to_use <%x>\n"
398 " next_to_clean <%x>\n"
399 "tx_buffer_info[next_to_clean]\n"
400 " next_to_watch <%p>\n"
401 " eop_desc->wb.status <%x>\n"
402 " time_stamp <%lx>\n"
404 ring_is_xdp(tx_ring) ? " XDP" : "",
405 tx_ring->queue_index,
406 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
407 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
408 tx_ring->next_to_use, i,
409 eop_desc, (eop_desc ? eop_desc->wb.status : 0),
410 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
412 if (!ring_is_xdp(tx_ring))
413 netif_stop_subqueue(tx_ring->netdev,
414 tx_ring->queue_index);
416 /* schedule immediate reset if we believe we hung */
417 ixgbevf_tx_timeout_reset(adapter);
422 if (ring_is_xdp(tx_ring))
425 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
426 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
427 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
428 /* Make sure that anybody stopping the queue after this
429 * sees the new next_to_clean.
433 if (__netif_subqueue_stopped(tx_ring->netdev,
434 tx_ring->queue_index) &&
435 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
436 netif_wake_subqueue(tx_ring->netdev,
437 tx_ring->queue_index);
438 ++tx_ring->tx_stats.restart_queue;
446 * ixgbevf_rx_skb - Helper function to determine proper Rx method
447 * @q_vector: structure containing interrupt and ring information
448 * @skb: packet to send up
450 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
453 napi_gro_receive(&q_vector->napi, skb);
456 #define IXGBE_RSS_L4_TYPES_MASK \
457 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
458 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
459 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
460 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
462 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
463 union ixgbe_adv_rx_desc *rx_desc,
468 if (!(ring->netdev->features & NETIF_F_RXHASH))
471 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
472 IXGBE_RXDADV_RSSTYPE_MASK;
477 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
478 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
479 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
483 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
484 * @ring: structure containing ring specific data
485 * @rx_desc: current Rx descriptor being processed
486 * @skb: skb currently being received and modified
488 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
489 union ixgbe_adv_rx_desc *rx_desc,
492 skb_checksum_none_assert(skb);
494 /* Rx csum disabled */
495 if (!(ring->netdev->features & NETIF_F_RXCSUM))
498 /* if IP and error */
499 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
500 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
501 ring->rx_stats.csum_err++;
505 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
508 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
509 ring->rx_stats.csum_err++;
513 /* It must be a TCP or UDP packet with a valid checksum */
514 skb->ip_summed = CHECKSUM_UNNECESSARY;
518 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
519 * @rx_ring: rx descriptor ring packet is being transacted on
520 * @rx_desc: pointer to the EOP Rx descriptor
521 * @skb: pointer to current skb being populated
523 * This function checks the ring, descriptor, and packet information in
524 * order to populate the checksum, VLAN, protocol, and other fields within the skb.
527 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
528 union ixgbe_adv_rx_desc *rx_desc,
531 ixgbevf_rx_hash(rx_ring, rx_desc, skb);
532 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
534 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
535 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
536 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
538 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
539 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
542 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
546 struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
547 const unsigned int size)
549 struct ixgbevf_rx_buffer *rx_buffer;
551 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
552 prefetchw(rx_buffer->page);
554 /* we are reusing so sync this buffer for CPU use */
555 dma_sync_single_range_for_cpu(rx_ring->dev,
557 rx_buffer->page_offset,
561 rx_buffer->pagecnt_bias--;
566 static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
567 struct ixgbevf_rx_buffer *rx_buffer,
570 if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
571 /* hand second half of page back to the ring */
572 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
575 /* We are not reusing the buffer so unmap it and free
576 * any references we are holding to it
578 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
579 ixgbevf_rx_pg_size(rx_ring),
581 IXGBEVF_RX_DMA_ATTR);
582 __page_frag_cache_drain(rx_buffer->page,
583 rx_buffer->pagecnt_bias);
586 /* clear contents of rx_buffer */
587 rx_buffer->page = NULL;
591 * ixgbevf_is_non_eop - process handling of non-EOP buffers
592 * @rx_ring: Rx ring being processed
593 * @rx_desc: Rx descriptor for current buffer
595 * This function updates next to clean. If the buffer is an EOP buffer
596 * this function exits returning false, otherwise it will place the
597 * sk_buff in the next buffer to be chained and return true indicating
598 * that this is in fact a non-EOP buffer.
600 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
601 union ixgbe_adv_rx_desc *rx_desc)
603 u32 ntc = rx_ring->next_to_clean + 1;
605 /* fetch, update, and store next to clean */
606 ntc = (ntc < rx_ring->count) ? ntc : 0;
607 rx_ring->next_to_clean = ntc;
609 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
611 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
617 static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
619 return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
622 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
623 struct ixgbevf_rx_buffer *bi)
625 struct page *page = bi->page;
628 /* since we are recycling buffers we should seldom need to alloc */
632 /* alloc new page for storage */
633 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
634 if (unlikely(!page)) {
635 rx_ring->rx_stats.alloc_rx_page_failed++;
639 /* map page for use */
640 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
641 ixgbevf_rx_pg_size(rx_ring),
642 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
644 /* if mapping failed free memory back to system since
645 * there isn't much point in holding memory we can't use
647 if (dma_mapping_error(rx_ring->dev, dma)) {
648 __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
650 rx_ring->rx_stats.alloc_rx_page_failed++;
656 bi->page_offset = ixgbevf_rx_offset(rx_ring);
657 bi->pagecnt_bias = 1;
658 rx_ring->rx_stats.alloc_rx_page++;
664 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
665 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
666 * @cleaned_count: number of buffers to replace
668 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
671 union ixgbe_adv_rx_desc *rx_desc;
672 struct ixgbevf_rx_buffer *bi;
673 unsigned int i = rx_ring->next_to_use;
675 /* nothing to do or no valid netdev defined */
676 if (!cleaned_count || !rx_ring->netdev)
679 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
680 bi = &rx_ring->rx_buffer_info[i];
684 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
687 /* sync the buffer for use by the device */
688 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
690 ixgbevf_rx_bufsz(rx_ring),
693 /* Refresh the desc even if pkt_addr didn't change
694 * because each write-back erases this info.
696 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
702 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
703 bi = rx_ring->rx_buffer_info;
707 /* clear the length for the next_to_use descriptor */
708 rx_desc->wb.upper.length = 0;
711 } while (cleaned_count);
715 if (rx_ring->next_to_use != i) {
716 /* record the next descriptor to use */
717 rx_ring->next_to_use = i;
719 /* update next to alloc since we have filled the ring */
720 rx_ring->next_to_alloc = i;
722 /* Force memory writes to complete before letting h/w
723 * know there are new descriptors to fetch. (Only
724 * applicable for weak-ordered memory model archs,
728 ixgbevf_write_tail(rx_ring, i);
733 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
734 * @rx_ring: rx descriptor ring packet is being transacted on
735 * @rx_desc: pointer to the EOP Rx descriptor
736 * @skb: pointer to current skb being fixed
738 * Check for corrupted packet headers caused by senders on the local L2
739 * embedded NIC switch not setting up their Tx Descriptors right. These
740 * should be very rare.
742 * Also address the case where we are pulling data in on pages only
743 * and as such no data is present in the skb header.
745 * In addition if skb is not at least 60 bytes we need to pad it so that
746 * it is large enough to qualify as a valid Ethernet frame.
748 * Returns true if an error was encountered and skb was freed.
750 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
751 union ixgbe_adv_rx_desc *rx_desc,
754 /* XDP packets use error pointer so abort at this point */
758 /* verify that the packet does not have any known errors */
759 if (unlikely(ixgbevf_test_staterr(rx_desc,
760 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
761 struct net_device *netdev = rx_ring->netdev;
763 if (!(netdev->features & NETIF_F_RXALL)) {
764 dev_kfree_skb_any(skb);
769 /* if eth_skb_pad returns an error the skb was freed */
770 if (eth_skb_pad(skb))
777 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
778 * @rx_ring: rx descriptor ring to store buffers on
779 * @old_buff: donor buffer to have page reused
781 * Synchronizes page for reuse by the adapter
783 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
784 struct ixgbevf_rx_buffer *old_buff)
786 struct ixgbevf_rx_buffer *new_buff;
787 u16 nta = rx_ring->next_to_alloc;
789 new_buff = &rx_ring->rx_buffer_info[nta];
791 /* update, and store next to alloc */
793 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
795 /* transfer page from old buffer to new buffer */
796 new_buff->page = old_buff->page;
797 new_buff->dma = old_buff->dma;
798 new_buff->page_offset = old_buff->page_offset;
799 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
802 static inline bool ixgbevf_page_is_reserved(struct page *page)
804 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
807 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
809 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
810 struct page *page = rx_buffer->page;
812 /* avoid re-using remote pages */
813 if (unlikely(ixgbevf_page_is_reserved(page)))
816 #if (PAGE_SIZE < 8192)
817 /* if we are only owner of page we can reuse it */
818 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
821 #define IXGBEVF_LAST_OFFSET \
822 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
824 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
829 /* If we have drained the page fragment pool we need to update
830 * the pagecnt_bias and page count so that we fully restock the
831 * number of references the driver holds.
833 if (unlikely(!pagecnt_bias)) {
834 page_ref_add(page, USHRT_MAX);
835 rx_buffer->pagecnt_bias = USHRT_MAX;
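/* pagecnt_bias is a local, non-atomic stand-in for page references: the
 * driver charges one bias unit per frame it hands up instead of touching the
 * atomic page refcount, and only once the bias is exhausted does it bulk
 * re-add USHRT_MAX references above.  On small-page systems a page is
 * reusable while page_ref_count() minus the bias shows the driver as the
 * sole owner.
 */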
842 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
843 * @rx_ring: rx descriptor ring to transact packets on
844 * @rx_buffer: buffer containing page to add
845 * @skb: sk_buff to place the data into
846 * @size: size of buffer to be added
848 * This function will add the data contained in rx_buffer->page to the skb.
850 static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
851 struct ixgbevf_rx_buffer *rx_buffer,
855 #if (PAGE_SIZE < 8192)
856 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
858 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
859 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
860 SKB_DATA_ALIGN(size);
862 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
863 rx_buffer->page_offset, size, truesize);
864 #if (PAGE_SIZE < 8192)
865 rx_buffer->page_offset ^= truesize;
867 rx_buffer->page_offset += truesize;
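/* On systems with PAGE_SIZE < 8192 each page is split into two half-page
 * buffers and the XOR above simply flips between the two halves; on larger
 * pages the offset advances by truesize until IXGBEVF_LAST_OFFSET is reached
 * (see ixgbevf_can_reuse_rx_page()).
 */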
872 struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
873 struct ixgbevf_rx_buffer *rx_buffer,
874 struct xdp_buff *xdp,
875 union ixgbe_adv_rx_desc *rx_desc)
877 unsigned int size = xdp->data_end - xdp->data;
878 #if (PAGE_SIZE < 8192)
879 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
881 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
882 xdp->data_hard_start);
884 unsigned int headlen;
887 /* prefetch first cache line of first page */
889 #if L1_CACHE_BYTES < 128
890 prefetch(xdp->data + L1_CACHE_BYTES);
892 /* Note, we get here by enabling legacy-rx via:
894 * ethtool --set-priv-flags <dev> legacy-rx on
896 * In this mode, we currently get 0 extra XDP headroom as
897 * opposed to having legacy-rx off, where we process XDP
898 * packets going to stack via ixgbevf_build_skb().
900 * For ixgbevf_construct_skb() mode it means that the
901 * xdp->data_meta will always point to xdp->data, since
902 * the helper cannot expand the head. Should this ever
903 * change in the future for legacy-rx mode, then let's also
904 * add xdp->data_meta handling here.
907 /* allocate a skb to store the frags */
908 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
912 /* Determine available headroom for copy */
914 if (headlen > IXGBEVF_RX_HDR_SIZE)
915 headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
917 /* align pull length to size of long to optimize memcpy performance */
918 memcpy(__skb_put(skb, headlen), xdp->data,
919 ALIGN(headlen, sizeof(long)));
921 /* update all of the pointers */
924 skb_add_rx_frag(skb, 0, rx_buffer->page,
925 (xdp->data + headlen) -
926 page_address(rx_buffer->page),
928 #if (PAGE_SIZE < 8192)
929 rx_buffer->page_offset ^= truesize;
931 rx_buffer->page_offset += truesize;
934 rx_buffer->pagecnt_bias++;
940 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
943 struct ixgbe_hw *hw = &adapter->hw;
945 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
948 static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
949 struct ixgbevf_rx_buffer *rx_buffer,
950 struct xdp_buff *xdp,
951 union ixgbe_adv_rx_desc *rx_desc)
953 unsigned int metasize = xdp->data - xdp->data_meta;
954 #if (PAGE_SIZE < 8192)
955 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
957 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
958 SKB_DATA_ALIGN(xdp->data_end -
959 xdp->data_hard_start);
963 /* Prefetch first cache line of first page. If xdp->data_meta
964 * is unused, this points to xdp->data, otherwise, we likely
965 * have a consumer accessing first few bytes of meta data,
966 * and then actual data.
968 prefetch(xdp->data_meta);
969 #if L1_CACHE_BYTES < 128
970 prefetch(xdp->data_meta + L1_CACHE_BYTES);
973 /* build an skb around the page buffer */
974 skb = build_skb(xdp->data_hard_start, truesize);
978 /* update pointers within the skb to store the data */
979 skb_reserve(skb, xdp->data - xdp->data_hard_start);
980 __skb_put(skb, xdp->data_end - xdp->data);
982 skb_metadata_set(skb, metasize);
984 /* update buffer offset */
985 #if (PAGE_SIZE < 8192)
986 rx_buffer->page_offset ^= truesize;
988 rx_buffer->page_offset += truesize;
994 #define IXGBEVF_XDP_PASS 0
995 #define IXGBEVF_XDP_CONSUMED 1
996 #define IXGBEVF_XDP_TX 2
998 static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
999 struct xdp_buff *xdp)
1001 struct ixgbevf_tx_buffer *tx_buffer;
1002 union ixgbe_adv_tx_desc *tx_desc;
1007 len = xdp->data_end - xdp->data;
1009 if (unlikely(!ixgbevf_desc_unused(ring)))
1010 return IXGBEVF_XDP_CONSUMED;
1012 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
1013 if (dma_mapping_error(ring->dev, dma))
1014 return IXGBEVF_XDP_CONSUMED;
1016 /* record the location of the first descriptor for this packet */
1017 tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
1018 tx_buffer->bytecount = len;
1019 tx_buffer->gso_segs = 1;
1020 tx_buffer->protocol = 0;
1022 i = ring->next_to_use;
1023 tx_desc = IXGBEVF_TX_DESC(ring, i);
1025 dma_unmap_len_set(tx_buffer, len, len);
1026 dma_unmap_addr_set(tx_buffer, dma, dma);
1027 tx_buffer->data = xdp->data;
1028 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1030 /* put descriptor type bits */
1031 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
1032 IXGBE_ADVTXD_DCMD_DEXT |
1033 IXGBE_ADVTXD_DCMD_IFCS;
1034 cmd_type |= len | IXGBE_TXD_CMD;
1035 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1036 tx_desc->read.olinfo_status =
1037 cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
1040 /* Avoid any potential race with cleanup */
1043 /* set next_to_watch value indicating a packet is present */
1045 if (i == ring->count)
1048 tx_buffer->next_to_watch = tx_desc;
1049 ring->next_to_use = i;
1051 return IXGBEVF_XDP_TX;
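/* An XDP transmit always consumes exactly one descriptor: the whole frame
 * fits in the buffer just mapped, cmd_type ORs in the frame length together
 * with the final-descriptor command bits (IXGBE_TXD_CMD), and next_to_watch
 * is set so the regular Tx cleanup path can reclaim the buffer.
 */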
1054 static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
1055 struct ixgbevf_ring *rx_ring,
1056 struct xdp_buff *xdp)
1058 int result = IXGBEVF_XDP_PASS;
1059 struct ixgbevf_ring *xdp_ring;
1060 struct bpf_prog *xdp_prog;
1064 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1069 act = bpf_prog_run_xdp(xdp_prog, xdp);
1074 xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
1075 result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
1078 bpf_warn_invalid_xdp_action(act);
1081 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1082 /* fallthrough -- handle aborts by dropping packet */
1084 result = IXGBEVF_XDP_CONSUMED;
1089 return ERR_PTR(-result);
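/* The XDP verdict is folded into the returned pointer:
 * ERR_PTR(-IXGBEVF_XDP_TX) means the frame was queued on an XDP Tx ring,
 * ERR_PTR(-IXGBEVF_XDP_CONSUMED) means it was dropped, and NULL
 * (IXGBEVF_XDP_PASS, or no program attached) tells the caller to build an
 * skb and hand the frame to the stack.
 */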
1092 static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
1093 struct ixgbevf_rx_buffer *rx_buffer,
1096 #if (PAGE_SIZE < 8192)
1097 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
1099 rx_buffer->page_offset ^= truesize;
1101 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
1102 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
1103 SKB_DATA_ALIGN(size);
1105 rx_buffer->page_offset += truesize;
1109 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
1110 struct ixgbevf_ring *rx_ring,
1113 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1114 struct ixgbevf_adapter *adapter = q_vector->adapter;
1115 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
1116 struct sk_buff *skb = rx_ring->skb;
1117 bool xdp_xmit = false;
1118 struct xdp_buff xdp;
1120 xdp.rxq = &rx_ring->xdp_rxq;
1122 while (likely(total_rx_packets < budget)) {
1123 struct ixgbevf_rx_buffer *rx_buffer;
1124 union ixgbe_adv_rx_desc *rx_desc;
1127 /* return some buffers to hardware, one at a time is too slow */
1128 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
1129 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
1133 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1134 size = le16_to_cpu(rx_desc->wb.upper.length);
1138 /* This memory barrier is needed to keep us from reading
1139 * any other fields out of the rx_desc until we know the
1140 * RXD_STAT_DD bit is set
1144 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
1146 /* retrieve a buffer from the ring */
1148 xdp.data = page_address(rx_buffer->page) +
1149 rx_buffer->page_offset;
1150 xdp.data_meta = xdp.data;
1151 xdp.data_hard_start = xdp.data -
1152 ixgbevf_rx_offset(rx_ring);
1153 xdp.data_end = xdp.data + size;
1155 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
1159 if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
1161 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
1164 rx_buffer->pagecnt_bias++;
1167 total_rx_bytes += size;
1169 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1170 } else if (ring_uses_build_skb(rx_ring)) {
1171 skb = ixgbevf_build_skb(rx_ring, rx_buffer,
1174 skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
1178 /* exit if we failed to retrieve a buffer */
1180 rx_ring->rx_stats.alloc_rx_buff_failed++;
1181 rx_buffer->pagecnt_bias++;
1185 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
1188 /* fetch next buffer in frame if non-eop */
1189 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
1192 /* verify the packet layout is correct */
1193 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
1198 /* probably a little skewed due to removing CRC */
1199 total_rx_bytes += skb->len;
1201 /* Workaround hardware that can't do proper VEPA multicast source pruning. */
1204 if ((skb->pkt_type == PACKET_BROADCAST ||
1205 skb->pkt_type == PACKET_MULTICAST) &&
1206 ether_addr_equal(rx_ring->netdev->dev_addr,
1207 eth_hdr(skb)->h_source)) {
1208 dev_kfree_skb_irq(skb);
1212 /* populate checksum, VLAN, and protocol */
1213 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
1215 ixgbevf_rx_skb(q_vector, skb);
1217 /* reset skb pointer */
1220 /* update budget accounting */
1224 /* place incomplete frames back on ring for completion */
1228 struct ixgbevf_ring *xdp_ring =
1229 adapter->xdp_ring[rx_ring->queue_index];
1231 /* Force memory writes to complete before letting h/w
1232 * know there are new descriptors to fetch.
1235 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
1238 u64_stats_update_begin(&rx_ring->syncp);
1239 rx_ring->stats.packets += total_rx_packets;
1240 rx_ring->stats.bytes += total_rx_bytes;
1241 u64_stats_update_end(&rx_ring->syncp);
1242 q_vector->rx.total_packets += total_rx_packets;
1243 q_vector->rx.total_bytes += total_rx_bytes;
1245 return total_rx_packets;
1249 * ixgbevf_poll - NAPI polling callback
1250 * @napi: napi struct with our devices info in it
1251 * @budget: amount of work driver is allowed to do this pass, in packets
1253 * This function will clean one or more rings associated with a q_vector.
1256 static int ixgbevf_poll(struct napi_struct *napi, int budget)
1258 struct ixgbevf_q_vector *q_vector =
1259 container_of(napi, struct ixgbevf_q_vector, napi);
1260 struct ixgbevf_adapter *adapter = q_vector->adapter;
1261 struct ixgbevf_ring *ring;
1262 int per_ring_budget, work_done = 0;
1263 bool clean_complete = true;
1265 ixgbevf_for_each_ring(ring, q_vector->tx) {
1266 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
1267 clean_complete = false;
1273 /* attempt to distribute budget to each queue fairly, but don't allow
1274 * the budget to go below 1 because we'll exit polling
1276 if (q_vector->rx.count > 1)
1277 per_ring_budget = max(budget/q_vector->rx.count, 1);
1279 per_ring_budget = budget;
1281 ixgbevf_for_each_ring(ring, q_vector->rx) {
1282 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1284 work_done += cleaned;
1285 if (cleaned >= per_ring_budget)
1286 clean_complete = false;
1289 /* If all work not completed, return budget and keep polling */
1290 if (!clean_complete)
1292 /* all work done, exit the polling mode */
1293 napi_complete_done(napi, work_done);
1294 if (adapter->rx_itr_setting == 1)
1295 ixgbevf_set_itr(q_vector);
1296 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1297 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1298 ixgbevf_irq_enable_queues(adapter,
1299 BIT(q_vector->v_idx));
1305 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1306 * @q_vector: structure containing interrupt and ring information
1308 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1310 struct ixgbevf_adapter *adapter = q_vector->adapter;
1311 struct ixgbe_hw *hw = &adapter->hw;
1312 int v_idx = q_vector->v_idx;
1313 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1315 /* set the WDIS bit to not clear the timer bits and cause an
1316 * immediate assertion of the interrupt
1318 itr_reg |= IXGBE_EITR_CNT_WDIS;
1320 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1324 * ixgbevf_configure_msix - Configure MSI-X hardware
1325 * @adapter: board private structure
1327 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
1330 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1332 struct ixgbevf_q_vector *q_vector;
1333 int q_vectors, v_idx;
1335 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1336 adapter->eims_enable_mask = 0;
1338 /* Populate the IVAR table and set the ITR values to the
1339 * corresponding register.
1341 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1342 struct ixgbevf_ring *ring;
1344 q_vector = adapter->q_vector[v_idx];
1346 ixgbevf_for_each_ring(ring, q_vector->rx)
1347 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1349 ixgbevf_for_each_ring(ring, q_vector->tx)
1350 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1352 if (q_vector->tx.ring && !q_vector->rx.ring) {
1353 /* Tx only vector */
1354 if (adapter->tx_itr_setting == 1)
1355 q_vector->itr = IXGBE_12K_ITR;
1357 q_vector->itr = adapter->tx_itr_setting;
1359 /* Rx or Rx/Tx vector */
1360 if (adapter->rx_itr_setting == 1)
1361 q_vector->itr = IXGBE_20K_ITR;
1363 q_vector->itr = adapter->rx_itr_setting;
1366 /* add q_vector eims value to global eims_enable_mask */
1367 adapter->eims_enable_mask |= BIT(v_idx);
1369 ixgbevf_write_eitr(q_vector);
1372 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1373 /* setup eims_other and add value to global eims_enable_mask */
1374 adapter->eims_other = BIT(v_idx);
1375 adapter->eims_enable_mask |= adapter->eims_other;
1378 enum latency_range {
1382 latency_invalid = 255
1386 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
1387 * @q_vector: structure containing interrupt and ring information
1388 * @ring_container: structure containing ring performance data
1390 * Stores a new ITR value based on packets and byte
1391 * counts during the last interrupt. The advantage of per interrupt
1392 * computation is faster updates and more accurate ITR for the current
1393 * traffic pattern. Constants in this function were computed
1394 * based on theoretical maximum wire speed and thresholds were set based
1395 * on testing data as well as attempting to minimize response time
1396 * while increasing bulk throughput.
1398 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1399 struct ixgbevf_ring_container *ring_container)
1401 int bytes = ring_container->total_bytes;
1402 int packets = ring_container->total_packets;
1405 u8 itr_setting = ring_container->itr;
1410 /* simple throttle rate management
1411 * 0-20MB/s lowest (100000 ints/s)
1412 * 20-100MB/s low (20000 ints/s)
1413 * 100-1249MB/s bulk (12000 ints/s)
1415 /* what was last interrupt timeslice? */
1416 timepassed_us = q_vector->itr >> 2;
1417 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1419 switch (itr_setting) {
1420 case lowest_latency:
1421 if (bytes_perint > 10)
1422 itr_setting = low_latency;
1425 if (bytes_perint > 20)
1426 itr_setting = bulk_latency;
1427 else if (bytes_perint <= 10)
1428 itr_setting = lowest_latency;
1431 if (bytes_perint <= 20)
1432 itr_setting = low_latency;
1436 /* clear work counters since we have the values we need */
1437 ring_container->total_bytes = 0;
1438 ring_container->total_packets = 0;
1440 /* write updated itr to ring container */
1441 ring_container->itr = itr_setting;
1444 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1446 u32 new_itr = q_vector->itr;
1449 ixgbevf_update_itr(q_vector, &q_vector->tx);
1450 ixgbevf_update_itr(q_vector, &q_vector->rx);
1452 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1454 switch (current_itr) {
1455 /* counts and packets in update_itr are dependent on these numbers */
1456 case lowest_latency:
1457 new_itr = IXGBE_100K_ITR;
1460 new_itr = IXGBE_20K_ITR;
1463 new_itr = IXGBE_12K_ITR;
1469 if (new_itr != q_vector->itr) {
1470 /* do an exponential smoothing */
1471 new_itr = (10 * new_itr * q_vector->itr) /
1472 ((9 * new_itr) + q_vector->itr);
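		/* Note: this averages in the interrupt-rate domain; since rate is
		 * proportional to 1/itr, new = (10*new*old) / (9*new + old) is
		 * equivalent to rate_next = 0.9 * rate_old + 0.1 * rate_target,
		 * so EITR moves only 10% of the way toward the newly computed
		 * value on each adjustment.
		 */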
1474 /* save the algorithm value here */
1475 q_vector->itr = new_itr;
1477 ixgbevf_write_eitr(q_vector);
1481 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1483 struct ixgbevf_adapter *adapter = data;
1484 struct ixgbe_hw *hw = &adapter->hw;
1486 hw->mac.get_link_status = 1;
1488 ixgbevf_service_event_schedule(adapter);
1490 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1496 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
1498 * @data: pointer to our q_vector struct for this interrupt vector
1500 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1502 struct ixgbevf_q_vector *q_vector = data;
1504 /* EIAM disabled interrupts (on this vector) for us */
1505 if (q_vector->rx.ring || q_vector->tx.ring)
1506 napi_schedule_irqoff(&q_vector->napi);
1512 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1513 * @adapter: board private structure
1515 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1516 * interrupts from the kernel.
1518 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1520 struct net_device *netdev = adapter->netdev;
1521 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1522 unsigned int ri = 0, ti = 0;
1525 for (vector = 0; vector < q_vectors; vector++) {
1526 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1527 struct msix_entry *entry = &adapter->msix_entries[vector];
1529 if (q_vector->tx.ring && q_vector->rx.ring) {
1530 snprintf(q_vector->name, sizeof(q_vector->name),
1531 "%s-TxRx-%u", netdev->name, ri++);
1533 } else if (q_vector->rx.ring) {
1534 snprintf(q_vector->name, sizeof(q_vector->name),
1535 "%s-rx-%u", netdev->name, ri++);
1536 } else if (q_vector->tx.ring) {
1537 snprintf(q_vector->name, sizeof(q_vector->name),
1538 "%s-tx-%u", netdev->name, ti++);
1540 /* skip this unused q_vector */
1543 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1544 q_vector->name, q_vector);
1546 hw_dbg(&adapter->hw,
1547 "request_irq failed for MSIX interrupt Error: %d\n",
1549 goto free_queue_irqs;
1553 err = request_irq(adapter->msix_entries[vector].vector,
1554 &ixgbevf_msix_other, 0, netdev->name, adapter);
1556 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1558 goto free_queue_irqs;
1566 free_irq(adapter->msix_entries[vector].vector,
1567 adapter->q_vector[vector]);
1569 /* This failure is non-recoverable - it indicates the system is
1570 * out of MSIX vector resources and the VF driver cannot run
1571 * without them. Set the number of msix vectors to zero
1572 * indicating that not enough can be allocated. The error
1573 * will be returned to the user indicating device open failed.
1574 * Any further attempts to force the driver to open will also
1575 * fail. The only way to recover is to unload the driver and
1576 * reload it again. If the system has recovered some MSIX
1577 * vectors then it may succeed.
1579 adapter->num_msix_vectors = 0;
1584 * ixgbevf_request_irq - initialize interrupts
1585 * @adapter: board private structure
1587 * Attempts to configure interrupts using the best available
1588 * capabilities of the hardware and kernel.
1590 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1592 int err = ixgbevf_request_msix_irqs(adapter);
1595 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1600 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1604 if (!adapter->msix_entries)
1607 q_vectors = adapter->num_msix_vectors;
1610 free_irq(adapter->msix_entries[i].vector, adapter);
1613 for (; i >= 0; i--) {
1614 /* free only the irqs that were actually requested */
1615 if (!adapter->q_vector[i]->rx.ring &&
1616 !adapter->q_vector[i]->tx.ring)
1619 free_irq(adapter->msix_entries[i].vector,
1620 adapter->q_vector[i]);
1625 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1626 * @adapter: board private structure
1628 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1630 struct ixgbe_hw *hw = &adapter->hw;
1633 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1634 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1635 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1637 IXGBE_WRITE_FLUSH(hw);
1639 for (i = 0; i < adapter->num_msix_vectors; i++)
1640 synchronize_irq(adapter->msix_entries[i].vector);
1644 * ixgbevf_irq_enable - Enable default interrupt generation settings
1645 * @adapter: board private structure
1647 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1649 struct ixgbe_hw *hw = &adapter->hw;
1651 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1652 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1653 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1657 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1658 * @adapter: board private structure
1659 * @ring: structure containing ring specific data
1661 * Configure the Tx descriptor ring after a reset.
1663 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1664 struct ixgbevf_ring *ring)
1666 struct ixgbe_hw *hw = &adapter->hw;
1667 u64 tdba = ring->dma;
1669 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1670 u8 reg_idx = ring->reg_idx;
1672 /* disable queue to avoid issues while updating state */
1673 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1674 IXGBE_WRITE_FLUSH(hw);
1676 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1677 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1678 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1679 ring->count * sizeof(union ixgbe_adv_tx_desc));
1681 /* disable head writeback */
1682 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1683 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1685 /* enable relaxed ordering */
1686 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1687 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1688 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1690 /* reset head and tail pointers */
1691 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1692 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1693 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1695 /* reset ntu and ntc to place SW in sync with hardware */
1696 ring->next_to_clean = 0;
1697 ring->next_to_use = 0;
1699 /* In order to avoid issues WTHRESH + PTHRESH should always be equal
1700 * to or less than the number of on chip descriptors, which is currently 40. */
1703 txdctl |= (8 << 16); /* WTHRESH = 8 */
1705 /* Setting PTHRESH to 32 improves performance */
1706 txdctl |= (1u << 8) | /* HTHRESH = 1 */
1707 32; /* PTHRESH = 32 */
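/* TXDCTL packs the prefetch, host and write-back thresholds into one
 * register: PTHRESH in the lowest bits, HTHRESH starting at bit 8 and
 * WTHRESH starting at bit 16, which is why the values above are OR'd in at
 * shifts of 0, 8 and 16.
 */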
1709 /* reinitialize tx_buffer_info */
1710 memset(ring->tx_buffer_info, 0,
1711 sizeof(struct ixgbevf_tx_buffer) * ring->count);
1713 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1715 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1717 /* poll to verify queue is enabled */
1719 usleep_range(1000, 2000);
1720 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1721 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1723 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
1727 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1728 * @adapter: board private structure
1730 * Configure the Tx unit of the MAC after a reset.
1732 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1736 /* Setup the HW Tx Head and Tail descriptor pointers */
1737 for (i = 0; i < adapter->num_tx_queues; i++)
1738 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1739 for (i = 0; i < adapter->num_xdp_queues; i++)
1740 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
1743 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1745 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
1746 struct ixgbevf_ring *ring, int index)
1748 struct ixgbe_hw *hw = &adapter->hw;
1751 srrctl = IXGBE_SRRCTL_DROP_EN;
1753 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1754 if (ring_uses_large_buffer(ring))
1755 srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1757 srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1758 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1760 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1763 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1765 struct ixgbe_hw *hw = &adapter->hw;
1767 /* PSRTYPE must be initialized in 82599 */
1768 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1769 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1770 IXGBE_PSRTYPE_L2HDR;
1772 if (adapter->num_rx_queues > 1)
1775 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1778 #define IXGBEVF_MAX_RX_DESC_POLL 10
1779 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1780 struct ixgbevf_ring *ring)
1782 struct ixgbe_hw *hw = &adapter->hw;
1783 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1785 u8 reg_idx = ring->reg_idx;
1787 if (IXGBE_REMOVED(hw->hw_addr))
1789 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1790 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1792 /* write value back with RXDCTL.ENABLE bit cleared */
1793 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1795 /* the hardware may take up to 100us to really disable the Rx queue */
1798 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1799 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1802 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1806 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1807 struct ixgbevf_ring *ring)
1809 struct ixgbe_hw *hw = &adapter->hw;
1810 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1812 u8 reg_idx = ring->reg_idx;
1814 if (IXGBE_REMOVED(hw->hw_addr))
1817 usleep_range(1000, 2000);
1818 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1819 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1822 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1827 * ixgbevf_init_rss_key - Initialize adapter RSS key
1828 * @adapter: device handle
1830 * Allocates and initializes the RSS key if it is not allocated.
1832 static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
1836 if (!adapter->rss_key) {
1837 rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
1838 if (unlikely(!rss_key))
1841 netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
1842 adapter->rss_key = rss_key;
1848 static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1850 struct ixgbe_hw *hw = &adapter->hw;
1851 u32 vfmrqc = 0, vfreta = 0;
1852 u16 rss_i = adapter->num_rx_queues;
1855 /* Fill out hash function seeds */
1856 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1857 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
1859 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1863 adapter->rss_indir_tbl[i] = j;
1865 vfreta |= j << (i & 0x3) * 8;
1867 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
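/* The redirection table is packed four 8-bit queue indices per 32-bit
 * VFRETA register, so entry i lands at byte (i & 0x3) of register (i >> 2)
 * and a register write is only needed once every four entries.
 */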
1872 /* Perform hash on these packet types */
1873 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1874 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1875 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1876 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1878 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1880 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1883 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1884 struct ixgbevf_ring *ring)
1886 struct ixgbe_hw *hw = &adapter->hw;
1887 union ixgbe_adv_rx_desc *rx_desc;
1888 u64 rdba = ring->dma;
1890 u8 reg_idx = ring->reg_idx;
1892 /* disable queue to avoid issues while updating state */
1893 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1894 ixgbevf_disable_rx_queue(adapter, ring);
1896 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1897 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1898 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1899 ring->count * sizeof(union ixgbe_adv_rx_desc));
1901 #ifndef CONFIG_SPARC
1902 /* enable relaxed ordering */
1903 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1904 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1906 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1907 IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1908 IXGBE_DCA_RXCTRL_DATA_WRO_EN);
1911 /* reset head and tail pointers */
1912 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1913 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1914 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1916 /* initialize rx_buffer_info */
1917 memset(ring->rx_buffer_info, 0,
1918 sizeof(struct ixgbevf_rx_buffer) * ring->count);
1920 /* initialize Rx descriptor 0 */
1921 rx_desc = IXGBEVF_RX_DESC(ring, 0);
1922 rx_desc->wb.upper.length = 0;
1924 /* reset ntu and ntc to place SW in sync with hardware */
1925 ring->next_to_clean = 0;
1926 ring->next_to_use = 0;
1927 ring->next_to_alloc = 0;
1929 ixgbevf_configure_srrctl(adapter, ring, reg_idx);
1931 /* RXDCTL.RLPML does not work on 82599 */
1932 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
1933 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
1934 IXGBE_RXDCTL_RLPML_EN);
1936 #if (PAGE_SIZE < 8192)
1937 /* Limit the maximum frame size so we don't overrun the skb */
1938 if (ring_uses_build_skb(ring) &&
1939 !ring_uses_large_buffer(ring))
1940 rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
1941 IXGBE_RXDCTL_RLPML_EN;
1945 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1946 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1948 ixgbevf_rx_desc_queue_enable(adapter, ring);
1949 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1952 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
1953 struct ixgbevf_ring *rx_ring)
1955 struct net_device *netdev = adapter->netdev;
1956 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1958 /* set build_skb and buffer size flags */
1959 clear_ring_build_skb_enabled(rx_ring);
1960 clear_ring_uses_large_buffer(rx_ring);
1962 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
1965 set_ring_build_skb_enabled(rx_ring);
1967 if (PAGE_SIZE < 8192) {
1968 if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
1971 set_ring_uses_large_buffer(rx_ring);
1976 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1977 * @adapter: board private structure
1979 * Configure the Rx unit of the MAC after a reset.
1981 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1983 struct ixgbe_hw *hw = &adapter->hw;
1984 struct net_device *netdev = adapter->netdev;
1987 ixgbevf_setup_psrtype(adapter);
1988 if (hw->mac.type >= ixgbe_mac_X550_vf)
1989 ixgbevf_setup_vfmrqc(adapter);
1991 spin_lock_bh(&adapter->mbx_lock);
1992 /* notify the PF of our intent to use this size of frame */
1993 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
1994 spin_unlock_bh(&adapter->mbx_lock);
1996 dev_err(&adapter->pdev->dev,
1997 "Failed to set MTU at %d\n", netdev->mtu);
1999 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2000 * the Base and Length of the Rx Descriptor Ring
2002 for (i = 0; i < adapter->num_rx_queues; i++) {
2003 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
2005 ixgbevf_set_rx_buffer_len(adapter, rx_ring);
2006 ixgbevf_configure_rx_ring(adapter, rx_ring);
2010 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
2011 __be16 proto, u16 vid)
2013 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2014 struct ixgbe_hw *hw = &adapter->hw;
2017 spin_lock_bh(&adapter->mbx_lock);
2019 /* add VID to filter table */
2020 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
2022 spin_unlock_bh(&adapter->mbx_lock);
2024 /* translate error return types so error makes sense */
2025 if (err == IXGBE_ERR_MBX)
2028 if (err == IXGBE_ERR_INVALID_ARGUMENT)
2031 set_bit(vid, adapter->active_vlans);
2036 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
2037 __be16 proto, u16 vid)
2039 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2040 struct ixgbe_hw *hw = &adapter->hw;
2043 spin_lock_bh(&adapter->mbx_lock);
2045 /* remove VID from filter table */
2046 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
2048 spin_unlock_bh(&adapter->mbx_lock);
2050 clear_bit(vid, adapter->active_vlans);
2055 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
2059 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2060 ixgbevf_vlan_rx_add_vid(adapter->netdev,
2061 htons(ETH_P_8021Q), vid);
2064 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
2066 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2067 struct ixgbe_hw *hw = &adapter->hw;
2070 if ((netdev_uc_count(netdev)) > 10) {
2071 pr_err("Too many unicast filters - No Space\n");
2075 if (!netdev_uc_empty(netdev)) {
2076 struct netdev_hw_addr *ha;
2078 netdev_for_each_uc_addr(ha, netdev) {
2079 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
2083 /* If the list is empty then send message to PF driver to
2084 * clear all MAC VLANs on this VF.
2086 hw->mac.ops.set_uc_addr(hw, 0, NULL);
2093 * ixgbevf_set_rx_mode - Multicast and unicast set
2094 * @netdev: network interface device structure
2096 * The set_rx_mode entry point is called whenever the multicast address
2097 * list, unicast address list or the network interface flags are updated.
2098 * This routine is responsible for configuring the hardware for proper
2099 * multicast mode and configuring requested unicast filters.
2101 static void ixgbevf_set_rx_mode(struct net_device *netdev)
2103 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2104 struct ixgbe_hw *hw = &adapter->hw;
2105 unsigned int flags = netdev->flags;
2108 /* request the most inclusive mode we need */
2109 if (flags & IFF_PROMISC)
2110 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
2111 else if (flags & IFF_ALLMULTI)
2112 xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
2113 else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
2114 xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
2116 xcast_mode = IXGBEVF_XCAST_MODE_NONE;
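/* Each xcast mode is a superset of the one below it
 * (NONE < MULTI < ALLMULTI < PROMISC), so the checks above select the
 * least permissive mode that still satisfies the netdev flags. The PF
 * may reject the more permissive modes (for example for untrusted VFs),
 * in which case the mailbox request below simply fails.
 */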
2118 spin_lock_bh(&adapter->mbx_lock);
2120 hw->mac.ops.update_xcast_mode(hw, xcast_mode);
2122 /* reprogram multicast list */
2123 hw->mac.ops.update_mc_addr_list(hw, netdev);
2125 ixgbevf_write_uc_addr_list(netdev);
2127 spin_unlock_bh(&adapter->mbx_lock);
2130 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
2133 struct ixgbevf_q_vector *q_vector;
2134 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2136 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2137 q_vector = adapter->q_vector[q_idx];
2138 napi_enable(&q_vector->napi);
2142 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
2145 struct ixgbevf_q_vector *q_vector;
2146 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2148 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2149 q_vector = adapter->q_vector[q_idx];
2150 napi_disable(&q_vector->napi);
2154 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
2156 struct ixgbe_hw *hw = &adapter->hw;
2157 unsigned int def_q = 0;
2158 unsigned int num_tcs = 0;
2159 unsigned int num_rx_queues = adapter->num_rx_queues;
2160 unsigned int num_tx_queues = adapter->num_tx_queues;
2163 spin_lock_bh(&adapter->mbx_lock);
2165 /* fetch queue configuration from the PF */
2166 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2168 spin_unlock_bh(&adapter->mbx_lock);
2174 /* we need only one Tx queue */
2177 /* update default Tx ring register index */
2178 adapter->tx_ring[0]->reg_idx = def_q;
2180 /* we need as many queues as traffic classes */
2181 num_rx_queues = num_tcs;
2184 /* if we have a bad config, abort and request a queue reset */
2185 if ((adapter->num_rx_queues != num_rx_queues) ||
2186 (adapter->num_tx_queues != num_tx_queues)) {
2187 /* force mailbox timeout to prevent further messages */
2188 hw->mbx.timeout = 0;
2190 /* wait for watchdog to come around and bail us out */
2191 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
2197 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
2199 ixgbevf_configure_dcb(adapter);
2201 ixgbevf_set_rx_mode(adapter->netdev);
2203 ixgbevf_restore_vlan(adapter);
2205 ixgbevf_configure_tx(adapter);
2206 ixgbevf_configure_rx(adapter);
2209 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2211 /* Only save pre-reset stats if there are some */
2212 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2213 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2214 adapter->stats.base_vfgprc;
2215 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2216 adapter->stats.base_vfgptc;
2217 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2218 adapter->stats.base_vfgorc;
2219 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2220 adapter->stats.base_vfgotc;
2221 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2222 adapter->stats.base_vfmprc;
2226 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2228 struct ixgbe_hw *hw = &adapter->hw;
2230 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2231 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2232 adapter->stats.last_vfgorc |=
2233 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2234 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2235 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2236 adapter->stats.last_vfgotc |=
2237 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2238 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2240 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2241 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2242 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2243 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2244 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2247 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2249 struct ixgbe_hw *hw = &adapter->hw;
2250 int api[] = { ixgbe_mbox_api_13,
2254 ixgbe_mbox_api_unknown };
2257 spin_lock_bh(&adapter->mbx_lock);
2259 while (api[idx] != ixgbe_mbox_api_unknown) {
2260 err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
2266 spin_unlock_bh(&adapter->mbx_lock);
2269 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2271 struct net_device *netdev = adapter->netdev;
2272 struct ixgbe_hw *hw = &adapter->hw;
2274 ixgbevf_configure_msix(adapter);
2276 spin_lock_bh(&adapter->mbx_lock);
2278 if (is_valid_ether_addr(hw->mac.addr))
2279 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2280 else
2281 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2283 spin_unlock_bh(&adapter->mbx_lock);
2285 smp_mb__before_atomic();
2286 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2287 ixgbevf_napi_enable_all(adapter);
2289 /* clear any pending interrupts, may auto mask */
2290 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2291 ixgbevf_irq_enable(adapter);
2293 /* enable transmits */
2294 netif_tx_start_all_queues(netdev);
2296 ixgbevf_save_reset_stats(adapter);
2297 ixgbevf_init_last_counter_stats(adapter);
2299 hw->mac.get_link_status = 1;
2300 mod_timer(&adapter->service_timer, jiffies);
2303 void ixgbevf_up(struct ixgbevf_adapter *adapter)
2305 ixgbevf_configure(adapter);
2307 ixgbevf_up_complete(adapter);
2311 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2312 * @rx_ring: ring to free buffers from
2314 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2316 u16 i = rx_ring->next_to_clean;
2318 /* Free Rx ring sk_buff */
2320 dev_kfree_skb(rx_ring->skb);
2321 rx_ring->skb = NULL;
2324 /* Free all the Rx ring pages */
2325 while (i != rx_ring->next_to_alloc) {
2326 struct ixgbevf_rx_buffer *rx_buffer;
2328 rx_buffer = &rx_ring->rx_buffer_info[i];
2330 /* Invalidate cache lines that may have been written to by
2331 * device so that we avoid corrupting memory.
2333 dma_sync_single_range_for_cpu(rx_ring->dev,
2335 rx_buffer->page_offset,
2336 ixgbevf_rx_bufsz(rx_ring),
2339 /* free resources associated with mapping */
2340 dma_unmap_page_attrs(rx_ring->dev,
2342 ixgbevf_rx_pg_size(rx_ring),
2344 IXGBEVF_RX_DMA_ATTR);
2346 __page_frag_cache_drain(rx_buffer->page,
2347 rx_buffer->pagecnt_bias);
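/* The Rx buffer holds a batch of extra page references ("pagecnt_bias")
 * so the hot path can hand pages to the stack without an atomic
 * page-ref update per packet; __page_frag_cache_drain() returns all of
 * those outstanding references in a single call at teardown.
 */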
2350 if (i == rx_ring->count)
2354 rx_ring->next_to_alloc = 0;
2355 rx_ring->next_to_clean = 0;
2356 rx_ring->next_to_use = 0;
2360 * ixgbevf_clean_tx_ring - Free Tx Buffers
2361 * @tx_ring: ring to be cleaned
2363 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2365 u16 i = tx_ring->next_to_clean;
2366 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
2368 while (i != tx_ring->next_to_use) {
2369 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
2371 /* Free all the Tx ring sk_buffs */
2372 if (ring_is_xdp(tx_ring))
2373 page_frag_free(tx_buffer->data);
2375 dev_kfree_skb_any(tx_buffer->skb);
2377 /* unmap skb header data */
2378 dma_unmap_single(tx_ring->dev,
2379 dma_unmap_addr(tx_buffer, dma),
2380 dma_unmap_len(tx_buffer, len),
2383 /* check for eop_desc to determine the end of the packet */
2384 eop_desc = tx_buffer->next_to_watch;
2385 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2387 /* unmap remaining buffers */
2388 while (tx_desc != eop_desc) {
2392 if (unlikely(i == tx_ring->count)) {
2394 tx_buffer = tx_ring->tx_buffer_info;
2395 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
2398 /* unmap any remaining paged data */
2399 if (dma_unmap_len(tx_buffer, len))
2400 dma_unmap_page(tx_ring->dev,
2401 dma_unmap_addr(tx_buffer, dma),
2402 dma_unmap_len(tx_buffer, len),
2406 /* move us one more past the eop_desc for start of next pkt */
2409 if (unlikely(i == tx_ring->count)) {
2411 tx_buffer = tx_ring->tx_buffer_info;
2415 /* reset next_to_use and next_to_clean */
2416 tx_ring->next_to_use = 0;
2417 tx_ring->next_to_clean = 0;
2422 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2423 * @adapter: board private structure
2425 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2429 for (i = 0; i < adapter->num_rx_queues; i++)
2430 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2434 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2435 * @adapter: board private structure
2437 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2441 for (i = 0; i < adapter->num_tx_queues; i++)
2442 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2443 for (i = 0; i < adapter->num_xdp_queues; i++)
2444 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
2447 void ixgbevf_down(struct ixgbevf_adapter *adapter)
2449 struct net_device *netdev = adapter->netdev;
2450 struct ixgbe_hw *hw = &adapter->hw;
2453 /* signal that we are down to the interrupt handler */
2454 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2455 return; /* do nothing if already down */
2457 /* disable all enabled Rx queues */
2458 for (i = 0; i < adapter->num_rx_queues; i++)
2459 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2461 usleep_range(10000, 20000);
2463 netif_tx_stop_all_queues(netdev);
2465 /* call carrier off first to avoid false dev_watchdog timeouts */
2466 netif_carrier_off(netdev);
2467 netif_tx_disable(netdev);
2469 ixgbevf_irq_disable(adapter);
2471 ixgbevf_napi_disable_all(adapter);
2473 del_timer_sync(&adapter->service_timer);
2475 /* disable transmits in the hardware now that interrupts are off */
2476 for (i = 0; i < adapter->num_tx_queues; i++) {
2477 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2479 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2480 IXGBE_TXDCTL_SWFLSH);
2483 for (i = 0; i < adapter->num_xdp_queues; i++) {
2484 u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
2486 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2487 IXGBE_TXDCTL_SWFLSH);
2490 if (!pci_channel_offline(adapter->pdev))
2491 ixgbevf_reset(adapter);
2493 ixgbevf_clean_all_tx_rings(adapter);
2494 ixgbevf_clean_all_rx_rings(adapter);
2497 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2499 WARN_ON(in_interrupt());
2501 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2504 ixgbevf_down(adapter);
2505 ixgbevf_up(adapter);
2507 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2510 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2512 struct ixgbe_hw *hw = &adapter->hw;
2513 struct net_device *netdev = adapter->netdev;
2515 if (hw->mac.ops.reset_hw(hw)) {
2516 hw_dbg(hw, "PF still resetting\n");
2518 hw->mac.ops.init_hw(hw);
2519 ixgbevf_negotiate_api(adapter);
2522 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2523 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2524 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2527 adapter->last_reset = jiffies;
2530 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2533 int vector_threshold;
2535 /* We'll want at least 2 (vector_threshold):
2536 * 1) TxQ[0] + RxQ[0] handler
2537 * 2) Other (Link Status Change, etc.)
2539 vector_threshold = MIN_MSIX_COUNT;
2541 /* The more we get, the more we will assign to Tx/Rx Cleanup
2542 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2543 * Right now, we simply care about how many we'll get; we'll
2544 * set them up later while requesting IRQs.
2546 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2547 vector_threshold, vectors);
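/* pci_enable_msix_range() returns the number of vectors actually
 * granted (between vector_threshold and the requested count) or a
 * negative errno, so anything less than zero here means even the
 * minimal Tx/Rx + mailbox configuration could not be satisfied.
 */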
2550 dev_err(&adapter->pdev->dev,
2551 "Unable to allocate MSI-X interrupts\n");
2552 kfree(adapter->msix_entries);
2553 adapter->msix_entries = NULL;
2557 /* Adjust for only the vectors we'll use, which is minimum
2558 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2559 * vectors we were allocated.
2561 adapter->num_msix_vectors = vectors;
2567 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2568 * @adapter: board private structure to initialize
2570 * This is the top level queue allocation routine. The order here is very
2571 * important, starting with the "most" number of features turned on at once,
2572 * and ending with the smallest set of features. This way large combinations
2573 * can be allocated if they're turned on, and smaller combinations are the
2574 * fallthrough conditions.
2577 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2579 struct ixgbe_hw *hw = &adapter->hw;
2580 unsigned int def_q = 0;
2581 unsigned int num_tcs = 0;
2584 /* Start with base case */
2585 adapter->num_rx_queues = 1;
2586 adapter->num_tx_queues = 1;
2587 adapter->num_xdp_queues = 0;
2589 spin_lock_bh(&adapter->mbx_lock);
2591 /* fetch queue configuration from the PF */
2592 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2594 spin_unlock_bh(&adapter->mbx_lock);
2599 /* we need as many queues as traffic classes */
2601 adapter->num_rx_queues = num_tcs;
2603 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2605 switch (hw->api_version) {
2606 case ixgbe_mbox_api_11:
2607 case ixgbe_mbox_api_12:
2608 case ixgbe_mbox_api_13:
2609 if (adapter->xdp_prog &&
2610 hw->mac.max_tx_queues == rss)
2611 rss = rss > 3 ? 2 : 1;
2613 adapter->num_rx_queues = rss;
2614 adapter->num_tx_queues = rss;
2615 adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
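/* Each Rx queue gets a dedicated XDP Tx ring so XDP_TX transmits never
 * contend with the regular stack queues; when the PF only granted as
 * many Tx queues as RSS queues (checked above), the RSS width is
 * reduced to leave room for those extra rings.
 */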
2623 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2624 * @adapter: board private structure to initialize
2626 * Attempt to configure the interrupts using the best available
2627 * capabilities of the hardware and the kernel.
2629 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2631 int vector, v_budget;
2633 /* It's easy to be greedy for MSI-X vectors, but it really
2634 * doesn't do us much good if we have a lot more vectors
2635 * than CPUs. So let's be conservative and only ask for
2636 * (roughly) the same number of vectors as there are CPUs.
2637 * The default is to use pairs of vectors.
2639 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2640 v_budget = min_t(int, v_budget, num_online_cpus());
2641 v_budget += NON_Q_VECTORS;
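/* Example of the budget math: with 2 Rx and 2 Tx queues on an 8-CPU
 * host, v_budget = min(max(2, 2), 8) + NON_Q_VECTORS. Assuming
 * NON_Q_VECTORS is 1 (the mailbox/other vector), that is 3 MSI-X
 * entries requested below.
 */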
2643 adapter->msix_entries = kcalloc(v_budget,
2644 sizeof(struct msix_entry), GFP_KERNEL);
2645 if (!adapter->msix_entries)
2648 for (vector = 0; vector < v_budget; vector++)
2649 adapter->msix_entries[vector].entry = vector;
2651 /* A failure in MSI-X entry allocation isn't fatal, but the VF driver
2652 * does not support any other modes, so we will simply fail here. Note
2653 * that we clean up the msix_entries pointer elsewhere.
2655 return ixgbevf_acquire_msix_vectors(adapter, v_budget);
2658 static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
2659 struct ixgbevf_ring_container *head)
2661 ring->next = head->ring;
2667 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
2668 * @adapter: board private structure to initialize
2669 * @v_idx: index of vector in adapter struct
2670 * @txr_count: number of Tx rings for q vector
2671 * @txr_idx: index of first Tx ring to assign
2672 * @xdp_count: total number of XDP rings to allocate
2673 * @xdp_idx: index of first XDP ring to allocate
2674 * @rxr_count: number of Rx rings for q vector
2675 * @rxr_idx: index of first Rx ring to assign
2677 * We allocate one q_vector. If allocation fails we return -ENOMEM.
2679 static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
2680 int txr_count, int txr_idx,
2681 int xdp_count, int xdp_idx,
2682 int rxr_count, int rxr_idx)
2684 struct ixgbevf_q_vector *q_vector;
2685 int reg_idx = txr_idx + xdp_idx;
2686 struct ixgbevf_ring *ring;
2687 int ring_count, size;
2689 ring_count = txr_count + xdp_count + rxr_count;
2690 size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
2692 /* allocate q_vector and rings */
2693 q_vector = kzalloc(size, GFP_KERNEL);
2697 /* initialize NAPI */
2698 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
2700 /* tie q_vector and adapter together */
2701 adapter->q_vector[v_idx] = q_vector;
2702 q_vector->adapter = adapter;
2703 q_vector->v_idx = v_idx;
2705 /* initialize pointer to rings */
2706 ring = q_vector->ring;
2709 /* assign generic ring traits */
2710 ring->dev = &adapter->pdev->dev;
2711 ring->netdev = adapter->netdev;
2713 /* configure backlink on ring */
2714 ring->q_vector = q_vector;
2716 /* update q_vector Tx values */
2717 ixgbevf_add_ring(ring, &q_vector->tx);
2719 /* apply Tx specific ring traits */
2720 ring->count = adapter->tx_ring_count;
2721 ring->queue_index = txr_idx;
2722 ring->reg_idx = reg_idx;
2724 /* assign ring to adapter */
2725 adapter->tx_ring[txr_idx] = ring;
2727 /* update count and index */
2732 /* push pointer to next ring */
2737 /* assign generic ring traits */
2738 ring->dev = &adapter->pdev->dev;
2739 ring->netdev = adapter->netdev;
2741 /* configure backlink on ring */
2742 ring->q_vector = q_vector;
2744 /* update q_vector Tx values */
2745 ixgbevf_add_ring(ring, &q_vector->tx);
2747 /* apply Tx specific ring traits */
2748 ring->count = adapter->tx_ring_count;
2749 ring->queue_index = xdp_idx;
2750 ring->reg_idx = reg_idx;
2753 /* assign ring to adapter */
2754 adapter->xdp_ring[xdp_idx] = ring;
2756 /* update count and index */
2761 /* push pointer to next ring */
2766 /* assign generic ring traits */
2767 ring->dev = &adapter->pdev->dev;
2768 ring->netdev = adapter->netdev;
2770 /* configure backlink on ring */
2771 ring->q_vector = q_vector;
2773 /* update q_vector Rx values */
2774 ixgbevf_add_ring(ring, &q_vector->rx);
2776 /* apply Rx specific ring traits */
2777 ring->count = adapter->rx_ring_count;
2778 ring->queue_index = rxr_idx;
2779 ring->reg_idx = rxr_idx;
2781 /* assign ring to adapter */
2782 adapter->rx_ring[rxr_idx] = ring;
2784 /* update count and index */
2788 /* push pointer to next ring */
2796 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
2797 * @adapter: board private structure to initialize
2798 * @v_idx: index of vector in adapter struct
2800 * This function frees the memory allocated to the q_vector. In addition if
2801 * NAPI is enabled it will delete any references to the NAPI struct prior
2802 * to freeing the q_vector.
2804 static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
2806 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
2807 struct ixgbevf_ring *ring;
2809 ixgbevf_for_each_ring(ring, q_vector->tx) {
2810 if (ring_is_xdp(ring))
2811 adapter->xdp_ring[ring->queue_index] = NULL;
2813 adapter->tx_ring[ring->queue_index] = NULL;
2816 ixgbevf_for_each_ring(ring, q_vector->rx)
2817 adapter->rx_ring[ring->queue_index] = NULL;
2819 adapter->q_vector[v_idx] = NULL;
2820 netif_napi_del(&q_vector->napi);
2822 /* ixgbevf_get_stats() might access the rings on this vector,
2823 * we must wait a grace period before freeing it.
2825 kfree_rcu(q_vector, rcu);
2829 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2830 * @adapter: board private structure to initialize
2832 * We allocate one q_vector per queue interrupt. If allocation fails we
2835 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2837 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2838 int rxr_remaining = adapter->num_rx_queues;
2839 int txr_remaining = adapter->num_tx_queues;
2840 int xdp_remaining = adapter->num_xdp_queues;
2841 int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
2844 if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
2845 for (; rxr_remaining; v_idx++, q_vectors--) {
2846 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2848 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2849 0, 0, 0, 0, rqpv, rxr_idx);
2853 /* update counts and index */
2854 rxr_remaining -= rqpv;
2859 for (; q_vectors; v_idx++, q_vectors--) {
2860 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2861 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
2862 int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);
2864 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2872 /* update counts and index */
2873 rxr_remaining -= rqpv;
2875 txr_remaining -= tqpv;
2877 xdp_remaining -= xqpv;
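/* The DIV_ROUND_UP() split keeps the remaining rings evenly spread over
 * the remaining vectors. For example, 4 Rx + 4 Tx rings on 3 vectors
 * are assigned 2/2, then 1/1, then 1/1 as the remainders shrink.
 */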
2886 ixgbevf_free_q_vector(adapter, v_idx);
2893 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2894 * @adapter: board private structure to initialize
2896 * This function frees the memory allocated to the q_vectors. In addition if
2897 * NAPI is enabled it will delete any references to the NAPI struct prior
2898 * to freeing the q_vector.
2900 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2902 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2906 ixgbevf_free_q_vector(adapter, q_vectors);
2911 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2912 * @adapter: board private structure
2915 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2917 if (!adapter->msix_entries)
2920 pci_disable_msix(adapter->pdev);
2921 kfree(adapter->msix_entries);
2922 adapter->msix_entries = NULL;
2926 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2927 * @adapter: board private structure to initialize
2930 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2934 /* Number of supported queues */
2935 ixgbevf_set_num_queues(adapter);
2937 err = ixgbevf_set_interrupt_capability(adapter);
2939 hw_dbg(&adapter->hw,
2940 "Unable to setup interrupt capabilities\n");
2941 goto err_set_interrupt;
2944 err = ixgbevf_alloc_q_vectors(adapter);
2946 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2947 goto err_alloc_q_vectors;
2950 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n",
2951 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
2952 adapter->num_rx_queues, adapter->num_tx_queues,
2953 adapter->num_xdp_queues);
2955 set_bit(__IXGBEVF_DOWN, &adapter->state);
2958 err_alloc_q_vectors:
2959 ixgbevf_reset_interrupt_capability(adapter);
2965 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2966 * @adapter: board private structure to clear interrupt scheme on
2968 * We go through and clear interrupt specific resources and reset the structure
2969 * to pre-load conditions
2971 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2973 adapter->num_tx_queues = 0;
2974 adapter->num_xdp_queues = 0;
2975 adapter->num_rx_queues = 0;
2977 ixgbevf_free_q_vectors(adapter);
2978 ixgbevf_reset_interrupt_capability(adapter);
2982 * ixgbevf_sw_init - Initialize general software structures
2983 * @adapter: board private structure to initialize
2985 * ixgbevf_sw_init initializes the Adapter private data structure.
2986 * Fields are initialized based on PCI device information and
2987 * OS network device settings (MTU size).
2989 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2991 struct ixgbe_hw *hw = &adapter->hw;
2992 struct pci_dev *pdev = adapter->pdev;
2993 struct net_device *netdev = adapter->netdev;
2996 /* PCI config space info */
2997 hw->vendor_id = pdev->vendor;
2998 hw->device_id = pdev->device;
2999 hw->revision_id = pdev->revision;
3000 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3001 hw->subsystem_device_id = pdev->subsystem_device;
3003 hw->mbx.ops.init_params(hw);
3005 if (hw->mac.type >= ixgbe_mac_X550_vf) {
3006 err = ixgbevf_init_rss_key(adapter);
3011 /* assume legacy case in which PF would only give VF 2 queues */
3012 hw->mac.max_tx_queues = 2;
3013 hw->mac.max_rx_queues = 2;
3015 /* lock to protect mailbox accesses */
3016 spin_lock_init(&adapter->mbx_lock);
3018 err = hw->mac.ops.reset_hw(hw);
3020 dev_info(&pdev->dev,
3021 "PF still in reset state. Is the PF interface up?\n");
3023 err = hw->mac.ops.init_hw(hw);
3025 pr_err("init_shared_code failed: %d\n", err);
3028 ixgbevf_negotiate_api(adapter);
3029 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
3031 dev_info(&pdev->dev, "Error reading MAC address\n");
3032 else if (is_zero_ether_addr(adapter->hw.mac.addr))
3033 dev_info(&pdev->dev,
3034 "MAC address not assigned by administrator.\n");
3035 ether_addr_copy(netdev->dev_addr, hw->mac.addr);
3038 if (!is_valid_ether_addr(netdev->dev_addr)) {
3039 dev_info(&pdev->dev, "Assigning random MAC address\n");
3040 eth_hw_addr_random(netdev);
3041 ether_addr_copy(hw->mac.addr, netdev->dev_addr);
3042 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
3045 /* Enable dynamic interrupt throttling rates */
3046 adapter->rx_itr_setting = 1;
3047 adapter->tx_itr_setting = 1;
3049 /* set default ring sizes */
3050 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
3051 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
3053 set_bit(__IXGBEVF_DOWN, &adapter->state);
3060 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
3062 u32 current_counter = IXGBE_READ_REG(hw, reg); \
3063 if (current_counter < last_counter) \
3064 counter += 0x100000000LL; \
3065 last_counter = current_counter; \
3066 counter &= 0xFFFFFFFF00000000LL; \
3067 counter |= current_counter; \
3070 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
3072 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
3073 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
3074 u64 current_counter = (current_counter_msb << 32) | \
3075 current_counter_lsb; \
3076 if (current_counter < last_counter) \
3077 counter += 0x1000000000LL; \
3078 last_counter = current_counter; \
3079 counter &= 0xFFFFFFF000000000LL; \
3080 counter |= current_counter; \
3083 * ixgbevf_update_stats - Update the board statistics counters.
3084 * @adapter: board private structure
3086 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
3088 struct ixgbe_hw *hw = &adapter->hw;
3089 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
3090 u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
3093 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3094 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3097 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3098 adapter->stats.vfgprc);
3099 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3100 adapter->stats.vfgptc);
3101 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3102 adapter->stats.last_vfgorc,
3103 adapter->stats.vfgorc);
3104 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3105 adapter->stats.last_vfgotc,
3106 adapter->stats.vfgotc);
3107 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3108 adapter->stats.vfmprc);
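/* The UPDATE_VF_COUNTER macros splice the free-running hardware
 * counters into 64-bit software totals and handle wraparound. For
 * example, if last_vfgprc was 0xFFFFFFF0 and the register now reads
 * 0x00000010, current < last, so 2^32 is added to the running total
 * before the low 32 bits are replaced with the new reading.
 */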
3110 for (i = 0; i < adapter->num_rx_queues; i++) {
3111 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
3113 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
3114 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
3115 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
3116 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
3119 adapter->hw_csum_rx_error = hw_csum_rx_error;
3120 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
3121 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
3122 adapter->alloc_rx_page = alloc_rx_page;
3126 * ixgbevf_service_timer - Timer Call-back
3127 * @t: pointer to timer_list struct
3129 static void ixgbevf_service_timer(struct timer_list *t)
3131 struct ixgbevf_adapter *adapter = from_timer(adapter, t,
3134 /* Reset the timer */
3135 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
3137 ixgbevf_service_event_schedule(adapter);
3140 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
3142 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
3145 /* If we're already down or resetting, just bail */
3146 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3147 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
3148 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3151 adapter->tx_timeout_count++;
3154 ixgbevf_reinit_locked(adapter);
3159 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
3160 * @adapter: pointer to the device adapter structure
3162 * This function serves two purposes. First it strobes the interrupt lines
3163 * in order to make certain interrupts are occurring. Secondly it sets the
3164 * bits needed to check for TX hangs. As a result we should immediately
3165 * determine if a hang has occurred.
3167 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
3169 struct ixgbe_hw *hw = &adapter->hw;
3173 /* If we're down or resetting, just bail */
3174 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3175 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3178 /* Force detection of hung controller */
3179 if (netif_carrier_ok(adapter->netdev)) {
3180 for (i = 0; i < adapter->num_tx_queues; i++)
3181 set_check_for_tx_hang(adapter->tx_ring[i]);
3182 for (i = 0; i < adapter->num_xdp_queues; i++)
3183 set_check_for_tx_hang(adapter->xdp_ring[i]);
3186 /* get one bit for every active Tx/Rx interrupt vector */
3187 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
3188 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
3190 if (qv->rx.ring || qv->tx.ring)
3194 /* Cause software interrupt to ensure rings are cleaned */
3195 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
3199 * ixgbevf_watchdog_update_link - update the link status
3200 * @adapter: pointer to the device adapter structure
3202 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
3204 struct ixgbe_hw *hw = &adapter->hw;
3205 u32 link_speed = adapter->link_speed;
3206 bool link_up = adapter->link_up;
3209 spin_lock_bh(&adapter->mbx_lock);
3211 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3213 spin_unlock_bh(&adapter->mbx_lock);
3215 /* if check for link returns error we will need to reset */
3216 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
3217 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
3221 adapter->link_up = link_up;
3222 adapter->link_speed = link_speed;
3226 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
3227 * print link up message
3228 * @adapter: pointer to the device adapter structure
3230 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
3232 struct net_device *netdev = adapter->netdev;
3234 /* only continue if link was previously down */
3235 if (netif_carrier_ok(netdev))
3238 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
3239 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
3241 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
3243 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
3247 netif_carrier_on(netdev);
3251 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
3252 * print link down message
3253 * @adapter: pointer to the adapter structure
3255 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
3257 struct net_device *netdev = adapter->netdev;
3259 adapter->link_speed = 0;
3261 /* only continue if link was up previously */
3262 if (!netif_carrier_ok(netdev))
3265 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
3267 netif_carrier_off(netdev);
3271 * ixgbevf_watchdog_subtask - worker thread to bring link up
3272 * @adapter: board private structure
3274 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
3276 /* if interface is down do nothing */
3277 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3278 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3281 ixgbevf_watchdog_update_link(adapter);
3283 if (adapter->link_up)
3284 ixgbevf_watchdog_link_is_up(adapter);
3285 else
3286 ixgbevf_watchdog_link_is_down(adapter);
3288 ixgbevf_update_stats(adapter);
3292 * ixgbevf_service_task - manages and runs subtasks
3293 * @work: pointer to work_struct containing our data
3295 static void ixgbevf_service_task(struct work_struct *work)
3297 struct ixgbevf_adapter *adapter = container_of(work,
3298 struct ixgbevf_adapter,
3300 struct ixgbe_hw *hw = &adapter->hw;
3302 if (IXGBE_REMOVED(hw->hw_addr)) {
3303 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
3305 ixgbevf_down(adapter);
3311 ixgbevf_queue_reset_subtask(adapter);
3312 ixgbevf_reset_subtask(adapter);
3313 ixgbevf_watchdog_subtask(adapter);
3314 ixgbevf_check_hang_subtask(adapter);
3316 ixgbevf_service_event_complete(adapter);
3320 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
3321 * @tx_ring: Tx descriptor ring for a specific queue
3323 * Free all transmit software resources
3325 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
3327 ixgbevf_clean_tx_ring(tx_ring);
3329 vfree(tx_ring->tx_buffer_info);
3330 tx_ring->tx_buffer_info = NULL;
3332 /* if not set, then don't free */
3336 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
3339 tx_ring->desc = NULL;
3343 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
3344 * @adapter: board private structure
3346 * Free all transmit software resources
3348 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
3352 for (i = 0; i < adapter->num_tx_queues; i++)
3353 if (adapter->tx_ring[i]->desc)
3354 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3355 for (i = 0; i < adapter->num_xdp_queues; i++)
3356 if (adapter->xdp_ring[i]->desc)
3357 ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
3361 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
3362 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
3364 * Return 0 on success, negative on failure
3366 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
3368 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3371 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
3372 tx_ring->tx_buffer_info = vmalloc(size);
3373 if (!tx_ring->tx_buffer_info)
3376 u64_stats_init(&tx_ring->syncp);
3378 /* round up to nearest 4K */
3379 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3380 tx_ring->size = ALIGN(tx_ring->size, 4096);
3382 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
3383 &tx_ring->dma, GFP_KERNEL);
3390 vfree(tx_ring->tx_buffer_info);
3391 tx_ring->tx_buffer_info = NULL;
3392 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3397 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3398 * @adapter: board private structure
3400 * If this function returns with an error, then it's possible one or
3401 * more of the rings is populated (while the rest are not). It is the
3402 * caller's duty to clean those orphaned rings.
3404 * Return 0 on success, negative on failure
3406 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3408 int i, j = 0, err = 0;
3410 for (i = 0; i < adapter->num_tx_queues; i++) {
3411 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3414 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3418 for (j = 0; j < adapter->num_xdp_queues; j++) {
3419 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
3422 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
3428 /* rewind the index freeing the rings as we go */
3430 ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
3432 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3438 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3439 * @adapter: board private structure
3440 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3442 * Returns 0 on success, negative on failure
3444 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
3445 struct ixgbevf_ring *rx_ring)
3449 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3450 rx_ring->rx_buffer_info = vmalloc(size);
3451 if (!rx_ring->rx_buffer_info)
3454 u64_stats_init(&rx_ring->syncp);
3456 /* Round up to nearest 4K */
3457 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3458 rx_ring->size = ALIGN(rx_ring->size, 4096);
3460 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3461 &rx_ring->dma, GFP_KERNEL);
3466 /* XDP RX-queue info */
3467 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
3468 rx_ring->queue_index) < 0)
3471 rx_ring->xdp_prog = adapter->xdp_prog;
3475 vfree(rx_ring->rx_buffer_info);
3476 rx_ring->rx_buffer_info = NULL;
3477 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3482 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3483 * @adapter: board private structure
3485 * If this function returns with an error, then it's possible one or
3486 * more of the rings is populated (while the rest are not). It is the
3487 * caller's duty to clean those orphaned rings.
3489 * Return 0 on success, negative on failure
3491 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3495 for (i = 0; i < adapter->num_rx_queues; i++) {
3496 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
3499 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3505 /* rewind the index freeing the rings as we go */
3507 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3512 * ixgbevf_free_rx_resources - Free Rx Resources
3513 * @rx_ring: ring to clean the resources from
3515 * Free all receive software resources
3517 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3519 ixgbevf_clean_rx_ring(rx_ring);
3521 rx_ring->xdp_prog = NULL;
3522 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
3523 vfree(rx_ring->rx_buffer_info);
3524 rx_ring->rx_buffer_info = NULL;
3526 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3529 rx_ring->desc = NULL;
3533 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3534 * @adapter: board private structure
3536 * Free all receive software resources
3538 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3542 for (i = 0; i < adapter->num_rx_queues; i++)
3543 if (adapter->rx_ring[i]->desc)
3544 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3548 * ixgbevf_open - Called when a network interface is made active
3549 * @netdev: network interface device structure
3551 * Returns 0 on success, negative value on failure
3553 * The open entry point is called when a network interface is made
3554 * active by the system (IFF_UP). At this point all resources needed
3555 * for transmit and receive operations are allocated, the interrupt
3556 * handler is registered with the OS, the watchdog timer is started,
3557 * and the stack is notified that the interface is ready.
3559 int ixgbevf_open(struct net_device *netdev)
3561 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3562 struct ixgbe_hw *hw = &adapter->hw;
3565 /* A previous failure to open the device because of a lack of
3566 * available MSIX vector resources may have reset the number
3567 * of msix vectors variable to zero. The only way to recover
3568 * is to unload/reload the driver and hope that the system has
3569 * been able to recover some MSIX vector resources.
3571 if (!adapter->num_msix_vectors)
3574 if (hw->adapter_stopped) {
3575 ixgbevf_reset(adapter);
3576 /* if adapter is still stopped then PF isn't up and
3577 * the VF can't start.
3579 if (hw->adapter_stopped) {
3580 err = IXGBE_ERR_MBX;
3581 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3582 goto err_setup_reset;
3586 /* disallow open during test */
3587 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3590 netif_carrier_off(netdev);
3592 /* allocate transmit descriptors */
3593 err = ixgbevf_setup_all_tx_resources(adapter);
3597 /* allocate receive descriptors */
3598 err = ixgbevf_setup_all_rx_resources(adapter);
3602 ixgbevf_configure(adapter);
3604 err = ixgbevf_request_irq(adapter);
3608 /* Notify the stack of the actual queue counts. */
3609 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3611 goto err_set_queues;
3613 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
3615 goto err_set_queues;
3617 ixgbevf_up_complete(adapter);
3622 ixgbevf_free_irq(adapter);
3624 ixgbevf_free_all_rx_resources(adapter);
3626 ixgbevf_free_all_tx_resources(adapter);
3628 ixgbevf_reset(adapter);
3635 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
3636 * @adapter: the private adapter struct
3638 * This function should contain the necessary work common to both suspending
3639 * and closing of the device.
3641 static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
3643 ixgbevf_down(adapter);
3644 ixgbevf_free_irq(adapter);
3645 ixgbevf_free_all_tx_resources(adapter);
3646 ixgbevf_free_all_rx_resources(adapter);
3650 * ixgbevf_close - Disables a network interface
3651 * @netdev: network interface device structure
3653 * Returns 0, this is not allowed to fail
3655 * The close entry point is called when an interface is de-activated
3656 * by the OS. The hardware is still under the drivers control, but
3657 * needs to be disabled. A global MAC reset is issued to stop the
3658 * hardware, and all transmit and receive resources are freed.
3660 int ixgbevf_close(struct net_device *netdev)
3662 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3664 if (netif_device_present(netdev))
3665 ixgbevf_close_suspend(adapter);
3670 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3672 struct net_device *dev = adapter->netdev;
3674 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3678 /* if interface is down do nothing */
3679 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3680 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3683 /* Hardware has to reinitialize queues and interrupts to
3684 * match packet buffer alignment. Unfortunately, the
3685 * hardware is not flexible enough to do this dynamically.
3689 if (netif_running(dev))
3692 ixgbevf_clear_interrupt_scheme(adapter);
3693 ixgbevf_init_interrupt_scheme(adapter);
3695 if (netif_running(dev))
3701 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3702 u32 vlan_macip_lens, u32 type_tucmd,
3705 struct ixgbe_adv_tx_context_desc *context_desc;
3706 u16 i = tx_ring->next_to_use;
3708 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3711 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3713 /* set bits to identify this as an advanced context descriptor */
3714 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3716 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3717 context_desc->seqnum_seed = 0;
3718 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3719 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3722 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3723 struct ixgbevf_tx_buffer *first,
3726 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3727 struct sk_buff *skb = first->skb;
3737 u32 paylen, l4_offset;
3740 if (skb->ip_summed != CHECKSUM_PARTIAL)
3741 return 0;
3743 if (!skb_is_gso(skb))
3744 return 0;
3746 err = skb_cow_head(skb, 0);
3750 if (eth_p_mpls(first->protocol))
3751 ip.hdr = skb_inner_network_header(skb);
3753 ip.hdr = skb_network_header(skb);
3754 l4.hdr = skb_checksum_start(skb);
3756 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3757 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3759 /* initialize outer IP header fields */
3760 if (ip.v4->version == 4) {
3761 unsigned char *csum_start = skb_checksum_start(skb);
3762 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
3764 /* IP header will have to cancel out any data that
3765 * is not a part of the outer IP header
3767 ip.v4->check = csum_fold(csum_partial(trans_start,
3768 csum_start - trans_start,
3770 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3773 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3774 IXGBE_TX_FLAGS_CSUM |
3775 IXGBE_TX_FLAGS_IPV4;
3777 ip.v6->payload_len = 0;
3778 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3779 IXGBE_TX_FLAGS_CSUM;
3782 /* determine offset of inner transport header */
3783 l4_offset = l4.hdr - skb->data;
3785 /* compute length of segmentation header */
3786 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3788 /* remove payload length from inner checksum */
3789 paylen = skb->len - l4_offset;
3790 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
3792 /* update gso size and bytecount with header size */
3793 first->gso_segs = skb_shinfo(skb)->gso_segs;
3794 first->bytecount += (first->gso_segs - 1) * *hdr_len;
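/* Worked example: for a plain IPv4/TCP frame with a 14-byte Ethernet
 * header, 20-byte IP header and 20-byte TCP header, l4_offset is 34 and
 * *hdr_len is 54; paylen excludes those first 34 bytes, and bytecount
 * above grows by 54 bytes for every extra segment because the headers
 * are replicated on the wire.
 */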
3796 /* mss_l4len_idx: use 1 as index for TSO */
3797 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
3798 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3799 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
3801 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3802 vlan_macip_lens = l4.hdr - ip.hdr;
3803 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3804 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3806 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3807 type_tucmd, mss_l4len_idx);
3812 static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
3814 unsigned int offset = 0;
3816 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
3818 return offset == skb_checksum_start_offset(skb);
3821 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3822 struct ixgbevf_tx_buffer *first)
3824 struct sk_buff *skb = first->skb;
3825 u32 vlan_macip_lens = 0;
3828 if (skb->ip_summed != CHECKSUM_PARTIAL)
3831 switch (skb->csum_offset) {
3832 case offsetof(struct tcphdr, check):
3833 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3835 case offsetof(struct udphdr, check):
3837 case offsetof(struct sctphdr, checksum):
3838 /* validate that this is actually an SCTP request */
3839 if (((first->protocol == htons(ETH_P_IP)) &&
3840 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
3841 ((first->protocol == htons(ETH_P_IPV6)) &&
3842 ixgbevf_ipv6_csum_is_sctp(skb))) {
3843 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3848 skb_checksum_help(skb);
3851 /* update TX checksum flag */
3852 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3853 vlan_macip_lens = skb_checksum_start_offset(skb) -
3854 skb_network_offset(skb);
3856 /* vlan_macip_lens: MACLEN, VLAN tag */
3857 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3858 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3860 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
3863 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3865 /* set type for advanced descriptor with frame checksum insertion */
3866 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3867 IXGBE_ADVTXD_DCMD_IFCS |
3868 IXGBE_ADVTXD_DCMD_DEXT);
3870 /* set HW VLAN bit if VLAN is present */
3871 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3872 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3874 /* set segmentation enable bits for TSO/FSO */
3875 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3876 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3881 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3882 u32 tx_flags, unsigned int paylen)
3884 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3886 /* enable L4 checksum for TSO and TX checksum offload */
3887 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3888 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3890 /* enable IPv4 checksum for TSO */
3891 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3892 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3894 /* use index 1 context for TSO/FSO/FCOE */
3895 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3896 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
3898 /* Check Context must be set if Tx switch is enabled, which it
3899 * always is when virtual functions are running
3901 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3903 tx_desc->read.olinfo_status = olinfo_status;
3906 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3907 struct ixgbevf_tx_buffer *first,
3910 struct sk_buff *skb = first->skb;
3911 struct ixgbevf_tx_buffer *tx_buffer;
3912 union ixgbe_adv_tx_desc *tx_desc;
3913 struct skb_frag_struct *frag;
3915 unsigned int data_len, size;
3916 u32 tx_flags = first->tx_flags;
3917 __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3918 u16 i = tx_ring->next_to_use;
3920 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3922 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
3924 size = skb_headlen(skb);
3925 data_len = skb->data_len;
3927 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3931 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3932 if (dma_mapping_error(tx_ring->dev, dma))
3935 /* record length, and DMA address */
3936 dma_unmap_len_set(tx_buffer, len, size);
3937 dma_unmap_addr_set(tx_buffer, dma, dma);
3939 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3941 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3942 tx_desc->read.cmd_type_len =
3943 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3947 if (i == tx_ring->count) {
3948 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3951 tx_desc->read.olinfo_status = 0;
3953 dma += IXGBE_MAX_DATA_PER_TXD;
3954 size -= IXGBE_MAX_DATA_PER_TXD;
3956 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3959 if (likely(!data_len))
3960 break;
3962 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3966 if (i == tx_ring->count) {
3967 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3970 tx_desc->read.olinfo_status = 0;
3972 size = skb_frag_size(frag);
3975 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3978 tx_buffer = &tx_ring->tx_buffer_info[i];
3981 /* write last descriptor with RS and EOP bits */
3982 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3983 tx_desc->read.cmd_type_len = cmd_type;
3985 /* set the timestamp */
3986 first->time_stamp = jiffies;
3988 /* Force memory writes to complete before letting h/w know there
3989 * are new descriptors to fetch. (Only applicable for weak-ordered
3990 * memory model archs, such as IA-64).
3992 * We also need this memory barrier (wmb) to make certain all of the
3993 * status bits have been updated before next_to_watch is written.
3997 /* set next_to_watch value indicating a packet is present */
3998 first->next_to_watch = tx_desc;
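/* next_to_watch is read lock-free by the Tx cleanup path, so the write
 * barrier described above guarantees every descriptor write is globally
 * visible before cleanup can consider this packet complete.
 */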
4001 if (i == tx_ring->count)
4004 tx_ring->next_to_use = i;
4006 /* notify HW of packet */
4007 ixgbevf_write_tail(tx_ring, i);
4011 dev_err(tx_ring->dev, "TX DMA map failed\n");
4012 tx_buffer = &tx_ring->tx_buffer_info[i];
4014 /* clear dma mappings for failed tx_buffer_info map */
4015 while (tx_buffer != first) {
4016 if (dma_unmap_len(tx_buffer, len))
4017 dma_unmap_page(tx_ring->dev,
4018 dma_unmap_addr(tx_buffer, dma),
4019 dma_unmap_len(tx_buffer, len),
4021 dma_unmap_len_set(tx_buffer, len, 0);
4024 i += tx_ring->count;
4025 tx_buffer = &tx_ring->tx_buffer_info[i];
4028 if (dma_unmap_len(tx_buffer, len))
4029 dma_unmap_single(tx_ring->dev,
4030 dma_unmap_addr(tx_buffer, dma),
4031 dma_unmap_len(tx_buffer, len),
4033 dma_unmap_len_set(tx_buffer, len, 0);
4035 dev_kfree_skb_any(tx_buffer->skb);
4036 tx_buffer->skb = NULL;
4038 tx_ring->next_to_use = i;
4041 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4043 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
4044 /* Herbert's original patch had:
4045 * smp_mb__after_netif_stop_queue();
4046 * but since that doesn't exist yet, just open code it.
4050 /* We need to check again in a case another CPU has just
4051 * made room available.
4053 if (likely(ixgbevf_desc_unused(tx_ring) < size))
4056 /* A reprieve! - use start_queue because it doesn't call schedule */
4057 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
4058 ++tx_ring->tx_stats.restart_queue;
4063 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4065 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
4067 return __ixgbevf_maybe_stop_tx(tx_ring, size);
4070 static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
4071 struct ixgbevf_ring *tx_ring)
4073 struct ixgbevf_tx_buffer *first;
4076 u16 count = TXD_USE_COUNT(skb_headlen(skb));
4077 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4081 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
4083 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
4084 dev_kfree_skb_any(skb);
4085 return NETDEV_TX_OK;
4088 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
4089 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
4090 * + 2 desc gap to keep tail from touching head,
4091 * + 1 desc for context descriptor,
4092 * otherwise try next time
4094 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4095 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
4096 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
4098 count += skb_shinfo(skb)->nr_frags;
4100 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
4101 tx_ring->tx_stats.tx_busy++;
4102 return NETDEV_TX_BUSY;
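/* Example of the descriptor budget, assuming IXGBE_MAX_DATA_PER_TXD is
 * 16KB as in the rest of the ixgbe family: a 1500-byte linear skb with
 * no fragments needs a single data descriptor, so the check above asks
 * for 1 + 3 = 4 free descriptors (the context descriptor plus the
 * two-slot gap that keeps tail from touching head).
 */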
4105 /* record the location of the first descriptor for this packet */
4106 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4108 first->bytecount = skb->len;
4109 first->gso_segs = 1;
4111 if (skb_vlan_tag_present(skb)) {
4112 tx_flags |= skb_vlan_tag_get(skb);
4113 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4114 tx_flags |= IXGBE_TX_FLAGS_VLAN;
4117 /* record initial flags and protocol */
4118 first->tx_flags = tx_flags;
4119 first->protocol = vlan_get_protocol(skb);
4121 tso = ixgbevf_tso(tx_ring, first, &hdr_len);
4125 ixgbevf_tx_csum(tx_ring, first);
4127 ixgbevf_tx_map(tx_ring, first, hdr_len);
4129 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
4131 return NETDEV_TX_OK;
4134 dev_kfree_skb_any(first->skb);
4137 return NETDEV_TX_OK;
4140 static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4142 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4143 struct ixgbevf_ring *tx_ring;
4145 if (skb->len <= 0) {
4146 dev_kfree_skb_any(skb);
4147 return NETDEV_TX_OK;
4150 /* The minimum packet size for olinfo paylen is 17 so pad the skb
4151 * in order to meet this minimum size requirement.
4153 if (skb->len < 17) {
4154 if (skb_padto(skb, 17))
4155 return NETDEV_TX_OK;
4159 tx_ring = adapter->tx_ring[skb->queue_mapping];
4160 return ixgbevf_xmit_frame_ring(skb, tx_ring);
4164 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
4165 * @netdev: network interface device structure
4166 * @p: pointer to an address structure
4168 * Returns 0 on success, negative on failure
4170 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
4172 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4173 struct ixgbe_hw *hw = &adapter->hw;
4174 struct sockaddr *addr = p;
4177 if (!is_valid_ether_addr(addr->sa_data))
4178 return -EADDRNOTAVAIL;
4180 spin_lock_bh(&adapter->mbx_lock);
4182 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
4184 spin_unlock_bh(&adapter->mbx_lock);
4189 ether_addr_copy(hw->mac.addr, addr->sa_data);
4190 ether_addr_copy(netdev->dev_addr, addr->sa_data);
4196 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
4197 * @netdev: network interface device structure
4198 * @new_mtu: new value for maximum frame size
4200 * Returns 0 on success, negative on failure
4202 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4204 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4205 struct ixgbe_hw *hw = &adapter->hw;
4206 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4209 /* prevent MTU being changed to a size unsupported by XDP */
4210 if (adapter->xdp_prog) {
4211 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
4215 spin_lock_bh(&adapter->mbx_lock);
4216 /* notify the PF of our intent to use this size of frame */
4217 ret = hw->mac.ops.set_rlpml(hw, max_frame);
4218 spin_unlock_bh(&adapter->mbx_lock);
4222 hw_dbg(hw, "changing MTU from %d to %d\n",
4223 netdev->mtu, new_mtu);
4225 /* must set new MTU before calling down or up */
4226 netdev->mtu = new_mtu;
4228 if (netif_running(netdev))
4229 ixgbevf_reinit_locked(adapter);
4234 #ifdef CONFIG_NET_POLL_CONTROLLER
4235 /* Polling 'interrupt' - used by things like netconsole to send skbs
4236 * without having to re-enable interrupts. It's not called while
4237 * the interrupt routine is executing.
4239 static void ixgbevf_netpoll(struct net_device *netdev)
4241 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4244 /* if interface is down do nothing */
4245 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
4247 for (i = 0; i < adapter->num_rx_queues; i++)
4248 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
4250 #endif /* CONFIG_NET_POLL_CONTROLLER */
4252 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
4254 struct net_device *netdev = pci_get_drvdata(pdev);
4255 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4261 netif_device_detach(netdev);
4263 if (netif_running(netdev))
4264 ixgbevf_close_suspend(adapter);
4266 ixgbevf_clear_interrupt_scheme(adapter);
4270 retval = pci_save_state(pdev);
4275 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4276 pci_disable_device(pdev);
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	adapter->hw.hw_addr = adapter->io_addr;
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	if (!err && netif_running(netdev))
		err = ixgbevf_open(netdev);
	rtnl_unlock();
	if (err)
		return err;

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
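/* Shutdown (below) reuses the suspend path to quiesce the device before a
 * power-off, reboot or kexec: detach the netdev, free the interrupt scheme
 * and disable the PCI function.
 */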
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
				      const struct ixgbevf_ring *ring)
{
	u64 bytes, packets;
	unsigned int start;

	if (ring) {
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}
}
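/* The fetch_begin/fetch_retry loop above takes a consistent snapshot of the
 * 64-bit byte/packet counters even on 32-bit CPUs: if the ring's writer (the
 * Tx/Rx cleanup path) updates the counters mid-read, the sequence count
 * changes and the read is retried.  The writer side pairs with it roughly as
 * follows (sketch, not a verbatim copy of the cleanup code):
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.packets += total_packets;
 *	ring->stats.bytes += total_bytes;
 *	u64_stats_update_end(&ring->syncp);
 */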
static void ixgbevf_get_stats(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		ixgbevf_get_tx_ring_stats(stats, ring);
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		ring = adapter->xdp_ring[i];
		ixgbevf_get_tx_ring_stats(stats, ring);
	}
	rcu_read_unlock();
}
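/* Only the multicast count and the per-ring byte/packet totals are filled in
 * here; the remaining rtnl_link_stats64 fields are left at the zeroed
 * defaults provided by the networking core.
 */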
#define IXGBEVF_MAX_MAC_HDR_LEN		127
#define IXGBEVF_MAX_NETWORK_HDR_LEN	511

static netdev_features_t
ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
		       netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}
static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
{
	int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct ixgbevf_adapter *adapter = netdev_priv(dev);
	struct bpf_prog *old_prog;

	/* verify ixgbevf ring attributes are sufficient for XDP */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = adapter->rx_ring[i];

		if (frame_size > ixgbevf_rx_bufsz(ring))
			return -EINVAL;
	}

	old_prog = xchg(&adapter->xdp_prog, prog);

	/* If transitioning XDP modes reconfigure rings */
	if (!!prog != !!old_prog) {
		/* Hardware has to reinitialize queues and interrupts to
		 * match packet buffer alignment. Unfortunately, the
		 * hardware is not flexible enough to do this dynamically.
		 */
		if (netif_running(dev))
			ixgbevf_close(dev);

		ixgbevf_clear_interrupt_scheme(adapter);
		ixgbevf_init_interrupt_scheme(adapter);

		if (netif_running(dev))
			ixgbevf_open(dev);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++)
			xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
	}

	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}
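/* Example (assumed iproute2 usage): an XDP program is typically attached or
 * removed with
 *	ip link set dev <vf-netdev> xdp obj prog.o sec xdp
 *	ip link set dev <vf-netdev> xdp off
 * Attaching or detaching changes the ring layout, so the function above
 * tears down and rebuilds the interrupt scheme; swapping one program for
 * another only exchanges the per-ring bpf_prog pointers.
 */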
static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct ixgbevf_adapter *adapter = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return ixgbevf_xdp_setup(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!(adapter->xdp_prog);
		xdp->prog_id = adapter->xdp_prog ?
			       adapter->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbevf_netpoll,
#endif
	.ndo_features_check	= ixgbevf_features_check,
	.ndo_bpf		= ixgbevf_xdp,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	bool disable_dev = false;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	/* Setup HW API */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM |
			      NETIF_F_HW_CSUM |
			      NETIF_F_SCTP_CRC;

#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				      NETIF_F_GSO_GRE_CSUM | \
				      NETIF_F_GSO_IPXIP4 | \
				      NETIF_F_GSO_IPXIP6 | \
				      NETIF_F_GSO_UDP_TUNNEL | \
				      NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
			       IXGBEVF_GSO_PARTIAL_FEATURES;

	netdev->features = netdev->hw_features;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_SG |
				 NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_HW_CSUM;
	netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
	netdev->hw_enc_features |= netdev->vlan_features;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	/* MTU range: 68 - 1504 or 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
				  (ETH_HLEN + ETH_FCS_LEN);
		break;
	default:
		if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
			netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
					  (ETH_HLEN + ETH_FCS_LEN);
		else
			netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
		break;
	}

	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
	timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);

	INIT_WORK(&adapter->service_task, ixgbevf_service_task);
	set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, netdev);
	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the VF info */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
	switch (hw->mac.type) {
	case ixgbe_mac_X550_vf:
		dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
		break;
	case ixgbe_mac_X540_vf:
		dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
		break;
	case ixgbe_mac_82599_vf:
	default:
		dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
		break;
	}

	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
	kfree(adapter->rss_key);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter;
	bool disable_dev;

	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->rss_key);
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}
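/* The next three callbacks implement PCI error recovery (AER):
 * ixgbevf_io_error_detected() quiesces the device when a bus error is
 * reported, ixgbevf_io_slot_reset() re-enables it after the slot/bus reset,
 * and ixgbevf_io_resume() restarts traffic.  They are wired up through the
 * pci_error_handlers table near the end of this file.
 */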
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbevf_close_suspend(adapter);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	adapter->hw.hw_addr = adapter->io_addr;
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();
	if (netif_running(netdev))
		ixgbevf_open(netdev);
	rtnl_unlock();
	netif_device_attach(netdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name		= ixgbevf_driver_name,
	.id_table	= ixgbevf_pci_tbl,
	.probe		= ixgbevf_probe,
	.remove		= ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend	= ixgbevf_suspend,
	.resume		= ixgbevf_resume,
#endif
	.shutdown	= ixgbevf_shutdown,
	.err_handler	= &ixgbevf_err_handler
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);
	ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
	if (!ixgbevf_wq) {
		pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
		return -ENOMEM;
	}

	return pci_register_driver(&ixgbevf_driver);
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
	if (ixgbevf_wq) {
		destroy_workqueue(ixgbevf_wq);
		ixgbevf_wq = NULL;
	}
}
#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 * @hw: pointer to private hardware struct
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */