Merge tag 'pci-v4.20-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaa...
[linux-2.6-block.git] / drivers / net / ethernet / intel / ixgbe / ixgbe_main.c
index a0f716713e878e454c607b17a0ecc6bdc93f7b27..0049a2becd7e7349db1cc6d7acf653489882277c 100644 (file)
 #include <net/tc_act/tc_mirred.h>
 #include <net/vxlan.h>
 #include <net/mpls.h>
+#include <net/xdp_sock.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
 #include "ixgbe_sriov.h"
 #include "ixgbe_model.h"
+#include "ixgbe_txrx_common.h"
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
@@ -159,7 +161,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
 MODULE_VERSION(DRV_VERSION);
 
 static struct workqueue_struct *ixgbe_wq;
@@ -893,8 +895,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
        }
 }
 
-static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
-                                         u64 qmask)
+void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+                           u64 qmask)
 {
        u32 mask;
 
@@ -1673,9 +1675,9 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
  * order to populate the hash, checksum, VLAN, timestamp, protocol, and
  * other fields within the skb.
  **/
-static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
-                                    union ixgbe_adv_rx_desc *rx_desc,
-                                    struct sk_buff *skb)
+void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
+                             union ixgbe_adv_rx_desc *rx_desc,
+                             struct sk_buff *skb)
 {
        struct net_device *dev = rx_ring->netdev;
        u32 flags = rx_ring->q_vector->adapter->flags;
@@ -1708,8 +1710,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
        skb->protocol = eth_type_trans(skb, dev);
 }
 
-static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
-                        struct sk_buff *skb)
+void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
+                 struct sk_buff *skb)
 {
        napi_gro_receive(&q_vector->napi, skb);
 }
@@ -1868,9 +1870,9 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
  *
  * Returns true if an error was encountered and skb was freed.
  **/
-static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
-                                 union ixgbe_adv_rx_desc *rx_desc,
-                                 struct sk_buff *skb)
+bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+                          union ixgbe_adv_rx_desc *rx_desc,
+                          struct sk_buff *skb)
 {
        struct net_device *netdev = rx_ring->netdev;
 
@@ -2186,14 +2188,6 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
        return skb;
 }
 
-#define IXGBE_XDP_PASS         0
-#define IXGBE_XDP_CONSUMED     BIT(0)
-#define IXGBE_XDP_TX           BIT(1)
-#define IXGBE_XDP_REDIR                BIT(2)
-
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
-                              struct xdp_frame *xdpf);
-
 static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
                                     struct ixgbe_ring *rx_ring,
                                     struct xdp_buff *xdp)
@@ -3167,7 +3161,11 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 #endif
 
        ixgbe_for_each_ring(ring, q_vector->tx) {
-               if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
+               bool wd = ring->xsk_umem ?
+                         ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
+                         ixgbe_clean_tx_irq(q_vector, ring, budget);
+
+               if (!wd)
                        clean_complete = false;
        }
 
@@ -3183,7 +3181,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                per_ring_budget = budget;
 
        ixgbe_for_each_ring(ring, q_vector->rx) {
-               int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
+               int cleaned = ring->xsk_umem ?
+                             ixgbe_clean_rx_irq_zc(q_vector, ring,
+                                                   per_ring_budget) :
+                             ixgbe_clean_rx_irq(q_vector, ring,
                                                 per_ring_budget);
 
                work_done += cleaned;
@@ -3196,11 +3197,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                return budget;
 
        /* all work done, exit the polling mode */
-       napi_complete_done(napi, work_done);
-       if (adapter->rx_itr_setting & 1)
-               ixgbe_set_itr(q_vector);
-       if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+       if (likely(napi_complete_done(napi, work_done))) {
+               if (adapter->rx_itr_setting & 1)
+                       ixgbe_set_itr(q_vector);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       ixgbe_irq_enable_queues(adapter,
+                                               BIT_ULL(q_vector->v_idx));
+       }
 
        return min(work_done, budget - 1);
 }
@@ -3473,6 +3476,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        u32 txdctl = IXGBE_TXDCTL_ENABLE;
        u8 reg_idx = ring->reg_idx;
 
+       ring->xsk_umem = NULL;
+       if (ring_is_xdp(ring))
+               ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+
        /* disable queue to avoid issues while updating state */
        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
        IXGBE_WRITE_FLUSH(hw);
@@ -3577,12 +3584,18 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
                else
                        mtqc |= IXGBE_MTQC_64VF;
        } else {
-               if (tcs > 4)
+               if (tcs > 4) {
                        mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
-               else if (tcs > 1)
+               } else if (tcs > 1) {
                        mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
-               else
-                       mtqc = IXGBE_MTQC_64Q_1PB;
+               } else {
+                       u8 max_txq = adapter->num_tx_queues +
+                               adapter->num_xdp_queues;
+                       if (max_txq > 63)
+                               mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+                       else
+                               mtqc = IXGBE_MTQC_64Q_1PB;
+               }
        }
 
        IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
@@ -3705,10 +3718,27 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
        srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
        /* configure the packet buffer length */
-       if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
+       if (rx_ring->xsk_umem) {
+               u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
+                                 XDP_PACKET_HEADROOM;
+
+               /* If the MAC support setting RXDCTL.RLPML, the
+                * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
+                * RXDCTL.RLPML is set to the actual UMEM buffer
+                * size. If not, then we are stuck with a 1k buffer
+                * size resolution. In this case frames larger than
+                * the UMEM buffer size viewed in a 1k resolution will
+                * be dropped.
+                */
+               if (hw->mac.type != ixgbe_mac_82599EB)
+                       srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+               else
+                       srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
                srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-       else
+       } else {
                srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       }
 
        /* configure descriptor type */
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -4031,6 +4061,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;
 
+       xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+       ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+       if (ring->xsk_umem) {
+               ring->zca.free = ixgbe_zca_free;
+               WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+                                                  MEM_TYPE_ZERO_COPY,
+                                                  &ring->zca));
+
+       } else {
+               WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+                                                  MEM_TYPE_PAGE_SHARED, NULL));
+       }
+
        /* disable queue to avoid use of these values while updating state */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
        rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -4080,6 +4123,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 #endif
        }
 
+       if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
+               u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
+                                 XDP_PACKET_HEADROOM;
+
+               rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+                           IXGBE_RXDCTL_RLPML_EN);
+               rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
+
+               ring->rx_buf_len = xsk_buf_len;
+       }
+
        /* initialize rx_buffer_info */
        memset(ring->rx_buffer_info, 0,
               sizeof(struct ixgbe_rx_buffer) * ring->count);
@@ -4093,7 +4147,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
        ixgbe_rx_desc_queue_enable(adapter, ring);
-       ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
+       if (ring->xsk_umem)
+               ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
+       else
+               ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
 }
 
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -5173,6 +5230,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct hlist_node *node2;
        struct ixgbe_fdir_filter *filter;
+       u64 action;
 
        spin_lock(&adapter->fdir_perfect_lock);
 
@@ -5181,12 +5239,17 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 
        hlist_for_each_entry_safe(filter, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
+               action = filter->action;
+               if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
+                       action =
+                       (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+
                ixgbe_fdir_write_perfect_filter_82599(hw,
                                &filter->filter,
                                filter->sw_idx,
-                               (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+                               (action == IXGBE_FDIR_DROP_QUEUE) ?
                                IXGBE_FDIR_DROP_QUEUE :
-                               adapter->rx_ring[filter->action]->reg_idx);
+                               adapter->rx_ring[action]->reg_idx);
        }
 
        spin_unlock(&adapter->fdir_perfect_lock);
@@ -5201,6 +5264,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
        u16 i = rx_ring->next_to_clean;
        struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
 
+       if (rx_ring->xsk_umem) {
+               ixgbe_xsk_clean_rx_ring(rx_ring);
+               goto skip_free;
+       }
+
        /* Free all the Rx ring sk_buffs */
        while (i != rx_ring->next_to_alloc) {
                if (rx_buffer->skb) {
@@ -5239,6 +5307,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
                }
        }
 
+skip_free:
        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
@@ -5883,6 +5952,11 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
        u16 i = tx_ring->next_to_clean;
        struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
 
+       if (tx_ring->xsk_umem) {
+               ixgbe_xsk_clean_tx_ring(tx_ring);
+               goto out;
+       }
+
        while (i != tx_ring->next_to_use) {
                union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
 
@@ -5934,6 +6008,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
        if (!ring_is_xdp(tx_ring))
                netdev_tx_reset_queue(txring_txq(tx_ring));
 
+out:
        /* reset next_to_use and next_to_clean */
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
@@ -6434,7 +6509,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
        struct device *dev = rx_ring->dev;
        int orig_node = dev_to_node(dev);
        int ring_node = -1;
-       int size, err;
+       int size;
 
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 
@@ -6471,13 +6546,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
                             rx_ring->queue_index) < 0)
                goto err;
 
-       err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
-                                        MEM_TYPE_PAGE_SHARED, NULL);
-       if (err) {
-               xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
-               goto err;
-       }
-
        rx_ring->xdp_prog = adapter->xdp_prog;
 
        return 0;
@@ -7774,6 +7842,33 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
        rtnl_unlock();
 }
 
+/**
+ * ixgbe_check_fw_error - Check firmware for errors
+ * @adapter: the adapter private structure
+ *
+ * Check firmware errors in register FWSM
+ *
+ * Return: true if the firmware reports recovery mode (caller should limit
+ * functionality), false otherwise.  An FWSM error indication alone is only
+ * logged; it does not by itself make this function return true.
+ */
+static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 fwsm;
+
+       /* read fwsm.ext_err_ind register and log errors */
+       fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+
+       /* warn if an external error is indicated or the firmware-valid bit
+        * is clear ('&' binds tighter than '||', so no extra parens needed)
+        */
+       if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
+           !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
+               e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
+                          fwsm);
+
+       /* fw_recovery_mode is an optional MAC op, so check the pointer
+        * before calling it
+        */
+       if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
+               e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+               return true;
+       }
+
+       return false;
+}
+
 /**
  * ixgbe_service_task - manages and runs subtasks
  * @work: pointer to work_struct containing our data
@@ -7792,6 +7887,15 @@ static void ixgbe_service_task(struct work_struct *work)
                ixgbe_service_event_complete(adapter);
                return;
        }
+       if (ixgbe_check_fw_error(adapter)) {
+               if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+                       rtnl_lock();
+                       unregister_netdev(adapter->netdev);
+                       rtnl_unlock();
+               }
+               ixgbe_service_event_complete(adapter);
+               return;
+       }
        if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
                rtnl_lock();
                adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
@@ -8066,9 +8170,6 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
        return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
 
-#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
-                      IXGBE_TXD_CMD_RS)
-
 static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
                        struct ixgbe_tx_buffer *first,
                        const u8 hdr_len)
@@ -8421,8 +8522,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 }
 
 #endif
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
-                              struct xdp_frame *xdpf)
+int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+                       struct xdp_frame *xdpf)
 {
        struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
        struct ixgbe_tx_buffer *tx_buffer;
@@ -8644,6 +8745,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
                return NETDEV_TX_OK;
 
        tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
+       if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
+               return NETDEV_TX_BUSY;
 
        return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
 }
@@ -8768,28 +8871,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
        return err;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbe_netpoll(struct net_device *netdev)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int i;
-
-       /* if interface is down do nothing */
-       if (test_bit(__IXGBE_DOWN, &adapter->state))
-               return;
-
-       /* loop through and schedule all active queues */
-       for (i = 0; i < adapter->num_q_vectors; i++)
-               ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
-}
-
-#endif
-
 static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
                                   struct ixgbe_ring *ring)
 {
@@ -10177,12 +10258,19 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
                xdp->prog_id = adapter->xdp_prog ?
                        adapter->xdp_prog->aux->id : 0;
                return 0;
+       case XDP_QUERY_XSK_UMEM:
+               return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem,
+                                           xdp->xsk.queue_id);
+       case XDP_SETUP_XSK_UMEM:
+               return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+                                           xdp->xsk.queue_id);
+
        default:
                return -EINVAL;
        }
 }
 
-static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
+void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
 {
        /* Force memory writes to complete before letting h/w know there
         * are new descriptors to fetch.
@@ -10212,6 +10300,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
        if (unlikely(!ring))
                return -ENXIO;
 
+       if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
+               return -ENXIO;
+
        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                int err;
@@ -10251,9 +10342,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
        .ndo_get_stats64        = ixgbe_get_stats64,
        .ndo_setup_tc           = __ixgbe_setup_tc,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ixgbe_netpoll,
-#endif
 #ifdef IXGBE_FCOE
        .ndo_select_queue       = ixgbe_select_queue,
        .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
@@ -10276,8 +10364,162 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_features_check     = ixgbe_features_check,
        .ndo_bpf                = ixgbe_xdp,
        .ndo_xdp_xmit           = ixgbe_xdp_xmit,
+       .ndo_xsk_async_xmit     = ixgbe_xsk_async_xmit,
 };
 
+/**
+ * ixgbe_disable_txr_hw - disable a Tx ring at the hardware level
+ * @adapter: adapter structure
+ * @tx_ring: Tx ring to disable
+ *
+ * Requests a software flush via TXDCTL.SWFLSH, then polls until hardware
+ * clears TXDCTL.ENABLE, logging an error if it never does.
+ **/
+static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
+                                struct ixgbe_ring *tx_ring)
+{
+       unsigned long wait_delay, delay_interval;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u8 reg_idx = tx_ring->reg_idx;
+       int wait_loop;
+       u32 txdctl;
+
+       IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+
+       /* delay mechanism from ixgbe_disable_tx */
+       delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+       wait_loop = IXGBE_MAX_RX_DESC_POLL;
+       wait_delay = delay_interval;
+
+       /* poll with backoff: each pass sleeps longer by 2x the base interval */
+       while (wait_loop--) {
+               usleep_range(wait_delay, wait_delay + 10);
+               wait_delay += delay_interval * 2;
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+
+               if (!(txdctl & IXGBE_TXDCTL_ENABLE))
+                       return;
+       }
+
+       e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+/* Mark the ring unusable for new transmits, then disable it in hardware. */
+static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
+                             struct ixgbe_ring *tx_ring)
+{
+       /* set the bit first so xmit paths stop using the ring before the
+        * hardware queue is flushed and disabled
+        */
+       set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+       ixgbe_disable_txr_hw(adapter, tx_ring);
+}
+
+/**
+ * ixgbe_disable_rxr_hw - disable an Rx ring at the hardware level
+ * @adapter: adapter structure
+ * @rx_ring: Rx ring to disable
+ *
+ * Clears RXDCTL.ENABLE together with a software flush request, then polls
+ * until hardware acknowledges, logging an error on timeout.
+ **/
+static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
+                                struct ixgbe_ring *rx_ring)
+{
+       unsigned long wait_delay, delay_interval;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u8 reg_idx = rx_ring->reg_idx;
+       int wait_loop;
+       u32 rxdctl;
+
+       rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+       rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+       rxdctl |= IXGBE_RXDCTL_SWFLSH;
+
+       /* write value back with RXDCTL.ENABLE bit cleared */
+       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+       /* RXDCTL.EN may not change on 82598 if link is down, so skip it */
+       if (hw->mac.type == ixgbe_mac_82598EB &&
+           !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+               return;
+
+       /* delay mechanism from ixgbe_disable_rx */
+       delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+       wait_loop = IXGBE_MAX_RX_DESC_POLL;
+       wait_delay = delay_interval;
+
+       /* poll with backoff: each pass sleeps longer by 2x the base interval */
+       while (wait_loop--) {
+               usleep_range(wait_delay, wait_delay + 10);
+               wait_delay += delay_interval * 2;
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+
+               if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
+                       return;
+       }
+
+       e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+/* Zero both the generic and the Tx-specific counters of a Tx ring. */
+static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
+{
+       memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
+       memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
+}
+
+/* Zero both the generic and the Rx-specific counters of an Rx ring. */
+static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
+{
+       memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
+       memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+}
+
+/**
+ * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
+ * @adapter: adapter structure
+ * @ring: ring index
+ *
+ * This function disables a certain Rx/Tx/XDP Tx ring. The function
+ * assumes that the netdev is running.
+ **/
+void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
+{
+       struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
+
+       rx_ring = adapter->rx_ring[ring];
+       tx_ring = adapter->tx_ring[ring];
+       xdp_ring = adapter->xdp_ring[ring]; /* NULL when XDP is not enabled */
+
+       /* quiesce the Tx side first so nothing new lands on the rings */
+       ixgbe_disable_txr(adapter, tx_ring);
+       if (xdp_ring)
+               ixgbe_disable_txr(adapter, xdp_ring);
+       ixgbe_disable_rxr_hw(adapter, rx_ring);
+
+       /* NOTE(review): presumably waits out concurrent XDP transmitters
+        * that sampled the ring before __IXGBE_TX_DISABLED was set — confirm
+        */
+       if (xdp_ring)
+               synchronize_sched();
+
+       /* Rx/Tx/XDP Tx share the same napi context. */
+       napi_disable(&rx_ring->q_vector->napi);
+
+       ixgbe_clean_tx_ring(tx_ring);
+       if (xdp_ring)
+               ixgbe_clean_tx_ring(xdp_ring);
+       ixgbe_clean_rx_ring(rx_ring);
+
+       ixgbe_reset_txr_stats(tx_ring);
+       if (xdp_ring)
+               ixgbe_reset_txr_stats(xdp_ring);
+       ixgbe_reset_rxr_stats(rx_ring);
+}
+
+/**
+ * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
+ * @adapter: adapter structure
+ * @ring: ring index
+ *
+ * This function enables a certain Rx/Tx/XDP Tx ring. The function
+ * assumes that the netdev is running.
+ **/
+void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
+{
+       struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
+
+       rx_ring = adapter->rx_ring[ring];
+       tx_ring = adapter->tx_ring[ring];
+       xdp_ring = adapter->xdp_ring[ring]; /* NULL when XDP is not enabled */
+
+       /* Rx/Tx/XDP Tx share the same napi context. */
+       napi_enable(&rx_ring->q_vector->napi);
+
+       ixgbe_configure_tx_ring(adapter, tx_ring);
+       if (xdp_ring)
+               ixgbe_configure_tx_ring(adapter, xdp_ring);
+       ixgbe_configure_rx_ring(adapter, rx_ring);
+
+       clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+       /* xdp_ring may be NULL; guard it like every other access in the
+        * disable/enable pair to avoid a NULL pointer dereference
+        */
+       if (xdp_ring)
+               clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+}
+
 /**
  * ixgbe_enumerate_functions - Get the number of ports this device has
  * @adapter: adapter structure
@@ -10716,6 +10958,11 @@ skip_sriov:
        if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                netdev->features |= NETIF_F_LRO;
 
+       if (ixgbe_check_fw_error(adapter)) {
+               err = -EIO;
+               goto err_sw_init;
+       }
+
        /* make sure the EEPROM is good */
        if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
                e_dev_err("The EEPROM Checksum Is Not Valid\n");