qede: Add VF support
drivers/net/ethernet/qlogic/qed/qed_l2.c
index 3f35c6ca92528d3cc1951df3d09f8ef19c3dd8ba..80f0b853a1426018c4c75c91849db320801edbef 100644
 #include "qed_hsi.h"
 #include "qed_hw.h"
 #include "qed_int.h"
+#include "qed_l2.h"
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
 
-enum qed_rss_caps {
-       QED_RSS_IPV4            = 0x1,
-       QED_RSS_IPV6            = 0x2,
-       QED_RSS_IPV4_TCP        = 0x4,
-       QED_RSS_IPV6_TCP        = 0x8,
-       QED_RSS_IPV4_UDP        = 0x10,
-       QED_RSS_IPV6_UDP        = 0x20,
-};
-
-/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
-#define QED_RSS_IND_TABLE_SIZE 128
-#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
-
-struct qed_rss_params {
-       u8      update_rss_config;
-       u8      rss_enable;
-       u8      rss_eng_id;
-       u8      update_rss_capabilities;
-       u8      update_rss_ind_table;
-       u8      update_rss_key;
-       u8      rss_caps;
-       u8      rss_table_size_log;
-       u16     rss_ind_table[QED_RSS_IND_TABLE_SIZE];
-       u32     rss_key[QED_RSS_KEY_SIZE];
-};
-
-enum qed_filter_opcode {
-       QED_FILTER_ADD,
-       QED_FILTER_REMOVE,
-       QED_FILTER_MOVE,
-       QED_FILTER_REPLACE,     /* Delete all MACs and add new one instead */
-       QED_FILTER_FLUSH,       /* Removes all filters */
-};
-
-enum qed_filter_ucast_type {
-       QED_FILTER_MAC,
-       QED_FILTER_VLAN,
-       QED_FILTER_MAC_VLAN,
-       QED_FILTER_INNER_MAC,
-       QED_FILTER_INNER_VLAN,
-       QED_FILTER_INNER_PAIR,
-       QED_FILTER_INNER_MAC_VNI_PAIR,
-       QED_FILTER_MAC_VNI_PAIR,
-       QED_FILTER_VNI,
-};
-
-struct qed_filter_ucast {
-       enum qed_filter_opcode          opcode;
-       enum qed_filter_ucast_type      type;
-       u8                              is_rx_filter;
-       u8                              is_tx_filter;
-       u8                              vport_to_add_to;
-       u8                              vport_to_remove_from;
-       unsigned char                   mac[ETH_ALEN];
-       u8                              assert_on_error;
-       u16                             vlan;
-       u32                             vni;
-};
-
-struct qed_filter_mcast {
-       /* MOVE is not supported for multicast */
-       enum qed_filter_opcode  opcode;
-       u8                      vport_to_add_to;
-       u8                      vport_to_remove_from;
-       u8                      num_mc_addrs;
-#define QED_MAX_MC_ADDRS        64
-       unsigned char           mac[QED_MAX_MC_ADDRS][ETH_ALEN];
-};
-
-struct qed_filter_accept_flags {
-       u8      update_rx_mode_config;
-       u8      update_tx_mode_config;
-       u8      rx_accept_filter;
-       u8      tx_accept_filter;
-#define QED_ACCEPT_NONE         0x01
-#define QED_ACCEPT_UCAST_MATCHED        0x02
-#define QED_ACCEPT_UCAST_UNMATCHED      0x04
-#define QED_ACCEPT_MCAST_MATCHED        0x08
-#define QED_ACCEPT_MCAST_UNMATCHED      0x10
-#define QED_ACCEPT_BCAST                0x20
-};
-
-struct qed_sp_vport_update_params {
-       u16                             opaque_fid;
-       u8                              vport_id;
-       u8                              update_vport_active_rx_flg;
-       u8                              vport_active_rx_flg;
-       u8                              update_vport_active_tx_flg;
-       u8                              vport_active_tx_flg;
-       u8                              update_approx_mcast_flg;
-       u8                              update_accept_any_vlan_flg;
-       u8                              accept_any_vlan;
-       unsigned long                   bins[8];
-       struct qed_rss_params           *rss_params;
-       struct qed_filter_accept_flags  accept_flags;
-};
-
-enum qed_tpa_mode {
-       QED_TPA_MODE_NONE,
-       QED_TPA_MODE_UNUSED,
-       QED_TPA_MODE_GRO,
-       QED_TPA_MODE_MAX
-};
-
-struct qed_sp_vport_start_params {
-       enum qed_tpa_mode       tpa_mode;
-       bool                    remove_inner_vlan;
-       bool                    drop_ttl0;
-       u8                      max_buffers_per_cqe;
-       u32                     concrete_fid;
-       u16                     opaque_fid;
-       u8                      vport_id;
-       u16                     mtu;
-};
 
 #define QED_MAX_SGES_NUM 16
 #define CRC32_POLY 0x1edc6f41
 
-static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
-                             struct qed_sp_vport_start_params *p_params)
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+                          struct qed_sp_vport_start_params *p_params)
 {
        struct vport_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent =  NULL;
        struct qed_sp_init_data init_data;
+       u8 abs_vport_id = 0;
        int rc = -EINVAL;
        u16 rx_mode = 0;
-       u8 abs_vport_id = 0;
 
        rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != 0)
@@ -218,6 +106,20 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
+int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+                      struct qed_sp_vport_start_params *p_params)
+{
+       if (IS_VF(p_hwfn->cdev)) {
+               return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+                                            p_params->mtu,
+                                            p_params->remove_inner_vlan,
+                                            p_params->tpa_mode,
+                                            p_params->max_buffers_per_cqe);
+       }
+
+       return qed_sp_eth_vport_start(p_hwfn, p_params);
+}
+
 static int
 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
                        struct vport_update_ramrod_data *p_ramrod,
@@ -362,6 +264,38 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
        }
 }
 
+static void
+qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
+                           struct vport_update_ramrod_data *p_ramrod,
+                           struct qed_sge_tpa_params *p_params)
+{
+       struct eth_vport_tpa_param *p_tpa;
+
+       if (!p_params) {
+               p_ramrod->common.update_tpa_param_flg = 0;
+               p_ramrod->common.update_tpa_en_flg = 0;
+               return;
+       }
+
+       p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+       p_tpa = &p_ramrod->tpa_param;
+       p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+       p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+       p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+       p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+       p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+       p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+       p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+       p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+       p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+       p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+       p_tpa->tpa_max_size = p_params->tpa_max_size;
+       p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
+       p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+}
+
 static void
 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
                        struct vport_update_ramrod_data *p_ramrod,
@@ -383,20 +317,24 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
        }
 }
 
-static int
-qed_sp_vport_update(struct qed_hwfn *p_hwfn,
-                   struct qed_sp_vport_update_params *p_params,
-                   enum spq_mode comp_mode,
-                   struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+                       struct qed_sp_vport_update_params *p_params,
+                       enum spq_mode comp_mode,
+                       struct qed_spq_comp_cb *p_comp_data)
 {
        struct qed_rss_params *p_rss_params = p_params->rss_params;
        struct vport_update_ramrod_data_cmn *p_cmn;
        struct qed_sp_init_data init_data;
        struct vport_update_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
-       u8 abs_vport_id = 0;
+       u8 abs_vport_id = 0, val;
        int rc = -EINVAL;
 
+       if (IS_VF(p_hwfn->cdev)) {
+               rc = qed_vf_pf_vport_update(p_hwfn, p_params);
+               return rc;
+       }
+
        rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != 0)
                return rc;
@@ -425,6 +363,13 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
        p_cmn->accept_any_vlan = p_params->accept_any_vlan;
        p_cmn->update_accept_any_vlan_flg =
                        p_params->update_accept_any_vlan_flg;
+
+       p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
+       val = p_params->update_inner_vlan_removal_flg;
+       p_cmn->update_inner_vlan_removal_en_flg = val;
+       p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+       p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
+
        rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc) {
                /* Return spq entry which is taken in qed_sp_init_request()*/
@@ -436,12 +381,11 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
        qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
 
        qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+       qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
-static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
-                            u16 opaque_fid,
-                            u8 vport_id)
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
 {
        struct vport_stop_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
@@ -449,6 +393,9 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
        u8 abs_vport_id = 0;
        int rc;
 
+       if (IS_VF(p_hwfn->cdev))
+               return qed_vf_pf_vport_stop(p_hwfn);
+
        rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
        if (rc != 0)
                return rc;
@@ -470,13 +417,26 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
+static int
+qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
+                      struct qed_filter_accept_flags *p_accept_flags)
+{
+       struct qed_sp_vport_update_params s_params;
+
+       memset(&s_params, 0, sizeof(s_params));
+       memcpy(&s_params.accept_flags, p_accept_flags,
+              sizeof(struct qed_filter_accept_flags));
+
+       return qed_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
 static int qed_filter_accept_cmd(struct qed_dev *cdev,
                                 u8 vport,
                                 struct qed_filter_accept_flags accept_flags,
                                 u8 update_accept_any_vlan,
                                 u8 accept_any_vlan,
-                               enum spq_mode comp_mode,
-                               struct qed_spq_comp_cb *p_comp_data)
+                                enum spq_mode comp_mode,
+                                struct qed_spq_comp_cb *p_comp_data)
 {
        struct qed_sp_vport_update_params vport_update_params;
        int i, rc;
@@ -493,6 +453,13 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
 
                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
 
+               if (IS_VF(cdev)) {
+                       rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
+                       if (rc)
+                               return rc;
+                       continue;
+               }
+
                rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
                                         comp_mode, p_comp_data);
                if (rc != 0) {
@@ -527,16 +494,14 @@ static int qed_sp_release_queue_cid(
        return 0;
 }
 
-static int
-qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
-                           u16 opaque_fid,
-                           u32 cid,
-                           struct qed_queue_start_common_params *params,
-                           u8 stats_id,
-                           u16 bd_max_bytes,
-                           dma_addr_t bd_chain_phys_addr,
-                           dma_addr_t cqe_pbl_addr,
-                           u16 cqe_pbl_size)
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+                               u16 opaque_fid,
+                               u32 cid,
+                               struct qed_queue_start_common_params *params,
+                               u8 stats_id,
+                               u16 bd_max_bytes,
+                               dma_addr_t bd_chain_phys_addr,
+                               dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
 {
        struct rx_queue_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
@@ -605,8 +570,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
                          u16 bd_max_bytes,
                          dma_addr_t bd_chain_phys_addr,
                          dma_addr_t cqe_pbl_addr,
-                         u16 cqe_pbl_size,
-                         void __iomem **pp_prod)
+                         u16 cqe_pbl_size, void __iomem **pp_prod)
 {
        struct qed_hw_cid_data *p_rx_cid;
        u64 init_prod_val = 0;
@@ -614,6 +578,16 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
        u8 abs_stats_id = 0;
        int rc;
 
+       if (IS_VF(p_hwfn->cdev)) {
+               return qed_vf_pf_rxq_start(p_hwfn,
+                                          params->queue_id,
+                                          params->sb,
+                                          params->sb_idx,
+                                          bd_max_bytes,
+                                          bd_chain_phys_addr,
+                                          cqe_pbl_addr, cqe_pbl_size, pp_prod);
+       }
+
        rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
        if (rc != 0)
                return rc;
@@ -656,10 +630,59 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
-                                   u16 rx_queue_id,
-                                   bool eq_completion_only,
-                                   bool cqe_completion)
+int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+                               u16 rx_queue_id,
+                               u8 num_rxqs,
+                               u8 complete_cqe_flg,
+                               u8 complete_event_flg,
+                               enum spq_mode comp_mode,
+                               struct qed_spq_comp_cb *p_comp_data)
+{
+       struct rx_queue_update_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       struct qed_hw_cid_data *p_rx_cid;
+       u16 qid, abs_rx_q_id = 0;
+       int rc = -EINVAL;
+       u8 i;
+
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       for (i = 0; i < num_rxqs; i++) {
+               qid = rx_queue_id + i;
+               p_rx_cid = &p_hwfn->p_rx_cids[qid];
+
+               /* Get SPQ entry */
+               init_data.cid = p_rx_cid->cid;
+               init_data.opaque_fid = p_rx_cid->opaque_fid;
+
+               rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                        ETH_RAMROD_RX_QUEUE_UPDATE,
+                                        PROTOCOLID_ETH, &init_data);
+               if (rc)
+                       return rc;
+
+               p_ramrod = &p_ent->ramrod.rx_queue_update;
+
+               qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+               qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+               p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+               p_ramrod->complete_cqe_flg = complete_cqe_flg;
+               p_ramrod->complete_event_flg = complete_event_flg;
+
+               rc = qed_spq_post(p_hwfn, p_ent, NULL);
+               if (rc)
+                       return rc;
+       }
+
+       return rc;
+}
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+                            u16 rx_queue_id,
+                            bool eq_completion_only, bool cqe_completion)
 {
        struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
        struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
@@ -668,6 +691,9 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
        u16 abs_rx_q_id = 0;
        int rc = -EINVAL;
 
+       if (IS_VF(p_hwfn->cdev))
+               return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
+
        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_rx_cid->cid;
@@ -703,15 +729,14 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
        return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
 }
 
-static int
-qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
-                           u16  opaque_fid,
-                           u32  cid,
-                           struct qed_queue_start_common_params *p_params,
-                           u8  stats_id,
-                           dma_addr_t pbl_addr,
-                           u16 pbl_size,
-                           union qed_qm_pq_params *p_pq_params)
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
+                               u16  opaque_fid,
+                               u32  cid,
+                               struct qed_queue_start_common_params *p_params,
+                               u8  stats_id,
+                               dma_addr_t pbl_addr,
+                               u16 pbl_size,
+                               union qed_qm_pq_params *p_pq_params)
 {
        struct tx_queue_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
@@ -765,14 +790,21 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
                          u16 opaque_fid,
                          struct qed_queue_start_common_params *p_params,
                          dma_addr_t pbl_addr,
-                         u16 pbl_size,
-                         void __iomem **pp_doorbell)
+                         u16 pbl_size, void __iomem **pp_doorbell)
 {
        struct qed_hw_cid_data *p_tx_cid;
        union qed_qm_pq_params pq_params;
        u8 abs_stats_id = 0;
        int rc;
 
+       if (IS_VF(p_hwfn->cdev)) {
+               return qed_vf_pf_txq_start(p_hwfn,
+                                          p_params->queue_id,
+                                          p_params->sb,
+                                          p_params->sb_idx,
+                                          pbl_addr, pbl_size, pp_doorbell);
+       }
+
        rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
        if (rc)
                return rc;
@@ -813,14 +845,16 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
-                                   u16 tx_queue_id)
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
 {
        struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;
 
+       if (IS_VF(p_hwfn->cdev))
+               return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+
        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_tx_cid->cid;
@@ -1016,11 +1050,11 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
-static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
-                                  u16 opaque_fid,
-                                  struct qed_filter_ucast *p_filter_cmd,
-                                  enum spq_mode comp_mode,
-                                  struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+                           u16 opaque_fid,
+                           struct qed_filter_ucast *p_filter_cmd,
+                           enum spq_mode comp_mode,
+                           struct qed_spq_comp_cb *p_comp_data)
 {
        struct vport_filter_update_ramrod_data  *p_ramrod       = NULL;
        struct qed_spq_entry                    *p_ent          = NULL;
@@ -1118,7 +1152,7 @@ static inline u32 qed_crc32c_le(u32 seed,
        return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
 }
 
-static u8 qed_mcast_bin_from_mac(u8 *mac)
+u8 qed_mcast_bin_from_mac(u8 *mac)
 {
        u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
                                mac, ETH_ALEN);
@@ -1201,11 +1235,10 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
-static int
-qed_filter_mcast_cmd(struct qed_dev *cdev,
-                    struct qed_filter_mcast *p_filter_cmd,
-                    enum spq_mode comp_mode,
-                    struct qed_spq_comp_cb *p_comp_data)
+static int qed_filter_mcast_cmd(struct qed_dev *cdev,
+                               struct qed_filter_mcast *p_filter_cmd,
+                               enum spq_mode comp_mode,
+                               struct qed_spq_comp_cb *p_comp_data)
 {
        int rc = 0;
        int i;
@@ -1221,8 +1254,10 @@ qed_filter_mcast_cmd(struct qed_dev *cdev,
 
                u16 opaque_fid;
 
-               if (rc != 0)
-                       break;
+               if (IS_VF(cdev)) {
+                       qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+                       continue;
+               }
 
                opaque_fid = p_hwfn->hw_info.opaque_fid;
 
@@ -1247,8 +1282,10 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                u16 opaque_fid;
 
-               if (rc != 0)
-                       break;
+               if (IS_VF(cdev)) {
+                       rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+                       continue;
+               }
 
                opaque_fid = p_hwfn->hw_info.opaque_fid;
 
@@ -1257,6 +1294,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
                                             p_filter_cmd,
                                             comp_mode,
                                             p_comp_data);
+               if (rc != 0)
+                       break;
        }
 
        return rc;
@@ -1265,12 +1304,19 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
 /* Statistics related code */
 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
                                           u32 *p_addr,
-                                          u32 *p_len,
-                                          u16 statistics_bin)
+                                          u32 *p_len, u16 statistics_bin)
 {
-       *p_addr = BAR0_MAP_REG_PSDM_RAM +
-                 PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
-       *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+       if (IS_PF(p_hwfn->cdev)) {
+               *p_addr = BAR0_MAP_REG_PSDM_RAM +
+                   PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+               *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+       } else {
+               struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+               *p_len = p_resp->pfdev_info.stats_info.pstats.len;
+       }
 }
 
 static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
@@ -1285,32 +1331,15 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
                                       statistics_bin);
 
        memset(&pstats, 0, sizeof(pstats));
-       qed_memcpy_from(p_hwfn, p_ptt, &pstats,
-                       pstats_addr, pstats_len);
-
-       p_stats->tx_ucast_bytes +=
-               HILO_64_REGPAIR(pstats.sent_ucast_bytes);
-       p_stats->tx_mcast_bytes +=
-               HILO_64_REGPAIR(pstats.sent_mcast_bytes);
-       p_stats->tx_bcast_bytes +=
-               HILO_64_REGPAIR(pstats.sent_bcast_bytes);
-       p_stats->tx_ucast_pkts +=
-               HILO_64_REGPAIR(pstats.sent_ucast_pkts);
-       p_stats->tx_mcast_pkts +=
-               HILO_64_REGPAIR(pstats.sent_mcast_pkts);
-       p_stats->tx_bcast_pkts +=
-               HILO_64_REGPAIR(pstats.sent_bcast_pkts);
-       p_stats->tx_err_drop_pkts +=
-               HILO_64_REGPAIR(pstats.error_drop_pkts);
-}
-
-static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
-                                          u32 *p_addr,
-                                          u32 *p_len)
-{
-       *p_addr = BAR0_MAP_REG_TSDM_RAM +
-                 TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
-       *p_len = sizeof(struct tstorm_per_port_stat);
+       qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+       p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+       p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+       p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+       p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+       p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+       p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+       p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
 }
 
 static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1318,14 +1347,23 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
                                   struct qed_eth_stats *p_stats,
                                   u16 statistics_bin)
 {
-       u32 tstats_addr = 0, tstats_len = 0;
        struct tstorm_per_port_stat tstats;
+       u32 tstats_addr, tstats_len;
 
-       __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
+       if (IS_PF(p_hwfn->cdev)) {
+               tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+                   TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+               tstats_len = sizeof(struct tstorm_per_port_stat);
+       } else {
+               struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+               tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+       }
 
        memset(&tstats, 0, sizeof(tstats));
-       qed_memcpy_from(p_hwfn, p_ptt, &tstats,
-                       tstats_addr, tstats_len);
+       qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
 
        p_stats->mftag_filter_discards +=
                HILO_64_REGPAIR(tstats.mftag_filter_discard);
@@ -1335,12 +1373,19 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
 
 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
                                           u32 *p_addr,
-                                          u32 *p_len,
-                                          u16 statistics_bin)
+                                          u32 *p_len, u16 statistics_bin)
 {
-       *p_addr = BAR0_MAP_REG_USDM_RAM +
-                 USTORM_QUEUE_STAT_OFFSET(statistics_bin);
-       *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+       if (IS_PF(p_hwfn->cdev)) {
+               *p_addr = BAR0_MAP_REG_USDM_RAM +
+                   USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+               *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+       } else {
+               struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+               *p_len = p_resp->pfdev_info.stats_info.ustats.len;
+       }
 }
 
 static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
@@ -1355,31 +1400,31 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
                                       statistics_bin);
 
        memset(&ustats, 0, sizeof(ustats));
-       qed_memcpy_from(p_hwfn, p_ptt, &ustats,
-                       ustats_addr, ustats_len);
-
-       p_stats->rx_ucast_bytes +=
-               HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
-       p_stats->rx_mcast_bytes +=
-               HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
-       p_stats->rx_bcast_bytes +=
-               HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
-       p_stats->rx_ucast_pkts +=
-               HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
-       p_stats->rx_mcast_pkts +=
-               HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
-       p_stats->rx_bcast_pkts +=
-               HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+       qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+       p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+       p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+       p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+       p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+       p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+       p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
 }
 
 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
                                           u32 *p_addr,
-                                          u32 *p_len,
-                                          u16 statistics_bin)
+                                          u32 *p_len, u16 statistics_bin)
 {
-       *p_addr = BAR0_MAP_REG_MSDM_RAM +
-                 MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
-       *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+       if (IS_PF(p_hwfn->cdev)) {
+               *p_addr = BAR0_MAP_REG_MSDM_RAM +
+                   MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+               *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+       } else {
+               struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+               *p_len = p_resp->pfdev_info.stats_info.mstats.len;
+       }
 }
 
 static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
@@ -1394,21 +1439,17 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
                                       statistics_bin);
 
        memset(&mstats, 0, sizeof(mstats));
-       qed_memcpy_from(p_hwfn, p_ptt, &mstats,
-                       mstats_addr, mstats_len);
+       qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
 
-       p_stats->no_buff_discards +=
-               HILO_64_REGPAIR(mstats.no_buff_discard);
+       p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
        p_stats->packet_too_big_discard +=
                HILO_64_REGPAIR(mstats.packet_too_big_discard);
-       p_stats->ttl0_discard +=
-               HILO_64_REGPAIR(mstats.ttl0_discard);
+       p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
        p_stats->tpa_coalesced_pkts +=
                HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
        p_stats->tpa_coalesced_events +=
                HILO_64_REGPAIR(mstats.tpa_coalesced_events);
-       p_stats->tpa_aborts_num +=
-               HILO_64_REGPAIR(mstats.tpa_aborts_num);
+       p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
        p_stats->tpa_coalesced_bytes +=
                HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
 }
@@ -1428,16 +1469,16 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
                        sizeof(port_stats));
 
        p_stats->rx_64_byte_packets             += port_stats.pmm.r64;
-       p_stats->rx_127_byte_packets            += port_stats.pmm.r127;
-       p_stats->rx_255_byte_packets            += port_stats.pmm.r255;
-       p_stats->rx_511_byte_packets            += port_stats.pmm.r511;
-       p_stats->rx_1023_byte_packets           += port_stats.pmm.r1023;
-       p_stats->rx_1518_byte_packets           += port_stats.pmm.r1518;
-       p_stats->rx_1522_byte_packets           += port_stats.pmm.r1522;
-       p_stats->rx_2047_byte_packets           += port_stats.pmm.r2047;
-       p_stats->rx_4095_byte_packets           += port_stats.pmm.r4095;
-       p_stats->rx_9216_byte_packets           += port_stats.pmm.r9216;
-       p_stats->rx_16383_byte_packets          += port_stats.pmm.r16383;
+       p_stats->rx_65_to_127_byte_packets      += port_stats.pmm.r127;
+       p_stats->rx_128_to_255_byte_packets     += port_stats.pmm.r255;
+       p_stats->rx_256_to_511_byte_packets     += port_stats.pmm.r511;
+       p_stats->rx_512_to_1023_byte_packets    += port_stats.pmm.r1023;
+       p_stats->rx_1024_to_1518_byte_packets   += port_stats.pmm.r1518;
+       p_stats->rx_1519_to_1522_byte_packets   += port_stats.pmm.r1522;
+       p_stats->rx_1519_to_2047_byte_packets   += port_stats.pmm.r2047;
+       p_stats->rx_2048_to_4095_byte_packets   += port_stats.pmm.r4095;
+       p_stats->rx_4096_to_9216_byte_packets   += port_stats.pmm.r9216;
+       p_stats->rx_9217_to_16383_byte_packets  += port_stats.pmm.r16383;
        p_stats->rx_crc_errors                  += port_stats.pmm.rfcs;
        p_stats->rx_mac_crtl_frames             += port_stats.pmm.rxcf;
        p_stats->rx_pause_frames                += port_stats.pmm.rxpf;
@@ -1481,44 +1522,49 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_eth_stats *stats,
-                                 u16 statistics_bin)
+                                 u16 statistics_bin, bool b_get_port_stats)
 {
        __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
        __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
        __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
        __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
 
-       if (p_hwfn->mcp_info)
+       if (b_get_port_stats && p_hwfn->mcp_info)
                __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
 }
 
 static void _qed_get_vport_stats(struct qed_dev *cdev,
                                 struct qed_eth_stats *stats)
 {
-       u8      fw_vport = 0;
-       int     i;
+       u8 fw_vport = 0;
+       int i;
 
        memset(stats, 0, sizeof(*stats));
 
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-               struct qed_ptt *p_ptt;
-
-               /* The main vport index is relative first */
-               if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
-                       DP_ERR(p_hwfn, "No vport available!\n");
-                       continue;
+               struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+                                                   :  NULL;
+
+               if (IS_PF(cdev)) {
+                       /* The main vport index is relative first */
+                       if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+                               DP_ERR(p_hwfn, "No vport available!\n");
+                               goto out;
+                       }
                }
 
-               p_ptt = qed_ptt_acquire(p_hwfn);
-               if (!p_ptt) {
+               if (IS_PF(cdev) && !p_ptt) {
                        DP_ERR(p_hwfn, "Failed to acquire ptt\n");
                        continue;
                }
 
-               __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
+               __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+                                     IS_PF(cdev) ? true : false);
 
-               qed_ptt_release(p_hwfn, p_ptt);
+out:
+               if (IS_PF(cdev) && p_ptt)
+                       qed_ptt_release(p_hwfn, p_ptt);
        }
 }
 
@@ -1552,10 +1598,11 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
                struct eth_mstorm_per_queue_stat mstats;
                struct eth_ustorm_per_queue_stat ustats;
                struct eth_pstorm_per_queue_stat pstats;
-               struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+               struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+                                                   : NULL;
                u32 addr = 0, len = 0;
 
-               if (!p_ptt) {
+               if (IS_PF(cdev) && !p_ptt) {
                        DP_ERR(p_hwfn, "Failed to acquire ptt\n");
                        continue;
                }
@@ -1572,7 +1619,8 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
                __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
                qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
 
-               qed_ptt_release(p_hwfn, p_ptt);
+               if (IS_PF(cdev))
+                       qed_ptt_release(p_hwfn, p_ptt);
        }
 
        /* PORT statistics are not necessarily reset, so we need to
@@ -1593,32 +1641,53 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
 
        info->num_tc = 1;
 
-       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
-               for_each_hwfn(cdev, i)
-                       info->num_queues += FEAT_NUM(&cdev->hwfns[i],
-                                                    QED_PF_L2_QUE);
-               if (cdev->int_params.fp_msix_cnt)
-                       info->num_queues = min_t(u8, info->num_queues,
-                                                cdev->int_params.fp_msix_cnt);
+       if (IS_PF(cdev)) {
+               if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+                       for_each_hwfn(cdev, i)
+                           info->num_queues +=
+                           FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+                       if (cdev->int_params.fp_msix_cnt)
+                               info->num_queues =
+                                   min_t(u8, info->num_queues,
+                                         cdev->int_params.fp_msix_cnt);
+               } else {
+                       info->num_queues = cdev->num_hwfns;
+               }
+
+               info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+               ether_addr_copy(info->port_mac,
+                               cdev->hwfns[0].hw_info.hw_mac_addr);
        } else {
-               info->num_queues = cdev->num_hwfns;
-       }
+               qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
+               if (cdev->num_hwfns > 1) {
+                       u8 queues = 0;
 
-       info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
-       ether_addr_copy(info->port_mac,
-                       cdev->hwfns[0].hw_info.hw_mac_addr);
+                       qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
+                       info->num_queues += queues;
+               }
+
+               qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
+                                           &info->num_vlan_filters);
+               qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+       }
 
        qed_fill_dev_info(cdev, &info->common);
 
+       if (IS_VF(cdev))
+               memset(info->common.hw_mac, 0, ETH_ALEN);
+
        return 0;
 }
 
 static void qed_register_eth_ops(struct qed_dev *cdev,
-                                struct qed_eth_cb_ops *ops,
-                                void *cookie)
+                                struct qed_eth_cb_ops *ops, void *cookie)
 {
-       cdev->protocol_ops.eth  = ops;
-       cdev->ops_cookie        = cookie;
+       cdev->protocol_ops.eth = ops;
+       cdev->ops_cookie = cookie;
+
+       /* For VF, we start bulletin reading */
+       if (IS_VF(cdev))
+               qed_vf_start_iov_wq(cdev);
 }
 
 static int qed_start_vport(struct qed_dev *cdev,
@@ -1744,9 +1813,7 @@ static int qed_update_vport(struct qed_dev *cdev,
                sp_rss_params.update_rss_capabilities = 1;
                sp_rss_params.update_rss_ind_table = 1;
                sp_rss_params.update_rss_key = 1;
-               sp_rss_params.rss_caps = QED_RSS_IPV4 |
-                                        QED_RSS_IPV6 |
-                                        QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+               sp_rss_params.rss_caps = params->rss_params.rss_caps;
                sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
                memcpy(sp_rss_params.rss_ind_table,
                       params->rss_params.rss_ind_table,
@@ -1899,6 +1966,39 @@ static int qed_stop_txq(struct qed_dev *cdev,
        return 0;
 }
 
+static int qed_tunn_configure(struct qed_dev *cdev,
+                             struct qed_tunn_params *tunn_params)
+{
+       struct qed_tunn_update_params tunn_info;
+       int i, rc;
+
+       if (IS_VF(cdev))
+               return 0;
+
+       memset(&tunn_info, 0, sizeof(tunn_info));
+       if (tunn_params->update_vxlan_port == 1) {
+               tunn_info.update_vxlan_udp_port = 1;
+               tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+       }
+
+       if (tunn_params->update_geneve_port == 1) {
+               tunn_info.update_geneve_udp_port = 1;
+               tunn_info.geneve_udp_port = tunn_params->geneve_port;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *hwfn = &cdev->hwfns[i];
+
+               rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+                                              QED_SPQ_MODE_EBLOCK, NULL);
+
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
                                        enum qed_filter_rx_mode_type type)
 {
@@ -2026,8 +2126,15 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
                                      cqe);
 }
 
+#ifdef CONFIG_QED_SRIOV
+extern const struct qed_iov_hv_ops qed_iov_ops_pass;
+#endif
+
 static const struct qed_eth_ops qed_eth_ops_pass = {
        .common = &qed_common_ops_pass,
+#ifdef CONFIG_QED_SRIOV
+       .iov = &qed_iov_ops_pass,
+#endif
        .fill_dev_info = &qed_fill_eth_dev_info,
        .register_ops = &qed_register_eth_ops,
        .vport_start = &qed_start_vport,
@@ -2041,16 +2148,11 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
        .fastpath_stop = &qed_fastpath_stop,
        .eth_cqe_completion = &qed_fp_cqe_completion,
        .get_vport_stats = &qed_get_vport_stats,
+       .tunn_config = &qed_tunn_configure,
 };
 
-const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+const struct qed_eth_ops *qed_get_eth_ops(void)
 {
-       if (version != QED_ETH_INTERFACE_VERSION) {
-               pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
-                         version, QED_ETH_INTERFACE_VERSION);
-               return NULL;
-       }
-
        return &qed_eth_ops_pass;
 }
 EXPORT_SYMBOL(qed_get_eth_ops);
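
With the interface-version handshake removed, qed_get_eth_ops() takes no argument and a consumer simply uses the returned ops pointer. A minimal caller-side sketch follows, assuming a qede-style module init; the qede_init/qed_ops names and the log message are illustrative and not part of this diff:

	#include <linux/module.h>
	#include <linux/qed/qed_eth_if.h>

	/* Sketch only: bind to the qed L2 ops at module init, the way a
	 * protocol driver such as qede is expected to consume the API now
	 * that qed_get_eth_ops() no longer takes a version argument.
	 */
	static const struct qed_eth_ops *qed_ops;

	static int __init qede_init(void)
	{
		qed_ops = qed_get_eth_ops();
		if (!qed_ops) {
			pr_notice("Failed to get qed ethtool operations\n");
			return -EINVAL;
		}

		return 0;
	}
	module_init(qede_init);

	MODULE_LICENSE("GPL");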