max_combined = 1;
} else {
/* support up to max allowed queues with RSS */
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
max_combined = 63;
- break;
- default:
+ else
max_combined = 8;
- break;
- }
}
return max_combined;
if (mask)
wr32(wx, WX_PX_IMS(0), mask);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMS(1), mask);
- break;
- default:
- break;
}
}
if (mask)
wr32(wx, WX_PX_IMC(0), mask);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMC(1), mask);
- break;
- default:
- break;
}
}
EXPORT_SYMBOL(wx_intr_enable);
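
The two interrupt hunks above share one pattern: a 64-bit queue mask is written as two 32-bit halves, and the high half is only touched when the part advertises the 64-function capability. A minimal userspace sketch of that pattern follows; wr32(), PX_IMC0/PX_IMC1 and intr_enable() here are stand-ins of my own, not the driver's API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two 32-bit mask registers. */
#define PX_IMC0 0x0120
#define PX_IMC1 0x0124

/* Fake register write so the sketch runs in userspace. */
static void wr32(unsigned int reg, unsigned int val)
{
	printf("wr32(0x%04x, 0x%08x)\n", reg, val);
}

/* Write the low 32 bits of the queue mask unconditionally; write the
 * high 32 bits only on parts that expose more than 32 queues/functions.
 */
static void intr_enable(uint64_t qmask, bool multi_64_func)
{
	uint32_t mask = (uint32_t)qmask;

	if (mask)
		wr32(PX_IMC0, mask);

	if (multi_64_func) {
		mask = (uint32_t)(qmask >> 32);
		if (mask)
			wr32(PX_IMC1, mask);
	}
}

int main(void)
{
	intr_enable(0x0000000300000001ULL, true);  /* queues 0, 32, 33 */
	intr_enable(0x0000000300000001ULL, false); /* high half skipped */
	return 0;
}

The same low/high split shows up again below for the VMDq pool bitmap (WX_PSR_MAC_SWC_VM_L/_H) and the VFRE/VFTE clear registers.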
/* setup VMDq pool mapping */
wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
- break;
- default:
- break;
- }
/* HW expects these in little endian so we reverse the byte
* order from network order (big endian) to little endian
wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
/* clear VMDq pool/queue selection for RAR 0 */
wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
- break;
- default:
- break;
}
}
wr32m(wx, WX_PSR_VM_L2CTL(pool),
WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE);
- if (wx->mac.type == wx_mac_em) {
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
vf_shift = BIT(VMDQ_P(0));
/* Enable only the PF pools for Tx/Rx */
wr32(wx, WX_RDM_VF_RE(0), vf_shift);
{
u32 value, i;
- if (wx->mac.type == wx_mac_em) {
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
value = (wx->num_vfs == 0) ?
WX_CFG_PORT_CTL_NUM_VT_NONE :
WX_CFG_PORT_CTL_NUM_VT_8;
WX_RDB_PL_CFG_TUN_OUTL2HDR |
WX_RDB_PL_CFG_TUN_TUNHDR;
- if (wx->mac.type == wx_mac_em) {
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
for_each_set_bit(pool, &wx->fwd_bitmask, 8)
wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
} else {
/* Add starting offset to total pool count */
vmdq_i += wx->ring_feature[RING_F_VMDQ].offset;
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
/* double check we are limited to maximum pools */
vmdq_i = min_t(u16, 64, vmdq_i);
/* set mask for 16 queue limit of RSS */
f = &wx->ring_feature[RING_F_RSS];
- if (wx->mac.type == wx_mac_sp)
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
f->mask = WX_RSS_64Q_MASK;
else
f->mask = WX_RSS_8Q_MASK;
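
The RSS mask picked here bounds how many queues the indirection can address. Assuming WX_RSS_64Q_MASK is 0x3F and WX_RSS_8Q_MASK is 0x7 (values not shown by this hunk), a quick sketch of how the mask caps the queue index:

#include <stdio.h>

/* Assumed values; the hunk only shows the macro names. */
#define RSS_64Q_MASK 0x3F /* queues 0..63 */
#define RSS_8Q_MASK  0x7  /* queues 0..7  */

/* Map an RSS bucket onto a queue index bounded by the mask. */
static unsigned int rss_queue(unsigned int bucket, unsigned int mask)
{
	return bucket & mask;
}

int main(void)
{
	printf("bucket 100 -> queue %u (64Q parts)\n", rss_queue(100, RSS_64Q_MASK));
	printf("bucket 100 -> queue %u (8Q parts)\n", rss_queue(100, RSS_8Q_MASK));
	return 0;
}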
if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
return false;
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
/* start at VMDq register offset for SR-IOV enabled setups */
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) {
if (pdev->msix_enabled) {
/* Populate MSIX to EITR Select */
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
if (wx->num_vfs >= 32)
eitrsel = BIT(wx->num_vfs % 32) - 1;
- } else if (wx->mac.type == wx_mac_em) {
+ } else {
for (i = 0; i < wx->num_vfs; i++)
eitrsel |= BIT(i);
}
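
The two branches above build the EITR select mask differently: the 64-function path uses BIT(n) - 1 as a shortcut for "all bits below n", while the other path ORs the bits one by one. The snippet below only demonstrates that the two forms agree for a contiguous range, using a made-up count of 40 VFs (so 40 % 32 = 8 bits land in the upper select register).

#include <assert.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	/* BIT(n) - 1 sets every bit below n, which is exactly what the
	 * explicit OR loop builds.
	 */
	unsigned int n = 40 % 32;
	unsigned int shortcut = BIT(n) - 1;
	unsigned int loop = 0;

	for (unsigned int i = 0; i < n; i++)
		loop |= BIT(i);

	assert(shortcut == loop);
	printf("mask for %u VFs in the upper register: 0x%08x\n", n, shortcut);
	return 0;
}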
wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
}
- if (wx->mac.type == wx_mac_em) {
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
value = WX_CFG_PORT_CTL_NUM_VT_8;
} else {
if (num_vfs < 32)
if (VMDQ_P(0) < 32) {
bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
bits &= ~BIT(VMDQ_P(0));
- if (wx->mac.type != wx_mac_em)
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
} else {
- if (wx->mac.type != wx_mac_em)
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
bits &= ~BIT(VMDQ_P(0) % 32);
bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
{
wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX);
wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX);
- if (wx->mac.type != wx_mac_em) {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX);
wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX);
}
};
enum wx_pf_flags {
+ WX_FLAG_MULTI_64_FUNC,
WX_FLAG_SWFW_RING,
WX_FLAG_VMDQ_ENABLED,
WX_FLAG_VLAN_PROMISC,
wx->configure_fdir = txgbe_configure_fdir;
set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
+ set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags);
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
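
Taken together, the patch trades per-mac-type switches for a capability bit: the init path sets it once (as in the txgbe hunk above) and every shared helper only tests it. Below is a minimal userspace model of that pattern with hand-rolled set/test helpers standing in for the kernel's bitmap API; struct wx_like, set_flag(), test_flag() and max_combined_queues() are illustrative names, not driver code.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Capability flags, mirroring the enum extended above. */
enum pf_flags {
	FLAG_MULTI_64_FUNC,	/* part exposes up to 64 functions/queues */
	FLAG_VMDQ_ENABLED,
	PF_FLAGS_NBITS,		/* must be last */
};

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct wx_like {
	unsigned long flags[BITS_TO_LONGS(PF_FLAGS_NBITS)];
};

/* Userspace stand-ins for the kernel's set_bit()/test_bit(). */
static void set_flag(struct wx_like *wx, int nr)
{
	wx->flags[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static bool test_flag(const struct wx_like *wx, int nr)
{
	return wx->flags[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

/* Shared helper: the capability, not the mac type, decides the count. */
static int max_combined_queues(const struct wx_like *wx)
{
	return test_flag(wx, FLAG_MULTI_64_FUNC) ? 63 : 8;
}

int main(void)
{
	struct wx_like sp_like = { 0 }, em_like = { 0 };

	set_flag(&sp_like, FLAG_MULTI_64_FUNC); /* init path of the larger parts */

	printf("sp-like: %d combined queues\n", max_combined_queues(&sp_like));
	printf("em-like: %d combined queues\n", max_combined_queues(&em_like));
	return 0;
}

Adding a new mac type with 64-function support then only needs one set_bit() in its init path instead of another case label in every switch.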