Two cases of overlapping changes, nothing fancy.
Signed-off-by: David S. Miller <davem@davemloft.net>
0 - disabled / RFC 3443 [Short] Pipe Model
1 - enabled / RFC 3443 Uniform Model (default)
-default_ttl - BOOL
+default_ttl - INTEGER
Default TTL value to use for MPLS packets where it cannot be
propagated from an IP header, either because one isn't present
or ip_ttl_propagate has been disabled.
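For reference, a minimal userspace sketch (illustrative, not part of the patch) that sets the tunable through its procfs path, following the sysctl naming above:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/mpls/default_ttl", "w");

		if (!f)
			return 1;
		fprintf(f, "%d\n", 255);	/* any valid TTL; the kernel bounds-checks the write */
		return fclose(f) ? 1 : 0;
	}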
XDP SOCKETS (AF_XDP)
M: Björn Töpel <bjorn.topel@intel.com>
M: Magnus Karlsson <magnus.karlsson@intel.com>
+R: Jonathan Lemon <jonathan.lemon@gmail.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
{
u8 *prog = *pprog;
int cnt = 0;
- static int jmp_label1 = -1;
- static int jmp_label2 = -1;
- static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
- /* cmp ecx,32 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
- /* Jumps when >= 32 */
- if (is_imm8(jmp_label(jmp_label1, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
-
- /* < 32 */
- /* shl dreg_hi,cl */
- EMIT2(0xD3, add_1reg(0xE0, dreg_hi));
- /* mov ebx,dreg_lo */
- EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
+ /* shld dreg_hi,dreg_lo,cl */
+ EMIT3(0x0F, 0xA5, add_2reg(0xC0, dreg_hi, dreg_lo));
/* shl dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
- /* IA32_ECX = -IA32_ECX + 32 */
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
+ /* if ecx >= 32, mov dreg_lo into dreg_hi and clear dreg_lo */
- /* shr ebx,cl */
- EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
- /* or dreg_hi,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
-
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
- /* >= 32 */
- if (jmp_label1 == -1)
- jmp_label1 = cnt;
-
- /* cmp ecx,64 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
- /* Jumps when >= 64 */
- if (is_imm8(jmp_label(jmp_label2, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
+ /* cmp ecx,32 */
+ EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+ /* skip the next two instructions (4 bytes) when < 32 */
+ EMIT2(IA32_JB, 4);
- /* >= 32 && < 64 */
- /* sub ecx,32 */
- EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
- /* shl dreg_lo,cl */
- EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
/* mov dreg_hi,dreg_lo */
EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
-
/* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
- /* >= 64 */
- if (jmp_label2 == -1)
- jmp_label2 = cnt;
- /* xor dreg_lo,dreg_lo */
- EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
- /* xor dreg_hi,dreg_hi */
- EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
-
- if (jmp_label3 == -1)
- jmp_label3 = cnt;
-
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
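For clarity, a C model of the new lsh64 sequence (a sketch, not part of the patch): x86 masks a variable shift count in cl to 5 bits, so shld/shl alone are correct for counts 0-31, and the cmp/jb fix-up handles counts 32-63.

	#include <stdint.h>

	/* lo/hi are the 32-bit halves of the 64-bit value; n is assumed < 64. */
	static uint64_t lsh64_model(uint32_t lo, uint32_t hi, uint32_t n)
	{
		uint32_t c = n & 31;	/* hardware masks cl to 5 bits */

		hi = (hi << c) | (c ? lo >> (32 - c) : 0);	/* shld dreg_hi,dreg_lo,cl */
		lo <<= c;					/* shl dreg_lo,cl */
		if (n >= 32) {		/* cmp ecx,32; jb skips the fix-up */
			hi = lo;	/* mov dreg_hi,dreg_lo */
			lo = 0;		/* xor dreg_lo,dreg_lo */
		}
		return ((uint64_t)hi << 32) | lo;
	}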
{
u8 *prog = *pprog;
int cnt = 0;
- static int jmp_label1 = -1;
- static int jmp_label2 = -1;
- static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
- /* cmp ecx,32 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
- /* Jumps when >= 32 */
- if (is_imm8(jmp_label(jmp_label1, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
-
- /* < 32 */
- /* lshr dreg_lo,cl */
- EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
- /* mov ebx,dreg_hi */
- EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
- /* ashr dreg_hi,cl */
+ /* shrd dreg_lo,dreg_hi,cl */
+ EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
+ /* sar dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
- /* IA32_ECX = -IA32_ECX + 32 */
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
- /* shl ebx,cl */
- EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
- /* or dreg_lo,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
+ /* if ecx >= 32, mov dreg_hi to dreg_lo and set/clear dreg_hi depending on sign */
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
- /* >= 32 */
- if (jmp_label1 == -1)
- jmp_label1 = cnt;
-
- /* cmp ecx,64 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
- /* Jumps when >= 64 */
- if (is_imm8(jmp_label(jmp_label2, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
+ /* cmp ecx,32 */
+ EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+ /* skip the next two instructions (5 bytes) when < 32 */
+ EMIT2(IA32_JB, 5);
- /* >= 32 && < 64 */
- /* sub ecx,32 */
- EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
- /* ashr dreg_hi,cl */
- EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
-
- /* ashr dreg_hi,imm8 */
+ /* sar dreg_hi,31 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
- /* >= 64 */
- if (jmp_label2 == -1)
- jmp_label2 = cnt;
- /* ashr dreg_hi,imm8 */
- EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
- /* mov dreg_lo,dreg_hi */
- EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
-
- if (jmp_label3 == -1)
- jmp_label3 = cnt;
-
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
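Likewise, a C model of the new arsh64 sequence (sketch, not part of the patch; assumes >> on signed values is arithmetic, as gcc and clang implement it):

	#include <stdint.h>

	static int64_t arsh64_model(uint32_t lo, uint32_t hi, uint32_t n)
	{
		uint32_t c = n & 31;	/* hardware masks cl to 5 bits */

		lo = (lo >> c) | (c ? hi << (32 - c) : 0);	/* shrd dreg_lo,dreg_hi,cl */
		hi = (uint32_t)((int32_t)hi >> c);		/* sar dreg_hi,cl */
		if (n >= 32) {		/* cmp ecx,32; jb skips the fix-up */
			lo = hi;				/* mov dreg_lo,dreg_hi */
			hi = (uint32_t)((int32_t)hi >> 31);	/* sar dreg_hi,31 */
		}
		return (int64_t)(((uint64_t)hi << 32) | lo);
	}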
{
u8 *prog = *pprog;
int cnt = 0;
- static int jmp_label1 = -1;
- static int jmp_label2 = -1;
- static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
- /* cmp ecx,32 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
- /* Jumps when >= 32 */
- if (is_imm8(jmp_label(jmp_label1, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
-
- /* < 32 */
- /* lshr dreg_lo,cl */
- EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
- /* mov ebx,dreg_hi */
- EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
+ /* shrd dreg_lo,dreg_hi,cl */
+ EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
/* shr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
- /* IA32_ECX = -IA32_ECX + 32 */
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
- /* shl ebx,cl */
- EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
- /* or dreg_lo,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
-
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
+ /* if ecx >= 32, mov dreg_hi to dreg_lo and clear dreg_hi */
- /* >= 32 */
- if (jmp_label1 == -1)
- jmp_label1 = cnt;
- /* cmp ecx,64 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
- /* Jumps when >= 64 */
- if (is_imm8(jmp_label(jmp_label2, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
+ /* cmp ecx,32 */
+ EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+ /* skip the next two instructions (4 bytes) when < 32 */
+ EMIT2(IA32_JB, 4);
- /* >= 32 && < 64 */
- /* sub ecx,32 */
- EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
- /* shr dreg_hi,cl */
- EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
- /* >= 64 */
- if (jmp_label2 == -1)
- jmp_label2 = cnt;
- /* xor dreg_lo,dreg_lo */
- EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
- /* xor dreg_hi,dreg_hi */
- EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
-
- if (jmp_label3 == -1)
- jmp_label3 = cnt;
-
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
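And the logical-right-shift variant, where the fix-up clears the high half instead of sign-filling it (sketch, not part of the patch):

	#include <stdint.h>

	static uint64_t rsh64_model(uint32_t lo, uint32_t hi, uint32_t n)
	{
		uint32_t c = n & 31;	/* hardware masks cl to 5 bits */

		lo = (lo >> c) | (c ? hi << (32 - c) : 0);	/* shrd dreg_lo,dreg_hi,cl */
		hi >>= c;					/* shr dreg_hi,cl */
		if (n >= 32) {		/* cmp ecx,32; jb skips the fix-up */
			lo = hi;	/* mov dreg_lo,dreg_hi */
			hi = 0;		/* xor dreg_hi,dreg_hi */
		}
		return ((uint64_t)hi << 32) | lo;
	}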
}
/* Do LSH operation */
if (val < 32) {
- /* shl dreg_hi,imm8 */
- EMIT3(0xC1, add_1reg(0xE0, dreg_hi), val);
- /* mov ebx,dreg_lo */
- EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
+ /* shld dreg_hi,dreg_lo,imm8 */
+ EMIT4(0x0F, 0xA4, add_2reg(0xC0, dreg_hi, dreg_lo), val);
/* shl dreg_lo,imm8 */
EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val);
-
- /* IA32_ECX = 32 - val */
- /* mov ecx,val */
- EMIT2(0xB1, val);
- /* movzx ecx,ecx */
- EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
- /* shr ebx,cl */
- EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
- /* or dreg_hi,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
/* Do RSH operation */
if (val < 32) {
- /* shr dreg_lo,imm8 */
- EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
- /* mov ebx,dreg_hi */
- EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
+ /* shrd dreg_lo,dreg_hi,imm8 */
+ EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
/* shr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val);
-
- /* IA32_ECX = 32 - val */
- /* mov ecx,val */
- EMIT2(0xB1, val);
- /* movzx ecx,ecx */
- EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
- /* shl ebx,cl */
- EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
- /* or dreg_lo,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
}
/* Do RSH operation */
if (val < 32) {
- /* shr dreg_lo,imm8 */
- EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
- /* mov ebx,dreg_hi */
- EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
+ /* shrd dreg_lo,dreg_hi,imm8 */
+ EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
/* ashr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val);
-
- /* IA32_ECX = 32 - val */
- /* mov ecx,val */
- EMIT2(0xB1, val);
- /* movzx ecx,ecx */
- EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
- /* shl ebx,cl */
- EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
- /* or dreg_lo,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct iphdr *iph = ip_hdr(skb);
struct slave *slave;
+ int slave_cnt;
u32 slave_id;
/* Start with the curr_active_slave that joined the bond as the
* default for sending IGMP traffic. For failover purposes one
* needs to maintain some consistency for the interface that will
* send the join/membership reports. The curr_active_slave found
* will send all of this type of traffic.
*/
- if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
- slave = rcu_dereference(bond->curr_active_slave);
- if (slave)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
- bond_xmit_slave_id(bond, skb, 0);
- } else {
- int slave_cnt = READ_ONCE(bond->slave_cnt);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ int noff = skb_network_offset(skb);
+ struct iphdr *iph;
- if (likely(slave_cnt)) {
- slave_id = bond_rr_gen_slave_id(bond);
- bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
- } else {
- bond_tx_drop(bond_dev, skb);
+ if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
+ goto non_igmp;
+
+ iph = ip_hdr(skb);
+ if (iph->protocol == IPPROTO_IGMP) {
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
+ bond_xmit_slave_id(bond, skb, 0);
+ return NETDEV_TX_OK;
}
}
+non_igmp:
+ slave_cnt = READ_ONCE(bond->slave_cnt);
+ if (likely(slave_cnt)) {
+ slave_id = bond_rr_gen_slave_id(bond);
+ bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+ } else {
+ bond_tx_drop(bond_dev, skb);
+ }
return NETDEV_TX_OK;
}
if (ret)
dev_err(ds->dev, "failed to apply configuration\n");
- /* Configure IMP/CPU port, disable unused ports. Enabled
+ /* Configure IMP/CPU port, disable all other ports. Enabled
* ports will be configured with .port_enable
*/
for (port = 0; port < dev->num_ports; port++) {
if (dsa_is_cpu_port(ds, port))
b53_enable_cpu_port(dev, port);
- else if (dsa_is_unused_port(ds, port))
+ else
b53_disable_port(ds, port);
}
err = PTR_ERR(chip->reset);
goto out;
}
+ if (chip->reset)
+ usleep_range(1000, 2000);
err = mv88e6xxx_detect(chip);
if (err)
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(DRV_NAME));
- strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+ bp->eth_stats.ptp_skip_tx_ts++;
BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
} else if (bp->ptp_tx_skb) {
- BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ bp->eth_stats.ptp_skip_tx_ts++;
+ netdev_err_once(bp->dev,
+ "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
} else {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
/* schedule check for Tx timestamp */
{ STATS_OFFSET32(driver_filtered_tx_pkt),
4, false, "driver_filtered_tx_pkt" },
{ STATS_OFFSET32(eee_tx_lpi),
- 4, true, "Tx LPI entry count"}
+ 4, true, "Tx LPI entry count"},
+ { STATS_OFFSET32(ptp_skip_tx_ts),
+ 4, false, "ptp_skipped_tx_tstamp" },
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
u32 val_seq;
u64 timestamp, ns;
struct skb_shared_hwtstamps shhwtstamps;
+ bool bail = true;
+ int i;
+
+ /* FW may take a while to complete timestamping; try a bit and if it's
+ * still not complete (the loop below waits up to ~1s in total), it may
+ * indicate an error state - bail out then.
+ */
+ for (i = 0; i < 10; i++) {
+ /* Read Tx timestamp registers */
+ val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+ if (val_seq & 0x10000) {
+ bail = false;
+ break;
+ }
+ msleep(1 << i);
+ }
- /* Read Tx timestamp registers */
- val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
- NIG_REG_P0_TLLH_PTP_BUF_SEQID);
- if (val_seq & 0x10000) {
+ if (!bail) {
/* There is a valid timestamp value */
timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
- dev_kfree_skb_any(bp->ptp_tx_skb);
- bp->ptp_tx_skb = NULL;
DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
timestamp, ns);
} else {
- DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
- /* Reschedule to keep checking for a valid timestamp value */
- schedule_work(&bp->ptp_task);
+ DP(BNX2X_MSG_PTP,
+ "Tx timestamp is not recorded (register read=%u)\n",
+ val_seq);
+ bp->eth_stats.ptp_skip_tx_ts++;
}
+
+ dev_kfree_skb_any(bp->ptp_tx_skb);
+ bp->ptp_tx_skb = NULL;
}
void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
u32 driver_filtered_tx_pkt;
/* src: Clear-on-Read register; Will not survive PMF Migration */
u32 eee_tx_lpi;
+
+ /* PTP */
+ u32 ptp_skip_tx_ts;
};
struct bnx2x_eth_q_stats {
static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
- return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
+ int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
+ int cp = bp->cp_nr_rings;
+
+ if (!ulp_stat)
+ return cp;
+
+ if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
+ return bnxt_get_ulp_msix_base(bp) + ulp_stat;
+
+ return cp + ulp_stat;
}
static bool bnxt_need_reserve_rings(struct bnxt *bp)
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
{
- unsigned int stat;
-
- stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
- stat -= bp->cp_nr_rings;
- return stat;
+ return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}
int bnxt_get_avail_msix(struct bnxt *bp, int num)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
+ bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
- bnxt_cleanup_pci(bp);
bnxt_free_port_stats(bp);
free_netdev(dev);
}
if (system_state == SYSTEM_POWER_OFF) {
bnxt_clear_int_mode(bp);
+ pci_disable_device(pdev);
pci_wake_from_d3(pdev, bp->wol);
pci_set_power_state(pdev, PCI_D3hot);
}
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
if (bp->max_dscp_value < 0x3f)
bool offline = false;
u8 test_results = 0;
u8 test_mask = 0;
- int rc, i;
+ int rc = 0, i;
if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
return;
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
- bnxt_open_nic(bp, false, true);
+ rc = bnxt_open_nic(bp, false, true);
}
- if (bnxt_test_irq(bp)) {
+ if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (BNXT_NEW_RM(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int resv_msix;
- avail_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
+ resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
+ avail_msix = min_t(int, resv_msix, avail_msix);
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
}
bnxt_fill_msix_vecs(bp, ent);
/* Bitfields in TISUBN */
#define GEM_SUBNSINCR_OFFSET 0
-#define GEM_SUBNSINCR_SIZE 16
+#define GEM_SUBNSINCRL_OFFSET 24
+#define GEM_SUBNSINCRL_SIZE 8
+#define GEM_SUBNSINCRH_OFFSET 0
+#define GEM_SUBNSINCRH_SIZE 16
+#define GEM_SUBNSINCR_SIZE 24
/* Bitfields in TI */
#define GEM_NSINCR_OFFSET 0
/* limit RX checksum offload to TCP and UDP packets */
#define GEM_RX_CSUM_CHECKED_MASK 2
+/* Scaled PPM fraction */
+#define PPM_FRACTION 16
+
/* struct macb_tx_skb - data about an skb which is being transmitted
* @skb: skb currently being transmitted, only set for the last buffer
* of the frame
* to take effect.
*/
spin_lock_irqsave(&bp->tsu_clk_lock, flags);
- gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, incr_spec->sub_ns));
+ /* RegBit[15:0] = Subns[23:8]; RegBit[31:24] = Subns[7:0] */
+ gem_writel(bp, TISUBN, GEM_BF(SUBNSINCRL, incr_spec->sub_ns) |
+ GEM_BF(SUBNSINCRH, (incr_spec->sub_ns >>
+ GEM_SUBNSINCRL_SIZE)));
gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
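A small helper sketch (illustrative, not from the patch) of the packing the comment above describes:

	#include <stdint.h>

	/* reg[15:0] = sub_ns[23:8], reg[31:24] = sub_ns[7:0] */
	static uint32_t gem_pack_tisubn(uint32_t sub_ns)
	{
		return ((sub_ns & 0xff) << 24) | ((sub_ns >> 8) & 0xffff);
	}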
* (temp / USEC_PER_SEC) + 0.5
*/
adj += (USEC_PER_SEC >> 1);
- adj >>= GEM_SUBNSINCR_SIZE; /* remove fractions */
+ adj >>= PPM_FRACTION; /* remove fractions */
adj = div_u64(adj, USEC_PER_SEC);
adj = neg_adj ? (word - adj) : (word + adj);
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
ndev->max_mtu = MAC_MAX_MTU_V2 -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
break;
}
/* Set value */
- pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
+ pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
+ shift & MVPP2_PRS_SRAM_SHIFT_MASK;
/* Reset and set operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
unsigned long next_id = (unsigned long)id + 1;
struct mlx5_fc *counter;
+ unsigned long tmp;
rcu_read_lock();
/* skip counters that are in idr, but not yet in counters list */
- while ((counter = idr_get_next_ul(&fc_stats->counters_idr,
- &next_id)) != NULL &&
- list_empty(&counter->list))
- next_id++;
+ idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
+ counter, tmp, next_id) {
+ if (!list_empty(&counter->list))
+ break;
+ }
rcu_read_unlock();
return counter ? &counter->list : &fc_stats->counters;
struct ethtool_drvinfo *ed)
{
strlcpy(ed->driver, "nixge", sizeof(ed->driver));
- strlcpy(ed->bus_info, "platform", sizeof(ed->driver));
+ strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}
static int nixge_ethtools_get_coalesce(struct net_device *ndev,
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- /*
- * There is no way to determine the number of TSO
- * capable Queues. Let's use always the Queue 0
- * because if TSO is supported then at least this
- * one will be capable.
- */
- skb_set_queue_mapping(skb, 0);
-
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
return stmmac_tso_xmit(skb, dev);
- }
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
}
}
+static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ /*
+ * There is no way to determine the number of TSO
+ * capable Queues. Let's use always the Queue 0
+ * because if TSO is supported then at least this
+ * one will be capable.
+ */
+ return 0;
+ }
+
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
+}
+
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
struct stmmac_priv *priv = netdev_priv(ndev);
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
.ndo_setup_tc = stmmac_setup_tc,
+ .ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
return ret;
}
+ slave_data->slave_node = slave_node;
slave_data->phy_node = of_parse_phandle(slave_node,
"phy-handle", 0);
parp = of_get_property(slave_node, "phy_id", &lenp);
/* register the network device */
SET_NETDEV_DEV(ndev, cpsw->dev);
+ ndev->dev.of_node = cpsw->slaves[1].data->slave_node;
ret = register_netdev(ndev);
if (ret)
dev_err(cpsw->dev, "cpsw: error registering net device\n");
/* register the network device */
SET_NETDEV_DEV(ndev, dev);
+ ndev->dev.of_node = cpsw->slaves[0].data->slave_node;
ret = register_netdev(ndev);
if (ret) {
dev_err(dev, "error registering net device\n");
};
struct cpsw_slave_data {
+ struct device_node *slave_node;
struct device_node *phy_node;
char phy_id[MII_BUS_ID_SIZE];
int phy_if;
return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
-static void gtp_encap_destroy(struct sock *sk)
+static void __gtp_encap_destroy(struct sock *sk)
{
struct gtp_dev *gtp;
- gtp = rcu_dereference_sk_user_data(sk);
+ lock_sock(sk);
+ gtp = sk->sk_user_data;
if (gtp) {
+ if (gtp->sk0 == sk)
+ gtp->sk0 = NULL;
+ else
+ gtp->sk1u = NULL;
udp_sk(sk)->encap_type = 0;
rcu_assign_sk_user_data(sk, NULL);
sock_put(sk);
}
+ release_sock(sk);
+}
+
+static void gtp_encap_destroy(struct sock *sk)
+{
+ rtnl_lock();
+ __gtp_encap_destroy(sk);
+ rtnl_unlock();
}
static void gtp_encap_disable_sock(struct sock *sk)
if (!sk)
return;
- gtp_encap_destroy(sk);
+ __gtp_encap_destroy(sk);
}
static void gtp_encap_disable(struct gtp_dev *gtp)
{
struct gtp_dev *gtp = netdev_priv(dev);
- gtp_encap_disable(gtp);
gtp_hashtable_free(gtp);
list_del_rcu(&gtp->list);
unregister_netdevice_queue(dev, head);
goto out_sock;
}
- if (rcu_dereference_sk_user_data(sock->sk)) {
+ lock_sock(sock->sk);
+ if (sock->sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
goto out_sock;
}
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
out_sock:
+ release_sock(sock->sk);
sockfd_put(sock);
return sk;
}
if (data[IFLA_GTP_ROLE]) {
role = nla_get_u32(data[IFLA_GTP_ROLE]);
- if (role > GTP_ROLE_SGSN)
+ if (role > GTP_ROLE_SGSN) {
+ if (sk0)
+ gtp_encap_disable_sock(sk0);
+ if (sk1u)
+ gtp_encap_disable_sock(sk1u);
return -EINVAL;
+ }
}
gtp->sk0 = sk0;
}
- pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
+ pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
if (pctx == NULL)
return -ENOMEM;
return -EINVAL;
}
+ rtnl_lock();
rcu_read_lock();
gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
out_unlock:
rcu_read_unlock();
+ rtnl_unlock();
return err;
}
static void __exit gtp_fini(void)
{
- unregister_pernet_subsys(&gtp_net_ops);
genl_unregister_family(&gtp_genl_family);
rtnl_link_unregister(&gtp_link_ops);
+ unregister_pernet_subsys(&gtp_net_ops);
pr_info("GTP module unloaded\n");
}
static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
+ skb->ip_summed = CHECKSUM_NONE;
memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
skb_pull(skb, hdr_len);
pskb_trim_unique(skb, skb->len - icv_len);
}
skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb) {
- *pskb = NULL;
+ *pskb = skb;
+ if (!skb)
return RX_HANDLER_CONSUMED;
- }
pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
if (!pulled_sci) {
static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret = 0;
- u8 buf[ETH_ALEN];
+ u8 buf[ETH_ALEN] = {0};
int i;
unsigned long gpio_bits = dev->driver_info->data;
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret, i;
- u8 buf[ETH_ALEN], chipcode = 0;
+ u8 buf[ETH_ALEN] = {0}, chipcode = 0;
u32 phyid;
struct asix_common_private *priv;
static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
- u8 buf[ETH_ALEN];
+ u8 buf[ETH_ALEN] = {0};
usbnet_get_endpoints(dev,intf);
#define NETNEXT_VERSION "09"
/* Information for net */
-#define NET_VERSION "9"
+#define NET_VERSION "10"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define PAL_BDC_CR 0xd1a0
#define PLA_TEREDO_TIMER 0xd2cc
#define PLA_REALWOW_TIMER 0xd2e8
+#define PLA_SUSPEND_FLAG 0xd38a
+#define PLA_INDICATE_FALG 0xd38c
+#define PLA_EXTRA_STATUS 0xd398
#define PLA_EFUSE_DATA 0xdd00
#define PLA_EFUSE_CMD 0xdd02
#define PLA_LEDSEL 0xdd90
/* PLA_BOOT_CTRL */
#define AUTOLOAD_DONE 0x0002
+/* PLA_SUSPEND_FLAG */
+#define LINK_CHG_EVENT BIT(0)
+
+/* PLA_INDICATE_FALG */
+#define UPCOMING_RUNTIME_D3 BIT(0)
+
+/* PLA_EXTRA_STATUS */
+#define LINK_CHANGE_FLAG BIT(8)
+
/* USB_USB2PHY */
#define USB2PHY_SUSPEND 0x0001
#define USB2PHY_L1 0x0002
return ret;
}
+static void rtl_set_unplug(struct r8152 *tp)
+{
+ if (tp->udev->state == USB_STATE_NOTATTACHED) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+ smp_mb__after_atomic();
+ }
+}
+
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
void *data, u16 type)
{
}
if (ret == -ENODEV)
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
return ret;
}
error1:
if (ret == -ENODEV)
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
return ret;
}
napi_schedule(&tp->napi);
return;
case -ESHUTDOWN:
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
netif_device_detach(tp->netdev);
return;
case -ENOENT:
resubmit:
res = usb_submit_urb(urb, GFP_ATOMIC);
if (res == -ENODEV) {
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
netif_device_detach(tp->netdev);
} else if (res) {
netif_err(tp, intr, tp->netdev,
struct net_device *netdev = tp->netdev;
if (res == -ENODEV) {
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
netif_device_detach(netdev);
} else {
struct net_device_stats *stats = &netdev->stats;
ret = usb_submit_urb(agg->urb, mem_flags);
if (ret == -ENODEV) {
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
netif_device_detach(tp->netdev);
} else if (ret) {
struct urb *urb = agg->urb;
return 0;
}
+static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp)
+{
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN,
+ OWN_UPDATE | OWN_CLEAR);
+}
+
static int rtl_enable(struct r8152 *tp)
{
u32 ocp_data;
ocp_data |= CR_RE | CR_TE;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
+ switch (tp->version) {
+ case RTL_VER_08:
+ case RTL_VER_09:
+ r8153b_rx_agg_chg_indicate(tp);
+ break;
+ default:
+ break;
+ }
+
rxdy_gated_en(tp, false);
return 0;
return rtl_enable(tp);
}
-static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp)
-{
- ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN,
- OWN_UPDATE | OWN_CLEAR);
-}
-
static void r8153_set_rx_early_timeout(struct r8152 *tp)
{
u32 ocp_data = tp->coalesce / 8;
128 / 8);
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR,
ocp_data);
- r8153b_rx_agg_chg_indicate(tp);
break;
default:
case RTL_VER_09:
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE,
ocp_data / 8);
- r8153b_rx_agg_chg_indicate(tp);
break;
default:
WARN_ON_ONCE(1);
ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
}
-static void r8153b_queue_wake(struct r8152 *tp, bool enable)
+static void r8153_queue_wake(struct r8152 *tp, bool enable)
{
u32 ocp_data;
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38a);
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG);
if (enable)
- ocp_data |= BIT(0);
+ ocp_data |= UPCOMING_RUNTIME_D3;
else
- ocp_data &= ~BIT(0);
- ocp_write_byte(tp, MCU_TYPE_PLA, 0xd38a, ocp_data);
+ ocp_data &= ~UPCOMING_RUNTIME_D3;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG);
+ ocp_data &= ~LINK_CHG_EVENT;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG, ocp_data);
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38c);
- ocp_data &= ~BIT(0);
- ocp_write_byte(tp, MCU_TYPE_PLA, 0xd38c, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ ocp_data &= ~LINK_CHANGE_FLAG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
}
static bool rtl_can_wakeup(struct r8152 *tp)
static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
{
if (enable) {
- r8153b_queue_wake(tp, true);
+ r8153_queue_wake(tp, true);
r8153b_u1u2en(tp, false);
r8153_u2p3en(tp, false);
rtl_runtime_suspend_enable(tp, true);
r8153b_ups_en(tp, true);
} else {
r8153b_ups_en(tp, false);
- r8153b_queue_wake(tp, false);
+ r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
r8153_u2p3en(tp, true);
r8153b_u1u2en(tp, true);
r8153b_power_cut_en(tp, false);
r8153b_ups_en(tp, false);
- r8153b_queue_wake(tp, false);
+ r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
r8153b_u1u2en(tp, true);
usb_enable_lpm(tp->udev);
if (tp->coalesce != coalesce->rx_coalesce_usecs) {
tp->coalesce = coalesce->rx_coalesce_usecs;
- if (netif_running(tp->netdev) && netif_carrier_ok(netdev))
- r8153_set_rx_early_timeout(tp);
+ if (netif_running(netdev) && netif_carrier_ok(netdev)) {
+ netif_stop_queue(netdev);
+ napi_disable(&tp->napi);
+ tp->rtl_ops.disable(tp);
+ tp->rtl_ops.enable(tp);
+ rtl_start_rx(tp);
+ clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ _rtl8152_set_rx_mode(netdev);
+ napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ }
}
mutex_unlock(&tp->control);
usb_set_intfdata(intf, NULL);
if (tp) {
- struct usb_device *udev = tp->udev;
-
- if (udev->state == USB_STATE_NOTATTACHED)
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
netif_napi_del(&tp->napi);
unregister_netdev(tp->netdev);
return f;
}
+static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
+ __be32 src_vni, struct vxlan_fdb *f)
+{
+ ++vxlan->addrcnt;
+ hlist_add_head_rcu(&f->hlist,
+ vxlan_fdb_head(vxlan, mac, src_vni));
+}
+
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __be16 port, __be32 src_vni,
return rc;
}
- ++vxlan->addrcnt;
- hlist_add_head_rcu(&f->hlist,
- vxlan_fdb_head(vxlan, mac, src_vni));
-
*fdb = f;
return 0;
}
-static void vxlan_fdb_free(struct rcu_head *head)
+static void __vxlan_fdb_free(struct vxlan_fdb *f)
{
- struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
struct vxlan_rdst *rd, *nd;
list_for_each_entry_safe(rd, nd, &f->remotes, list) {
kfree(f);
}
+static void vxlan_fdb_free(struct rcu_head *head)
+{
+ struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
+
+ __vxlan_fdb_free(f);
+}
+
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
bool do_notify, bool swdev_notify)
{
if (rc < 0)
return rc;
+ vxlan_fdb_insert(vxlan, mac, src_vni, f);
rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
swdev_notify, extack);
if (rc)
if (err)
goto errout;
- /* notify default fdb entry */
if (f) {
+ vxlan_fdb_insert(vxlan, all_zeros_mac,
+ vxlan->default_dst.remote_vni, f);
+
+ /* notify default fdb entry */
err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
RTM_NEWNEIGH, true, extack);
- if (err)
- goto errout;
+ if (err) {
+ vxlan_fdb_destroy(vxlan, f, false, false);
+ goto unregister;
+ }
}
list_add(&vxlan->next, &vn->vxlan_list);
* destroy the entry by hand here.
*/
if (f)
- vxlan_fdb_destroy(vxlan, f, false, false);
+ __vxlan_fdb_free(f);
+unregister:
if (unregister)
unregister_netdevice(dev);
return err;
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config ATH_COMMON
tristate
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_ATH5K) += ath5k/
obj-$(CONFIG_ATH9K_HW) += ath9k/
obj-$(CONFIG_CARL9170) += carl9170/
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config AR5523
tristate "Atheros AR5523 wireless driver support"
depends on MAC80211 && USB
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_AR5523) := ar5523.o
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config ATH10K
tristate "Atheros 802.11ac wireless cards support"
depends on MAC80211 && HAS_DMA
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config ATH5K
tristate "Atheros 5xxx wireless cards support"
depends on (PCI || ATH25) && MAC80211
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
ath5k-y += caps.o
ath5k-y += initvals.o
ath5k-y += eeprom.o
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config ATH6KL
tristate "Atheros mobile chipsets support"
depends on CFG80211
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: ISC */
#if !defined(_ATH6KL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#include <net/cfg80211.h>
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config ATH9K_HW
tristate
config ATH9K_COMMON
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
ath9k-y += beacon.o \
gpio.o \
init.o \
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config WCN36XX
tristate "Qualcomm Atheros WCN3660/3680 support"
depends on MAC80211 && HAS_DMA
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_WCN36XX) := wcn36xx.o
wcn36xx-y += main.o \
dxe.o \
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config WIL6210
tristate "Wilocity 60g WiFi card wil6210 support"
select WANT_DEV_COREDUMP
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_WIL6210) += wil6210.o
wil6210-y := main.o
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
+#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \
+ IWL_QUZ_A_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
+ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
+ .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
.name = "Intel(R) Wi-Fi 6 AX101",
.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
+ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
const struct iwl_cfg iwl_ax200_cfg_cc = {
.name = "Intel(R) Wi-Fi 6 AX200 160MHz",
.fw_name_pre = IWL_CC_A_FW_PRE,
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9461",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9462",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
+ .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
+ .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
};
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
+ .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
};
-const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0 = {
+const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
.name = "Intel(R) Wi-Fi 7 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
};
-const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
+const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
+ .name = "Intel(R) Wi-Fi 7 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
IWL_DEVICE_AX210,
};
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9560_2ac_160_cfg;
+extern const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
+extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
+extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
+extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
-extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0;
+extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
-extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0;
+extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105100)
#define CSR_HW_RF_ID_TYPE_HR (0x0010A000)
+#define CSR_HW_RF_ID_TYPE_HR1 (0x0010c100)
#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00)
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_cfg_qnj_jf_b0)},
/* 22000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x06F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0000, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x2074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x4244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax210_2ax_cfg_so_hr_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
#endif /* CONFIG_IWLMVM */
trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
- trans->cfg = &iwlax210_2ax_cfg_so_gf_a0;
+ trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
- trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0;
+ trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
- if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+ if ((CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
+ (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
((trans->cfg != &iwl_ax200_cfg_cc &&
- trans->cfg != &killer1650x_2ax_cfg &&
- trans->cfg != &killer1650w_2ax_cfg) ||
+ trans->cfg != &killer1650x_2ax_cfg &&
+ trans->cfg != &killer1650w_2ax_cfg &&
+ trans->cfg != &iwl_ax201_cfg_quz_hr) ||
trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
u32 hw_status;
struct ieee_types_vendor_header {
u8 element_id;
u8 len;
- u8 oui[4]; /* 0~2: oui, 3: oui_type */
- u8 oui_subtype;
- u8 version;
+ struct {
+ u8 oui[3];
+ u8 oui_type;
+ } __packed oui;
} __packed;
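The wire format is unchanged by this hunk; only the C view of the same bytes moves. A sketch of the resulting layout (a reading aid, not code from the patch):

	u8 element_id;
	u8 len;
	u8 oui[3];	/* vendor OUI, compared against wpa_oui/wmm_oui */
	u8 oui_type;	/* folded into the new oui sub-struct */
	/* oui_subtype and version move into the per-IE structs below */

Grouping the four OUI bytes also gives the parser sizeof(vend_hdr.oui.oui) == 3 to validate against, which the reworked length checks further down rely on.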
struct ieee_types_wmm_parameter {
* Version [1]
*/
struct ieee_types_vendor_header vend_hdr;
+ u8 oui_subtype;
+ u8 version;
+
u8 qos_info_bitmap;
u8 reserved;
struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
* Version [1]
*/
struct ieee_types_vendor_header vend_hdr;
+ u8 oui_subtype;
+ u8 version;
u8 qos_info_bitmap;
} __packed;
break;
case WLAN_EID_VENDOR_SPECIFIC:
- if (element_len + 2 < sizeof(vendor_ie->vend_hdr))
- return -EINVAL;
-
vendor_ie = (struct ieee_types_vendor_specific *)
current_ptr;
- if (!memcmp
- (vendor_ie->vend_hdr.oui, wpa_oui,
- sizeof(wpa_oui))) {
+ /* 802.11 requires at least 3-byte OUI. */
+ if (element_len < sizeof(vendor_ie->vend_hdr.oui.oui))
+ return -EINVAL;
+
+ /* Not long enough for a match? Skip it. */
+ if (element_len < sizeof(wpa_oui))
+ break;
+
+ if (!memcmp(&vendor_ie->vend_hdr.oui, wpa_oui,
+ sizeof(wpa_oui))) {
bss_entry->bcn_wpa_ie =
(struct ieee_types_vendor_specific *)
current_ptr;
bss_entry->wpa_offset = (u16)
(current_ptr - bss_entry->beacon_buf);
- } else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
+ } else if (!memcmp(&vendor_ie->vend_hdr.oui, wmm_oui,
sizeof(wmm_oui))) {
if (total_ie_len ==
sizeof(struct ieee_types_wmm_parameter) ||
/* Test to see if it is a WPA IE, if not, then
* it is a gen IE
*/
- if (!memcmp(pvendor_ie->oui, wpa_oui,
+ if (!memcmp(&pvendor_ie->oui, wpa_oui,
sizeof(wpa_oui))) {
/* IE is a WPA/WPA2 IE so call set_wpa function
*/
goto next_ie;
}
- if (!memcmp(pvendor_ie->oui, wps_oui,
+ if (!memcmp(&pvendor_ie->oui, wps_oui,
sizeof(wps_oui))) {
/* Test to see if it is a WPS IE,
* if so, enable wps session flag
mwifiex_dbg(priv->adapter, INFO,
"info: WMM Parameter IE: version=%d,\t"
"qos_info Parameter Set Count=%d, Reserved=%#x\n",
- wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
+ wmm_ie->version, wmm_ie->qos_info_bitmap &
IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
wmm_ie->reserved);
#define MT_TX_RING_SIZE 256
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048
+#define MT_SKB_HEAD_LEN 128
struct mt76_dev;
struct mt76_wcid;
return dma_len;
}
+static struct sk_buff *
+mt76u_build_rx_skb(void *data, int len, int buf_size)
+{
+ struct sk_buff *skb;
+
+ if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
+ struct page *page;
+
+ /* slow path, not enough space for data and
+ * skb_shared_info
+ */
+ skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
+ data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
+ page = virt_to_head_page(data);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, data - page_address(page),
+ len - MT_SKB_HEAD_LEN, buf_size);
+
+ return skb;
+ }
+
+ /* fast path */
+ skb = build_skb(data, buf_size);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ __skb_put(skb, len);
+
+ return skb;
+}
+
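The fast/slow split above is plain arithmetic: SKB_WITH_OVERHEAD(X) is X minus SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), so a frame takes the fast path only if buf_size leaves room for the DMA header, the payload and the shared-info tail. A rough worked example (the ~320-byte aligned shared-info size is an assumed typical value, not from the patch):

	/* buf_size = 2048 (MT_RX_BUF_SIZE), shared info ~320 bytes:
	 * frames longer than roughly 2048 - 320 - MT_DMA_HDR_LEN take
	 * the slow path, where MT_SKB_HEAD_LEN (128) bytes are copied
	 * into a fresh head and the rest is attached as a page fragment.
	 */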
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
return 0;
data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
- if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) {
- dev_err_ratelimited(dev->dev, "rx data too big %d\n", data_len);
- return 0;
- }
-
- skb = build_skb(data, q->buf_size);
+ skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
if (!skb)
return 0;
- skb_reserve(skb, MT_DMA_HDR_LEN);
- __skb_put(skb, data_len);
len -= data_len;
-
while (len > 0 && nsgs < urb->num_sgs) {
data_len = min_t(int, len, urb->sg[nsgs].length);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
},
};
-static const struct ieee80211_iface_limit wl18xx_iface_ap_cl_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
- },
-};
-
-static const struct ieee80211_iface_limit wl18xx_iface_ap_go_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_GO),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
- },
-};
-
static const struct ieee80211_iface_combination
wl18xx_iface_combinations[] = {
{
* idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
* @idr: IDR handle.
* @entry: The type * to use as cursor.
+ * @tmp: A temporary placeholder for ID.
* @id: Entry ID.
*
* @entry and @id do not need to be initialized before the loop, and
* after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
-#define idr_for_each_entry_ul(idr, entry, id) \
- for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id)
+#define idr_for_each_entry_ul(idr, entry, tmp, id) \
+ for (tmp = 0, id = 0; \
+ tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ tmp = id, ++id)
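The extra @tmp cursor exists to stop the walk at the end of the ID space: after the entry at ULONG_MAX is visited, ++id wraps to 0, the tmp <= id test fails, and the loop terminates instead of restarting from the first entry. A minimal usage sketch, assuming a hypothetical driver-private struct foo kept in an IDR:

	/* 'struct foo' and 'foo_idr' are hypothetical. */
	static void foo_dump(struct idr *foo_idr)
	{
		struct foo *entry;
		unsigned long id, tmp;

		idr_for_each_entry_ul(foo_idr, entry, tmp, id)
			pr_info("id %lu -> %p\n", id, entry);
	}

The _continue_ul variant added below resumes from a caller-supplied @id under the same wraparound rule; fl_walk() further down in this series is a real user.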
/**
* idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
entry; \
++id, (entry) = idr_get_next((idr), &(id)))
+/**
+ * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
+ * @idr: IDR handle.
+ * @entry: The type * to use as a cursor.
+ * @tmp: A temporary placeholder for ID.
+ * @id: Entry ID.
+ *
+ * Continue to iterate over entries, resuming after the current position.
+ */
+#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
+ for (tmp = id; \
+ tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ tmp = id, ++id)
+
/*
* IDA - ID Allocator, use when translation from id to pointer isn't necessary.
*/
* Note that the PHY may be able to transform from one connection
* technology to another, so, eg, don't clear 1000BaseX just
* because the MAC is unable to BaseX mode. This is more about
- * clearing unsupported speeds and duplex settings.
+ * clearing unsupported speeds and duplex settings. The port modes
+ * should not be cleared; phylink_set_port_modes() will help with this.
*
* If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
* or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
* based on @state->advertising and/or @state->speed and update
- * @state->interface accordingly.
+ * @state->interface accordingly. See phylink_helper_basex_speed().
+ *
+ * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the
+ * MAC driver to return all supported link modes.
+ *
+ * If the @state->interface mode is not supported, then the @supported
+ * mask must be cleared.
*/
void validate(struct phylink_config *config, unsigned long *supported,
struct phylink_link_state *state);
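As a concrete reading of those rules, a validate() implementation for a hypothetical RGMII-only MAC topping out at 1G could look like the sketch below (foo_validate and its mode list are assumptions, not taken from any driver in this series):

	static void foo_validate(struct phylink_config *config,
				 unsigned long *supported,
				 struct phylink_link_state *state)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

		/* Unsupported interface mode: clear everything. */
		if (state->interface != PHY_INTERFACE_MODE_NA &&
		    !phy_interface_mode_is_rgmii(state->interface)) {
			linkmode_zero(supported);
			return;
		}

		phylink_set(mask, Autoneg);
		phylink_set_port_modes(mask);	/* never clear port bits */
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 1000baseT_Full);

		linkmode_and(supported, supported, mask);
		linkmode_and(state->advertising, state->advertising, mask);
	}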
}
/* Kernel-internal feature bits that are unallocated in user space. */
-#define DST_FEATURE_ECN_CA (1 << 31)
+#define DST_FEATURE_ECN_CA (1U << 31)
#define DST_FEATURE_MASK (DST_FEATURE_ECN_CA)
#define DST_FEATURE_ECN_MASK (DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)
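The 1U matters because int is 32 bits wide on all kernel targets, so shifting a set bit into the sign position is undefined behaviour in C:

	1  << 31	/* undefined: overflows signed int */
	1U << 31	/* well-defined: 0x80000000 */

The same fix recurs below for GUE_PFLAG_REMCSUM, TP_STATUS_TS_RAW_HARDWARE and NL80211_FEATURE_ND_RANDOM_MAC_ADDR.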
* @skb: buffer
*
* If dst is not yet refcounted and not destroyed, grab a ref on it.
+ * Returns true if dst is refcounted.
*/
-static inline void skb_dst_force(struct sk_buff *skb)
+static inline bool skb_dst_force(struct sk_buff *skb)
{
if (skb_dst_is_noref(skb)) {
struct dst_entry *dst = skb_dst(skb);
skb->_skb_refdst = (unsigned long)dst;
}
+
+ return skb->_skb_refdst != 0UL;
}
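With the boolean return a caller can now refuse to hold on to a packet whose route has already been destroyed; the nf_queue hunk further down in this series is the in-tree user. A minimal sketch (queue_skb() and enqueue() are hypothetical):

	static int queue_skb(struct sk_buff *skb)
	{
		/* dst was noref and could not be refcounted: route is gone */
		if (!skb_dst_force(skb)) {
			kfree_skb(skb);
			return -ENETDOWN;
		}
		return enqueue(skb);	/* hypothetical next step */
	}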
/* Private flags in the private option extension */
-#define GUE_PFLAG_REMCSUM htonl(1 << 31)
+#define GUE_PFLAG_REMCSUM htonl(1U << 31)
#define GUE_PLEN_REMCSUM 4
#define GUE_PFLAGS_ALL (GUE_PFLAG_REMCSUM)
struct ip_vs_sync_buff *sync_buff;
unsigned long sync_queue_len;
unsigned int sync_queue_delay;
- struct task_struct *master_thread;
struct delayed_work master_wakeup_work;
struct netns_ipvs *ipvs;
};
+struct ip_vs_sync_thread_data;
+
/* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
spinlock_t sync_lock;
struct ipvs_master_sync_state *ms;
spinlock_t sync_buff_lock;
- struct task_struct **backup_threads;
+ struct ip_vs_sync_thread_data *master_tinfo;
+ struct ip_vs_sync_thread_data *backup_tinfo;
int threads_mask;
volatile int sync_state;
struct mutex sync_mutex;
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
+void tls_ctx_free(struct tls_context *ctx);
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
struct list_head flush_node;
u16 queue_id;
bool zc;
+ enum {
+ XSK_READY = 0,
+ XSK_BOUND,
+ XSK_UNBOUND,
+ } state;
/* Protects multiple processes in the control path */
struct mutex mutex;
struct xsk_queue *tx ____cacheline_aligned_in_smp;
),
TP_fast_assign(
- __entry->call = call->debug_id;
+ __entry->call = call ? call->debug_id : 0;
__entry->serial = serial;
__entry->why = why;
),
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u32 gpl_compatible:1;
+ __u32 :31; /* alignment pad */
__u64 netns_dev;
__u64 netns_ino;
__u32 nr_jited_ksyms;
/* Rx and Tx ring - header status */
#define TP_STATUS_TS_SOFTWARE (1 << 29)
#define TP_STATUS_TS_SYS_HARDWARE (1 << 30) /* deprecated, never set */
-#define TP_STATUS_TS_RAW_HARDWARE (1 << 31)
+#define TP_STATUS_TS_RAW_HARDWARE (1U << 31)
/* Rx ring - feature request bits */
#define TP_FT_REQ_FILL_RXHASH 0x1
NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28,
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29,
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30,
- NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31,
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1U << 31,
};
/**
/* Check array->index_type */
index_type_id = array->index_type;
index_type = btf_type_by_id(btf, index_type_id);
- if (btf_type_is_resolve_source_only(index_type) ||
- btf_type_nosize_or_null(index_type)) {
+ if (btf_type_nosize_or_null(index_type) ||
+ btf_type_is_resolve_source_only(index_type)) {
btf_verifier_log_type(env, v->t, "Invalid index");
return -EINVAL;
}
/* Check array->type */
elem_type_id = array->type;
elem_type = btf_type_by_id(btf, elem_type_id);
- if (btf_type_is_resolve_source_only(elem_type) ||
- btf_type_nosize_or_null(elem_type)) {
+ if (btf_type_nosize_or_null(elem_type) ||
+ btf_type_is_resolve_source_only(elem_type)) {
btf_verifier_log_type(env, v->t,
"Invalid elem");
return -EINVAL;
const struct btf_type *member_type = btf_type_by_id(env->btf,
member_type_id);
- if (btf_type_is_resolve_source_only(member_type) ||
- btf_type_nosize_or_null(member_type)) {
+ if (btf_type_nosize_or_null(member_type) ||
+ btf_type_is_resolve_source_only(member_type)) {
btf_verifier_log_member(env, v->t, member,
"Invalid member");
return -EINVAL;
insn++;
CONT;
ALU_ARSH_X:
- DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
+ DST = (u64) (u32) (((s32) DST) >> SRC);
CONT;
ALU_ARSH_K:
- DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
+ DST = (u64) (u32) (((s32) DST) >> IMM);
CONT;
ALU64_ARSH_X:
(*(s64 *) &DST) >>= SRC;
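The old form reinterpreted the first four bytes of DST in memory, which are the low 32 bits only on little-endian hosts; the cast truncates by value and is endianness-independent:

	/* u64 dst = 0x00000000ffffff80ULL;
	 *
	 *	*(s32 *)&dst	reads the first 4 bytes in memory:
	 *			-128 on little-endian, 0 on big-endian;
	 *	(s32)dst	truncates the value: -128 everywhere.
	 */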
return ret;
}
-static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
+static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface)
{
/* begin scheduling originator messages on that interface */
batadv_iv_ogm_schedule(hard_iface);
static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.name = "BATMAN_IV",
.iface = {
- .activate = batadv_iv_iface_activate,
.enable = batadv_iv_ogm_iface_enable,
+ .enabled = batadv_iv_iface_enabled,
.disable = batadv_iv_ogm_iface_disable,
.update_mac = batadv_iv_ogm_iface_update_mac,
.primary_set = batadv_iv_ogm_primary_iface_set,
batadv_hardif_recalc_extra_skbroom(soft_iface);
+ if (bat_priv->algo_ops->iface.enabled)
+ bat_priv->algo_ops->iface.enabled(hard_iface);
+
out:
return 0;
*/
void batadv_tt_free(struct batadv_priv *bat_priv)
{
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1);
+
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
/** @enable: init routing info when hard-interface is enabled */
int (*enable)(struct batadv_hard_iface *hard_iface);
+ /** @enabled: notification when hard-interface was enabled (optional) */
+ void (*enabled)(struct batadv_hard_iface *hard_iface);
+
/** @disable: de-init routing info when hard-interface is disabled */
void (*disable)(struct batadv_hard_iface *hard_iface);
int main(void)
{
- debug_fd = open("/dev/console", 00000002);
+ debug_fd = open("/dev/kmsg", 00000002);
dprintf(debug_fd, "Started bpfilter\n");
loop();
close(debug_fd);
struct net_bridge_fdb_entry *dst = NULL;
struct net_bridge_mdb_entry *mdst;
bool local_rcv, mcast_hit = false;
- const unsigned char *dest;
struct net_bridge *br;
u16 vid = 0;
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
local_rcv = !!(br->dev->flags & IFF_PROMISC);
- dest = eth_hdr(skb)->h_dest;
- if (is_multicast_ether_addr(dest)) {
+ if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
/* by definition the broadcast is also a multicast address */
- if (is_broadcast_ether_addr(dest)) {
+ if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
pkt_type = BR_PKT_BROADCAST;
local_rcv = true;
} else {
}
break;
case BR_PKT_UNICAST:
- dst = br_fdb_find_rcu(br, dest, vid);
+ dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
default:
break;
}
int type;
int err = 0;
__be32 group;
+ u16 nsrcs;
ih = igmpv3_report_hdr(skb);
num = ntohs(ih->ngrec);
grec = (void *)(skb->data + len - sizeof(*grec));
group = grec->grec_mca;
type = grec->grec_type;
+ nsrcs = ntohs(grec->grec_nsrcs);
- len += ntohs(grec->grec_nsrcs) * 4;
+ len += nsrcs * 4;
if (!ip_mc_may_pull(skb, len))
return -EINVAL;
src = eth_hdr(skb)->h_source;
if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
type == IGMPV3_MODE_IS_INCLUDE) &&
- ntohs(grec->grec_nsrcs) == 0) {
+ nsrcs == 0) {
br_ip4_multicast_leave_group(br, port, group, vid, src);
} else {
err = br_ip4_multicast_add_group(br, port, group, vid,
len = skb_transport_offset(skb) + sizeof(*icmp6h);
for (i = 0; i < num; i++) {
- __be16 *nsrcs, _nsrcs;
+ __be16 *_nsrcs, __nsrcs;
+ u16 nsrcs;
nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
-	    nsrcs_offset + sizeof(_nsrcs))
+	    nsrcs_offset + sizeof(__nsrcs))
return -EINVAL;
- nsrcs = skb_header_pointer(skb, nsrcs_offset,
- sizeof(_nsrcs), &_nsrcs);
- if (!nsrcs)
+ _nsrcs = skb_header_pointer(skb, nsrcs_offset,
+ sizeof(__nsrcs), &__nsrcs);
+ if (!_nsrcs)
return -EINVAL;
- grec_len = struct_size(grec, grec_src, ntohs(*nsrcs));
+ nsrcs = ntohs(*_nsrcs);
+ grec_len = struct_size(grec, grec_src, nsrcs);
if (!ipv6_mc_may_pull(skb, len + grec_len))
return -EINVAL;
src = eth_hdr(skb)->h_source;
if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
- ntohs(*nsrcs) == 0) {
+ nsrcs == 0) {
br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
vid, src);
} else {
u16 vid)
{
unsigned int transport_len = ipv6_transport_len(skb);
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct mld_msg *mld;
struct net_bridge_mdb_entry *mp;
struct mld2_query *mld2q;
if (is_general_query) {
saddr.proto = htons(ETH_P_IPV6);
- saddr.u.ip6 = ip6h->saddr;
+ saddr.u.ip6 = ipv6_hdr(skb)->saddr;
br_multicast_query_received(br, port, &br->ip6_other_query,
&saddr, max_delay);
void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
struct net_device *dev)
{
- const unsigned char *dest = eth_hdr(skb)->h_dest;
struct net_bridge_port *p;
struct net_bridge *br;
const unsigned char *buf;
if (p->state == BR_STATE_DISABLED)
goto out;
- if (!ether_addr_equal(dest, br->group_addr))
+ if (!ether_addr_equal(eth_hdr(skb)->h_dest, br->group_addr))
goto out;
if (p->flags & BR_BPDU_GUARD) {
return -ENODEV;
idev = __in6_dev_get_safely(dev);
- if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
+ if (unlikely(!idev || !idev->cnf.forwarding))
return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) {
struct hsr_port *master;
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
- skb->dev = master->dev;
- hsr_forward_skb(skb, master);
-
+ if (master) {
+ skb->dev = master->dev;
+ hsr_forward_skb(skb, master);
+ } else {
+ atomic_long_inc(&dev->tx_dropped);
+ dev_kfree_skb_any(skb);
+ }
return NETDEV_TX_OK;
}
rcu_read_unlock();
}
-/* According to comments in the declaration of struct net_device, this function
- * is "Called from unregister, can be used to call free_netdev". Ok then...
- */
-static void hsr_dev_destroy(struct net_device *hsr_dev)
+void hsr_dev_destroy(struct net_device *hsr_dev)
{
struct hsr_priv *hsr;
struct hsr_port *port;
+ struct hsr_port *tmp;
hsr = netdev_priv(hsr_dev);
hsr_debugfs_term(hsr);
- rtnl_lock();
- hsr_for_each_port(hsr, port)
+ list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
hsr_del_port(port);
- rtnl_unlock();
del_timer_sync(&hsr->prune_timer);
del_timer_sync(&hsr->announce_timer);
synchronize_rcu();
+
+ hsr_del_self_node(&hsr->self_node_db);
+ hsr_del_nodes(&hsr->node_db);
}
static const struct net_device_ops hsr_device_ops = {
dev->priv_flags |= IFF_NO_QUEUE;
dev->needs_free_netdev = true;
- dev->priv_destructor = hsr_dev_destroy;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
{
struct hsr_priv *hsr;
struct hsr_port *port;
+ struct hsr_port *tmp;
int res;
hsr = netdev_priv(hsr_dev);
return 0;
fail:
- hsr_for_each_port(hsr, port)
+ list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
hsr_del_port(port);
err_add_port:
- hsr_del_node(&hsr->self_node_db);
+ hsr_del_self_node(&hsr->self_node_db);
return res;
}
void hsr_dev_setup(struct net_device *dev);
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
unsigned char multicast_spec, u8 protocol_version);
+void hsr_dev_destroy(struct net_device *hsr_dev);
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
bool is_hsr_master(struct net_device *dev);
int hsr_get_max_mtu(struct hsr_priv *hsr);
return 0;
}
-void hsr_del_node(struct list_head *self_node_db)
+void hsr_del_self_node(struct list_head *self_node_db)
{
struct hsr_node *node;
}
}
+void hsr_del_nodes(struct list_head *node_db)
+{
+ struct hsr_node *node;
+ struct hsr_node *tmp;
+
+ list_for_each_entry_safe(node, tmp, node_db, mac_list)
+ kfree(node);
+}
+
/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
* seq_out is used to initialize filtering of outgoing duplicate frames
* originating from the newly added node.
struct hsr_node;
-void hsr_del_node(struct list_head *self_node_db);
+void hsr_del_self_node(struct list_head *self_node_db);
+void hsr_del_nodes(struct list_head *node_db);
struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
u16 seq_out);
struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
}
+static void hsr_dellink(struct net_device *hsr_dev, struct list_head *head)
+{
+ hsr_dev_destroy(hsr_dev);
+ unregister_netdevice_queue(hsr_dev, head);
+}
+
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct hsr_priv *hsr;
.priv_size = sizeof(struct hsr_priv),
.setup = hsr_dev_setup,
.newlink = hsr_newlink,
+ .dellink = hsr_dellink,
.fill_info = hsr_fill_info,
};
if (port != master)
dev_put(port->dev);
+ kfree(port);
}
#include <net/net_namespace.h>
#include <net/addrconf.h>
+#define IPV6ONLY_FLAGS \
+ (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
+ IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
+ IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
+
static struct ipv4_devconf ipv4_devconf = {
.data = {
[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
ifa->ifa_flags &= ~IFA_F_SECONDARY;
last_primary = &in_dev->ifa_list;
+ /* Don't set IPv6 only flags to IPv4 addresses */
+ ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
+
ifap = &in_dev->ifa_list;
ifa1 = rtnl_dereference(*ifap);
if (pmc) {
im->interface = pmc->interface;
if (im->sfmode == MCAST_INCLUDE) {
- im->tomb = pmc->tomb;
- pmc->tomb = NULL;
-
- im->sources = pmc->sources;
- pmc->sources = NULL;
-
+ swap(im->tomb, pmc->tomb);
+ swap(im->sources, pmc->sources);
for (psf = im->sources; psf; psf = psf->sf_next)
psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
} else {
return &raw_v6_hashinfo;
#endif
} else {
- pr_warn_once("Unexpected inet family %d\n",
- r->sdiag_family);
- WARN_ON_ONCE(1);
return ERR_PTR(-EINVAL);
}
}
n = ip_neigh_gw4(dev, pkey);
}
- if (n && !refcount_inc_not_zero(&n->refcnt))
+ if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
n = NULL;
rcu_read_unlock_bh();
tcp_saved_syn_free(tp);
tp->compressed_ack = 0;
tp->bytes_sent = 0;
+ tp->bytes_acked = 0;
+ tp->bytes_received = 0;
tp->bytes_retrans = 0;
tp->duplicate_sack[0].start_seq = 0;
tp->duplicate_sack[0].end_seq = 0;
goto out;
}
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(out_skb);
goto out;
+ }
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = hdr->sadb_msg_version;
return PTR_ERR(out_skb);
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(out_skb);
return err;
+ }
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = pfk->dump.msg_version;
static int __net_init __ip_vs_init(struct net *net)
{
struct netns_ipvs *ipvs;
- int ret;
ipvs = net_generic(net, ip_vs_net_id);
if (ipvs == NULL)
if (ip_vs_sync_net_init(ipvs) < 0)
goto sync_fail;
- ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
- if (ret < 0)
- goto hook_fail;
-
return 0;
/*
* Error handling
*/
-hook_fail:
- ip_vs_sync_net_cleanup(ipvs);
sync_fail:
ip_vs_conn_net_cleanup(ipvs);
conn_fail:
net->ipvs = NULL;
}
+static int __net_init __ip_vs_dev_init(struct net *net)
+{
+ int ret;
+
+ ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+ if (ret < 0)
+ goto hook_fail;
+ return 0;
+
+hook_fail:
+ return ret;
+}
+
static void __net_exit __ip_vs_dev_cleanup(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
};
static struct pernet_operations ipvs_core_dev_ops = {
+ .init = __ip_vs_dev_init,
.exit = __ip_vs_dev_cleanup,
};
cfg.syncid = dm->syncid;
ret = start_sync_thread(ipvs, &cfg, dm->state);
} else {
- mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs, dm->state);
- mutex_unlock(&ipvs->sync_mutex);
}
goto out_dec;
}
if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL;
- mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
- mutex_unlock(&ipvs->sync_mutex);
return ret;
}
#define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))
struct ip_vs_sync_thread_data {
+ struct task_struct *task;
struct netns_ipvs *ipvs;
struct socket *sock;
char *buf;
max(IPVS_SYNC_SEND_DELAY, 1));
ms->sync_queue_len++;
list_add_tail(&sb->list, &ms->sync_queue);
- if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE)
- wake_up_process(ms->master_thread);
+ if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) {
+ int id = (int)(ms - ipvs->ms);
+
+ wake_up_process(ipvs->master_tinfo[id].task);
+ }
} else
ip_vs_sync_buff_release(sb);
spin_unlock(&ipvs->sync_lock);
spin_lock_bh(&ipvs->sync_lock);
if (ms->sync_queue_len &&
ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
+ int id = (int)(ms - ipvs->ms);
+
ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
- wake_up_process(ms->master_thread);
+ wake_up_process(ipvs->master_tinfo[id].task);
}
spin_unlock_bh(&ipvs->sync_lock);
}
if (sb)
ip_vs_sync_buff_release(sb);
- /* release the sending multicast socket */
- sock_release(tinfo->sock);
- kfree(tinfo);
-
return 0;
}
}
}
- /* release the sending multicast socket */
- sock_release(tinfo->sock);
- kfree(tinfo->buf);
- kfree(tinfo);
-
return 0;
}
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
int state)
{
- struct ip_vs_sync_thread_data *tinfo = NULL;
- struct task_struct **array = NULL, *task;
+ struct ip_vs_sync_thread_data *ti = NULL, *tinfo;
+ struct task_struct *task;
struct net_device *dev;
char *name;
int (*threadfn)(void *data);
threadfn = sync_thread_master;
} else if (state == IP_VS_STATE_BACKUP) {
result = -EEXIST;
- if (ipvs->backup_threads)
+ if (ipvs->backup_tinfo)
goto out_early;
ipvs->bcfg = *c;
master_wakeup_work_handler);
ms->ipvs = ipvs;
}
- } else {
- array = kcalloc(count, sizeof(struct task_struct *),
- GFP_KERNEL);
- result = -ENOMEM;
- if (!array)
- goto out;
}
+ result = -ENOMEM;
+ ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data),
+ GFP_KERNEL);
+ if (!ti)
+ goto out;
for (id = 0; id < count; id++) {
- result = -ENOMEM;
- tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
- if (!tinfo)
- goto out;
+ tinfo = &ti[id];
tinfo->ipvs = ipvs;
- tinfo->sock = NULL;
if (state == IP_VS_STATE_BACKUP) {
+ result = -ENOMEM;
tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
GFP_KERNEL);
if (!tinfo->buf)
goto out;
- } else {
- tinfo->buf = NULL;
}
tinfo->id = id;
if (state == IP_VS_STATE_MASTER)
result = PTR_ERR(task);
goto out;
}
- tinfo = NULL;
- if (state == IP_VS_STATE_MASTER)
- ipvs->ms[id].master_thread = task;
- else
- array[id] = task;
+ tinfo->task = task;
}
/* mark as active */
- if (state == IP_VS_STATE_BACKUP)
- ipvs->backup_threads = array;
+ if (state == IP_VS_STATE_MASTER)
+ ipvs->master_tinfo = ti;
+ else
+ ipvs->backup_tinfo = ti;
spin_lock_bh(&ipvs->sync_buff_lock);
ipvs->sync_state |= state;
spin_unlock_bh(&ipvs->sync_buff_lock);
out:
/* We do not need RTNL lock anymore, release it here so that
- * sock_release below and in the kthreads can use rtnl_lock
- * to leave the mcast group.
+ * sock_release below can use rtnl_lock to leave the mcast group.
*/
rtnl_unlock();
- count = id;
- while (count-- > 0) {
- if (state == IP_VS_STATE_MASTER)
- kthread_stop(ipvs->ms[count].master_thread);
- else
- kthread_stop(array[count]);
+ id = min(id, count - 1);
+ if (ti) {
+ for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+ if (tinfo->task)
+ kthread_stop(tinfo->task);
+ }
}
if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
kfree(ipvs->ms);
ipvs->ms = NULL;
}
mutex_unlock(&ipvs->sync_mutex);
- if (tinfo) {
- if (tinfo->sock)
- sock_release(tinfo->sock);
- kfree(tinfo->buf);
- kfree(tinfo);
+
+ /* No more mutexes, release socks */
+ if (ti) {
+ for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+ if (tinfo->sock)
+ sock_release(tinfo->sock);
+ kfree(tinfo->buf);
+ }
+ kfree(ti);
}
- kfree(array);
return result;
out_early:
int stop_sync_thread(struct netns_ipvs *ipvs, int state)
{
- struct task_struct **array;
+ struct ip_vs_sync_thread_data *ti, *tinfo;
int id;
int retc = -EINVAL;
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
+ mutex_lock(&ipvs->sync_mutex);
if (state == IP_VS_STATE_MASTER) {
+ retc = -ESRCH;
if (!ipvs->ms)
- return -ESRCH;
+ goto err;
+ ti = ipvs->master_tinfo;
/*
* The lock synchronizes with sb_queue_tail(), so that we don't
struct ipvs_master_sync_state *ms = &ipvs->ms[id];
int ret;
+ tinfo = &ti[id];
pr_info("stopping master sync thread %d ...\n",
- task_pid_nr(ms->master_thread));
+ task_pid_nr(tinfo->task));
cancel_delayed_work_sync(&ms->master_wakeup_work);
- ret = kthread_stop(ms->master_thread);
+ ret = kthread_stop(tinfo->task);
if (retc >= 0)
retc = ret;
}
kfree(ipvs->ms);
ipvs->ms = NULL;
+ ipvs->master_tinfo = NULL;
} else if (state == IP_VS_STATE_BACKUP) {
- if (!ipvs->backup_threads)
- return -ESRCH;
+ retc = -ESRCH;
+ if (!ipvs->backup_tinfo)
+ goto err;
+ ti = ipvs->backup_tinfo;
ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
- array = ipvs->backup_threads;
retc = 0;
for (id = ipvs->threads_mask; id >= 0; id--) {
int ret;
+ tinfo = &ti[id];
pr_info("stopping backup sync thread %d ...\n",
- task_pid_nr(array[id]));
- ret = kthread_stop(array[id]);
+ task_pid_nr(tinfo->task));
+ ret = kthread_stop(tinfo->task);
if (retc >= 0)
retc = ret;
}
- kfree(array);
- ipvs->backup_threads = NULL;
+ ipvs->backup_tinfo = NULL;
+ } else {
+ goto err;
}
+ id = ipvs->threads_mask;
+ mutex_unlock(&ipvs->sync_mutex);
+
+ /* No more mutexes, release socks */
+ for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+ if (tinfo->sock)
+ sock_release(tinfo->sock);
+ kfree(tinfo->buf);
+ }
+ kfree(ti);
/* decrease the module use count */
ip_vs_use_count_dec();
+ return retc;
+err:
+ mutex_unlock(&ipvs->sync_mutex);
return retc;
}
{
int retc;
- mutex_lock(&ipvs->sync_mutex);
retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Master Daemon\n");
retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Backup Daemon\n");
- mutex_unlock(&ipvs->sync_mutex);
}
struct nf_conntrack_tuple tuple;
struct nf_conn *ct;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
- u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
struct nf_conntrack_zone zone;
int err;
if (cda[CTA_TUPLE_ORIG])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
- u3, &zone);
+ nfmsg->nfgen_family, &zone);
else if (cda[CTA_TUPLE_REPLY])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
- u3, &zone);
+ nfmsg->nfgen_family, &zone);
else {
+ u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
+
return ctnetlink_flush_conntrack(net, cda,
NETLINK_CB(skb).portid,
nlmsg_report(nlh), u3);
/* See ip_conntrack_proto_tcp.c */
if (state->net->ct.sysctl_checksum &&
state->hook == NF_INET_PRE_ROUTING &&
- nf_ip_checksum(skb, state->hook, dataoff, 0)) {
+ nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {
icmp_error_log(skb, state, "bad hw icmp checksum");
return -NF_ACCEPT;
}
if (skb_ensure_writable(skb, hdrlen + sizeof(*inside)))
return 0;
- if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
+ if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP))
return 0;
inside = (void *)skb->data + hdrlen;
goto err;
}
+ if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
+ status = -ENETDOWN;
+ goto err;
+ }
+
*entry = (struct nf_queue_entry) {
.skb = skb,
.state = *state,
};
nf_queue_entry_get_refs(entry);
- skb_dst_force(skb);
switch (entry->state.pf) {
case AF_INET:
case CHECKSUM_COMPLETE:
if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
break;
- if ((protocol == 0 && !csum_fold(skb->csum)) ||
+ if ((protocol != IPPROTO_TCP && protocol != IPPROTO_UDP &&
+ !csum_fold(skb->csum)) ||
!csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - dataoff, protocol,
skb->csum)) {
}
/* fall through */
case CHECKSUM_NONE:
- if (protocol == 0)
+ if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP)
skb->csum = 0;
else
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
unsigned short frametype, flags, window, timeout;
int ret;
- skb->sk = NULL; /* Initially we don't know who it's for */
+ skb_orphan(skb);
/*
* skb->data points to the netrom frame start
window = skb->data[20];
skb->sk = make;
+ skb->destructor = sock_efree;
make->sk_state = TCP_ESTABLISHED;
/* Fill in his circuit details */
conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
if (!conn_info) {
rc = -EPROTO;
- goto free_exit;
+ goto exit;
}
__skb_queue_head_init(&frags_q);
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__be16 diff[] = { ~(hdr->h_proto), ethertype };
- skb->csum = ~csum_partial((char *)diff, sizeof(diff),
- ~skb->csum);
+ skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
}
hdr->h_proto = ethertype;
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__be32 diff[] = { ~(stack->label_stack_entry), lse };
- skb->csum = ~csum_partial((char *)diff, sizeof(diff),
- ~skb->csum);
+ skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
}
stack->label_stack_entry = lse;
switch (rx->sk.sk_state) {
case RXRPC_UNBOUND:
+ case RXRPC_CLIENT_UNBOUND:
rx->srx.srx_family = AF_RXRPC;
rx->srx.srx_service = 0;
rx->srx.transport_type = SOCK_DGRAM;
}
rx->local = local;
- rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
+ rx->sk.sk_state = RXRPC_CLIENT_BOUND;
/* Fall through */
- case RXRPC_CLIENT_UNBOUND:
case RXRPC_CLIENT_BOUND:
if (!m->msg_name &&
test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
struct idr *idr = &idrinfo->action_idr;
struct tc_action *p;
unsigned long id = 1;
+ unsigned long tmp;
mutex_lock(&idrinfo->lock);
s_i = cb->args[0];
- idr_for_each_entry_ul(idr, p, id) {
+ idr_for_each_entry_ul(idr, p, tmp, id) {
index++;
if (index < s_i)
continue;
struct idr *idr = &idrinfo->action_idr;
struct tc_action *p;
unsigned long id = 1;
+ unsigned long tmp;
nest = nla_nest_start_noflag(skb, 0);
if (nest == NULL)
goto nla_put_failure;
mutex_lock(&idrinfo->lock);
- idr_for_each_entry_ul(idr, p, id) {
+ idr_for_each_entry_ul(idr, p, tmp, id) {
ret = tcf_idr_release_unsafe(p);
if (ret == ACT_P_DELETED) {
module_put(ops->owner);
struct tc_action *p;
int ret;
unsigned long id = 1;
+ unsigned long tmp;
- idr_for_each_entry_ul(idr, p, id) {
+ idr_for_each_entry_ul(idr, p, tmp, id) {
ret = __tcf_idr_release(p, false, true);
if (ret == ACT_P_DELETED)
module_put(ops->owner);
return f;
}
-static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
- unsigned long *handle)
-{
- struct cls_fl_head *head = fl_head_dereference(tp);
- struct cls_fl_filter *f;
-
- rcu_read_lock();
- while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
- /* don't return filters that are being deleted */
- if (refcount_inc_not_zero(&f->refcnt))
- break;
- ++(*handle);
- }
- rcu_read_unlock();
-
- return f;
-}
-
static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
bool *last, bool rtnl_held,
struct netlink_ext_ack *extack)
static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
bool rtnl_held)
{
+ struct cls_fl_head *head = fl_head_dereference(tp);
+ unsigned long id = arg->cookie, tmp;
struct cls_fl_filter *f;
arg->count = arg->skip;
- while ((f = fl_get_next_filter(tp, &arg->cookie)) != NULL) {
+ idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
+ /* don't return filters that are being deleted */
+ if (!refcount_inc_not_zero(&f->refcnt))
+ continue;
if (arg->fn(tp, f, arg) < 0) {
__fl_put(f);
arg->stop = 1;
break;
}
__fl_put(f);
- arg->cookie++;
arg->count++;
}
+ arg->cookie = id;
}
static struct cls_fl_filter *
sctp_chunk_free(sack);
goto out;
}
+ SCTP_INC_STATS(sock_net(asoc->base.sk),
+ SCTP_MIB_OUTCTRLCHUNKS);
+ asoc->stats.octrlchunks++;
asoc->peer.sack_needed = 0;
if (del_timer(timer))
sctp_association_put(asoc);
static int sctp_connect(struct sock *sk, struct sockaddr *addr,
int addr_len, int flags)
{
- struct inet_sock *inet = inet_sk(sk);
struct sctp_af *af;
- int err = 0;
+ int err = -EINVAL;
lock_sock(sk);
-
pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
addr, addr_len);
- /* We may need to bind the socket. */
- if (!inet->inet_num) {
- if (sk->sk_prot->get_port(sk, 0)) {
- release_sock(sk);
- return -EAGAIN;
- }
- inet->inet_sport = htons(inet->inet_num);
- }
-
/* Validate addr_len before calling common connect/connectx routine. */
- af = addr_len < offsetofend(struct sockaddr, sa_family) ? NULL :
- sctp_get_af_specific(addr->sa_family);
- if (!af || addr_len < af->sockaddr_len) {
- err = -EINVAL;
- } else {
- /* Pass correct addr len to common routine (so it knows there
- * is only one address being passed.
- */
+ af = sctp_get_af_specific(addr->sa_family);
+ if (af && addr_len >= af->sockaddr_len)
err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
- }
release_sock(sk);
return err;
int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
{
struct sctp_stream_out_ext *soute;
+ int ret;
soute = kzalloc(sizeof(*soute), GFP_KERNEL);
if (!soute)
return -ENOMEM;
SCTP_SO(stream, sid)->ext = soute;
- return sctp_sched_init_sid(stream, sid, GFP_KERNEL);
+ ret = sctp_sched_init_sid(stream, sid, GFP_KERNEL);
+ if (ret) {
+ kfree(SCTP_SO(stream, sid)->ext);
+ SCTP_SO(stream, sid)->ext = NULL;
+ }
+
+ return ret;
}
void sctp_stream_free(struct sctp_stream *stream)
if (ctx->rx_conf == TLS_HW)
kfree(tls_offload_ctx_rx(ctx));
- kfree(ctx);
+ tls_ctx_free(ctx);
}
static void tls_device_gc_task(struct work_struct *work)
}
crypto_info = &ctx->crypto_send.info;
+ if (crypto_info->version != TLS_1_2_VERSION) {
+ rc = -EOPNOTSUPP;
+ goto free_offload_ctx;
+ }
+
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
struct net_device *netdev;
int rc = 0;
+ if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
+ return -EOPNOTSUPP;
+
/* We support starting offload on multiple sockets
* concurrently, so we only need a read lock here.
* This lock must precede get_netdev_for_sock to prevent races between
ctx->sk_write_space(sk);
}
-static void tls_ctx_free(struct tls_context *ctx)
+void tls_ctx_free(struct tls_context *ctx)
{
if (!ctx)
return;
ctx->sk_destruct(sk);
/* Free ctx */
- kfree(ctx);
+ tls_ctx_free(ctx);
icsk->icsk_ulp_data = NULL;
}
ingress_empty = list_empty(&psock->ingress_msg);
rcu_read_unlock();
- return !ingress_empty || ctx->recv_pkt;
+ return !ingress_empty || ctx->recv_pkt ||
+ !skb_queue_empty(&ctx->rx_list);
}
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
umem->dev = dev;
umem->queue_id = queue_id;
+
+ dev_hold(dev);
+
if (force_copy)
/* For copy-mode, we are done. */
goto out_rtnl_unlock;
goto err_unreg_umem;
rtnl_unlock();
- dev_hold(dev);
umem->zc = true;
return 0;
return err;
}
-static void xdp_umem_clear_dev(struct xdp_umem *umem)
+void xdp_umem_clear_dev(struct xdp_umem *umem)
{
struct netdev_bpf bpf;
int err;
+ ASSERT_RTNL();
+
if (!umem->dev)
return;
bpf.xsk.umem = NULL;
bpf.xsk.queue_id = umem->queue_id;
- rtnl_lock();
err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
- rtnl_unlock();
if (err)
WARN(1, "failed to disable umem!\n");
}
- rtnl_lock();
xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
- rtnl_unlock();
- if (umem->zc) {
- dev_put(umem->dev);
- umem->zc = false;
- }
+ dev_put(umem->dev);
+ umem->dev = NULL;
+ umem->zc = false;
}
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
static void xdp_umem_release(struct xdp_umem *umem)
{
+ rtnl_lock();
xdp_umem_clear_dev(umem);
+ rtnl_unlock();
ida_simple_remove(&umem_ida, umem->id);
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
u16 queue_id, u16 flags);
+void xdp_umem_clear_dev(struct xdp_umem *umem);
bool xdp_umem_validate_queues(struct xdp_umem *umem);
void xdp_get_umem(struct xdp_umem *umem);
void xdp_put_umem(struct xdp_umem *umem);
return 0;
}
+static void xsk_unbind_dev(struct xdp_sock *xs)
+{
+ struct net_device *dev = xs->dev;
+
+ if (!dev || xs->state != XSK_BOUND)
+ return;
+
+ xs->state = XSK_UNBOUND;
+
+ /* Wait for driver to stop using the xdp socket. */
+ xdp_del_sk_umem(xs->umem, xs);
+ xs->dev = NULL;
+ synchronize_net();
+ dev_put(dev);
+}
+
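Together with the guards added to the setsockopts and mmap below, the new field forms a small state machine (a sketch derived from this patch, not a documented API contract):

	/* XSK_READY --bind()--> XSK_BOUND
	 * XSK_BOUND --release()/NETDEV_UNREGISTER--> XSK_UNBOUND
	 *
	 * Once a socket leaves XSK_READY, the ring and umem setsockopts
	 * and mmap() return -EBUSY; XSK_UNBOUND is terminal.
	 */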
static int xsk_release(struct socket *sock)
{
struct sock *sk = sock->sk;
sock_prot_inuse_add(net, sk->sk_prot, -1);
local_bh_enable();
- if (xs->dev) {
- struct net_device *dev = xs->dev;
-
- /* Wait for driver to stop using the xdp socket. */
- xdp_del_sk_umem(xs->umem, xs);
- xs->dev = NULL;
- synchronize_net();
- dev_put(dev);
- }
+ xsk_unbind_dev(xs);
xskq_destroy(xs->rx);
xskq_destroy(xs->tx);
return -EINVAL;
mutex_lock(&xs->mutex);
- if (xs->dev) {
+ if (xs->state != XSK_READY) {
err = -EBUSY;
goto out_release;
}
out_unlock:
if (err)
dev_put(dev);
+ else
+ xs->state = XSK_BOUND;
out_release:
mutex_unlock(&xs->mutex);
return err;
return -EFAULT;
mutex_lock(&xs->mutex);
+ if (xs->state != XSK_READY) {
+ mutex_unlock(&xs->mutex);
+ return -EBUSY;
+ }
q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
err = xsk_init_queue(entries, q, false);
mutex_unlock(&xs->mutex);
return -EFAULT;
mutex_lock(&xs->mutex);
- if (xs->umem) {
+ if (xs->state != XSK_READY || xs->umem) {
mutex_unlock(&xs->mutex);
return -EBUSY;
}
return -EFAULT;
mutex_lock(&xs->mutex);
+ if (xs->state != XSK_READY) {
+ mutex_unlock(&xs->mutex);
+ return -EBUSY;
+ }
if (!xs->umem) {
mutex_unlock(&xs->mutex);
return -EINVAL;
unsigned long pfn;
struct page *qpg;
+ if (xs->state != XSK_READY)
+ return -EBUSY;
+
if (offset == XDP_PGOFF_RX_RING) {
q = READ_ONCE(xs->rx);
} else if (offset == XDP_PGOFF_TX_RING) {
size, vma->vm_page_prot);
}
+static int xsk_notifier(struct notifier_block *this,
+ unsigned long msg, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net *net = dev_net(dev);
+ struct sock *sk;
+
+ switch (msg) {
+ case NETDEV_UNREGISTER:
+ mutex_lock(&net->xdp.lock);
+ sk_for_each(sk, &net->xdp.list) {
+ struct xdp_sock *xs = xdp_sk(sk);
+
+ mutex_lock(&xs->mutex);
+ if (xs->dev == dev) {
+ sk->sk_err = ENETDOWN;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+
+ xsk_unbind_dev(xs);
+
+ /* Clear device references in umem. */
+ xdp_umem_clear_dev(xs->umem);
+ }
+ mutex_unlock(&xs->mutex);
+ }
+ mutex_unlock(&net->xdp.lock);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
static struct proto xsk_proto = {
.name = "XDP",
.owner = THIS_MODULE,
sock_set_flag(sk, SOCK_RCU_FREE);
xs = xdp_sk(sk);
+ xs->state = XSK_READY;
mutex_init(&xs->mutex);
spin_lock_init(&xs->rx_lock);
spin_lock_init(&xs->tx_completion_lock);
.owner = THIS_MODULE,
};
+static struct notifier_block xsk_netdev_notifier = {
+ .notifier_call = xsk_notifier,
+};
+
static int __net_init xsk_net_init(struct net *net)
{
mutex_init(&net->xdp.lock);
err = register_pernet_subsys(&xsk_net_ops);
if (err)
goto out_sk;
+
+ err = register_netdevice_notifier(&xsk_netdev_notifier);
+ if (err)
+ goto out_pernet;
+
return 0;
+out_pernet:
+ unregister_pernet_subsys(&xsk_net_ops);
out_sk:
sock_unregister(PF_XDP);
out_proto:
/* Order producer and data */
smp_wmb(); /* B, matches C */
- q->prod_tail = q->prod_head,
+ q->prod_tail = q->prod_head;
WRITE_ONCE(q->ring->producer, q->prod_tail);
}
tristate
select XFRM
select CRYPTO
+ select CRYPTO_HASH
+ select CRYPTO_BLKCIPHER
if INET
config XFRM_USER
free_percpu(dev->tstats);
}
-static int xfrmi_create2(struct net_device *dev)
+static int xfrmi_create(struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
struct net *net = dev_net(dev);
return err;
}
-static struct xfrm_if *xfrmi_create(struct net *net, struct xfrm_if_parms *p)
-{
- struct net_device *dev;
- struct xfrm_if *xi;
- char name[IFNAMSIZ];
- int err;
-
- if (p->name[0]) {
- strlcpy(name, p->name, IFNAMSIZ);
- } else {
- err = -EINVAL;
- goto failed;
- }
-
- dev = alloc_netdev(sizeof(*xi), name, NET_NAME_UNKNOWN, xfrmi_dev_setup);
- if (!dev) {
- err = -EAGAIN;
- goto failed;
- }
-
- dev_net_set(dev, net);
-
- xi = netdev_priv(dev);
- xi->p = *p;
- xi->net = net;
- xi->dev = dev;
- xi->phydev = dev_get_by_index(net, p->link);
- if (!xi->phydev) {
- err = -ENODEV;
- goto failed_free;
- }
-
- err = xfrmi_create2(dev);
- if (err < 0)
- goto failed_dev_put;
-
- return xi;
-
-failed_dev_put:
- dev_put(xi->phydev);
-failed_free:
- free_netdev(dev);
-failed:
- return ERR_PTR(err);
-}
-
-static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p,
- int create)
+static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
{
struct xfrm_if __rcu **xip;
struct xfrm_if *xi;
for (xip = &xfrmn->xfrmi[0];
(xi = rtnl_dereference(*xip)) != NULL;
- xip = &xi->next) {
- if (xi->p.if_id == p->if_id) {
- if (create)
- return ERR_PTR(-EEXIST);
-
+ xip = &xi->next)
+ if (xi->p.if_id == p->if_id)
return xi;
- }
- }
- if (!create)
- return ERR_PTR(-ENODEV);
- return xfrmi_create(net, p);
+
+ return NULL;
}
static void xfrmi_dev_uninit(struct net_device *dev)
struct netlink_ext_ack *extack)
{
struct net *net = dev_net(dev);
- struct xfrm_if_parms *p;
+ struct xfrm_if_parms p;
struct xfrm_if *xi;
+ int err;
- xi = netdev_priv(dev);
- p = &xi->p;
-
- xfrmi_netlink_parms(data, p);
+ xfrmi_netlink_parms(data, &p);
if (!tb[IFLA_IFNAME])
return -EINVAL;
- nla_strlcpy(p->name, tb[IFLA_IFNAME], IFNAMSIZ);
+ nla_strlcpy(p.name, tb[IFLA_IFNAME], IFNAMSIZ);
- xi = xfrmi_locate(net, p, 1);
- return PTR_ERR_OR_ZERO(xi);
+ xi = xfrmi_locate(net, &p);
+ if (xi)
+ return -EEXIST;
+
+ xi = netdev_priv(dev);
+ xi->p = p;
+ xi->net = net;
+ xi->dev = dev;
+ xi->phydev = dev_get_by_index(net, p.link);
+ if (!xi->phydev)
+ return -ENODEV;
+
+ err = xfrmi_create(dev);
+ if (err < 0)
+ dev_put(xi->phydev);
+ return err;
}
static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
xfrmi_netlink_parms(data, &xi->p);
- xi = xfrmi_locate(net, &xi->p, 0);
-
- if (IS_ERR_OR_NULL(xi)) {
+ xi = xfrmi_locate(net, &xi->p);
+ if (!xi) {
xi = netdev_priv(dev);
} else {
if (xi->dev != dev)
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
write_seqcount_begin(&xfrm_policy_hash_generation);
- odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
- lockdep_is_held(&net->xfrm.xfrm_policy_lock));
-
odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
lockdep_is_held(&net->xfrm.xfrm_policy_lock));
hlist_for_each_entry_safe(policy, n,
&net->xfrm.policy_inexact[dir],
- bydst_inexact_list)
+ bydst_inexact_list) {
+ hlist_del_rcu(&policy->bydst);
hlist_del_init(&policy->bydst_inexact_list);
+ }
hmask = net->xfrm.policy_bydst[dir].hmask;
odst = net->xfrm.policy_bydst[dir].table;
- for (i = hmask; i >= 0; i--)
- INIT_HLIST_HEAD(odst + i);
+ for (i = hmask; i >= 0; i--) {
+ hlist_for_each_entry_safe(policy, n, odst + i, bydst)
+ hlist_del_rcu(&policy->bydst);
+ }
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
/* dir out => dst = remote, src = local */
net->xfrm.policy_bydst[dir].dbits4 = rbits4;
chain = policy_hash_bysel(net, &policy->selector,
policy->family, dir);
- hlist_del_rcu(&policy->bydst);
-
if (!chain) {
void *p = xfrm_policy_inexact_insert(policy, dir, 0);
err = -EINVAL;
switch (p->family) {
+ case AF_INET:
+ break;
+
+ case AF_INET6:
+#if IS_ENABLED(CONFIG_IPV6)
+ break;
+#else
+ err = -EAFNOSUPPORT;
+ goto out;
+#endif
+
+ default:
+ goto out;
+ }
+
+ switch (p->sel.family) {
+ case AF_UNSPEC:
+ break;
+
case AF_INET:
if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
goto out;
}
memset(&info, 0, sizeof(info));
- ret = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ ret = bpf_obj_get_info_by_fd(dummy_prog_fd, &info, &info_len);
if (ret) {
printf("can't get prog info - %s\n", strerror(errno));
return ret;
cgroup_fd = open(argv[0], O_RDONLY);
if (cgroup_fd < 0) {
- p_err("can't open cgroup %s", argv[1]);
+ p_err("can't open cgroup %s", argv[0]);
goto exit;
}
cgroup_fd = open(argv[0], O_RDONLY);
if (cgroup_fd < 0) {
- p_err("can't open cgroup %s", argv[1]);
+ p_err("can't open cgroup %s", argv[0]);
goto exit;
}
cgroup_fd = open(argv[0], O_RDONLY);
if (cgroup_fd < 0) {
- p_err("can't open cgroup %s", argv[1]);
+ p_err("can't open cgroup %s", argv[0]);
goto exit;
}
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u32 gpl_compatible:1;
+ __u32 :31; /* alignment pad */
__u64 netns_dev;
__u64 netns_ino;
__u32 nr_jited_ksyms;
unsigned char value[0];
} BPF_PACKET_HEADER;
-__attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
+static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
{
void *cursor, *data_end;
struct ip6_srh_t *srh;
return srh;
}
-__attribute__((always_inline))
+static __always_inline
int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
uint32_t old_pad, uint32_t pad_off)
{
return 0;
}
-__attribute__((always_inline))
+static __always_inline
int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
uint32_t *tlv_off, uint32_t *pad_size,
uint32_t *pad_off)
return 0;
}
-__attribute__((always_inline))
+static __always_inline
int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
struct sr6_tlv_t *itlv, uint8_t tlv_size)
{
return update_tlv_pad(skb, new_pad, pad_size, pad_off);
}
-__attribute__((always_inline))
+static __always_inline
int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
uint32_t tlv_off)
{
return update_tlv_pad(skb, new_pad, pad_size, pad_off);
}
-__attribute__((always_inline))
+static __always_inline
int has_egr_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh)
{
int tlv_offset = sizeof(struct ip6_t) + sizeof(struct ip6_srh_t) +
},
.result = ACCEPT,
},
+{
+ "lsh64 by 0 imm",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "rsh64 by 0 imm",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 0),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "arsh64 by 0 imm",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 0),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "lsh64 by 0 reg",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_1, 1),
+ BPF_LD_IMM64(BPF_REG_2, 0),
+ BPF_ALU64_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "rsh64 by 0 reg",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+ BPF_LD_IMM64(BPF_REG_3, 0),
+ BPF_ALU64_REG(BPF_RSH, BPF_REG_1, BPF_REG_3),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "arsh64 by 0 reg",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+ BPF_LD_IMM64(BPF_REG_3, 0),
+ BPF_ALU64_REG(BPF_ARSH, BPF_REG_1, BPF_REG_3),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
{
"invalid 64-bit BPF_END",
.insns = {
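
The tests added above pin down that a 64-bit shift by 0, whether the count comes from an immediate or a register, must leave the value untouched. On 32-bit hosts the JIT emulates such shifts on a lo/hi register pair, where a careless sequence can corrupt the high half for counts of 0 or >= 32. A portable C sketch of the required semantics (illustrative only, not the JIT):

#include <stdint.h>

static void lsh64_pair(uint32_t *lo, uint32_t *hi, unsigned int n)
{
	n &= 63;		/* BPF shift counts are taken mod 64 */
	if (n == 0)
		return;		/* shift by 0 must leave both halves intact */
	if (n < 32) {
		*hi = (*hi << n) | (*lo >> (32 - n));
		*lo <<= n;
	} else {
		*hi = *lo << (n - 32);	/* low half carries into the high half */
		*lo = 0;
	}
}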
CONFIG_NET_SCH_FQ=m
CONFIG_NET_SCH_ETF=m
CONFIG_TEST_BLACKHOLE_DEV=m
+CONFIG_KALLSYMS=y
echo "--------------------"
echo "running psock_tpacket test"
echo "--------------------"
-./in_netns.sh ./psock_tpacket
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- ret=1
+if [ -f /proc/kallsyms ]; then
+ ./in_netns.sh ./psock_tpacket
+ if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ ret=1
+ else
+ echo "[PASS]"
+ fi
else
- echo "[PASS]"
+ echo "[SKIP] CONFIG_KALLSYMS not enabled"
fi
echo "--------------------"
EXPECT_EQ(recv(self->cfd, recv_mem, send_len, MSG_WAITALL), send_len);
}
+TEST_F(tls, poll_wait_split)
+{
+ struct pollfd fd = { 0, 0, 0 };
+ char send_mem[20] = {};
+ char recv_mem[15];
+
+ fd.fd = self->cfd;
+ fd.events = POLLIN;
+ /* Send 20 bytes */
+ EXPECT_EQ(send(self->fd, send_mem, sizeof(send_mem), 0),
+ sizeof(send_mem));
+	/* Poll with infinite timeout */
+ EXPECT_EQ(poll(&fd, 1, -1), 1);
+ EXPECT_EQ(fd.revents & POLLIN, 1);
+ EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), MSG_WAITALL),
+ sizeof(recv_mem));
+
+ /* Now the remaining 5 bytes of record data are in TLS ULP */
+ fd.fd = self->cfd;
+ fd.events = POLLIN;
+ EXPECT_EQ(poll(&fd, 1, -1), 1);
+ EXPECT_EQ(fd.revents & POLLIN, 1);
+ EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0),
+ sizeof(send_mem) - sizeof(recv_mem));
+}
+
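The test added above verifies that poll() still reports POLLIN when the tail of a TLS record has already been decrypted and is waiting in the ULP rather than in the TCP receive queue. A generic consumer-side sketch of the drain loop this behavior enables (recv_all is a hypothetical helper, not part of the selftest):

#include <poll.h>
#include <sys/socket.h>

static ssize_t recv_all(int fd, char *buf, size_t len)
{
	size_t got = 0;

	while (got < len) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		/* must wake even when the data is buffered record leftovers */
		if (poll(&pfd, 1, -1) != 1)
			return -1;
		ssize_t n = recv(fd, buf + got, len - got, 0);
		if (n <= 0)
			return -1;
		got += n;
	}
	return (ssize_t)got;
}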
TEST_F(tls, blocking)
{
size_t data = 100000;
*ring = mmap(0, req.tp_block_size * req.tp_block_nr,
PROT_READ | PROT_WRITE, MAP_SHARED, fdt, 0);
- if (!*ring)
+ if (*ring == MAP_FAILED)
error(1, errno, "mmap");
return fdt;
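
The fix above applies the canonical mmap() error check: on failure mmap() returns MAP_FAILED, i.e. (void *)-1, never NULL, so the old test could not trigger. A sketch of the idiom (map_ring is a hypothetical wrapper):

#include <sys/mman.h>
#include <err.h>

static void *map_ring(size_t len, int fd)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)	/* (void *)-1; a NULL test misses this */
		err(1, "mmap");
	return p;
}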
return $lret
}
+check_hthresh_repeat()
+{
+ local log=$1
+ i=0
+
+ for i in $(seq 1 10);do
+ ip -net ns1 xfrm policy update src e000:0001::0000 dst ff01::0014:0000:0001 dir in tmpl src :: dst :: proto esp mode tunnel priority 100 action allow || break
+ ip -net ns1 xfrm policy set hthresh6 0 28 || break
+
+ ip -net ns1 xfrm policy update src e000:0001::0000 dst ff01::01 dir in tmpl src :: dst :: proto esp mode tunnel priority 100 action allow || break
+ ip -net ns1 xfrm policy set hthresh6 0 28 || break
+ done
+
+	if [ $i -ne 10 ];then
+ echo "FAIL: $log" 1>&2
+ ret=1
+ return 1
+ fi
+
+ echo "PASS: $log"
+ return 0
+}
+
#check for needed privileges
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
ip -net $n xfrm policy set hthresh4 32 32 hthresh6 128 128
sleep $((RANDOM%5))
done
-check_exceptions "exceptions and block policies after hresh change to normal"
+check_exceptions "exceptions and block policies after hthresh change to normal"
+
+check_hthresh_repeat "policies with repeated hthresh change"
for i in 1 2 3 4;do ip netns del ns$i;done
"$TC qdisc del dev $DEV1 clsact"
]
},
+ {
+ "id": "2ff3",
+ "name": "Add flower with max handle and then dump it",
+ "category": [
+ "filter",
+ "flower"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV2 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 parent ffff: handle 0xffffffff flower action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower.*handle 0xffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress"
+ ]
+ },
{
"id": "d052",
"name": "Add 1M filters with the same action",