/* Fixed (non-configurable) rx data from phy */
/**
- * struct iwlagn_scd_bc_tbl - scheduler byte count table
+ * struct iwlagn_scd_bc_tbl_entry - scheduler byte count table entry
* base physical address provided by SCD_DRAM_BASE_ADDR
* For devices up to 22000:
* @tfd_offset:
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
-struct iwlagn_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+struct iwlagn_scd_bc_tbl_entry {
+ __le16 tfd_offset;
} __packed;
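+/* A queue's full byte-count table is TFD_QUEUE_BC_SIZE consecutive entries */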
/**
trans_pcie->txqs.bc_tbl_size =
sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_AX210;
else
- trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+ trans_pcie->txqs.bc_tbl_size =
+ sizeof(struct iwlagn_scd_bc_tbl_entry) * TFD_QUEUE_BC_SIZE;
/*
* For gen2 devices, we use a single allocation for each byte-count
* table, but they're pretty small (1k) so use a DMA pool that we
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
} else {
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+ struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
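+ /* pre-AX210: length is in dwords (bits 0-11), fetch-chunk count in bits 12-13 */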
len = DIV_ROUND_UP(len, 4);
WARN_ON(len > 0xFFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ scd_bc_tbl[idx].tfd_offset = bc_ent;
}
}
return -ENOMEM;
}
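+
+/* size in bytes of a single queue's gen1 byte-count table */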
+#define BC_TABLE_SIZE (sizeof(struct iwlagn_scd_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
+
/*
* iwl_pcie_tx_alloc - allocate TX context
* Allocate all Tx DMA structures and initialize them
if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
return -EINVAL;
- bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
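+ /* gen1 path: BC_TABLE_SIZE bytes of byte-count table per queue */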
+ bc_tbls_size *= BC_TABLE_SIZE;
/*It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
int num_tbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl;
int write_ptr = txq->write_ptr;
int txq_id = txq->id;
u8 sec_ctl = 0;
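+ /* low 12 bits: length, bits 12-15: station index */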
bc_ent = cpu_to_le16(len | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+ scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + write_ptr].tfd_offset = bc_ent;
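+ /* mirror the first TFD_QUEUE_SIZE_BC_DUP entries past TFD_QUEUE_SIZE_MAX so wrap-around reads stay valid */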
if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
bc_ent;
}
int read_ptr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+ struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
int txq_id = txq->id;
u8 sta_id = 0;
__le16 bc_ent;
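+ /* dummy byte count of 1; bits 12-15 keep the station index */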
bc_ent = cpu_to_le16(1 | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+ scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + read_ptr].tfd_offset = bc_ent;
if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
+ scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
bc_ent;
}