wifi: iwlwifi: use bc entries instead of bc table also for pre-ax210
authorMiri Korenblit <miriam.rachel.korenblit@intel.com>
Sun, 11 May 2025 16:53:14 +0000 (19:53 +0300)
committerMiri Korenblit <miriam.rachel.korenblit@intel.com>
Thu, 15 May 2025 06:53:37 +0000 (09:53 +0300)
iwlagn_scd_bc_tbl is used for pre-ax210 devices,
and iwl_gen3_bc_tbl_entry is used for ax210 and on. But there is no
difference between the 22000 version and the AX210+ one.

In order to unify the two, as a first step make iwlagn_scd_bc_tbl an entry
as well, and adjust the code. In a later patch both structures will be
unified.

Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Link: https://patch.msgid.link/20250511195137.645cd82ebf48.Iaa7e88179372d60ef31157e379737b5babe54012@changeid
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c

index df4bb499446a9e166d436d2e0409df58db287ef5..ff58a59089fa38d0a695fe3f283e03789da7e549 100644 (file)
@@ -717,7 +717,7 @@ struct iwl_tfh_tfd {
 /* Fixed (non-configurable) rx data from phy */
 
 /**
- * struct iwlagn_scd_bc_tbl - scheduler byte count table
+ * struct iwlagn_scd_bc_tbl_entry - scheduler byte count table entry
  *     base physical address provided by SCD_DRAM_BASE_ADDR
  * For devices up to 22000:
  * @tfd_offset:
@@ -729,8 +729,8 @@ struct iwl_tfh_tfd {
  *             12-13 - number of 64 byte chunks
  *             14-16 - reserved
  */
-struct iwlagn_scd_bc_tbl {
-       __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+struct iwlagn_scd_bc_tbl_entry {
+       __le16 tfd_offset;
 } __packed;
 
 /**
index 694e1ed1eae01606e59be3e1559f4144172673a5..8e2ad3147510d719d31aa39e1b237c65392902cc 100644 (file)
@@ -3839,7 +3839,8 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
                trans_pcie->txqs.bc_tbl_size =
                        sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_AX210;
        else
-               trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+               trans_pcie->txqs.bc_tbl_size =
+                       sizeof(struct iwlagn_scd_bc_tbl_entry) * TFD_QUEUE_BC_SIZE;
        /*
         * For gen2 devices, we use a single allocation for each byte-count
         * table, but they're pretty small (1k) so use a DMA pool that we
index 53a02b45de5f0cf7a4d814d3b3419251e82a5fad..649fb55373c78212638b31f0df562d24021c4c37 100644 (file)
@@ -587,12 +587,12 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
                scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
        } else {
-               struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+               struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
 
                len = DIV_ROUND_UP(len, 4);
                WARN_ON(len > 0xFFF);
                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-               scd_bc_tbl->tfd_offset[idx] = bc_ent;
+               scd_bc_tbl[idx].tfd_offset = bc_ent;
        }
 }
 
index d5ba4f3fd223b83ffe9e0cff77df6719989abf83..432ce44c4da4a364df964dae6bbee3dab0e64bf5 100644 (file)
@@ -796,6 +796,8 @@ error:
        return -ENOMEM;
 }
 
+#define BC_TABLE_SIZE  (sizeof(struct iwlagn_scd_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
+
 /*
  * iwl_pcie_tx_alloc - allocate TX context
  * Allocate all Tx DMA structures and initialize them
@@ -810,7 +812,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
        if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
                return -EINVAL;
 
-       bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
+       bc_tbls_size *= BC_TABLE_SIZE;
 
        /*It is not allowed to alloc twice, so warn when this happens.
         * We cannot rely on the previous allocation, so free and fail */
@@ -2065,7 +2067,7 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
                                             int num_tbs)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+       struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl;
        int write_ptr = txq->write_ptr;
        int txq_id = txq->id;
        u8 sec_ctl = 0;
@@ -2099,10 +2101,10 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
 
        bc_ent = cpu_to_le16(len | (sta_id << 12));
 
-       scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+       scd_bc_tbl[txq_id * BC_TABLE_SIZE + write_ptr].tfd_offset = bc_ent;
 
        if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
-               scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+               scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
                        bc_ent;
 }
 
@@ -2312,7 +2314,7 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
                                            int read_ptr)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+       struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
        int txq_id = txq->id;
        u8 sta_id = 0;
        __le16 bc_ent;
@@ -2326,10 +2328,10 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
 
        bc_ent = cpu_to_le16(1 | (sta_id << 12));
 
-       scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+       scd_bc_tbl[txq_id * BC_TABLE_SIZE + read_ptr].tfd_offset = bc_ent;
 
        if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
-               scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
+               scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
                        bc_ent;
 }