wifi: iwlwifi: unify iwlagn_scd_bc_tbl_entry and iwl_gen3_bc_tbl_entry
author Miri Korenblit <miriam.rachel.korenblit@intel.com>
Sun, 11 May 2025 16:53:15 +0000 (19:53 +0300)
committer Miri Korenblit <miriam.rachel.korenblit@intel.com>
Thu, 15 May 2025 06:53:37 +0000 (09:53 +0300)
As those are now the same, unify and adjust the documentation.

Reviewed-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Link: https://patch.msgid.link/20250511195137.b7ddfade8fec.I2bf97252c4bd751077ade204767eed02d815614d@changeid
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c

index ff58a59089fa38d0a695fe3f283e03789da7e549..ee9e41c3179727f7d2ed89a2ffce930dd331b2b3 100644 (file)
@@ -717,30 +717,19 @@ struct iwl_tfh_tfd {
 /* Fixed (non-configurable) rx data from phy */
 
 /**
- * struct iwlagn_scd_bc_tbl_entry - scheduler byte count table entry
+ * struct iwl_bc_tbl_entry - scheduler byte count table entry
  *     base physical address provided by SCD_DRAM_BASE_ADDR
  * For devices up to 22000:
  * @tfd_offset:
  *     For devices up to 22000:
  *              0-12 - tx command byte count
  *             12-16 - station index
- *     For 22000:
+ *     For 22000 and on:
  *              0-12 - tx command byte count
  *             12-13 - number of 64 byte chunks
  *             14-16 - reserved
  */
-struct iwlagn_scd_bc_tbl_entry {
-       __le16 tfd_offset;
-} __packed;
-
-/**
- * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
- * For AX210 and on:
- * @tfd_offset: 0-12 - tx command byte count
- *             12-13 - number of 64 byte chunks
- *             14-16 - reserved
- */
-struct iwl_gen3_bc_tbl_entry {
+struct iwl_bc_tbl_entry {
        __le16 tfd_offset;
 } __packed;
 
index 8e2ad3147510d719d31aa39e1b237c65392902cc..906fee5bf47ef8968c6cd21334d0ad4d81eaa731 100644 (file)
@@ -3785,6 +3785,7 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
 {
        struct iwl_trans_pcie *trans_pcie, **priv;
        struct iwl_trans *trans;
+       unsigned int bc_tbl_n_entries;
        int ret, addr_size;
        u32 bar0;
 
@@ -3833,14 +3834,14 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
        }
 
        if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
-               trans_pcie->txqs.bc_tbl_size =
-                       sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_BZ;
+               bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_BZ;
        else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-               trans_pcie->txqs.bc_tbl_size =
-                       sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_AX210;
+               bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_AX210;
        else
-               trans_pcie->txqs.bc_tbl_size =
-                       sizeof(struct iwlagn_scd_bc_tbl_entry) * TFD_QUEUE_BC_SIZE;
+               bc_tbl_n_entries = TFD_QUEUE_BC_SIZE;
+
+       trans_pcie->txqs.bc_tbl_size =
+               sizeof(struct iwl_bc_tbl_entry) * bc_tbl_n_entries;
        /*
         * For gen2 devices, we use a single allocation for each byte-count
         * table, but they're pretty small (1k) so use a DMA pool that we
index 649fb55373c78212638b31f0df562d24021c4c37..df0545f09da95fdb62c4962dcd7a4da779e6b6b6 100644 (file)
@@ -561,6 +561,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
                                          int num_tbs)
 {
        int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+       struct iwl_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
        u8 filled_tfd_size, num_fetch_chunks;
        u16 len = byte_cnt;
        __le16 bc_ent;
@@ -581,19 +582,15 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
        num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 
        if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
-               struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
-
                WARN_ON(len > 0x3FFF);
                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
-               scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
        } else {
-               struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
-
                len = DIV_ROUND_UP(len, 4);
                WARN_ON(len > 0xFFF);
                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-               scd_bc_tbl[idx].tfd_offset = bc_ent;
        }
+
+       scd_bc_tbl[idx].tfd_offset = bc_ent;
 }
 
 static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd)
index 432ce44c4da4a364df964dae6bbee3dab0e64bf5..d050de23763833a55b7678cbb35344e88904ee5c 100644 (file)
@@ -796,7 +796,7 @@ error:
        return -ENOMEM;
 }
 
-#define BC_TABLE_SIZE  (sizeof(struct iwlagn_scd_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
+#define BC_TABLE_SIZE  (sizeof(struct iwl_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
 
 /*
  * iwl_pcie_tx_alloc - allocate TX context
@@ -2067,7 +2067,7 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
                                             int num_tbs)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl;
+       struct iwl_bc_tbl_entry *scd_bc_tbl;
        int write_ptr = txq->write_ptr;
        int txq_id = txq->id;
        u8 sec_ctl = 0;
@@ -2314,7 +2314,7 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
                                            int read_ptr)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+       struct iwl_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
        int txq_id = txq->id;
        u8 sta_id = 0;
        __le16 bc_ent;