iwlwifi-objs += iwl-utils.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
+iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-v2.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
CFLAGS_pcie/drv.o += -Wno-override-init
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/*
- * Copyright (C) 2018, 2020-2025 Intel Corporation
- */
-#ifndef __iwl_context_info_file_gen3_h__
-#define __iwl_context_info_file_gen3_h__
-
-#include "iwl-context-info.h"
-
-#define CSR_CTXT_INFO_BOOT_CTRL 0x0
-#define CSR_CTXT_INFO_ADDR 0x118
-#define CSR_IML_DATA_ADDR 0x120
-#define CSR_IML_SIZE_ADDR 0x128
-#define CSR_IML_RESP_ADDR 0x12c
-
-#define UNFRAGMENTED_PNVM_PAYLOADS_NUMBER 2
-
-/* Set bit for enabling automatic function boot */
-#define CSR_AUTO_FUNC_BOOT_ENA BIT(1)
-/* Set bit for initiating function boot */
-#define CSR_AUTO_FUNC_INIT BIT(7)
-
-/**
- * enum iwl_prph_scratch_mtr_format - tfd size configuration
- * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd
- * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd
- * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd
- * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd
- */
-enum iwl_prph_scratch_mtr_format {
- IWL_PRPH_MTR_FORMAT_16B = 0x0,
- IWL_PRPH_MTR_FORMAT_32B = 0x40000,
- IWL_PRPH_MTR_FORMAT_64B = 0x80000,
- IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
-};
-
-/**
- * enum iwl_prph_scratch_flags - PRPH scratch control flags
- * @IWL_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug
- * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
- * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
- * in hwm config.
- * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
- * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
- * multicomm.
- * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
- * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
- * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for
- * completion descriptor, 1 for responses (legacy)
- * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
- * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
- * 3: 256 bit.
- * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK: RB size full information, ignored
- * by older firmware versions, so set IWL_PRPH_SCRATCH_RB_SIZE_4K
- * appropriately; use the below values for this.
- * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size
- * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size
- * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
- * @IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE: Indicate fw to set SCU_FORCE_ACTIVE
- * upon reset.
- * @IWL_PRPH_SCRATCH_TOP_RESET: request TOP reset
- */
-enum iwl_prph_scratch_flags {
- IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1),
- IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
- IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
- IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
- IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10),
- IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11),
- IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
- IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
- IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
- IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK = 0xf << 20,
- IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20,
- IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20,
- IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20,
- IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29),
- IWL_PRPH_SCRATCH_TOP_RESET = BIT(30),
-};
-
-/**
- * enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags
- * @IWL_PRPH_SCRATCH_EXT_EXT_FSEQ: external FSEQ image provided
- * @IWL_PRPH_SCRATCH_EXT_URM_FW: switch to URM mode based on fw setting
- * @IWL_PRPH_SCRATCH_EXT_URM_PERM: switch to permanent URM mode
- * @IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID: use external 32 KHz clock
- */
-enum iwl_prph_scratch_ext_flags {
- IWL_PRPH_SCRATCH_EXT_EXT_FSEQ = BIT(0),
- IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4),
- IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5),
- IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID = BIT(8),
-};
-
-/**
- * struct iwl_prph_scratch_version - version structure
- * @mac_id: SKU and revision id
- * @version: prph scratch information version id
- * @size: the size of the context information in DWs
- * @reserved: reserved
- */
-struct iwl_prph_scratch_version {
- __le16 mac_id;
- __le16 version;
- __le16 size;
- __le16 reserved;
-} __packed; /* PERIPH_SCRATCH_VERSION_S */
-
-/**
- * struct iwl_prph_scratch_control - control structure
- * @control_flags: context information flags see &enum iwl_prph_scratch_flags
- * @control_flags_ext: context information for extended flags,
- * see &enum iwl_prph_scratch_ext_flags
- */
-struct iwl_prph_scratch_control {
- __le32 control_flags;
- __le32 control_flags_ext;
-} __packed; /* PERIPH_SCRATCH_CONTROL_S */
-
-/**
- * struct iwl_prph_scratch_pnvm_cfg - PNVM scratch
- * @pnvm_base_addr: PNVM start address
- * @pnvm_size: the size of the PNVM image in bytes
- * @reserved: reserved
- */
-struct iwl_prph_scratch_pnvm_cfg {
- __le64 pnvm_base_addr;
- __le32 pnvm_size;
- __le32 reserved;
-} __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */
-
-/**
- * struct iwl_prph_scrath_mem_desc_addr_array
- * @mem_descs: array of dram addresses.
- * Each address is the beggining of a pnvm payload.
- */
-struct iwl_prph_scrath_mem_desc_addr_array {
- __le64 mem_descs[IPC_DRAM_MAP_ENTRY_NUM_MAX];
-} __packed; /* PERIPH_SCRATCH_MEM_DESC_ADDR_ARRAY_S_VER_1 */
-
-/**
- * struct iwl_prph_scratch_hwm_cfg - hwm config
- * @hwm_base_addr: hwm start address
- * @hwm_size: hwm size in DWs
- * @debug_token_config: debug preset
- */
-struct iwl_prph_scratch_hwm_cfg {
- __le64 hwm_base_addr;
- __le32 hwm_size;
- __le32 debug_token_config;
-} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
-
-/**
- * struct iwl_prph_scratch_rbd_cfg - RBDs configuration
- * @free_rbd_addr: default queue free RB CB base address
- * @reserved: reserved
- */
-struct iwl_prph_scratch_rbd_cfg {
- __le64 free_rbd_addr;
- __le32 reserved;
-} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
-
-/**
- * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
- * @base_addr: reduce power table address
- * @size: the size of the entire power table image
- * @reserved: (reserved)
- */
-struct iwl_prph_scratch_uefi_cfg {
- __le64 base_addr;
- __le32 size;
- __le32 reserved;
-} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
-
-/**
- * struct iwl_prph_scratch_step_cfg - prph scratch step configuration
- * @mbx_addr_0: [0:7] revision,
- * [8:15] cnvi_to_cnvr length,
- * [16:23] cnvr_to_cnvi channel length,
- * [24:31] radio1 reserved
- * @mbx_addr_1: [0:7] radio2 reserved
- */
-
-struct iwl_prph_scratch_step_cfg {
- __le32 mbx_addr_0;
- __le32 mbx_addr_1;
-} __packed;
-
-/**
- * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
- * @version: version information of context info and HW
- * @control: control flags of FH configurations
- * @pnvm_cfg: ror configuration
- * @hwm_cfg: hwm configuration
- * @rbd_cfg: default RX queue configuration
- * @reduce_power_cfg: UEFI power reduction table
- * @step_cfg: step configuration
- */
-struct iwl_prph_scratch_ctrl_cfg {
- struct iwl_prph_scratch_version version;
- struct iwl_prph_scratch_control control;
- struct iwl_prph_scratch_pnvm_cfg pnvm_cfg;
- struct iwl_prph_scratch_hwm_cfg hwm_cfg;
- struct iwl_prph_scratch_rbd_cfg rbd_cfg;
- struct iwl_prph_scratch_uefi_cfg reduce_power_cfg;
- struct iwl_prph_scratch_step_cfg step_cfg;
-} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
-
-#define IWL_NUM_DRAM_FSEQ_ENTRIES 8
-
-/**
- * struct iwl_context_info_dram_fseq - images DRAM map (with fseq)
- * each entry in the map represents a DRAM chunk of up to 32 KB
- * @common: UMAC/LMAC/virtual images
- * @fseq_img: FSEQ image DRAM map
- */
-struct iwl_context_info_dram_fseq {
- struct iwl_context_info_dram_nonfseq common;
- __le64 fseq_img[IWL_NUM_DRAM_FSEQ_ENTRIES];
-} __packed; /* PERIPH_SCRATCH_DRAM_MAP_S */
-
-/**
- * struct iwl_prph_scratch - peripheral scratch mapping
- * @ctrl_cfg: control and configuration of prph scratch
- * @dram: firmware images addresses in DRAM
- * @fseq_override: FSEQ override parameters
- * @step_analog_params: STEP analog calibration values
- * @reserved: reserved
- */
-struct iwl_prph_scratch {
- struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
- __le32 fseq_override;
- __le32 step_analog_params;
- __le32 reserved[8];
- struct iwl_context_info_dram_fseq dram;
-} __packed; /* PERIPH_SCRATCH_S */
-
-/**
- * struct iwl_prph_info - peripheral information
- * @boot_stage_mirror: reflects the value in the Boot Stage CSR register
- * @ipc_status_mirror: reflects the value in the IPC Status CSR register
- * @sleep_notif: indicates the peripheral sleep status
- * @reserved: reserved
- */
-struct iwl_prph_info {
- __le32 boot_stage_mirror;
- __le32 ipc_status_mirror;
- __le32 sleep_notif;
- __le32 reserved;
-} __packed; /* PERIPH_INFO_S */
-
-/**
- * struct iwl_context_info_gen3 - device INIT configuration
- * @version: version of the context information
- * @size: size of context information in DWs
- * @config: context in which the peripheral would execute - a subset of
- * capability csr register published by the peripheral
- * @prph_info_base_addr: the peripheral information structure start address
- * @cr_head_idx_arr_base_addr: the completion ring head index array
- * start address
- * @tr_tail_idx_arr_base_addr: the transfer ring tail index array
- * start address
- * @cr_tail_idx_arr_base_addr: the completion ring tail index array
- * start address
- * @tr_head_idx_arr_base_addr: the transfer ring head index array
- * start address
- * @cr_idx_arr_size: number of entries in the completion ring index array
- * @tr_idx_arr_size: number of entries in the transfer ring index array
- * @mtr_base_addr: the message transfer ring start address
- * @mcr_base_addr: the message completion ring start address
- * @mtr_size: number of entries which the message transfer ring can hold
- * @mcr_size: number of entries which the message completion ring can hold
- * @mtr_doorbell_vec: the doorbell vector associated with the message
- * transfer ring
- * @mcr_doorbell_vec: the doorbell vector associated with the message
- * completion ring
- * @mtr_msi_vec: the MSI which shall be generated by the peripheral after
- * completing a transfer descriptor in the message transfer ring
- * @mcr_msi_vec: the MSI which shall be generated by the peripheral after
- * completing a completion descriptor in the message completion ring
- * @mtr_opt_header_size: the size of the optional header in the transfer
- * descriptor associated with the message transfer ring in DWs
- * @mtr_opt_footer_size: the size of the optional footer in the transfer
- * descriptor associated with the message transfer ring in DWs
- * @mcr_opt_header_size: the size of the optional header in the completion
- * descriptor associated with the message completion ring in DWs
- * @mcr_opt_footer_size: the size of the optional footer in the completion
- * descriptor associated with the message completion ring in DWs
- * @msg_rings_ctrl_flags: message rings control flags
- * @prph_info_msi_vec: the MSI which shall be generated by the peripheral
- * after updating the Peripheral Information structure
- * @prph_scratch_base_addr: the peripheral scratch structure start address
- * @prph_scratch_size: the size of the peripheral scratch structure in DWs
- * @reserved: reserved
- */
-struct iwl_context_info_gen3 {
- __le16 version;
- __le16 size;
- __le32 config;
- __le64 prph_info_base_addr;
- __le64 cr_head_idx_arr_base_addr;
- __le64 tr_tail_idx_arr_base_addr;
- __le64 cr_tail_idx_arr_base_addr;
- __le64 tr_head_idx_arr_base_addr;
- __le16 cr_idx_arr_size;
- __le16 tr_idx_arr_size;
- __le64 mtr_base_addr;
- __le64 mcr_base_addr;
- __le16 mtr_size;
- __le16 mcr_size;
- __le16 mtr_doorbell_vec;
- __le16 mcr_doorbell_vec;
- __le16 mtr_msi_vec;
- __le16 mcr_msi_vec;
- u8 mtr_opt_header_size;
- u8 mtr_opt_footer_size;
- u8 mcr_opt_header_size;
- u8 mcr_opt_footer_size;
- __le16 msg_rings_ctrl_flags;
- __le16 prph_info_msi_vec;
- __le64 prph_scratch_base_addr;
- __le32 prph_scratch_size;
- __le32 reserved;
-} __packed; /* IPC_CONTEXT_INFO_S */
-
-int iwl_pcie_ctxt_info_gen3_alloc(struct iwl_trans *trans,
- const struct iwl_fw *fw,
- const struct fw_img *img);
-void iwl_pcie_ctxt_info_gen3_kick(struct iwl_trans *trans);
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive);
-
-int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_payloads,
- const struct iwl_ucode_capabilities *capa);
-void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
-int
-iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa);
-void
-iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
-#endif /* __iwl_context_info_file_gen3_h__ */
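One layout detail worth keeping in mind while reading the header (unchanged by the rename below): the DRAM map sits at the end of struct iwl_prph_scratch so the size reported to the device can stop after the common image entries when no external FSEQ image is in use. A minimal sketch of that sizing rule, not part of the patch; it only restates what the allocation code below does with offsetofend():

/* sketch: how the advertised prph scratch size follows the FSEQ flag */
static u32 example_prph_scratch_size(const struct iwl_prph_scratch *scratch,
				     u32 control_flags_ext)
{
	/* an external FSEQ image forces the device to see the full
	 * layout, including the trailing fseq_img[] DRAM map entries
	 */
	if (control_flags_ext & IWL_PRPH_SCRATCH_EXT_EXT_FSEQ)
		return sizeof(*scratch);

	/* otherwise stop right after the common UMAC/LMAC DRAM map */
	return offsetofend(typeof(*scratch), dram.common);
}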
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2018, 2020-2025 Intel Corporation
+ */
+#ifndef __iwl_context_info_file_v2_h__
+#define __iwl_context_info_file_v2_h__
+
+#include "iwl-context-info.h"
+
+#define CSR_CTXT_INFO_BOOT_CTRL 0x0
+#define CSR_CTXT_INFO_ADDR 0x118
+#define CSR_IML_DATA_ADDR 0x120
+#define CSR_IML_SIZE_ADDR 0x128
+#define CSR_IML_RESP_ADDR 0x12c
+
+#define UNFRAGMENTED_PNVM_PAYLOADS_NUMBER 2
+
+/* Set bit for enabling automatic function boot */
+#define CSR_AUTO_FUNC_BOOT_ENA BIT(1)
+/* Set bit for initiating function boot */
+#define CSR_AUTO_FUNC_INIT BIT(7)
+
+/**
+ * enum iwl_prph_scratch_mtr_format - tfd size configuration
+ * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd
+ */
+enum iwl_prph_scratch_mtr_format {
+ IWL_PRPH_MTR_FORMAT_16B = 0x0,
+ IWL_PRPH_MTR_FORMAT_32B = 0x40000,
+ IWL_PRPH_MTR_FORMAT_64B = 0x80000,
+ IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
+};
+
+/**
+ * enum iwl_prph_scratch_flags - PRPH scratch control flags
+ * @IWL_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug
+ * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
+ * in hwm config.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
+ * multicomm.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
+ * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for
+ * completion descriptor, 1 for responses (legacy)
+ * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
+ * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
+ * 3: 256 bit.
+ * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK: RB size full information, ignored
+ * by older firmware versions, so set IWL_PRPH_SCRATCH_RB_SIZE_4K
+ * appropriately; use the below values for this.
+ * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size
+ * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size
+ * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
+ * @IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE: Indicate fw to set SCU_FORCE_ACTIVE
+ * upon reset.
+ * @IWL_PRPH_SCRATCH_TOP_RESET: request TOP reset
+ */
+enum iwl_prph_scratch_flags {
+ IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1),
+ IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
+ IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
+ IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
+ IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10),
+ IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11),
+ IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
+ IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
+ IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
+ IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK = 0xf << 20,
+ IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20,
+ IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20,
+ IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20,
+ IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29),
+ IWL_PRPH_SCRATCH_TOP_RESET = BIT(30),
+};
+
+/**
+ * enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags
+ * @IWL_PRPH_SCRATCH_EXT_EXT_FSEQ: external FSEQ image provided
+ * @IWL_PRPH_SCRATCH_EXT_URM_FW: switch to URM mode based on fw setting
+ * @IWL_PRPH_SCRATCH_EXT_URM_PERM: switch to permanent URM mode
+ * @IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID: use external 32 KHz clock
+ */
+enum iwl_prph_scratch_ext_flags {
+ IWL_PRPH_SCRATCH_EXT_EXT_FSEQ = BIT(0),
+ IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4),
+ IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5),
+ IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID = BIT(8),
+};
+
+/**
+ * struct iwl_prph_scratch_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: prph scratch information version id
+ * @size: the size of the context information in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_version {
+ __le16 mac_id;
+ __le16 version;
+ __le16 size;
+ __le16 reserved;
+} __packed; /* PERIPH_SCRATCH_VERSION_S */
+
+/**
+ * struct iwl_prph_scratch_control - control structure
+ * @control_flags: context information flags see &enum iwl_prph_scratch_flags
+ * @control_flags_ext: context information for extended flags,
+ * see &enum iwl_prph_scratch_ext_flags
+ */
+struct iwl_prph_scratch_control {
+ __le32 control_flags;
+ __le32 control_flags_ext;
+} __packed; /* PERIPH_SCRATCH_CONTROL_S */
+
+/**
+ * struct iwl_prph_scratch_pnvm_cfg - PNVM scratch
+ * @pnvm_base_addr: PNVM start address
+ * @pnvm_size: the size of the PNVM image in bytes
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_pnvm_cfg {
+ __le64 pnvm_base_addr;
+ __le32 pnvm_size;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */
+
+/**
+ * struct iwl_prph_scrath_mem_desc_addr_array - DRAM map descriptor address array
+ * @mem_descs: array of DRAM addresses.
+ * Each address is the beginning of a PNVM payload.
+ */
+struct iwl_prph_scrath_mem_desc_addr_array {
+ __le64 mem_descs[IPC_DRAM_MAP_ENTRY_NUM_MAX];
+} __packed; /* PERIPH_SCRATCH_MEM_DESC_ADDR_ARRAY_S_VER_1 */
+
+/**
+ * struct iwl_prph_scratch_hwm_cfg - hwm config
+ * @hwm_base_addr: hwm start address
+ * @hwm_size: hwm size in DWs
+ * @debug_token_config: debug preset
+ */
+struct iwl_prph_scratch_hwm_cfg {
+ __le64 hwm_base_addr;
+ __le32 hwm_size;
+ __le32 debug_token_config;
+} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
+
+/**
+ * struct iwl_prph_scratch_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_rbd_cfg {
+ __le64 free_rbd_addr;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
+
+/**
+ * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
+ * @base_addr: reduce power table address
+ * @size: the size of the entire power table image
+ * @reserved: (reserved)
+ */
+struct iwl_prph_scratch_uefi_cfg {
+ __le64 base_addr;
+ __le32 size;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
+
+/**
+ * struct iwl_prph_scratch_step_cfg - prph scratch step configuration
+ * @mbx_addr_0: [0:7] revision,
+ * [8:15] cnvi_to_cnvr length,
+ * [16:23] cnvr_to_cnvi channel length,
+ * [24:31] radio1 reserved
+ * @mbx_addr_1: [0:7] radio2 reserved
+ */
+struct iwl_prph_scratch_step_cfg {
+ __le32 mbx_addr_0;
+ __le32 mbx_addr_1;
+} __packed;
+
+/**
+ * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @pnvm_cfg: PNVM configuration
+ * @hwm_cfg: hwm configuration
+ * @rbd_cfg: default RX queue configuration
+ * @reduce_power_cfg: UEFI power reduction table
+ * @step_cfg: step configuration
+ */
+struct iwl_prph_scratch_ctrl_cfg {
+ struct iwl_prph_scratch_version version;
+ struct iwl_prph_scratch_control control;
+ struct iwl_prph_scratch_pnvm_cfg pnvm_cfg;
+ struct iwl_prph_scratch_hwm_cfg hwm_cfg;
+ struct iwl_prph_scratch_rbd_cfg rbd_cfg;
+ struct iwl_prph_scratch_uefi_cfg reduce_power_cfg;
+ struct iwl_prph_scratch_step_cfg step_cfg;
+} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
+
+#define IWL_NUM_DRAM_FSEQ_ENTRIES 8
+
+/**
+ * struct iwl_context_info_dram_fseq - images DRAM map (with fseq)
+ * each entry in the map represents a DRAM chunk of up to 32 KB
+ * @common: UMAC/LMAC/virtual images
+ * @fseq_img: FSEQ image DRAM map
+ */
+struct iwl_context_info_dram_fseq {
+ struct iwl_context_info_dram_nonfseq common;
+ __le64 fseq_img[IWL_NUM_DRAM_FSEQ_ENTRIES];
+} __packed; /* PERIPH_SCRATCH_DRAM_MAP_S */
+
+/**
+ * struct iwl_prph_scratch - peripheral scratch mapping
+ * @ctrl_cfg: control and configuration of prph scratch
+ * @fseq_override: FSEQ override parameters
+ * @step_analog_params: STEP analog calibration values
+ * @reserved: reserved
+ * @dram: firmware image addresses in DRAM
+ */
+struct iwl_prph_scratch {
+ struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
+ __le32 fseq_override;
+ __le32 step_analog_params;
+ __le32 reserved[8];
+ struct iwl_context_info_dram_fseq dram;
+} __packed; /* PERIPH_SCRATCH_S */
+
+/**
+ * struct iwl_prph_info - peripheral information
+ * @boot_stage_mirror: reflects the value in the Boot Stage CSR register
+ * @ipc_status_mirror: reflects the value in the IPC Status CSR register
+ * @sleep_notif: indicates the peripheral sleep status
+ * @reserved: reserved
+ */
+struct iwl_prph_info {
+ __le32 boot_stage_mirror;
+ __le32 ipc_status_mirror;
+ __le32 sleep_notif;
+ __le32 reserved;
+} __packed; /* PERIPH_INFO_S */
+
+/**
+ * struct iwl_context_info_v2 - device INIT configuration
+ * @version: version of the context information
+ * @size: size of context information in DWs
+ * @config: context in which the peripheral would execute - a subset of
+ * the capability CSR register published by the peripheral
+ * @prph_info_base_addr: the peripheral information structure start address
+ * @cr_head_idx_arr_base_addr: the completion ring head index array
+ * start address
+ * @tr_tail_idx_arr_base_addr: the transfer ring tail index array
+ * start address
+ * @cr_tail_idx_arr_base_addr: the completion ring tail index array
+ * start address
+ * @tr_head_idx_arr_base_addr: the transfer ring head index array
+ * start address
+ * @cr_idx_arr_size: number of entries in the completion ring index array
+ * @tr_idx_arr_size: number of entries in the transfer ring index array
+ * @mtr_base_addr: the message transfer ring start address
+ * @mcr_base_addr: the message completion ring start address
+ * @mtr_size: number of entries which the message transfer ring can hold
+ * @mcr_size: number of entries which the message completion ring can hold
+ * @mtr_doorbell_vec: the doorbell vector associated with the message
+ * transfer ring
+ * @mcr_doorbell_vec: the doorbell vector associated with the message
+ * completion ring
+ * @mtr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a transfer descriptor in the message transfer ring
+ * @mcr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a completion descriptor in the message completion ring
+ * @mtr_opt_header_size: the size of the optional header in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mtr_opt_footer_size: the size of the optional footer in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mcr_opt_header_size: the size of the optional header in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @mcr_opt_footer_size: the size of the optional footer in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @msg_rings_ctrl_flags: message rings control flags
+ * @prph_info_msi_vec: the MSI which shall be generated by the peripheral
+ * after updating the Peripheral Information structure
+ * @prph_scratch_base_addr: the peripheral scratch structure start address
+ * @prph_scratch_size: the size of the peripheral scratch structure in DWs
+ * @reserved: reserved
+ */
+struct iwl_context_info_v2 {
+ __le16 version;
+ __le16 size;
+ __le32 config;
+ __le64 prph_info_base_addr;
+ __le64 cr_head_idx_arr_base_addr;
+ __le64 tr_tail_idx_arr_base_addr;
+ __le64 cr_tail_idx_arr_base_addr;
+ __le64 tr_head_idx_arr_base_addr;
+ __le16 cr_idx_arr_size;
+ __le16 tr_idx_arr_size;
+ __le64 mtr_base_addr;
+ __le64 mcr_base_addr;
+ __le16 mtr_size;
+ __le16 mcr_size;
+ __le16 mtr_doorbell_vec;
+ __le16 mcr_doorbell_vec;
+ __le16 mtr_msi_vec;
+ __le16 mcr_msi_vec;
+ u8 mtr_opt_header_size;
+ u8 mtr_opt_footer_size;
+ u8 mcr_opt_header_size;
+ u8 mcr_opt_footer_size;
+ __le16 msg_rings_ctrl_flags;
+ __le16 prph_info_msi_vec;
+ __le64 prph_scratch_base_addr;
+ __le32 prph_scratch_size;
+ __le32 reserved;
+} __packed; /* IPC_CONTEXT_INFO_S */
+
+int iwl_pcie_ctxt_info_v2_alloc(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct fw_img *img);
+void iwl_pcie_ctxt_info_v2_kick(struct iwl_trans *trans);
+void iwl_pcie_ctxt_info_v2_free(struct iwl_trans *trans, bool alive);
+
+int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_payloads,
+ const struct iwl_ucode_capabilities *capa);
+void iwl_trans_pcie_ctx_info_v2_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
+int iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans,
+						 const struct iwl_pnvm_image *payloads,
+						 const struct iwl_ucode_capabilities *capa);
+void iwl_trans_pcie_ctx_info_v2_set_reduce_power(struct iwl_trans *trans,
+						 const struct iwl_ucode_capabilities *capa);
+#endif /* __iwl_context_info_file_v2_h__ */
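As a reading aid for the RB-size flags above: older firmware only understands IWL_PRPH_SCRATCH_RB_SIZE_4K, so for larger buffers the driver sets that legacy bit together with the extended-size field (iwl_pcie_ctxt_info_v2_alloc() below does exactly this, and encodes 12K buffers with the 16K value). A minimal sketch, with rb_size_kb as a hypothetical input rather than a driver parameter:

/* sketch only; mirrors the switch on trans->conf.rx_buf_size below */
static u32 example_rb_size_flags(unsigned int rb_size_kb)
{
	u32 flags = 0;

	if (rb_size_kb >= 4)	/* legacy bit, read by older firmware */
		flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;

	switch (rb_size_kb) {	/* exact size, read by newer firmware */
	case 8:
		flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
		break;
	case 12:
	case 16:
		flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K;
		break;
	}

	return flags;
}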
#include <linux/dmapool.h>
#include "fw/api/commands.h"
#include "pcie/internal.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
struct iwl_trans_dev_restart_data {
struct list_head list;
const struct iwl_pnvm_image *pnvm_data,
const struct iwl_ucode_capabilities *capa)
{
- return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
+ return iwl_trans_pcie_ctx_info_v2_load_pnvm(trans, pnvm_data, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);
void iwl_trans_set_pnvm(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa)
{
- iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
+ iwl_trans_pcie_ctx_info_v2_set_pnvm(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);
const struct iwl_pnvm_image *payloads,
const struct iwl_ucode_capabilities *capa)
{
- return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
+ return iwl_trans_pcie_ctx_info_v2_load_reduce_power(trans, payloads,
capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
void iwl_trans_set_reduce_power(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa)
{
- iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
+ iwl_trans_pcie_ctx_info_v2_set_reduce_power(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);
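The exported wrappers above hide the PCIe specifics from the opmode: payloads are copied into DRAM once (load), and the prph scratch is pointed at them before each firmware start (set). A minimal calling-sequence sketch, assuming pnvm_image and capa come from the firmware parsing code; this is not a call site the patch adds:

/* hypothetical call site, error handling trimmed */
static int example_prepare_pnvm(struct iwl_trans *trans,
				const struct iwl_pnvm_image *pnvm_image,
				const struct iwl_ucode_capabilities *capa)
{
	int ret;

	/* allocates and fills DRAM only on the first call (pnvm_loaded) */
	ret = iwl_trans_load_pnvm(trans, pnvm_image, capa);
	if (ret)
		return ret;

	/* writes the DRAM address/size into the prph scratch for this boot */
	iwl_trans_set_pnvm(trans, capa);
	return 0;
}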
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2018-2025 Intel Corporation
- */
-#include <linux/dmi.h>
-#include "iwl-trans.h"
-#include "iwl-fh.h"
-#include "iwl-context-info-gen3.h"
-#include "internal.h"
-#include "iwl-prph.h"
-
-static const struct dmi_system_id dmi_force_scu_active_approved_list[] = {
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- },
- },
- /* keep last */
- {}
-};
-
-static bool iwl_is_force_scu_active_approved(void)
-{
- return !!dmi_check_system(dmi_force_scu_active_approved_list);
-}
-
-static void
-iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
- struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
- u32 *control_flags)
-{
- enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
- struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
- u32 dbg_flags = 0;
-
- if (!iwl_trans_dbg_ini_valid(trans)) {
- struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
-
- iwl_pcie_alloc_fw_monitor(trans, 0);
-
- if (fw_mon->size) {
- dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
-
- IWL_DEBUG_FW(trans,
- "WRT: Applying DRAM buffer destination\n");
-
- dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
- dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
- }
-
- goto out;
- }
-
- fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
-
- switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
- case IWL_FW_INI_LOCATION_SRAM_PATH:
- dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
- IWL_DEBUG_FW(trans,
- "WRT: Applying SMEM buffer destination\n");
- break;
-
- case IWL_FW_INI_LOCATION_NPK_PATH:
- dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
- IWL_DEBUG_FW(trans,
- "WRT: Applying NPK buffer destination\n");
- break;
-
- case IWL_FW_INI_LOCATION_DRAM_PATH:
- if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
- struct iwl_dram_data *frag =
- &trans->dbg.fw_mon_ini[alloc_id].frags[0];
- dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
- dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
- dbg_cfg->hwm_size = cpu_to_le32(frag->size);
- dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset);
- IWL_DEBUG_FW(trans,
- "WRT: Applying DRAM destination (debug_token_config=%u)\n",
- dbg_cfg->debug_token_config);
- IWL_DEBUG_FW(trans,
- "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
- alloc_id,
- trans->dbg.fw_mon_ini[alloc_id].num_frags);
- }
- break;
- default:
- IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
- le32_to_cpu(fw_mon_cfg->buf_location));
- }
-out:
- if (dbg_flags)
- *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
-}
-
-int iwl_pcie_ctxt_info_gen3_alloc(struct iwl_trans *trans,
- const struct iwl_fw *fw,
- const struct fw_img *img)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_context_info_gen3 *ctxt_info_gen3;
- struct iwl_prph_scratch *prph_scratch;
- struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
- struct iwl_prph_info *prph_info;
- u32 control_flags = 0;
- u32 control_flags_ext = 0;
- int ret;
- int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->mac_cfg->base->min_txq_size);
-
- switch (trans->conf.rx_buf_size) {
- case IWL_AMSDU_DEF:
- return -EINVAL;
- case IWL_AMSDU_2K:
- break;
- case IWL_AMSDU_4K:
- control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
- break;
- case IWL_AMSDU_8K:
- control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
- /* if firmware supports the ext size, tell it */
- control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
- break;
- case IWL_AMSDU_12K:
- control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
- /* if firmware supports the ext size, tell it */
- control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K;
- break;
- }
-
- if (trans->conf.dsbr_urm_fw_dependent)
- control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_FW;
-
- if (trans->conf.dsbr_urm_permanent)
- control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM;
-
- if (trans->conf.ext_32khz_clock_valid)
- control_flags_ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID;
-
- /* Allocate prph scratch */
- prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
- &trans_pcie->prph_scratch_dma_addr,
- GFP_KERNEL);
- if (!prph_scratch)
- return -ENOMEM;
-
- prph_sc_ctrl = &prph_scratch->ctrl_cfg;
-
- prph_sc_ctrl->version.version = 0;
- prph_sc_ctrl->version.mac_id =
- cpu_to_le16((u16)trans->info.hw_rev);
- prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
-
- control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
- control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
-
- if (trans->mac_cfg->imr_enabled)
- control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;
-
- if (CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_GL &&
- iwl_is_force_scu_active_approved()) {
- control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE;
- IWL_DEBUG_FW(trans,
- "Context Info: Set SCU_FORCE_ACTIVE (0x%x) in control_flags\n",
- IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE);
- }
-
- if (trans->do_top_reset) {
- WARN_ON(trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC);
- control_flags |= IWL_PRPH_SCRATCH_TOP_RESET;
- }
-
- /* initialize RX default queue */
- prph_sc_ctrl->rbd_cfg.free_rbd_addr =
- cpu_to_le64(trans_pcie->rxq->bd_dma);
-
- iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
- &control_flags);
- prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
- prph_sc_ctrl->control.control_flags_ext = cpu_to_le32(control_flags_ext);
-
- /* initialize the Step equalizer data */
- prph_sc_ctrl->step_cfg.mbx_addr_0 =
- cpu_to_le32(trans->conf.mbx_addr_0_step);
- prph_sc_ctrl->step_cfg.mbx_addr_1 =
- cpu_to_le32(trans->conf.mbx_addr_1_step);
-
- /* allocate ucode sections in dram and set addresses */
- ret = iwl_pcie_init_fw_sec(trans, img, &prph_scratch->dram.common);
- if (ret)
- goto err_free_prph_scratch;
-
- /* Allocate prph information
- * currently we don't assign to the prph info anything, but it would get
- * assigned later
- *
- * We also use the second half of this page to give the device some
- * dummy TR/CR tail pointers - which shouldn't be necessary as we don't
- * use this, but the hardware still reads/writes there and we can't let
- * it go do that with a NULL pointer.
- */
- BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
- prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
- &trans_pcie->prph_info_dma_addr,
- GFP_KERNEL);
- if (!prph_info) {
- ret = -ENOMEM;
- goto err_free_prph_scratch;
- }
-
- /* Allocate context info */
- ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
- sizeof(*ctxt_info_gen3),
- &trans_pcie->ctxt_info_dma_addr,
- GFP_KERNEL);
- if (!ctxt_info_gen3) {
- ret = -ENOMEM;
- goto err_free_prph_info;
- }
-
- ctxt_info_gen3->prph_info_base_addr =
- cpu_to_le64(trans_pcie->prph_info_dma_addr);
- ctxt_info_gen3->prph_scratch_base_addr =
- cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
-
- /*
- * This code assumes the FSEQ is last and we can make that
- * optional; old devices _should_ be fine with a bigger size,
- * but in simulation we check the size more precisely.
- */
- BUILD_BUG_ON(offsetofend(typeof(*prph_scratch), dram.common) +
- sizeof(prph_scratch->dram.fseq_img) !=
- sizeof(*prph_scratch));
- if (control_flags_ext & IWL_PRPH_SCRATCH_EXT_EXT_FSEQ)
- ctxt_info_gen3->prph_scratch_size =
- cpu_to_le32(sizeof(*prph_scratch));
- else
- ctxt_info_gen3->prph_scratch_size =
- cpu_to_le32(offsetofend(typeof(*prph_scratch),
- dram.common));
-
- ctxt_info_gen3->cr_head_idx_arr_base_addr =
- cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
- ctxt_info_gen3->tr_tail_idx_arr_base_addr =
- cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
- ctxt_info_gen3->cr_tail_idx_arr_base_addr =
- cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
- ctxt_info_gen3->mtr_base_addr =
- cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr);
- ctxt_info_gen3->mcr_base_addr =
- cpu_to_le64(trans_pcie->rxq->used_bd_dma);
- ctxt_info_gen3->mtr_size =
- cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
- ctxt_info_gen3->mcr_size =
- cpu_to_le16(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)));
-
- trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
- trans_pcie->prph_info = prph_info;
- trans_pcie->prph_scratch = prph_scratch;
-
- /* Allocate IML */
- trans_pcie->iml_len = fw->iml_len;
- trans_pcie->iml = dma_alloc_coherent(trans->dev, fw->iml_len,
- &trans_pcie->iml_dma_addr,
- GFP_KERNEL);
- if (!trans_pcie->iml) {
- ret = -ENOMEM;
- goto err_free_ctxt_info;
- }
-
- memcpy(trans_pcie->iml, fw->iml, fw->iml_len);
-
- return 0;
-
-err_free_ctxt_info:
- dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
- trans_pcie->ctxt_info_gen3,
- trans_pcie->ctxt_info_dma_addr);
- trans_pcie->ctxt_info_gen3 = NULL;
-err_free_prph_info:
- dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
- trans_pcie->prph_info_dma_addr);
-
-err_free_prph_scratch:
- dma_free_coherent(trans->dev,
- sizeof(*prph_scratch),
- prph_scratch,
- trans_pcie->prph_scratch_dma_addr);
- return ret;
-
-}
-
-void iwl_pcie_ctxt_info_gen3_kick(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_enable_fw_load_int_ctx_info(trans, trans->do_top_reset);
-
- /* kick FW self load */
- iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr);
- iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr);
- iwl_write32(trans, CSR_IML_SIZE_ADDR, trans_pcie->iml_len);
-
- iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
- CSR_AUTO_FUNC_BOOT_ENA);
-}
-
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (trans_pcie->iml) {
- dma_free_coherent(trans->dev, trans_pcie->iml_len,
- trans_pcie->iml,
- trans_pcie->iml_dma_addr);
- trans_pcie->iml_dma_addr = 0;
- trans_pcie->iml_len = 0;
- trans_pcie->iml = NULL;
- }
-
- iwl_pcie_ctxt_info_free_fw_img(trans);
-
- if (alive)
- return;
-
- if (!trans_pcie->ctxt_info_gen3)
- return;
-
- /* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */
- dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
- trans_pcie->ctxt_info_gen3,
- trans_pcie->ctxt_info_dma_addr);
- trans_pcie->ctxt_info_dma_addr = 0;
- trans_pcie->ctxt_info_gen3 = NULL;
-
- dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
- trans_pcie->prph_scratch,
- trans_pcie->prph_scratch_dma_addr);
- trans_pcie->prph_scratch_dma_addr = 0;
- trans_pcie->prph_scratch = NULL;
-
- /* this is needed for the entire lifetime */
- dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
- trans_pcie->prph_info_dma_addr);
- trans_pcie->prph_info_dma_addr = 0;
- trans_pcie->prph_info = NULL;
-}
-
-static int iwl_pcie_load_payloads_contig(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_data,
- struct iwl_dram_data *dram)
-{
- u32 len, len0, len1;
-
- if (pnvm_data->n_chunks != UNFRAGMENTED_PNVM_PAYLOADS_NUMBER) {
- IWL_DEBUG_FW(trans, "expected 2 payloads, got %d.\n",
- pnvm_data->n_chunks);
- return -EINVAL;
- }
-
- len0 = pnvm_data->chunks[0].len;
- len1 = pnvm_data->chunks[1].len;
- if (len1 > 0xFFFFFFFF - len0) {
- IWL_DEBUG_FW(trans, "sizes of payloads overflow.\n");
- return -EINVAL;
- }
- len = len0 + len1;
-
- dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
- &dram->physical);
- if (!dram->block) {
- IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
- return -ENOMEM;
- }
-
- dram->size = len;
- memcpy(dram->block, pnvm_data->chunks[0].data, len0);
- memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1);
-
- return 0;
-}
-
-static int iwl_pcie_load_payloads_segments
- (struct iwl_trans *trans,
- struct iwl_dram_regions *dram_regions,
- const struct iwl_pnvm_image *pnvm_data)
-{
- struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];
- struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
- struct iwl_prph_scrath_mem_desc_addr_array *addresses;
- const void *data;
- u32 len;
- int i;
-
- /* allocate and init DRAM descriptors array */
- len = sizeof(struct iwl_prph_scrath_mem_desc_addr_array);
- desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent
- (trans,
- len,
- &desc_dram->physical);
- if (!desc_dram->block) {
- IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
- return -ENOMEM;
- }
- desc_dram->size = len;
- memset(desc_dram->block, 0, len);
-
- /* allocate DRAM region for each payload */
- dram_regions->n_regions = 0;
- for (i = 0; i < pnvm_data->n_chunks; i++) {
- len = pnvm_data->chunks[i].len;
- data = pnvm_data->chunks[i].data;
-
- if (iwl_pcie_ctxt_info_alloc_dma(trans,
- data,
- len,
- cur_payload_dram)) {
- iwl_trans_pcie_free_pnvm_dram_regions(dram_regions,
- trans->dev);
- return -ENOMEM;
- }
-
- dram_regions->n_regions++;
- cur_payload_dram++;
- }
-
- /* fill desc with the DRAM payloads addresses */
- addresses = desc_dram->block;
- for (i = 0; i < pnvm_data->n_chunks; i++) {
- addresses->mem_descs[i] =
- cpu_to_le64(dram_regions->drams[i].physical);
- }
-
- return 0;
-
-}
-
-int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_payloads,
- const struct iwl_ucode_capabilities *capa)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
- &trans_pcie->prph_scratch->ctrl_cfg;
- struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
- int ret = 0;
-
- /* only allocate the DRAM if not allocated yet */
- if (trans->pnvm_loaded)
- return 0;
-
- if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
- return -EBUSY;
-
- if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- return 0;
-
- if (!pnvm_payloads->n_chunks) {
- IWL_DEBUG_FW(trans, "no payloads\n");
- return -EINVAL;
- }
-
- /* save payloads in several DRAM sections */
- if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
- ret = iwl_pcie_load_payloads_segments(trans,
- dram_regions,
- pnvm_payloads);
- if (!ret)
- trans->pnvm_loaded = true;
- } else {
- /* save only in one DRAM section */
- ret = iwl_pcie_load_payloads_contig(trans, pnvm_payloads,
- &dram_regions->drams[0]);
- if (!ret) {
- dram_regions->n_regions = 1;
- trans->pnvm_loaded = true;
- }
- }
-
- return ret;
-}
-
-static inline size_t
-iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions)
-{
- size_t total_size = 0;
- int i;
-
- for (i = 0; i < dram_regions->n_regions; i++)
- total_size += dram_regions->drams[i].size;
-
- return total_size;
-}
-
-static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
- &trans_pcie->prph_scratch->ctrl_cfg;
- struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
-
- prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
- cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
- prph_sc_ctrl->pnvm_cfg.pnvm_size =
- cpu_to_le32(iwl_dram_regions_size(dram_regions));
-}
-
-static void iwl_pcie_set_contig_pnvm(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
- &trans_pcie->prph_scratch->ctrl_cfg;
-
- prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
- cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical);
- prph_sc_ctrl->pnvm_cfg.pnvm_size =
- cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
-}
-
-void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
-{
- if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- return;
-
- if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
- iwl_pcie_set_pnvm_segments(trans);
- else
- iwl_pcie_set_contig_pnvm(trans);
-}
-
-int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
- &trans_pcie->prph_scratch->ctrl_cfg;
- struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
- int ret = 0;
-
- /* only allocate the DRAM if not allocated yet */
- if (trans->reduce_power_loaded)
- return 0;
-
- if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- return 0;
-
- if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
- return -EBUSY;
-
- if (!payloads->n_chunks) {
- IWL_DEBUG_FW(trans, "no payloads\n");
- return -EINVAL;
- }
-
- /* save payloads in several DRAM sections */
- if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
- ret = iwl_pcie_load_payloads_segments(trans,
- dram_regions,
- payloads);
- if (!ret)
- trans->reduce_power_loaded = true;
- } else {
- /* save only in one DRAM section */
- ret = iwl_pcie_load_payloads_contig(trans, payloads,
- &dram_regions->drams[0]);
- if (!ret) {
- dram_regions->n_regions = 1;
- trans->reduce_power_loaded = true;
- }
- }
-
- return ret;
-}
-
-static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
- &trans_pcie->prph_scratch->ctrl_cfg;
- struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
-
- prph_sc_ctrl->reduce_power_cfg.base_addr =
- cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
- prph_sc_ctrl->reduce_power_cfg.size =
- cpu_to_le32(iwl_dram_regions_size(dram_regions));
-}
-
-static void iwl_pcie_set_contig_reduce_power(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
- &trans_pcie->prph_scratch->ctrl_cfg;
-
- prph_sc_ctrl->reduce_power_cfg.base_addr =
- cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical);
- prph_sc_ctrl->reduce_power_cfg.size =
- cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size);
-}
-
-void
-iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
-{
- if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- return;
-
- if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
- iwl_pcie_set_reduce_power_segments(trans);
- else
- iwl_pcie_set_contig_reduce_power(trans);
-}
-
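One idiom worth noting before the file is re-added: iwl_pcie_load_payloads_contig() guards the u32 sum of the two chunk lengths with the open-coded test len1 > 0xFFFFFFFF - len0. The same guard could be written with the generic helper from <linux/overflow.h>; a sketch for comparison, not a change the patch makes:

#include <linux/overflow.h>

/* sketch: add two payload lengths, flagging u32 wraparound */
static int example_total_len(u32 len0, u32 len1, u32 *total)
{
	/* check_add_overflow() evaluates to true if the sum wraps */
	if (check_add_overflow(len0, len1, total))
		return -EINVAL;

	return 0;
}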
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2018-2025 Intel Corporation
+ */
+#include <linux/dmi.h>
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-context-info-v2.h"
+#include "internal.h"
+#include "iwl-prph.h"
+
+static const struct dmi_system_id dmi_force_scu_active_approved_list[] = {
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ },
+ },
+ /* keep last */
+ {}
+};
+
+static bool iwl_is_force_scu_active_approved(void)
+{
+ return !!dmi_check_system(dmi_force_scu_active_approved_list);
+}
+
+static void
+iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
+ struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
+ u32 *control_flags)
+{
+ enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
+ u32 dbg_flags = 0;
+
+ if (!iwl_trans_dbg_ini_valid(trans)) {
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+
+ if (fw_mon->size) {
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM buffer destination\n");
+
+ dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
+ }
+
+ goto out;
+ }
+
+ fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
+
+ switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
+ case IWL_FW_INI_LOCATION_SRAM_PATH:
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying SMEM buffer destination\n");
+ break;
+
+ case IWL_FW_INI_LOCATION_NPK_PATH:
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying NPK buffer destination\n");
+ break;
+
+ case IWL_FW_INI_LOCATION_DRAM_PATH:
+ if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
+ struct iwl_dram_data *frag =
+ &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+ dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset);
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM destination (debug_token_config=%u)\n",
+ le32_to_cpu(dbg_cfg->debug_token_config));
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
+ alloc_id,
+ trans->dbg.fw_mon_ini[alloc_id].num_frags);
+ }
+ break;
+ default:
+ IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
+ le32_to_cpu(fw_mon_cfg->buf_location));
+ }
+out:
+ if (dbg_flags)
+ *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
+}
+
+int iwl_pcie_ctxt_info_v2_alloc(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct fw_img *img)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_context_info_v2 *ctxt_info_v2;
+ struct iwl_prph_scratch *prph_scratch;
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+ struct iwl_prph_info *prph_info;
+ u32 control_flags = 0;
+ u32 control_flags_ext = 0;
+ int ret;
+ int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
+ trans->mac_cfg->base->min_txq_size);
+
+ switch (trans->conf.rx_buf_size) {
+ case IWL_AMSDU_DEF:
+ return -EINVAL;
+ case IWL_AMSDU_2K:
+ break;
+ case IWL_AMSDU_4K:
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+ break;
+ case IWL_AMSDU_8K:
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+ /* if firmware supports the ext size, tell it */
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
+ break;
+ case IWL_AMSDU_12K:
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+ /* if firmware supports the ext size, tell it */
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K;
+ break;
+ }
+
+ if (trans->conf.dsbr_urm_fw_dependent)
+ control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_FW;
+
+ if (trans->conf.dsbr_urm_permanent)
+ control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM;
+
+ if (trans->conf.ext_32khz_clock_valid)
+ control_flags_ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID;
+
+ /* Allocate prph scratch */
+ prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
+ &trans_pcie->prph_scratch_dma_addr,
+ GFP_KERNEL);
+ if (!prph_scratch)
+ return -ENOMEM;
+
+ prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+
+ prph_sc_ctrl->version.version = 0;
+ prph_sc_ctrl->version.mac_id =
+ cpu_to_le16((u16)trans->info.hw_rev);
+ prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
+
+ control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
+ control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
+
+ if (trans->mac_cfg->imr_enabled)
+ control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;
+
+ if (CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ iwl_is_force_scu_active_approved()) {
+ control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE;
+ IWL_DEBUG_FW(trans,
+ "Context Info: Set SCU_FORCE_ACTIVE (0x%x) in control_flags\n",
+ IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE);
+ }
+
+ if (trans->do_top_reset) {
+ WARN_ON(trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC);
+ control_flags |= IWL_PRPH_SCRATCH_TOP_RESET;
+ }
+
+ /* initialize RX default queue */
+ prph_sc_ctrl->rbd_cfg.free_rbd_addr =
+ cpu_to_le64(trans_pcie->rxq->bd_dma);
+
+ iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
+ &control_flags);
+ prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+ prph_sc_ctrl->control.control_flags_ext = cpu_to_le32(control_flags_ext);
+
+ /* initialize the Step equalizer data */
+ prph_sc_ctrl->step_cfg.mbx_addr_0 =
+ cpu_to_le32(trans->conf.mbx_addr_0_step);
+ prph_sc_ctrl->step_cfg.mbx_addr_1 =
+ cpu_to_le32(trans->conf.mbx_addr_1_step);
+
+ /* allocate ucode sections in dram and set addresses */
+ ret = iwl_pcie_init_fw_sec(trans, img, &prph_scratch->dram.common);
+ if (ret)
+ goto err_free_prph_scratch;
+
+ /* Allocate prph information
+ * currently we don't assign anything to the prph info, but it will
+ * get assigned later
+ *
+ * We also use the second half of this page to give the device some
+ * dummy TR/CR tail pointers - which shouldn't be necessary as we don't
+ * use this, but the hardware still reads/writes there and we can't let
+ * it go do that with a NULL pointer.
+ */
+ BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
+ prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
+ &trans_pcie->prph_info_dma_addr,
+ GFP_KERNEL);
+ if (!prph_info) {
+ ret = -ENOMEM;
+ goto err_free_prph_scratch;
+ }
+
+ /* Allocate context info */
+ ctxt_info_v2 = dma_alloc_coherent(trans->dev,
+ sizeof(*ctxt_info_v2),
+ &trans_pcie->ctxt_info_dma_addr,
+ GFP_KERNEL);
+ if (!ctxt_info_v2) {
+ ret = -ENOMEM;
+ goto err_free_prph_info;
+ }
+
+ ctxt_info_v2->prph_info_base_addr =
+ cpu_to_le64(trans_pcie->prph_info_dma_addr);
+ ctxt_info_v2->prph_scratch_base_addr =
+ cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
+
+ /*
+ * This code assumes the FSEQ is last and we can make that
+ * optional; old devices _should_ be fine with a bigger size,
+ * but in simulation we check the size more precisely.
+ */
+ BUILD_BUG_ON(offsetofend(typeof(*prph_scratch), dram.common) +
+ sizeof(prph_scratch->dram.fseq_img) !=
+ sizeof(*prph_scratch));
+ if (control_flags_ext & IWL_PRPH_SCRATCH_EXT_EXT_FSEQ)
+ ctxt_info_v2->prph_scratch_size =
+ cpu_to_le32(sizeof(*prph_scratch));
+ else
+ ctxt_info_v2->prph_scratch_size =
+ cpu_to_le32(offsetofend(typeof(*prph_scratch),
+ dram.common));
+
+ ctxt_info_v2->cr_head_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+ ctxt_info_v2->tr_tail_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
+ ctxt_info_v2->cr_tail_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
+ ctxt_info_v2->mtr_base_addr =
+ cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr);
+ ctxt_info_v2->mcr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+ ctxt_info_v2->mtr_size =
+ cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
+ ctxt_info_v2->mcr_size =
+ cpu_to_le16(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)));
+
+ trans_pcie->ctxt_info_v2 = ctxt_info_v2;
+ trans_pcie->prph_info = prph_info;
+ trans_pcie->prph_scratch = prph_scratch;
+
+ /* Allocate IML */
+ trans_pcie->iml_len = fw->iml_len;
+ trans_pcie->iml = dma_alloc_coherent(trans->dev, fw->iml_len,
+ &trans_pcie->iml_dma_addr,
+ GFP_KERNEL);
+ if (!trans_pcie->iml) {
+ ret = -ENOMEM;
+ goto err_free_ctxt_info;
+ }
+
+ memcpy(trans_pcie->iml, fw->iml, fw->iml_len);
+
+ return 0;
+
+err_free_ctxt_info:
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2),
+ trans_pcie->ctxt_info_v2,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_v2 = NULL;
+err_free_prph_info:
+ dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
+ trans_pcie->prph_info_dma_addr);
+
+err_free_prph_scratch:
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_scratch),
+ prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ return ret;
+}
+
+void iwl_pcie_ctxt_info_v2_kick(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_enable_fw_load_int_ctx_info(trans, trans->do_top_reset);
+
+ /* kick FW self load */
+ iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr);
+ iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr);
+ iwl_write32(trans, CSR_IML_SIZE_ADDR, trans_pcie->iml_len);
+
+ iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
+ CSR_AUTO_FUNC_BOOT_ENA);
+}
+
+void iwl_pcie_ctxt_info_v2_free(struct iwl_trans *trans, bool alive)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans_pcie->iml) {
+ dma_free_coherent(trans->dev, trans_pcie->iml_len,
+ trans_pcie->iml,
+ trans_pcie->iml_dma_addr);
+ trans_pcie->iml_dma_addr = 0;
+ trans_pcie->iml_len = 0;
+ trans_pcie->iml = NULL;
+ }
+
+ iwl_pcie_ctxt_info_free_fw_img(trans);
+
+ if (alive)
+ return;
+
+ if (!trans_pcie->ctxt_info_v2)
+ return;
+
+ /* ctxt_info_v2 and prph_scratch are still needed for PNVM load */
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2),
+ trans_pcie->ctxt_info_v2,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_dma_addr = 0;
+ trans_pcie->ctxt_info_v2 = NULL;
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
+ trans_pcie->prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ trans_pcie->prph_scratch_dma_addr = 0;
+ trans_pcie->prph_scratch = NULL;
+
+ /* this is needed for the entire lifetime */
+ dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
+ trans_pcie->prph_info_dma_addr);
+ trans_pcie->prph_info_dma_addr = 0;
+ trans_pcie->prph_info = NULL;
+}
+
+static int iwl_pcie_load_payloads_contig(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_data,
+ struct iwl_dram_data *dram)
+{
+ u32 len, len0, len1;
+
+ if (pnvm_data->n_chunks != UNFRAGMENTED_PNVM_PAYLOADS_NUMBER) {
+ IWL_DEBUG_FW(trans, "expected 2 payloads, got %d.\n",
+ pnvm_data->n_chunks);
+ return -EINVAL;
+ }
+
+ len0 = pnvm_data->chunks[0].len;
+ len1 = pnvm_data->chunks[1].len;
+ if (len1 > 0xFFFFFFFF - len0) {
+ IWL_DEBUG_FW(trans, "sizes of payloads overflow.\n");
+ return -EINVAL;
+ }
+ len = len0 + len1;
+
+ dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
+ &dram->physical);
+ if (!dram->block) {
+ IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
+ return -ENOMEM;
+ }
+
+ dram->size = len;
+ memcpy(dram->block, pnvm_data->chunks[0].data, len0);
+ memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1);
+
+ return 0;
+}
+
+static int iwl_pcie_load_payloads_segments(struct iwl_trans *trans,
+					   struct iwl_dram_regions *dram_regions,
+					   const struct iwl_pnvm_image *pnvm_data)
+{
+ struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];
+ struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
+ struct iwl_prph_scrath_mem_desc_addr_array *addresses;
+ const void *data;
+ u32 len;
+ int i;
+
+ /* allocate and init DRAM descriptors array */
+ len = sizeof(struct iwl_prph_scrath_mem_desc_addr_array);
+ desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
+ &desc_dram->physical);
+ if (!desc_dram->block) {
+ IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
+ return -ENOMEM;
+ }
+ desc_dram->size = len;
+ memset(desc_dram->block, 0, len);
+
+ /* allocate DRAM region for each payload */
+ dram_regions->n_regions = 0;
+ for (i = 0; i < pnvm_data->n_chunks; i++) {
+ len = pnvm_data->chunks[i].len;
+ data = pnvm_data->chunks[i].data;
+
+ if (iwl_pcie_ctxt_info_alloc_dma(trans,
+ data,
+ len,
+ cur_payload_dram)) {
+ iwl_trans_pcie_free_pnvm_dram_regions(dram_regions,
+ trans->dev);
+ return -ENOMEM;
+ }
+
+ dram_regions->n_regions++;
+ cur_payload_dram++;
+ }
+
+ /* fill desc with the DRAM payloads addresses */
+ addresses = desc_dram->block;
+ for (i = 0; i < pnvm_data->n_chunks; i++) {
+ addresses->mem_descs[i] =
+ cpu_to_le64(dram_regions->drams[i].physical);
+ }
+
+ return 0;
+}
+
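+/*
+ * Allocate DRAM for the PNVM payloads and copy them there, fragmented or
+ * contiguous depending on the FW capabilities. The prph scratch is only
+ * pointed at the data later, in iwl_trans_pcie_ctx_info_v2_set_pnvm().
+ */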
+int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_payloads,
+ const struct iwl_ucode_capabilities *capa)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+ &trans_pcie->prph_scratch->ctrl_cfg;
+ struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
+ int ret = 0;
+
+ /* only allocate the DRAM if not allocated yet */
+ if (trans->pnvm_loaded)
+ return 0;
+
+ if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
+ return -EBUSY;
+
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
+ if (!pnvm_payloads->n_chunks) {
+ IWL_DEBUG_FW(trans, "no payloads\n");
+ return -EINVAL;
+ }
+
+ /* save payloads in several DRAM sections */
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
+ ret = iwl_pcie_load_payloads_segments(trans,
+ dram_regions,
+ pnvm_payloads);
+ if (!ret)
+ trans->pnvm_loaded = true;
+ } else {
+ /* save only in one DRAM section */
+ ret = iwl_pcie_load_payloads_contig(trans, pnvm_payloads,
+ &dram_regions->drams[0]);
+ if (!ret) {
+ dram_regions->n_regions = 1;
+ trans->pnvm_loaded = true;
+ }
+ }
+
+ return ret;
+}
+
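+/* total size of all payload regions, reported to the FW as the image size */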
+static inline size_t
+iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions)
+{
+ size_t total_size = 0;
+ int i;
+
+ for (i = 0; i < dram_regions->n_regions; i++)
+ total_size += dram_regions->drams[i].size;
+
+ return total_size;
+}
+
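+/*
+ * fragmented case: pnvm_base_addr points at the descriptor array, while
+ * pnvm_size covers the sum of all payload chunks
+ */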
+static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+ &trans_pcie->prph_scratch->ctrl_cfg;
+ struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
+
+ prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
+ cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
+ prph_sc_ctrl->pnvm_cfg.pnvm_size =
+ cpu_to_le32(iwl_dram_regions_size(dram_regions));
+}
+
+static void iwl_pcie_set_contig_pnvm(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+ &trans_pcie->prph_scratch->ctrl_cfg;
+
+ prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
+ cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical);
+ prph_sc_ctrl->pnvm_cfg.pnvm_size =
+ cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
+}
+
+void iwl_trans_pcie_ctx_info_v2_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
+{
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return;
+
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
+ iwl_pcie_set_pnvm_segments(trans);
+ else
+ iwl_pcie_set_contig_pnvm(trans);
+}
+
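+/*
+ * Same scheme as the PNVM load above, for the reduce power tables:
+ * fragmented images go through the segment loader, others into one
+ * contiguous DMA block.
+ */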
+int iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+ &trans_pcie->prph_scratch->ctrl_cfg;
+ struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
+ int ret = 0;
+
+ /* only allocate the DRAM if not allocated yet */
+ if (trans->reduce_power_loaded)
+ return 0;
+
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
+ if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
+ return -EBUSY;
+
+ if (!payloads->n_chunks) {
+ IWL_DEBUG_FW(trans, "no payloads\n");
+ return -EINVAL;
+ }
+
+ /* save payloads in several DRAM sections */
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
+ ret = iwl_pcie_load_payloads_segments(trans,
+ dram_regions,
+ payloads);
+ if (!ret)
+ trans->reduce_power_loaded = true;
+ } else {
+ /* save only in one DRAM section */
+ ret = iwl_pcie_load_payloads_contig(trans, payloads,
+ &dram_regions->drams[0]);
+ if (!ret) {
+ dram_regions->n_regions = 1;
+ trans->reduce_power_loaded = true;
+ }
+ }
+
+ return ret;
+}
+
+static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+ &trans_pcie->prph_scratch->ctrl_cfg;
+ struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
+
+ prph_sc_ctrl->reduce_power_cfg.base_addr =
+ cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
+ prph_sc_ctrl->reduce_power_cfg.size =
+ cpu_to_le32(iwl_dram_regions_size(dram_regions));
+}
+
+static void iwl_pcie_set_contig_reduce_power(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+ &trans_pcie->prph_scratch->ctrl_cfg;
+
+ prph_sc_ctrl->reduce_power_cfg.base_addr =
+ cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical);
+ prph_sc_ctrl->reduce_power_cfg.size =
+ cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size);
+}
+
+void
+iwl_trans_pcie_ctx_info_v2_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
+{
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return;
+
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
+ iwl_pcie_set_reduce_power_segments(trans);
+ else
+ iwl_pcie_set_contig_reduce_power(trans);
+}
+
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
* @ctxt_info: context information for FW self init
- * @ctxt_info_gen3: context information for gen3 devices
+ * @ctxt_info_v2: context information for v2 devices
* @prph_info: prph info for self init
* @prph_scratch: prph scratch for self init
* @ctxt_info_dma_addr: dma addr of context information
struct iwl_rb_allocator rba;
union {
struct iwl_context_info *ctxt_info;
- struct iwl_context_info_gen3 *ctxt_info_gen3;
+ struct iwl_context_info_v2 *ctxt_info_v2;
};
struct iwl_prph_info *prph_info;
struct iwl_prph_scratch *prph_scratch;
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
#include "fw/dbg.h"
/******************************************************************************
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "iwl-context-info.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
#include "internal.h"
#include "fw/dbg.h"
iwl_pcie_ctxt_info_free_paging(trans);
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_pcie_ctxt_info_gen3_free(trans, false);
+ iwl_pcie_ctxt_info_v2_free(trans, false);
else
iwl_pcie_ctxt_info_free(trans);
 * paging memory cannot be freed since the FW will still use it
*/
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_pcie_ctxt_info_gen3_free(trans, true);
+ iwl_pcie_ctxt_info_v2_free(trans, true);
else
iwl_pcie_ctxt_info_free(trans);
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
if (!top_reset_done) {
- ret = iwl_pcie_ctxt_info_gen3_alloc(trans, fw, img);
+ ret = iwl_pcie_ctxt_info_v2_alloc(trans, fw, img);
if (ret)
goto out;
}
- iwl_pcie_ctxt_info_gen3_kick(trans);
+ iwl_pcie_ctxt_info_v2_kick(trans);
} else {
ret = iwl_pcie_ctxt_info_init(trans, img);
if (ret)
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START 0x40000