iwlwifi-objs += iwl-nvm-utils.o
iwlwifi-objs += iwl-utils.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
-iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-v2.o
-iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
+
+# Bus
+iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-v2.o pcie/drv.o
+iwlwifi-objs += pcie/gen1_2/rx.o pcie/gen1_2/tx.o pcie/gen1_2/trans.o
+iwlwifi-objs += pcie/gen1_2/trans-gen2.o pcie/gen1_2/tx-gen2.o
CFLAGS_pcie/drv.o += -Wno-override-init
#include "iwl-fh.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
-#include "pcie/internal.h"
+#include "pcie/gen1_2/internal.h"
#include "pcie/iwl-context-info-v2.h"
struct iwl_trans_dev_restart_data {
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-v2.h"
-#include "internal.h"
+#include "gen1_2/internal.h"
#include "iwl-prph.h"
static const struct dmi_system_id dmi_force_scu_active_approved_list[] = {
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info.h"
-#include "internal.h"
+#include "gen1_2/internal.h"
#include "iwl-prph.h"
static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-prph.h"
-#include "internal.h"
+#include "gen1_2/internal.h"
#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg), \
struct _struct)
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2003-2015, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_trans_int_pcie_h__
+#define __iwl_trans_int_pcie_h__
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/cpu.h>
+
+#include "iwl-fh.h"
+#include "iwl-csr.h"
+#include "iwl-trans.h"
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "iwl-op-mode.h"
+#include "iwl-drv.h"
+#include "pcie/iwl-context-info.h"
+
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_PENDING_WATERMARK 16
+#define FIRST_RX_QUEUE 512
+
+struct iwl_host_cmd;
+
+/* This file includes the declarations that are internal to the
+ * trans_pcie layer */
+
+/**
+ * struct iwl_rx_mem_buffer
+ * @page_dma: bus address of rxb page
+ * @page: driver's pointer to the rxb page
+ * @list: list entry for the membuffer
+ * @invalid: rxb is in driver ownership - not owned by HW
+ * @vid: index of this rxb in the global table
+ * @offset: indicates which offset of the page (in bytes)
+ * this buffer uses (if multiple RBs fit into one page)
+ */
+struct iwl_rx_mem_buffer {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+ u32 offset;
+ u16 vid;
+ bool invalid;
+};
+
+/* interrupt statistics */
+struct isr_statistics {
+ u32 hw;
+ u32 sw;
+ u32 err_code;
+ u32 sch;
+ u32 alive;
+ u32 rfkill;
+ u32 ctkill;
+ u32 wakeup;
+ u32 rx;
+ u32 tx;
+ u32 unhandled;
+};
+
+/**
+ * struct iwl_rx_transfer_desc - transfer descriptor
+ * @addr: ptr to free buffer start address
+ * @rbid: unique tag of the buffer
+ * @reserved: reserved
+ */
+struct iwl_rx_transfer_desc {
+ __le16 rbid;
+ __le16 reserved[3];
+ __le64 addr;
+} __packed;
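+
+/*
+ * Layout note: sizeof(struct iwl_rx_transfer_desc) is 16 bytes
+ * (2 rbid + 6 reserved + 8 addr = 2 * sizeof(u64)); the RX restock
+ * path asserts this with a BUILD_BUG_ON() before filling descriptors.
+ */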
+
+#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0)
+
+/**
+ * struct iwl_rx_completion_desc - completion descriptor
+ * @reserved1: reserved
+ * @rbid: unique tag of the received buffer
+ * @flags: flags (0: fragmented, all others: reserved)
+ * @reserved2: reserved
+ */
+struct iwl_rx_completion_desc {
+ __le32 reserved1;
+ __le16 rbid;
+ u8 flags;
+ u8 reserved2[25];
+} __packed;
+
+/**
+ * struct iwl_rx_completion_desc_bz - Bz completion descriptor
+ * @rbid: unique tag of the received buffer
+ * @flags: flags (0: fragmented, all others: reserved)
+ * @reserved: reserved
+ */
+struct iwl_rx_completion_desc_bz {
+ __le16 rbid;
+ u8 flags;
+ u8 reserved[1];
+} __packed;
+
+/**
+ * struct iwl_rxq - Rx queue
+ * @id: queue index
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
+ * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
+ * In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
+ * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
+ * @read: Shared index to newest available Rx buffer
+ * @write: Shared index to oldest written Rx packet
+ * @write_actual: actual write pointer written to device, since we update in
+ * blocks of 8 only
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to allocator to use for allocation
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
+ * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ * @lock: per-queue lock
+ * @queue: actual rx queue. Not used for multi-rx queue.
+ * @next_rb_is_fragment: indicates that the previous RB that we handled set
+ * the fragmented flag, so the next one is still another fragment
+ * @napi: NAPI struct for this queue
+ * @queue_size: size of this queue
+ *
+ * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
+ */
+struct iwl_rxq {
+ int id;
+ void *bd;
+ dma_addr_t bd_dma;
+ void *used_bd;
+ dma_addr_t used_bd_dma;
+ u32 read;
+ u32 write;
+ u32 free_count;
+ u32 used_count;
+ u32 write_actual;
+ u32 queue_size;
+ struct list_head rx_free;
+ struct list_head rx_used;
+ bool need_update, next_rb_is_fragment;
+ void *rb_stts;
+ dma_addr_t rb_stts_dma;
+ spinlock_t lock;
+ struct napi_struct napi;
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ * the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ * of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+ atomic_t req_pending;
+ atomic_t req_ready;
+ struct list_head rbd_allocated;
+ struct list_head rbd_empty;
+ spinlock_t lock;
+ struct workqueue_struct *alloc_wq;
+ struct work_struct rx_alloc;
+};
+
+/**
+ * iwl_get_closed_rb_stts - get closed rb stts from different structs
+ * @trans: transport pointer (for configuration)
+ * @rxq: the rxq to get the rb stts from
+ */
+static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ __le16 *rb_stts = rxq->rb_stts;
+
+ return le16_to_cpu(READ_ONCE(*rb_stts));
+ } else {
+ struct iwl_rb_status *rb_stts = rxq->rb_stts;
+
+ return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF;
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/**
+ * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
+ * debugfs file
+ *
+ * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
+ * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
+ * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
+ * set the file can no longer be used.
+ */
+enum iwl_fw_mon_dbgfs_state {
+ IWL_FW_MON_DBGFS_STATE_CLOSED,
+ IWL_FW_MON_DBGFS_STATE_OPEN,
+ IWL_FW_MON_DBGFS_STATE_DISABLED,
+};
+#endif
+
+/**
+ * enum iwl_shared_irq_flags - level of sharing for irq
+ * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
+ * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
+ */
+enum iwl_shared_irq_flags {
+ IWL_SHARED_IRQ_NON_RX = BIT(0),
+ IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
+};
+
+/**
+ * enum iwl_image_response_code - image response values
+ * @IWL_IMAGE_RESP_DEF: the default value of the register
+ * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
+ * @IWL_IMAGE_RESP_FAIL: iml reading failed
+ */
+enum iwl_image_response_code {
+ IWL_IMAGE_RESP_DEF = 0,
+ IWL_IMAGE_RESP_SUCCESS = 1,
+ IWL_IMAGE_RESP_FAIL = 2,
+};
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/**
+ * struct cont_rec: continuous recording data structure
+ * @prev_wr_ptr: the last address that was read in monitor_data
+ * debugfs file
+ * @prev_wrap_cnt: the wrap count that was used during the last read in
+ * monitor_data debugfs file
+ * @state: the state of monitor_data debugfs file as described
+ * in &iwl_fw_mon_dbgfs_state enum
+ * @mutex: locked while reading from monitor_data debugfs file
+ */
+struct cont_rec {
+ u32 prev_wr_ptr;
+ u32 prev_wrap_cnt;
+ u8 state;
+ /* Used to sync monitor_data debugfs file with driver unload flow */
+ struct mutex mutex;
+};
+#endif
+
+enum iwl_pcie_fw_reset_state {
+ FW_RESET_IDLE,
+ FW_RESET_REQUESTED,
+ FW_RESET_OK,
+ FW_RESET_ERROR,
+ FW_RESET_TOP_REQUESTED,
+};
+
+/**
+ * enum iwl_pcie_imr_status - imr dma transfer state
+ * @IMR_D2S_IDLE: default value of the dma transfer
+ * @IMR_D2S_REQUESTED: dma transfer requested
+ * @IMR_D2S_COMPLETED: dma transfer completed
+ * @IMR_D2S_ERROR: dma transfer error
+ */
+enum iwl_pcie_imr_status {
+ IMR_D2S_IDLE,
+ IMR_D2S_REQUESTED,
+ IMR_D2S_COMPLETED,
+ IMR_D2S_ERROR,
+};
+
+/**
+ * struct iwl_pcie_txqs - TX queues data
+ *
+ * @queue_used: bit mask of used queues
+ * @queue_stopped: bit mask of stopped queues
+ * @txq: array of TXQ data structures representing the TXQs
+ * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
+ * @bc_pool: bytecount DMA allocations pool
+ * @bc_tbl_size: bytecount table size
+ * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
+ * (and similar usage)
+ * @tfd: TFD data
+ * @tfd.max_tbs: max number of buffers per TFD
+ * @tfd.size: TFD size
+ * @tfd.addr_size: TFD/TB address size
+ */
+struct iwl_pcie_txqs {
+ unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+ struct dma_pool *bc_pool;
+ size_t bc_tbl_size;
+ struct iwl_tso_hdr_page __percpu *tso_hdr_page;
+
+ struct {
+ u8 max_tbs;
+ u16 size;
+ u8 addr_size;
+ } tfd;
+
+ struct iwl_dma_ptr scd_bc_tbls;
+};
+
+/**
+ * struct iwl_trans_pcie - PCIe transport specific data
+ * @rxq: all the RX queue data
+ * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
+ * @global_table: table mapping received VID from hw to rxb
+ * @rba: allocator for RX replenishing
+ * @ctxt_info: context information for FW self init
+ * @ctxt_info_v2: context information for v2 devices
+ * @prph_info: prph info for self init
+ * @prph_scratch: prph scratch for self init
+ * @ctxt_info_dma_addr: dma addr of context information
+ * @prph_info_dma_addr: dma addr of prph info
+ * @prph_scratch_dma_addr: dma addr of prph scratch
+ * @iml: image loader image virtual address
+ * @iml_len: image loader image size
+ * @iml_dma_addr: image loader image DMA address
+ * @trans: pointer to the generic transport area
+ * @scd_base_addr: scheduler sram base address in SRAM
+ * @kw: keep warm address
+ * @pnvm_data: holds info about pnvm payloads allocated in DRAM
+ * @reduced_tables_data: holds info about power reduced tables
+ * payloads allocated in DRAM
+ * @pci_dev: basic pci-network driver stuff
+ * @hw_base: pci hardware address support
+ * @ucode_write_complete: indicates that the ucode has been copied.
+ * @ucode_write_waitq: wait queue for uCode load
+ * @rx_page_order: page order for receive buffer size
+ * @rx_buf_bytes: RX buffer (RB) size in bytes
+ * @reg_lock: protect hw register access
+ * @mutex: to protect stop_device / start_fw / start_hw
+ * @fw_mon_data: fw continuous recording data
+ * @cmd_hold_nic_awake: indicates NIC is held awake for APMG workaround
+ * during commands in flight
+ * @msix_entries: array of MSI-X entries
+ * @msix_enabled: true if managed to enable MSI-X
+ * @shared_vec_mask: the type of causes the shared vector handles
+ * (see iwl_shared_irq_flags).
+ * @alloc_vecs: the number of interrupt vectors allocated by the OS
+ * @def_irq: default irq for non rx causes
+ * @fh_init_mask: initial unmasked fh causes
+ * @hw_init_mask: initial unmasked hw causes
+ * @fh_mask: current unmasked fh causes
+ * @hw_mask: current unmasked hw causes
+ * @in_rescan: true if we have triggered a device rescan
+ * @base_rb_stts: base virtual address of receive buffer status for all queues
+ * @base_rb_stts_dma: base physical address of receive buffer status
+ * @supported_dma_mask: DMA mask to validate the actual address against,
+ * will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
+ * @alloc_page_lock: spinlock for the page allocator
+ * @alloc_page: allocated page to still use parts of
+ * @alloc_page_used: how much of the allocated page was already used (bytes)
+ * @imr_status: imr dma state machine
+ * @imr_waitq: imr wait queue for dma completion
+ * @rf_name: name/version of the CRF, if any
+ * @use_ict: whether or not ICT (interrupt table) is used
+ * @ict_index: current ICT read index
+ * @ict_tbl: ICT table pointer
+ * @ict_tbl_dma: ICT table DMA address
+ * @inta_mask: interrupt (INT-A) mask
+ * @irq_lock: lock to synchronize IRQ handling
+ * @txq_memory: TXQ allocation array
+ * @sx_waitq: waitqueue for Sx transitions
+ * @sx_complete: completion for Sx transitions
+ * @pcie_dbg_dumped_once: indicates PCIe regs were dumped already
+ * @opmode_down: indicates opmode went away
+ * @num_rx_bufs: number of RX buffers to allocate/use
+ * @affinity_mask: IRQ affinity mask for each RX queue
+ * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
+ * enable/disable
+ * @fw_reset_state: state of FW reset handshake
+ * @fw_reset_waitq: waitqueue for FW reset handshake
+ * @is_down: indicates the NIC is down
+ * @isr_stats: interrupt statistics
+ * @napi_dev: (fake) netdev for NAPI registration
+ * @txqs: transport tx queues data.
+ * @me_present: WiAMT/CSME is detected as present (1), not present (0)
+ * or unknown (-1, so can still use it as a boolean safely)
+ * @me_recheck_wk: worker to recheck WiAMT/CSME presence
+ * @invalid_tx_cmd: invalid TX command buffer
+ * @wait_command_queue: wait queue for sync commands
+ */
+struct iwl_trans_pcie {
+ struct iwl_rxq *rxq;
+ struct iwl_rx_mem_buffer *rx_pool;
+ struct iwl_rx_mem_buffer **global_table;
+ struct iwl_rb_allocator rba;
+ union {
+ struct iwl_context_info *ctxt_info;
+ struct iwl_context_info_v2 *ctxt_info_v2;
+ };
+ struct iwl_prph_info *prph_info;
+ struct iwl_prph_scratch *prph_scratch;
+ void *iml;
+ size_t iml_len;
+ dma_addr_t ctxt_info_dma_addr;
+ dma_addr_t prph_info_dma_addr;
+ dma_addr_t prph_scratch_dma_addr;
+ dma_addr_t iml_dma_addr;
+ struct iwl_trans *trans;
+
+ struct net_device *napi_dev;
+
+ /* INT ICT Table */
+ __le32 *ict_tbl;
+ dma_addr_t ict_tbl_dma;
+ int ict_index;
+ bool use_ict;
+ bool is_down, opmode_down;
+ s8 debug_rfkill;
+ struct isr_statistics isr_stats;
+
+ spinlock_t irq_lock;
+ struct mutex mutex;
+ u32 inta_mask;
+ u32 scd_base_addr;
+ struct iwl_dma_ptr kw;
+
+ /* pnvm data */
+ struct iwl_dram_regions pnvm_data;
+ struct iwl_dram_regions reduced_tables_data;
+
+ struct iwl_txq *txq_memory;
+
+ /* PCI bus related data */
+ struct pci_dev *pci_dev;
+ u8 __iomem *hw_base;
+
+ bool ucode_write_complete;
+ bool sx_complete;
+ wait_queue_head_t ucode_write_waitq;
+ wait_queue_head_t sx_waitq;
+
+ u16 num_rx_bufs;
+
+ bool pcie_dbg_dumped_once;
+ u32 rx_page_order;
+ u32 rx_buf_bytes;
+ u32 supported_dma_mask;
+
+ /* allocator lock for the two values below */
+ spinlock_t alloc_page_lock;
+ struct page *alloc_page;
+ u32 alloc_page_used;
+
+	/* protect hw register */
+ spinlock_t reg_lock;
+ bool cmd_hold_nic_awake;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct cont_rec fw_mon_data;
+#endif
+
+ struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
+ bool msix_enabled;
+ u8 shared_vec_mask;
+ u32 alloc_vecs;
+ u32 def_irq;
+ u32 fh_init_mask;
+ u32 hw_init_mask;
+ u32 fh_mask;
+ u32 hw_mask;
+ cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
+ u16 tx_cmd_queue_size;
+ bool in_rescan;
+
+ void *base_rb_stts;
+ dma_addr_t base_rb_stts_dma;
+
+ enum iwl_pcie_fw_reset_state fw_reset_state;
+ wait_queue_head_t fw_reset_waitq;
+ enum iwl_pcie_imr_status imr_status;
+ wait_queue_head_t imr_waitq;
+ char rf_name[32];
+
+ struct iwl_pcie_txqs txqs;
+
+ s8 me_present;
+ struct delayed_work me_recheck_wk;
+
+ struct iwl_dma_ptr invalid_tx_cmd;
+
+ wait_queue_head_t wait_command_queue;
+};
+
+static inline struct iwl_trans_pcie *
+IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
+{
+ return (void *)trans->trans_specific;
+}
+
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
+{
+ /*
+ * Before sending the interrupt the HW disables it to prevent
+ * a nested interrupt. This is done by writing 1 to the corresponding
+ * bit in the mask register. After handling the interrupt, it should be
+	 * re-enabled by clearing this bit. This register is defined as a
+	 * write-1-clear (W1C) register, meaning that the bit is cleared
+	 * by writing 1 to it.
+ */
+ iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
+}
+
+static inline struct iwl_trans *
+iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
+{
+ return container_of((void *)trans_pcie, struct iwl_trans,
+ trans_specific);
+}
+
+/*
+ * Convention: trans API functions: iwl_trans_pcie_XXX
+ * Other functions: iwl_pcie_XXX
+ */
+struct iwl_trans
+*iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ const struct iwl_mac_cfg *mac_cfg,
+ struct iwl_trans_info *info);
+void iwl_trans_pcie_free(struct iwl_trans *trans);
+void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
+ struct device *dev);
+
+bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent);
+#define _iwl_trans_pcie_grab_nic_access(trans, silent) \
+ __cond_lock(nic_access_nobh, \
+ likely(__iwl_trans_pcie_grab_nic_access(trans, silent)))
+
+void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev);
+void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev);
+
+/*****************************************************
+* RX
+******************************************************/
+int iwl_pcie_rx_init(struct iwl_trans *trans);
+int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
+irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
+irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
+irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
+int iwl_pcie_rx_stop(struct iwl_trans *trans);
+void iwl_pcie_rx_free(struct iwl_trans *trans);
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
+void iwl_pcie_rx_napi_sync(struct iwl_trans *trans);
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq);
+
+/*****************************************************
+* ICT - interrupt handling
+******************************************************/
+irqreturn_t iwl_pcie_isr(int irq, void *data);
+int iwl_pcie_alloc_ict(struct iwl_trans *trans);
+void iwl_pcie_free_ict(struct iwl_trans *trans);
+void iwl_pcie_reset_ict(struct iwl_trans *trans);
+void iwl_pcie_disable_ict(struct iwl_trans *trans);
+
+/*****************************************************
+* TX / HCMD
+******************************************************/
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)
+
+struct iwl_tso_hdr_page {
+ struct page *page;
+ u8 *pos;
+};
+
+/*
+ * Note that we put this struct *last* in the page. By doing that, we ensure
+ * that no TB referencing this page can trigger the 32-bit boundary hardware
+ * bug.
+ */
+struct iwl_tso_page_info {
+ dma_addr_t dma_addr;
+ struct page *next;
+ refcount_t use_count;
+};
+
+#define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info))
+#define IWL_TSO_PAGE_INFO(addr) \
+ ((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) + \
+ IWL_TSO_PAGE_DATA_SIZE))
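+
+/*
+ * Worked example (illustrative): with 4 KiB pages, the last
+ * sizeof(struct iwl_tso_page_info) bytes of every page are reserved,
+ * so for any addr within a page, IWL_TSO_PAGE_INFO(addr) resolves to
+ * page_start + IWL_TSO_PAGE_DATA_SIZE, independent of addr's offset.
+ */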
+
+int iwl_pcie_tx_init(struct iwl_trans *trans);
+void iwl_pcie_tx_start(struct iwl_trans *trans);
+int iwl_pcie_tx_stop(struct iwl_trans *trans);
+void iwl_pcie_tx_free(struct iwl_trans *trans);
+bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout);
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
+ bool configure_scd);
+void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
+ bool shared_mode);
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue);
+
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len);
+struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta,
+ u8 **hdr, unsigned int hdr_room,
+ unsigned int offset);
+
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta);
+
+static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr)
+{
+ dma_addr_t res;
+
+ res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
+ res += (unsigned long)addr & ~PAGE_MASK;
+
+ return res;
+}
+
+static inline dma_addr_t
+iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
+{
+ return txq->first_tb_dma +
+ sizeof(struct iwl_pcie_first_tb_buf) * idx;
+}
+
+static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
+{
+ return index & (q->n_window - 1);
+}
+
+static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq, int idx)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans->mac_cfg->gen2)
+ idx = iwl_txq_get_cmd_index(txq, idx);
+
+ return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
+}
+
+/*
+ * We need this inline in case dma_addr_t is only 32-bits - since the
+ * hardware is always 64-bit, the issue can still occur in that case,
+ * so use u64 for 'phys' here to force the addition in 64-bit.
+ */
+static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
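+
+/*
+ * Example (illustrative): phys = 0xFFFFF000 with len = 0x2000 ends at
+ * 0x100001000; upper_32_bits() differs (0 vs. 1), so this returns true
+ * and such a TB would hit the 32-bit boundary hardware bug noted above.
+ */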
+
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
+
+static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+ txq->id);
+ }
+}
+
+/**
+ * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
+ * @trans: the transport (for configuration data)
+ * @index: current index
+ */
+static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
+{
+ return ++index &
+ (trans->mac_cfg->base->max_tfd_queue_size - 1);
+}
+
+/**
+ * iwl_txq_dec_wrap - decrement queue index, wrap back to end
+ * @trans: the transport (for configuration data)
+ * @index: current index
+ */
+static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
+{
+ return --index &
+ (trans->mac_cfg->base->max_tfd_queue_size - 1);
+}
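+
+/*
+ * Illustrative: with max_tfd_queue_size = 256, iwl_txq_inc_wrap(trans,
+ * 255) returns 0 and iwl_txq_dec_wrap(trans, 0) returns 255; the mask
+ * is correct because the queue size is a power of two.
+ */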
+
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
+
+static inline void
+iwl_trans_pcie_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
+ }
+}
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd, dma_addr_t addr,
+ u16 len);
+
+static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ tfd->num_tbs = 0;
+
+ iwl_txq_gen2_set_tb(trans, tfd, trans_pcie->invalid_tx_cmd.dma,
+ trans_pcie->invalid_tx_cmd.size);
+}
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd);
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
+ u32 sta_mask, u8 tid,
+ int size, unsigned int timeout);
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue);
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id,
+ int queue_size);
+
+static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
+{
+ struct iwl_tfd *tfd;
+ struct iwl_tfd_tb *tb;
+
+ if (trans->mac_cfg->gen2) {
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
+ struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
+
+ return le16_to_cpu(tfh_tb->tb_len);
+ }
+
+ tfd = (struct iwl_tfd *)_tfd;
+ tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs, bool is_flush);
+void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
+void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze);
+int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx);
+int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm);
+
+/*****************************************************
+* Error handling
+******************************************************/
+void iwl_pcie_dump_csr(struct iwl_trans *trans);
+
+/*****************************************************
+* Helpers
+******************************************************/
+static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+ if (!trans_pcie->msix_enabled) {
+ /* disable interrupts from uCode/NIC to host */
+ iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+
+ /* acknowledge/clear/reset any interrupts still pending
+ * from uCode or flow handler (Rx/Tx DMA) */
+ iwl_write32(trans, CSR_INT, 0xffffffff);
+ iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
+ } else {
+ /* disable all the interrupt we might use */
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
+ trans_pcie->fh_init_mask);
+ iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
+ trans_pcie->hw_init_mask);
+ }
+ IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
+}
+
+static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
+ int start)
+{
+ int i = 0;
+
+ while (start < fw->num_sec &&
+ fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
+ fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
+ start++;
+ i++;
+ }
+
+ return i;
+}
+
+static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
+{
+ struct iwl_self_init_dram *dram = &trans->init_dram;
+ int i;
+
+ if (!dram->fw) {
+ WARN_ON(dram->fw_cnt);
+ return;
+ }
+
+ for (i = 0; i < dram->fw_cnt; i++)
+ dma_free_coherent(trans->dev, dram->fw[i].size,
+ dram->fw[i].block, dram->fw[i].physical);
+
+ kfree(dram->fw);
+ dram->fw_cnt = 0;
+ dram->fw = NULL;
+}
+
+static inline void iwl_disable_interrupts(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_bh(&trans_pcie->irq_lock);
+ _iwl_disable_interrupts(trans);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+}
+
+static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
+ set_bit(STATUS_INT_ENABLED, &trans->status);
+ if (!trans_pcie->msix_enabled) {
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ } else {
+ /*
+ * fh/hw_mask keeps all the unmasked causes.
+ * Unlike msi, in msix cause is enabled when it is unset.
+ */
+ trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+ trans_pcie->fh_mask = trans_pcie->fh_init_mask;
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
+ ~trans_pcie->fh_mask);
+ iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
+ ~trans_pcie->hw_mask);
+ }
+}
+
+static inline void iwl_enable_interrupts(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_bh(&trans_pcie->irq_lock);
+ _iwl_enable_interrupts(trans);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+}
+
+static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
+ trans_pcie->hw_mask = msk;
+}
+
+static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
+ trans_pcie->fh_mask = msk;
+}
+
+static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
+ if (!trans_pcie->msix_enabled) {
+ trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ } else {
+ iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
+ trans_pcie->hw_init_mask);
+ iwl_enable_fh_int_msk_msix(trans,
+ MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
+ }
+}
+
+static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans,
+ bool top_reset)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ IWL_DEBUG_ISR(trans, "Enabling %s interrupt only\n",
+ top_reset ? "RESET" : "ALIVE");
+
+ if (!trans_pcie->msix_enabled) {
+ /*
+ * When we'll receive the ALIVE interrupt, the ISR will call
+ * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
+ * interrupt (which is not really needed anymore) but also the
+ * RX interrupt which will allow us to receive the ALIVE
+ * notification (which is Rx) and continue the flow.
+ */
+ if (top_reset)
+ trans_pcie->inta_mask = CSR_INT_BIT_RESET_DONE;
+ else
+ trans_pcie->inta_mask = CSR_INT_BIT_ALIVE |
+ CSR_INT_BIT_FH_RX;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ } else {
+ u32 val = top_reset ? MSIX_HW_INT_CAUSES_REG_RESET_DONE
+ : MSIX_HW_INT_CAUSES_REG_ALIVE;
+
+ iwl_enable_hw_int_msk_msix(trans, val);
+
+ if (top_reset)
+ return;
+ /*
+ * Leave all the FH causes enabled to get the ALIVE
+ * notification.
+ */
+ iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
+ }
+}
+
+static inline const char *queue_name(struct device *dev,
+ struct iwl_trans_pcie *trans_p, int i)
+{
+ if (trans_p->shared_vec_mask) {
+ int vec = trans_p->shared_vec_mask &
+ IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+
+ if (i == 0)
+ return DRV_NAME ":shared_IRQ";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ":queue_%d", i + vec);
+ }
+ if (i == 0)
+ return DRV_NAME ":default_queue";
+
+ if (i == trans_p->alloc_vecs - 1)
+ return DRV_NAME ":exception";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ":queue_%d", i);
+}
+
+static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
+ if (!trans_pcie->msix_enabled) {
+ trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ } else {
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
+ trans_pcie->fh_init_mask);
+ iwl_enable_hw_int_msk_msix(trans,
+ MSIX_HW_INT_CAUSES_REG_RF_KILL);
+ }
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
+ /*
+ * On 9000-series devices this bit isn't enabled by default, so
+		 * when we power down the device we need to set the bit to
+		 * allow it
+ * to wake up the PCI-E bus for RF-kill interrupts.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
+ }
+}
+
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq);
+
+static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->mutex);
+
+ if (trans_pcie->debug_rfkill == 1)
+ return true;
+
+ return !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+}
+
+static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+ u32 reg, u32 mask, u32 value)
+{
+ u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
+
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+}
+
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
+}
+
+static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
+{
+ return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);
+void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
+void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans);
+#else
+static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
+#endif
+
+void iwl_pcie_rx_allocator_work(struct work_struct *data);
+
+/* common trans ops for all generations transports */
+void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans);
+int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
+void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
+void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
+void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val);
+u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs);
+u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg);
+void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
+int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
+int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords);
+int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership);
+struct iwl_trans_dump_data *
+iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx);
+int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status,
+ bool test, bool reset);
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
+void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value);
+int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val);
+bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
+void __releases(nic_access_nobh)
+iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
+
+/* transport gen 1 exported functions */
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans);
+int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill);
+void iwl_trans_pcie_stop_device(struct iwl_trans *trans);
+
+/* common functions that are used by gen2 transport */
+int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
+void iwl_pcie_apm_config(struct iwl_trans *trans);
+int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
+void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
+bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
+void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
+ bool was_in_rfkill);
+void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
+void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
+int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size);
+void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
+void iwl_pcie_apply_destination(struct iwl_trans *trans);
+
+/* transport gen 2 exported functions */
+int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill);
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
+int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd);
+int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd);
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt);
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt);
+int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data);
+
+#endif /* __iwl_trans_int_pcie_h__ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2003-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/gfp.h>
+
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "internal.h"
+#include "iwl-op-mode.h"
+#include "pcie/iwl-context-info-v2.h"
+#include "fw/dbg.h"
+
+/******************************************************************************
+ *
+ * RX path functions
+ *
+ ******************************************************************************/
+
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which points to Receive Buffers to be filled by the NIC. These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC. The driver and NIC manage the Rx buffers by means
+ * of indexes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt. The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ * When the interrupt handler is called, the request is processed.
+ * The page is either stolen - transferred to the upper layer
+ * or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ * count, detaches the RBD and transfers it to the queue used list.
+ * When there are two used RBDs - they are transferred to the allocator empty
+ * list. Work is then scheduled for the allocator to start allocating
+ * eight buffers.
+ * When there are another 6 used RBDs - they are transferred to the allocator
+ * empty list and the driver tries to claim the pre-allocated buffers and
+ * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
+ * until ready.
+ * When there are 8+ buffers in the free list - either from allocation or from
+ * 8 reused unstolen pages - restock is called to update the FW and indexes.
+ * + In order to make sure the allocator always has RBDs to use for allocation
+ * the allocator has initial pool in the size of num_queues*(8-2) - the
+ * maximum missing RBDs per allocation request (request posted with 2
+ * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
+ * The queues supply the recycling of the rest of the RBDs.
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' index is updated.
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
+ * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
+ * If there were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl_rxq_alloc() Allocates rx_free
+ * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * iwl_pcie_rxq_restock.
+ * Used only during initialization.
+ * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE index.
+ * iwl_pcie_rx_allocator() Background work for allocating pages.
+ *
+ * -- enable interrupts --
+ * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
+ * READ INDEX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Posts and claims requests to the allocator.
+ * Calls iwl_pcie_rxq_restock to refill any empty
+ * slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
+ * ...
+ *
+ */
+
+/*
+ * iwl_rxq_space - Return number of free slots available in queue.
+ */
+static int iwl_rxq_space(const struct iwl_rxq *rxq)
+{
+ /* Make sure rx queue size is a power of 2 */
+ WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
+
+ /*
+ * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
+ * between empty and completely full queues.
+ * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
+ * defined for negative dividends.
+ */
+ return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
+}
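+
+/*
+ * Worked example (illustrative): with queue_size = 256, read = 10 and
+ * write = 2, this returns (10 - 2 - 1) & 255 = 7 restockable slots;
+ * with write = 9 (WRITE == READ - 1, i.e. empty of data but fully
+ * stocked) it returns 0.
+ */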
+
+/*
+ * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
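+
+/*
+ * Example (illustrative): a DMA address of 0x12345600 is stored as
+ * 0x00123456; dropping the low 8 bits assumes RBs are at least
+ * 256-byte aligned.
+ */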
+
+/*
+ * iwl_pcie_rx_stop - stops the Rx DMA
+ */
+int iwl_pcie_rx_stop(struct iwl_trans *trans)
+{
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ /* TODO: remove this once fw does it */
+ iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_AX210, 0);
+ return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_AX210,
+ RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+ } else if (trans->mac_cfg->mq_rx_supported) {
+ iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+ return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
+ RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+ } else {
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+ 1000);
+ }
+}
+
+/*
+ * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
+ */
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ u32 reg;
+
+ lockdep_assert_held(&rxq->lock);
+
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+ * 2. there is a chance that the NIC is asleep
+ */
+ if (!trans->mac_cfg->base->shadow_reg_enable &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+ reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ rxq->need_update = true;
+ return;
+ }
+ }
+
+ rxq->write_actual = round_down(rxq->write, 8);
+ if (!trans->mac_cfg->mq_rx_supported)
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+ else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
+ HBUS_TARG_WRPTR_RX_Q(rxq->id));
+ else
+ iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
+ rxq->write_actual);
+}
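+
+/*
+ * Illustrative: with rxq->write = 13, round_down(13, 8) yields
+ * write_actual = 8; the device pointer advances again only once three
+ * more buffers are queued (write = 16).
+ */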
+
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ for (i = 0; i < trans->info.num_rxqs; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ if (!rxq->need_update)
+ continue;
+ spin_lock_bh(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+ rxq->need_update = false;
+ spin_unlock_bh(&rxq->lock);
+ }
+}
+
+static void iwl_pcie_restock_bd(struct iwl_trans *trans,
+ struct iwl_rxq *rxq,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_rx_transfer_desc *bd = rxq->bd;
+
+ BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
+
+ bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
+ bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
+ } else {
+ __le64 *bd = rxq->bd;
+
+ bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+ }
+
+ IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
+ (u32)rxb->vid, rxq->id, rxq->write);
+}
+
+/*
+ * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
+ */
+static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_mem_buffer *rxb;
+
+ /*
+ * If the device isn't enabled - no need to try to add buffers...
+ * This can happen when we stop the device and still have an interrupt
+ * pending. We stop the APM before we sync the interrupts because we
+ * have to (see comment there). On the other hand, since the APM is
+ * stopped, we cannot access the HW (in particular not prph).
+ * So don't try to restock if the APM has been already stopped.
+ */
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return;
+
+ spin_lock_bh(&rxq->lock);
+ while (rxq->free_count) {
+ /* Get next free Rx buffer, remove from free list */
+ rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+ list);
+ list_del(&rxb->list);
+ rxb->invalid = false;
+ /* some low bits are expected to be unset (depending on hw) */
+ WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
+ /* Point to Rx buffer via next RBD in circular buffer */
+ iwl_pcie_restock_bd(trans, rxq, rxb);
+ rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
+ rxq->free_count--;
+ }
+ spin_unlock_bh(&rxq->lock);
+
+ /*
+ * If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8.
+ */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_bh(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+ spin_unlock_bh(&rxq->lock);
+ }
+}
+
+/*
+ * iwl_pcie_rxsq_restock - restock implementation for single queue rx
+ */
+static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_rx_mem_buffer *rxb;
+
+ /*
+	 * If the device isn't enabled - no need to try to add buffers...
+ * This can happen when we stop the device and still have an interrupt
+ * pending. We stop the APM before we sync the interrupts because we
+ * have to (see comment there). On the other hand, since the APM is
+ * stopped, we cannot access the HW (in particular not prph).
+ * So don't try to restock if the APM has been already stopped.
+ */
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return;
+
+ spin_lock_bh(&rxq->lock);
+ while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
+ __le32 *bd = (__le32 *)rxq->bd;
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+ list);
+ list_del(&rxb->list);
+ rxb->invalid = false;
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_bh(&rxq->lock);
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_bh(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+ spin_unlock_bh(&rxq->lock);
+ }
+}
+
+/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static
+void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
+{
+ if (trans->mac_cfg->mq_rx_supported)
+ iwl_pcie_rxmq_restock(trans, rxq);
+ else
+ iwl_pcie_rxsq_restock(trans, rxq);
+}
+
+/*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
+ u32 *offset, gfp_t priority)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
+ unsigned int rbsize = trans_pcie->rx_buf_bytes;
+ struct page *page;
+ gfp_t gfp_mask = priority;
+
+ if (trans_pcie->rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ if (trans_pcie->alloc_page) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ /* recheck */
+ if (trans_pcie->alloc_page) {
+ *offset = trans_pcie->alloc_page_used;
+ page = trans_pcie->alloc_page;
+ trans_pcie->alloc_page_used += rbsize;
+ if (trans_pcie->alloc_page_used >= allocsize)
+ trans_pcie->alloc_page = NULL;
+ else
+ get_page(page);
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ return page;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+ trans_pcie->rx_page_order);
+ /*
+ * Issue an error if we don't have enough pre-allocated
+ * buffers.
+ */
+ if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
+			IWL_CRIT(trans, "Failed to alloc_pages\n");
+ return NULL;
+ }
+
+ if (2 * rbsize <= allocsize) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ if (!trans_pcie->alloc_page) {
+ get_page(page);
+ trans_pcie->alloc_page = page;
+ trans_pcie->alloc_page_used = rbsize;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
+ *offset = 0;
+ return page;
+}
+
+/*
+ * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
+ *
+ * A used RBD is an Rx buffer that has been given to the stack. To use it again
+ * a page must be allocated and the RBD must point to the page. This function
+ * doesn't change the HW pointer but handles the list of pages that is used by
+ * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
+ * allocated buffers.
+ */
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+
+ while (1) {
+ unsigned int offset;
+
+ spin_lock_bh(&rxq->lock);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_bh(&rxq->lock);
+ return;
+ }
+ spin_unlock_bh(&rxq->lock);
+
+ page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
+ if (!page)
+ return;
+
+ spin_lock_bh(&rxq->lock);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_bh(&rxq->lock);
+ __free_pages(page, trans_pcie->rx_page_order);
+ return;
+ }
+ rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
+ list);
+ list_del(&rxb->list);
+ spin_unlock_bh(&rxq->lock);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ rxb->offset = offset;
+ /* Get physical address of the RB */
+ rxb->page_dma =
+ dma_map_page(trans->dev, page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ rxb->page = NULL;
+ spin_lock_bh(&rxq->lock);
+ list_add(&rxb->list, &rxq->rx_used);
+ spin_unlock_bh(&rxq->lock);
+ __free_pages(page, trans_pcie->rx_page_order);
+ return;
+ }
+
+ spin_lock_bh(&rxq->lock);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+
+ spin_unlock_bh(&rxq->lock);
+ }
+}
+
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ if (!trans_pcie->rx_pool)
+ return;
+
+ for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
+ if (!trans_pcie->rx_pool[i].page)
+ continue;
+ dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
+ trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
+ __free_pages(trans_pcie->rx_pool[i].page,
+ trans_pcie->rx_page_order);
+ trans_pcie->rx_pool[i].page = NULL;
+ }
+}
+
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates 8 pages for each received request.
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ struct list_head local_empty;
+ int pending = atomic_read(&rba->req_pending);
+
+ IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
+
+ /* If we were scheduled - there is at least one request */
+ spin_lock_bh(&rba->lock);
+ /* swap out the rba->rbd_empty to a local list */
+ list_replace_init(&rba->rbd_empty, &local_empty);
+ spin_unlock_bh(&rba->lock);
+
+ while (pending) {
+ int i;
+ LIST_HEAD(local_allocated);
+ gfp_t gfp_mask = GFP_KERNEL;
+
+ /* Do not post a warning if there are only a few requests */
+ if (pending < RX_PENDING_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+
+ /* List should never be empty - each reused RBD is
+ * returned to the list, and initial pool covers any
+ * possible gap between the time the page is allocated
+			 * and the time the RBD is added.
+ */
+ BUG_ON(list_empty(&local_empty));
+ /* Get the first rxb from the rbd list */
+ rxb = list_first_entry(&local_empty,
+ struct iwl_rx_mem_buffer, list);
+ BUG_ON(rxb->page);
+
+ /* Alloc a new receive buffer */
+ page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
+ gfp_mask);
+ if (!page)
+ continue;
+ rxb->page = page;
+
+ /* Get physical address of the RB */
+ rxb->page_dma = dma_map_page(trans->dev, page,
+ rxb->offset,
+ trans_pcie->rx_buf_bytes,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ rxb->page = NULL;
+ __free_pages(page, trans_pcie->rx_page_order);
+ continue;
+ }
+
+ /* move the allocated entry to the out list */
+ list_move(&rxb->list, &local_allocated);
+ i++;
+ }
+
+ atomic_dec(&rba->req_pending);
+ pending--;
+
+ if (!pending) {
+ pending = atomic_read(&rba->req_pending);
+ if (pending)
+ IWL_DEBUG_TPT(trans,
+ "Got more pending allocation requests = %d\n",
+ pending);
+ }
+
+ spin_lock_bh(&rba->lock);
+ /* add the allocated rbds to the allocator allocated list */
+ list_splice_tail(&local_allocated, &rba->rbd_allocated);
+ /* get more empty RBDs for current pending requests */
+ list_splice_tail_init(&rba->rbd_empty, &local_empty);
+ spin_unlock_bh(&rba->lock);
+
+ atomic_inc(&rba->req_ready);
+
+ }
+
+ spin_lock_bh(&rba->lock);
+ /* return unused rbds to the allocator empty list */
+ list_splice_tail(&local_empty, &rba->rbd_empty);
+ spin_unlock_bh(&rba->lock);
+
+ IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
+}
+
+/*
+ * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
+ *
+ * Called by the queue when it has posted an allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ * This function directly moves the allocated RBs to the queue's ownership
+ * and updates the relevant counters.
+ */
+static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i;
+
+ lockdep_assert_held(&rxq->lock);
+
+ /*
+ * atomic_dec_if_positive returns req_ready - 1 for any scenario.
+ * If req_ready is 0 atomic_dec_if_positive will return -1 and this
+ * function will return early, as there are no ready requests.
+	 * atomic_dec_if_positive will perform the *actual* decrement only if
+ * req_ready > 0, i.e. - there are ready requests and the function
+ * hands one request to the caller.
+ */
+ if (atomic_dec_if_positive(&rba->req_ready) < 0)
+ return;
+
+ spin_lock(&rba->lock);
+ for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+ /* Get next free Rx buffer, remove it from free list */
+ struct iwl_rx_mem_buffer *rxb =
+ list_first_entry(&rba->rbd_allocated,
+ struct iwl_rx_mem_buffer, list);
+
+ list_move(&rxb->list, &rxq->rx_free);
+ }
+ spin_unlock(&rba->lock);
+
+ rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+ rxq->free_count += RX_CLAIM_REQ_ALLOC;
+}
+
+void iwl_pcie_rx_allocator_work(struct work_struct *data)
+{
+ struct iwl_rb_allocator *rba_p =
+ container_of(data, struct iwl_rb_allocator, rx_alloc);
+ struct iwl_trans_pcie *trans_pcie =
+ container_of(rba_p, struct iwl_trans_pcie, rba);
+
+ iwl_pcie_rx_allocator(trans_pcie->trans);
+}
+
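+/*
+ * Size of one free-RBD table entry: AX210 and later use a full transfer
+ * descriptor, multi-queue devices a 64-bit DMA address, and older
+ * single-queue devices a 32-bit address.
+ */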
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
+{
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return sizeof(struct iwl_rx_transfer_desc);
+
+ return trans->mac_cfg->mq_rx_supported ?
+ sizeof(__le64) : sizeof(__le32);
+}
+
+static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
+{
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return sizeof(struct iwl_rx_completion_desc_bz);
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return sizeof(struct iwl_rx_completion_desc);
+
+ return sizeof(__le32);
+}
+
+static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ int free_size = iwl_pcie_free_bd_size(trans);
+
+ if (rxq->bd)
+ dma_free_coherent(trans->dev,
+ free_size * rxq->queue_size,
+ rxq->bd, rxq->bd_dma);
+ rxq->bd_dma = 0;
+ rxq->bd = NULL;
+
+ rxq->rb_stts_dma = 0;
+ rxq->rb_stts = NULL;
+
+ if (rxq->used_bd)
+ dma_free_coherent(trans->dev,
+ iwl_pcie_used_bd_size(trans) *
+ rxq->queue_size,
+ rxq->used_bd, rxq->used_bd_dma);
+ rxq->used_bd_dma = 0;
+ rxq->used_bd = NULL;
+}
+
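+/*
+ * Size of the per-queue receive-buffer status area: AX210 and later only
+ * keep a 16-bit closed-RB count, older devices use the full iwl_rb_status.
+ */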
+static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)
+{
+ bool use_rx_td = (trans->mac_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210);
+
+ if (use_rx_td)
+ return sizeof(__le16);
+
+ return sizeof(struct iwl_rb_status);
+}
+
+static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
+ struct device *dev = trans->dev;
+ int i;
+ int free_size;
+
+ spin_lock_init(&rxq->lock);
+ if (trans->mac_cfg->mq_rx_supported)
+ rxq->queue_size = iwl_trans_get_num_rbds(trans);
+ else
+ rxq->queue_size = RX_QUEUE_SIZE;
+
+ free_size = iwl_pcie_free_bd_size(trans);
+
+ /*
+ * Allocate the circular buffer of Read Buffer Descriptors
+ * (RBDs)
+ */
+ rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err;
+
+ if (trans->mac_cfg->mq_rx_supported) {
+ rxq->used_bd = dma_alloc_coherent(dev,
+ iwl_pcie_used_bd_size(trans) *
+ rxq->queue_size,
+ &rxq->used_bd_dma,
+ GFP_KERNEL);
+ if (!rxq->used_bd)
+ goto err;
+ }
+
+ rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
+ rxq->rb_stts_dma =
+ trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
+
+ return 0;
+
+err:
+ for (i = 0; i < trans->info.num_rxqs; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ iwl_pcie_free_rxq_dma(trans, rxq);
+ }
+
+ return -ENOMEM;
+}
+
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i, ret;
+
+ if (WARN_ON(trans_pcie->rxq))
+ return -EINVAL;
+
+ trans_pcie->rxq = kcalloc(trans->info.num_rxqs, sizeof(struct iwl_rxq),
+ GFP_KERNEL);
+ trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->rx_pool[0]),
+ GFP_KERNEL);
+ trans_pcie->global_table =
+ kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->global_table[0]),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
+ !trans_pcie->global_table) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&rba->lock);
+
+ /*
+ * Allocate the driver's pointer to receive buffer status.
+	 * Allocate for all queues contiguously (HW requirement).
+ */
+ trans_pcie->base_rb_stts =
+ dma_alloc_coherent(trans->dev,
+ rb_stts_size * trans->info.num_rxqs,
+ &trans_pcie->base_rb_stts_dma,
+ GFP_KERNEL);
+ if (!trans_pcie->base_rb_stts) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < trans->info.num_rxqs; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ rxq->id = i;
+ ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
+ if (ret)
+ goto err;
+ }
+ return 0;
+
+err:
+ if (trans_pcie->base_rb_stts) {
+ dma_free_coherent(trans->dev,
+ rb_stts_size * trans->info.num_rxqs,
+ trans_pcie->base_rb_stts,
+ trans_pcie->base_rb_stts_dma);
+ trans_pcie->base_rb_stts = NULL;
+ trans_pcie->base_rb_stts_dma = 0;
+ }
+ kfree(trans_pcie->rx_pool);
+ trans_pcie->rx_pool = NULL;
+ kfree(trans_pcie->global_table);
+ trans_pcie->global_table = NULL;
+ kfree(trans_pcie->rxq);
+ trans_pcie->rxq = NULL;
+
+ return ret;
+}
+
+static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+
+ switch (trans->conf.rx_buf_size) {
+ case IWL_AMSDU_4K:
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+ break;
+ case IWL_AMSDU_8K:
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ break;
+ case IWL_AMSDU_12K:
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
+ break;
+ default:
+ WARN_ON(1);
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+ }
+
+ if (!iwl_trans_grab_nic_access(trans))
+ return;
+
+ /* Stop Rx DMA */
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ /* reset and flush pointers */
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+ iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
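+	/* (the register holds the DMA address in 256-byte units, hence >> 8) */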
+ iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
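+	/* (this register takes the address in 16-byte units, hence >> 4) */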
+ iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
+	 * the credit mechanism of the 5000-series HW RX FIFO
+	 * Direct Rx interrupts to the host
+	 * Rx buffer size 4k, 8k or 12k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ rb_size |
+ (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ iwl_trans_release_nic_access(trans);
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ /* W/A for interrupt coalescing bug in 7260 and 3160 */
+ if (trans->cfg->host_interrupt_operation_mode)
+ iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
+}
+
+static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 rb_size, enabled = 0;
+ int i;
+
+ switch (trans->conf.rx_buf_size) {
+ case IWL_AMSDU_2K:
+ rb_size = RFH_RXF_DMA_RB_SIZE_2K;
+ break;
+ case IWL_AMSDU_4K:
+ rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+ break;
+ case IWL_AMSDU_8K:
+ rb_size = RFH_RXF_DMA_RB_SIZE_8K;
+ break;
+ case IWL_AMSDU_12K:
+ rb_size = RFH_RXF_DMA_RB_SIZE_12K;
+ break;
+ default:
+ WARN_ON(1);
+ rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+ }
+
+ if (!iwl_trans_grab_nic_access(trans))
+ return;
+
+ /* Stop Rx DMA */
+ iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
+	/* disable free and used rx queue operation */
+ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
+
+ for (i = 0; i < trans->info.num_rxqs; i++) {
+ /* Tell device where to find RBD free table in DRAM */
+ iwl_write_prph64_no_grab(trans,
+ RFH_Q_FRBDCB_BA_LSB(i),
+ trans_pcie->rxq[i].bd_dma);
+ /* Tell device where to find RBD used table in DRAM */
+ iwl_write_prph64_no_grab(trans,
+ RFH_Q_URBDCB_BA_LSB(i),
+ trans_pcie->rxq[i].used_bd_dma);
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_write_prph64_no_grab(trans,
+ RFH_Q_URBD_STTS_WPTR_LSB(i),
+ trans_pcie->rxq[i].rb_stts_dma);
+		/* Reset the device's index tables */
+ iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
+ iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
+ iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
+
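+		/* bit i enables the queue's free-RBD ring,
+		 * bit i + 16 its used-RBD ring
+		 */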
+ enabled |= BIT(i) | BIT(i + 16);
+ }
+
+ /*
+ * Enable Rx DMA
+	 * Rx buffer size 4k, 8k or 12k
+ * Min RB size 4 or 8
+ * Drop frames that exceed RB size
+ * 512 RBDs
+ */
+ iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
+ RFH_DMA_EN_ENABLE_VAL | rb_size |
+ RFH_RXF_DMA_MIN_RB_4_8 |
+ RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
+ RFH_RXF_DMA_RBDCB_SIZE_512);
+
+ /*
+ * Activate DMA snooping.
+ * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
+ * Default queue is 0
+ */
+ iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
+ RFH_GEN_CFG_RFH_DMA_SNOOP |
+ RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
+ RFH_GEN_CFG_SERVICE_DMA_SNOOP |
+ RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
+ trans->mac_cfg->integrated ?
+ RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
+ RFH_GEN_CFG_RB_CHUNK_SIZE_128));
+ /* Enable the relevant rx queues */
+ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+
+ iwl_trans_release_nic_access(trans);
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+}
+
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+{
+ lockdep_assert_held(&rxq->lock);
+
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ rxq->free_count = 0;
+ rxq->used_count = 0;
+}
+
+static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
+
+static inline struct iwl_trans_pcie *iwl_netdev_to_trans_pcie(struct net_device *dev)
+{
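+	/* the NAPI netdev's private area stores a pointer back to trans_pcie */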
+ return *(struct iwl_trans_pcie **)netdev_priv(dev);
+}
+
+static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ int ret;
+
+ trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
+ trans = trans_pcie->trans;
+
+ ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+ IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
+ rxq->id, ret, budget);
+
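+	/* used less than the full budget: all pending Rx was handled */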
+ if (ret < budget) {
+ spin_lock(&trans_pcie->irq_lock);
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ _iwl_enable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ napi_complete_done(&rxq->napi, ret);
+ }
+
+ return ret;
+}
+
+static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ int ret;
+
+ trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
+ trans = trans_pcie->trans;
+
+ ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+ IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
+ budget);
+
+ if (ret < budget) {
+ int irq_line = rxq->id;
+
+ /* FIRST_RSS is shared with line 0 */
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
+ rxq->id == 1)
+ irq_line = 0;
+
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_pcie_clear_irq(trans, irq_line);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ napi_complete_done(&rxq->napi, ret);
+ }
+
+ return ret;
+}
+
+void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ if (unlikely(!trans_pcie->rxq))
+ return;
+
+ for (i = 0; i < trans->info.num_rxqs; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ if (rxq && rxq->napi.poll)
+ napi_synchronize(&rxq->napi);
+ }
+}
+
+static int _iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *def_rxq;
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i, err, queue_size, allocator_pool_size, num_alloc;
+
+ if (!trans_pcie->rxq) {
+ err = iwl_pcie_rx_alloc(trans);
+ if (err)
+ return err;
+ }
+ def_rxq = trans_pcie->rxq;
+
+ cancel_work_sync(&rba->rx_alloc);
+
+ spin_lock_bh(&rba->lock);
+ atomic_set(&rba->req_pending, 0);
+ atomic_set(&rba->req_ready, 0);
+ INIT_LIST_HEAD(&rba->rbd_allocated);
+ INIT_LIST_HEAD(&rba->rbd_empty);
+ spin_unlock_bh(&rba->lock);
+
+ /* free all first - we overwrite everything here */
+ iwl_pcie_free_rbs_pool(trans);
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ def_rxq->queue[i] = NULL;
+
+ for (i = 0; i < trans->info.num_rxqs; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ spin_lock_bh(&rxq->lock);
+ /*
+ * Set read write pointer to reflect that we have processed
+ * and used all buffers, but have not restocked the Rx queue
+ * with fresh buffers
+ */
+ rxq->read = 0;
+ rxq->write = 0;
+ rxq->write_actual = 0;
+ memset(rxq->rb_stts, 0,
+ (trans->mac_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210) ?
+ sizeof(__le16) : sizeof(struct iwl_rb_status));
+
+ iwl_pcie_rx_init_rxb_lists(rxq);
+
+ spin_unlock_bh(&rxq->lock);
+
+ if (!rxq->napi.poll) {
+ int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
+
+ if (trans_pcie->msix_enabled)
+ poll = iwl_pcie_napi_poll_msix;
+
+ netif_napi_add(trans_pcie->napi_dev, &rxq->napi,
+ poll);
+ napi_enable(&rxq->napi);
+ }
+
+ }
+
+ /* move the pool to the default queue and allocator ownerships */
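+	/* in MQ mode one RBD is held back so the ring can never fill completely */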
+ queue_size = trans->mac_cfg->mq_rx_supported ?
+ trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
+ allocator_pool_size = trans->info.num_rxqs *
+ (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
+ num_alloc = queue_size + allocator_pool_size;
+
+ for (i = 0; i < num_alloc; i++) {
+ struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
+
+ if (i < allocator_pool_size)
+ list_add(&rxb->list, &rba->rbd_empty);
+ else
+ list_add(&rxb->list, &def_rxq->rx_used);
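+		/* VIDs are 1-based; a VID of 0 denotes an invalid entry
+		 * in the completion descriptors
+		 */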
+ trans_pcie->global_table[i] = rxb;
+ rxb->vid = (u16)(i + 1);
+ rxb->invalid = true;
+ }
+
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
+
+ return 0;
+}
+
+int iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = _iwl_pcie_rx_init(trans);
+
+ if (ret)
+ return ret;
+
+ if (trans->mac_cfg->mq_rx_supported)
+ iwl_pcie_rx_mq_hw_init(trans);
+ else
+ iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
+
+ iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
+
+ spin_lock_bh(&trans_pcie->rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
+ spin_unlock_bh(&trans_pcie->rxq->lock);
+
+ return 0;
+}
+
+int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
+{
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ /*
+ * We don't configure the RFH.
+ * Restock will be done at alive, after firmware configured the RFH.
+ */
+ return _iwl_pcie_rx_init(trans);
+}
+
+void iwl_pcie_rx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i;
+
+ /*
+ * if rxq is NULL, it means that nothing has been allocated,
+ * exit now
+ */
+ if (!trans_pcie->rxq) {
+ IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+ return;
+ }
+
+ cancel_work_sync(&rba->rx_alloc);
+
+ iwl_pcie_free_rbs_pool(trans);
+
+ if (trans_pcie->base_rb_stts) {
+ dma_free_coherent(trans->dev,
+ rb_stts_size * trans->info.num_rxqs,
+ trans_pcie->base_rb_stts,
+ trans_pcie->base_rb_stts_dma);
+ trans_pcie->base_rb_stts = NULL;
+ trans_pcie->base_rb_stts_dma = 0;
+ }
+
+ for (i = 0; i < trans->info.num_rxqs; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ iwl_pcie_free_rxq_dma(trans, rxq);
+
+ if (rxq->napi.poll) {
+ napi_disable(&rxq->napi);
+ netif_napi_del(&rxq->napi);
+ }
+ }
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
+ kfree(trans_pcie->rxq);
+
+ if (trans_pcie->alloc_page)
+ __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
+}
+
+static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
+ struct iwl_rb_allocator *rba)
+{
+ spin_lock(&rba->lock);
+ list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+}
+
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when an RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rxq *rxq, bool emergency)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+	/* Move the RBD to the used list; it will be moved to the allocator
+	 * in batches before claiming or posting a request.
+	 */
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ if (unlikely(emergency))
+ return;
+
+ /* Count the allocator owned RBDs */
+ rxq->used_count++;
+
+	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
+	 * issue a request to the allocator. The modulo of RX_CLAIM_REQ_ALLOC
+	 * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC
+	 * buffers earlier but still need to post another request.
+ */
+ if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+		/* Move the 2 RBDs to the allocator's ownership.
+		 * The allocator has another 6 from the pool for the request
+		 * completion.
+		 */
+ iwl_pcie_rx_move_to_allocator(rxq, rba);
+
+ atomic_inc(&rba->req_pending);
+ queue_work(rba->alloc_wq, &rba->rx_alloc);
+ }
+}
+
+static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
+ struct iwl_rxq *rxq,
+ struct iwl_rx_mem_buffer *rxb,
+ bool emergency,
+ int i)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
+ bool page_stolen = false;
+ int max_len = trans_pcie->rx_buf_bytes;
+ u32 offset = 0;
+
+ if (WARN_ON(!rxb))
+ return;
+
+ dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
+
+ while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
+ struct iwl_rx_packet *pkt;
+ bool reclaim;
+ int len;
+ struct iwl_rx_cmd_buffer rxcb = {
+ ._offset = rxb->offset + offset,
+ ._rx_page_order = trans_pcie->rx_page_order,
+ ._page = rxb->page,
+ ._page_stolen = false,
+ .truesize = max_len,
+ };
+
+ pkt = rxb_addr(&rxcb);
+
+ if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
+ IWL_DEBUG_RX(trans,
+ "Q %d: RB end marker at offset %d\n",
+ rxq->id, offset);
+ break;
+ }
+
+ WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
+ FH_RSCSR_RXQ_POS != rxq->id,
+ "frame on invalid queue - is on %d and indicates %d\n",
+ rxq->id,
+ (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
+ FH_RSCSR_RXQ_POS);
+
+ IWL_DEBUG_RX(trans,
+ "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
+ rxq->id, offset,
+ iwl_get_cmd_string(trans,
+ WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
+ pkt->hdr.group_id, pkt->hdr.cmd,
+ le16_to_cpu(pkt->hdr.sequence));
+
+ len = iwl_rx_packet_len(pkt);
+ len += sizeof(u32); /* account for status word */
+
+ offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
+
+ /* check that what the device tells us made sense */
+ if (len < sizeof(*pkt) || offset > max_len)
+ break;
+
+ maybe_trace_iwlwifi_dev_rx(trans, pkt, len);
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
+ if (reclaim && !pkt->hdr.group_id) {
+ int i;
+
+ for (i = 0; i < trans->conf.n_no_reclaim_cmds; i++) {
+ if (trans->conf.no_reclaim_cmds[i] ==
+ pkt->hdr.cmd) {
+ reclaim = false;
+ break;
+ }
+ }
+ }
+
+ if (rxq->id == IWL_DEFAULT_RX_QUEUE)
+ iwl_op_mode_rx(trans->op_mode, &rxq->napi,
+ &rxcb);
+ else
+ iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
+ &rxcb, rxq->id);
+
+ /*
+ * After here, we should always check rxcb._page_stolen,
+ * if it is true then one of the handlers took the page.
+ */
+
+ if (reclaim && txq) {
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int index = SEQ_TO_INDEX(sequence);
+ int cmd_index = iwl_txq_get_cmd_index(txq, index);
+
+ kfree_sensitive(txq->entries[cmd_index].free_buf);
+ txq->entries[cmd_index].free_buf = NULL;
+
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking
+ * iwl_trans_send_cmd()
+ * as we reclaim the driver command queue */
+ if (!rxcb._page_stolen)
+ iwl_pcie_hcmd_complete(trans, &rxcb);
+ else
+ IWL_WARN(trans, "Claim null rxb?\n");
+ }
+
+ page_stolen |= rxcb._page_stolen;
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ break;
+ }
+
+ /* page was stolen from us -- free our reference */
+ if (page_stolen) {
+ __free_pages(rxb->page, trans_pcie->rx_page_order);
+ rxb->page = NULL;
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+ dma_map_page(trans->dev, rxb->page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ /*
+ * free the page(s) as well to not break
+ * the invariant that the items on the used
+ * list have no page(s)
+ */
+ __free_pages(rxb->page, trans_pcie->rx_page_order);
+ rxb->page = NULL;
+ iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
+ } else
+ iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
+}
+
+static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
+ struct iwl_rxq *rxq, int i,
+ bool *join)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_mem_buffer *rxb;
+ u16 vid;
+
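+	/* the completion descriptor layouts are fixed by the hardware */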
+ BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
+ BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
+
+ if (!trans->mac_cfg->mq_rx_supported) {
+ rxb = rxq->queue[i];
+ rxq->queue[i] = NULL;
+ return rxb;
+ }
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
+
+ vid = le16_to_cpu(cd[i].rbid);
+ *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_rx_completion_desc *cd = rxq->used_bd;
+
+ vid = le16_to_cpu(cd[i].rbid);
+ *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ } else {
+ __le32 *cd = rxq->used_bd;
+
+ vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
+ }
+
+ if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
+ goto out_err;
+
+ rxb = trans_pcie->global_table[vid - 1];
+ if (rxb->invalid)
+ goto out_err;
+
+ IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
+
+ rxb->invalid = true;
+
+ return rxb;
+
+out_err:
+ WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
+ iwl_force_nmi(trans);
+ return NULL;
+}
+
+/*
+ * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
+ */
+static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq;
+ u32 r, i, count = 0, handled = 0;
+ bool emergency = false;
+
+ if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
+ return budget;
+
+ rxq = &trans_pcie->rxq[queue];
+
+restart:
+ spin_lock(&rxq->lock);
+ /* uCode's read index (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = iwl_get_closed_rb_stts(trans, rxq);
+ i = rxq->read;
+
+ /* W/A 9000 device step A0 wrap-around bug */
+ r &= (rxq->queue_size - 1);
+
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
+
+ while (i != r && ++handled < budget) {
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ struct iwl_rx_mem_buffer *rxb;
+ /* number of RBDs still waiting for page allocation */
+ u32 rb_pending_alloc =
+ atomic_read(&trans_pcie->rba.req_pending) *
+ RX_CLAIM_REQ_ALLOC;
+ bool join = false;
+
+ if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
+ !emergency)) {
+ iwl_pcie_rx_move_to_allocator(rxq, rba);
+ emergency = true;
+ IWL_DEBUG_TPT(trans,
+ "RX path is in emergency. Pending allocations %d\n",
+ rb_pending_alloc);
+ }
+
+ IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
+
+ rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
+ if (!rxb)
+ goto out;
+
+ if (unlikely(join || rxq->next_rb_is_fragment)) {
+ rxq->next_rb_is_fragment = join;
+ /*
+ * We can only get a multi-RB in the following cases:
+ * - firmware issue, sending a too big notification
+ * - sniffer mode with a large A-MSDU
+ * - large MTU frames (>2k)
+ * since the multi-RB functionality is limited to newer
+ * hardware that cannot put multiple entries into a
+ * single RB.
+ *
+ * Right now, the higher layers aren't set up to deal
+ * with that, so discard all of these.
+ */
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else {
+ iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
+ }
+
+ i = (i + 1) & (rxq->queue_size - 1);
+
+ /*
+ * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+ * try to claim the pre-allocated buffers from the allocator.
+ * If not ready - will try to reclaim next time.
+ * There is no need to reschedule work - allocator exits only
+ * on success
+ */
+ if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
+ iwl_pcie_rx_allocator_get(trans, rxq);
+
+ if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
+ /* Add the remaining empty RBDs for allocator use */
+ iwl_pcie_rx_move_to_allocator(rxq, rba);
+ } else if (emergency) {
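+			/* in emergency, replenish and restock directly
+			 * every 8 handled RBDs
+			 */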
+ count++;
+ if (count == 8) {
+ count = 0;
+ if (rb_pending_alloc < rxq->queue_size / 3) {
+ IWL_DEBUG_TPT(trans,
+ "RX path exited emergency. Pending allocations %d\n",
+ rb_pending_alloc);
+ emergency = false;
+ }
+
+ rxq->read = i;
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
+ iwl_pcie_rxq_restock(trans, rxq);
+ goto restart;
+ }
+ }
+ }
+out:
+ /* Backtrack one entry */
+ rxq->read = i;
+ spin_unlock(&rxq->lock);
+
+ /*
+	 * Handle the case where, in emergency, there are some unallocated
+	 * RBDs. Those RBDs are in the used list, but are not tracked by the
+	 * queue's used_count, which only counts allocator-owned RBDs.
+	 * Unallocated emergency RBDs must be allocated on exit; otherwise,
+	 * when this function is called again it may not be in emergency mode,
+	 * and they would be handed to the allocator with no tracking in the
+	 * RBD allocator counters, which would lead to them never being
+	 * claimed back by the queue.
+	 * By allocating them here, they are now in the queue's free list and
+	 * will be restocked by the next call of iwl_pcie_rxq_restock.
+ */
+ if (unlikely(emergency && count))
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
+
+ iwl_pcie_rxq_restock(trans, rxq);
+
+ return handled;
+}
+
+static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
+{
+ u8 queue = entry->entry;
+ struct msix_entry *entries = entry - queue;
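+	/* entry->entry is its index in msix_entries[],
+	 * so this recovers the array base
+	 */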
+
+ return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
+}
+
+/*
+ * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
+ * This interrupt handler should be used with RSS queue only.
+ */
+irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
+{
+ struct msix_entry *entry = dev_id;
+ struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
+ struct iwl_trans *trans = trans_pcie->trans;
+ struct iwl_rxq *rxq;
+
+ trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
+
+ if (WARN_ON(entry->entry >= trans->info.num_rxqs))
+ return IRQ_NONE;
+
+ if (!trans_pcie->rxq) {
+ if (net_ratelimit())
+ IWL_ERR(trans,
+ "[%d] Got MSI-X interrupt before we have Rx queues\n",
+ entry->entry);
+ return IRQ_NONE;
+ }
+
+ rxq = &trans_pcie->rxq[entry->entry];
+ lock_map_acquire(&trans->sync_cmd_lockdep_map);
+ IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
+
+ local_bh_disable();
+ if (!napi_schedule(&rxq->napi))
+ iwl_pcie_clear_irq(trans, entry->entry);
+ local_bh_enable();
+
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
+ */
+static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
+ if (trans->cfg->internal_wimax_coex &&
+ !trans->mac_cfg->base->apmg_not_supported &&
+ (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
+ APMS_CLK_VAL_MRB_FUNC_MODE) ||
+ (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
+ APMG_PS_CTRL_VAL_RESET_REQ))) {
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ iwl_op_mode_wimax_active(trans->op_mode);
+ wake_up(&trans_pcie->wait_command_queue);
+ return;
+ }
+
+ for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
+ if (!trans_pcie->txqs.txq[i])
+ continue;
+ timer_delete(&trans_pcie->txqs.txq[i]->stuck_timer);
+ }
+
+ /* The STATUS_FW_ERROR bit is set in this function. This must happen
+ * before we wake up the command caller, to ensure a proper cleanup. */
+ iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ);
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ wake_up(&trans_pcie->wait_command_queue);
+}
+
+static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
+{
+ u32 inta;
+
+ lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(trans, CSR_INT);
+
+ /* the thread will service interrupts and re-enable them */
+ return inta;
+}
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT 12
+#define ICT_SIZE (1 << ICT_SHIFT)
+#define ICT_COUNT (ICT_SIZE / sizeof(u32))
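+/* i.e. 1024 32-bit entries per ICT table */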
+
+/* Interrupt handler using the ICT table. With this mechanism the driver
+ * stops reading the INTA register to discover the device's interrupts,
+ * since reading that register is expensive. Instead, the device writes
+ * the interrupt causes into the ICT DRAM table, increments its index and
+ * then fires an interrupt to the driver. The driver ORs all ICT table
+ * entries from the current index up to the first entry with value 0; the
+ * result is the set of interrupts to service. The driver then sets the
+ * entries back to 0 and updates the index.
+ */
+static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 inta;
+ u32 val = 0;
+ u32 read;
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+ if (!read)
+ return 0;
+
+ /*
+ * Collect all entries up to the first 0, starting from ict_index;
+ * note we already read at ict_index.
+ */
+ do {
+ val |= read;
+ IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+ trans_pcie->ict_index, read);
+ trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+ trans_pcie->ict_index =
+ ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
+
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+ read);
+ } while (read);
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
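+	/* expand the compressed ICT value back to the CSR_INT bit layout:
+	 * the low byte maps to bits 0-7, the next byte to bits 24-31
+	 */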
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ return inta;
+}
+
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+ bool hw_rfkill, prev, report;
+
+ mutex_lock(&trans_pcie->mutex);
+ prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill) {
+ set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ set_bit(STATUS_RFKILL_HW, &trans->status);
+ }
+ if (trans_pcie->opmode_down)
+ report = hw_rfkill;
+ else
+ report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+
+ IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
+ hw_rfkill ? "disable radio" : "enable radio");
+
+ isr_stats->rfkill++;
+
+ if (prev != report)
+ iwl_trans_pcie_rf_kill(trans, report, from_irq);
+ mutex_unlock(&trans_pcie->mutex);
+
+ if (hw_rfkill) {
+ if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status))
+ IWL_DEBUG_RF_KILL(trans,
+ "Rfkill while SYNC HCMD in flight\n");
+ wake_up(&trans_pcie->wait_command_queue);
+ } else {
+ clear_bit(STATUS_RFKILL_HW, &trans->status);
+ if (trans_pcie->opmode_down)
+ clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ }
+}
+
+static void iwl_trans_pcie_handle_reset_interrupt(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 state;
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
+ u32 val = iwl_read32(trans, CSR_IPC_STATE);
+
+ state = u32_get_bits(val, CSR_IPC_STATE_RESET);
+ IWL_DEBUG_ISR(trans, "IPC state = 0x%x/%d\n", val, state);
+ } else {
+ state = CSR_IPC_STATE_RESET_SW_READY;
+ }
+
+ switch (state) {
+ case CSR_IPC_STATE_RESET_SW_READY:
+ if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ IWL_DEBUG_ISR(trans, "Reset flow completed\n");
+ trans_pcie->fw_reset_state = FW_RESET_OK;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ break;
+ }
+ fallthrough;
+ case CSR_IPC_STATE_RESET_TOP_READY:
+ if (trans_pcie->fw_reset_state == FW_RESET_TOP_REQUESTED) {
+ IWL_DEBUG_ISR(trans, "TOP Reset continues\n");
+ trans_pcie->fw_reset_state = FW_RESET_OK;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ break;
+ }
+ fallthrough;
+ case CSR_IPC_STATE_RESET_NONE:
+ IWL_FW_CHECK_FAILED(trans,
+ "Invalid reset interrupt (state=%d)!\n",
+ state);
+ break;
+ case CSR_IPC_STATE_RESET_TOP_FOLLOWER:
+ if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ /* if we were in reset, wake that up */
+ IWL_INFO(trans,
+ "TOP reset from BT while doing reset\n");
+ trans_pcie->fw_reset_state = FW_RESET_OK;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ } else {
+ IWL_INFO(trans, "TOP reset from BT\n");
+ trans->state = IWL_TRANS_NO_FW;
+ iwl_trans_schedule_reset(trans,
+ IWL_ERR_TYPE_TOP_RESET_BY_BT);
+ }
+ break;
+ }
+}
+
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
+{
+ struct iwl_trans *trans = dev_id;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+ u32 inta = 0;
+ u32 handled = 0;
+ bool polling = false;
+
+ lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+ spin_lock_bh(&trans_pcie->irq_lock);
+
+	/* if the DRAM interrupt table is not set up yet,
+	 * use the legacy interrupt path.
+ */
+ if (likely(trans_pcie->use_ict))
+ inta = iwl_pcie_int_cause_ict(trans);
+ else
+ inta = iwl_pcie_int_cause_non_ict(trans);
+
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+ inta, trans_pcie->inta_mask,
+ iwl_read32(trans, CSR_INT_MASK),
+ iwl_read32(trans, CSR_FH_INT_STATUS));
+ if (inta & (~trans_pcie->inta_mask))
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)\n",
+ inta & (~trans_pcie->inta_mask));
+ }
+
+ inta &= trans_pcie->inta_mask;
+
+ /*
+ * Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC.
+ */
+ if (unlikely(!inta)) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ /*
+ * Re-enable interrupts here since we don't
+ * have anything to service
+ */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ _iwl_enable_interrupts(trans);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_NONE;
+ }
+
+ if (unlikely(inta == 0xFFFFFFFF || iwl_trans_is_hw_error_value(inta))) {
+ /*
+ * Hardware disappeared. It might have
+ * already raised an interrupt.
+ */
+ IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+ goto out;
+ }
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ */
+	/* There is a hardware bug in the interrupt mask function whereby some
+	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
+	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
+	 * ICT interrupt handling mechanism has another bug that might cause
+	 * these unmasked interrupts to fail to be detected. We work around
+	 * these hardware bugs here by ACKing all the possible interrupts so
+	 * that interrupt coalescing can still be achieved.
+ */
+ iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
+
+ if (iwl_have_debug_level(IWL_DL_ISR))
+ IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
+ inta, iwl_read32(trans, CSR_INT_MASK));
+
+ spin_unlock_bh(&trans_pcie->irq_lock);
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IWL_ERR(trans, "Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ iwl_disable_interrupts(trans);
+
+ isr_stats->hw++;
+ iwl_pcie_irq_handle_error(trans);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ goto out;
+ }
+
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ IWL_DEBUG_ISR(trans,
+ "Scheduler finished to transmit the frame/frames.\n");
+ isr_stats->sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+ isr_stats->alive++;
+ if (trans->mac_cfg->gen2) {
+ /*
+ * We can restock, since firmware configured
+ * the RFH
+ */
+ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+ }
+
+ handled |= CSR_INT_BIT_ALIVE;
+ }
+
+ if (inta & CSR_INT_BIT_RESET_DONE) {
+ iwl_trans_pcie_handle_reset_interrupt(trans);
+ handled |= CSR_INT_BIT_RESET_DONE;
+ }
+
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ iwl_pcie_handle_rfkill_irq(trans, true);
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IWL_ERR(trans, "Microcode CT kill error detected.\n");
+ isr_stats->ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+		IWL_ERR(trans,
+			"Microcode SW error detected. Restarting 0x%X.\n",
+			inta);
+ isr_stats->sw++;
+ if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ trans_pcie->fw_reset_state = FW_RESET_ERROR;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ } else {
+ iwl_pcie_irq_handle_error(trans);
+ }
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /* uCode wakes up after power-down sleep */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+ iwl_pcie_rxq_check_wrptr(trans);
+ iwl_pcie_txq_check_wrptrs(trans);
+
+ isr_stats->wakeup++;
+
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+	/* All uCode command responses, including Tx command responses,
+	 * Rx "responses" (frame-received notifications), and other
+	 * notifications from uCode come through here. */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
+ CSR_INT_BIT_RX_PERIODIC)) {
+ IWL_DEBUG_ISR(trans, "Rx interrupt\n");
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ iwl_write32(trans, CSR_FH_INT_STATUS,
+ CSR_FH_INT_RX_MASK);
+ }
+ if (inta & CSR_INT_BIT_RX_PERIODIC) {
+ handled |= CSR_INT_BIT_RX_PERIODIC;
+ iwl_write32(trans,
+ CSR_INT, CSR_INT_BIT_RX_PERIODIC);
+ }
+		/* Sending an RX interrupt requires many steps to be done in
+		 * the device:
+		 * 1- write interrupt to current index in ICT table.
+		 * 2- dma RX frame.
+		 * 3- update RX shared data to indicate last write index.
+		 * 4- send interrupt.
+		 * This could lead to an RX race: the driver could receive the
+		 * RX interrupt before the shared data changes reflect it; the
+		 * periodic interrupt will detect any dangling Rx activity.
+ */
+
+ /* Disable periodic interrupt; we use it as just a one-shot. */
+ iwl_write8(trans, CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_DIS);
+
+ /*
+ * Enable periodic interrupt in 8 msec only if we received
+ * real RX interrupt (instead of just periodic int), to catch
+ * any dangling Rx interrupt. If it was just the periodic
+ * interrupt, there was no dangling Rx activity, and no need
+ * to extend the periodic interrupt; one-shot is enough.
+ */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
+ iwl_write8(trans, CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_ENA);
+
+ isr_stats->rx++;
+
+ local_bh_disable();
+ if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[0].napi);
+ }
+ local_bh_enable();
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta & CSR_INT_BIT_FH_TX) {
+ iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
+ IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+ isr_stats->tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* Wake up uCode load routine, now that load is complete */
+ trans_pcie->ucode_write_complete = true;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ /* Wake up IMR write routine, now that write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
+ }
+
+ if (inta & ~handled) {
+ IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ isr_stats->unhandled++;
+ }
+
+ if (inta & ~(trans_pcie->inta_mask)) {
+ IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
+ inta & ~trans_pcie->inta_mask);
+ }
+
+ if (!polling) {
+ spin_lock_bh(&trans_pcie->irq_lock);
+ /* only Re-enable all interrupt if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ _iwl_enable_interrupts(trans);
+ /* we are loading the firmware, enable FH_TX interrupt only */
+ else if (handled & CSR_INT_BIT_FH_TX)
+ iwl_enable_fw_load_int(trans);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(trans);
+ /* Re-enable the ALIVE / Rx interrupt if it occurred */
+ else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
+ iwl_enable_fw_load_int_ctx_info(trans, false);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+ }
+
+out:
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ *
+ * ICT functions
+ *
+ ******************************************************************************/
+
+/* Free dram table */
+void iwl_pcie_free_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans_pcie->ict_tbl) {
+ dma_free_coherent(trans->dev, ICT_SIZE,
+ trans_pcie->ict_tbl,
+ trans_pcie->ict_tbl_dma);
+ trans_pcie->ict_tbl = NULL;
+ trans_pcie->ict_tbl_dma = 0;
+ }
+}
+
+/*
+ * Allocate the DRAM shared table: an aligned memory block of ICT_SIZE.
+ * Also reset all data related to the ICT table interrupt.
+ */
+int iwl_pcie_alloc_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans_pcie->ict_tbl =
+ dma_alloc_coherent(trans->dev, ICT_SIZE,
+ &trans_pcie->ict_tbl_dma, GFP_KERNEL);
+ if (!trans_pcie->ict_tbl)
+ return -ENOMEM;
+
+ /* just an API sanity check ... it is guaranteed to be aligned */
+ if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
+ iwl_pcie_free_ict(trans);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* The device is going up: inform it that it should use the ICT interrupt
+ * table, and tell the driver code to start using ICT interrupts.
+ */
+void iwl_pcie_reset_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 val;
+
+ if (!trans_pcie->ict_tbl)
+ return;
+
+ spin_lock_bh(&trans_pcie->irq_lock);
+ _iwl_disable_interrupts(trans);
+
+ memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
+
+ val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
+
+ val |= CSR_DRAM_INT_TBL_ENABLE |
+ CSR_DRAM_INIT_TBL_WRAP_CHECK |
+ CSR_DRAM_INIT_TBL_WRITE_POINTER;
+
+ IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
+
+ iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
+ trans_pcie->use_ict = true;
+ trans_pcie->ict_index = 0;
+ iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
+ _iwl_enable_interrupts(trans);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+}
+
+/* The device is going down: disable ICT interrupt usage */
+void iwl_pcie_disable_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_bh(&trans_pcie->irq_lock);
+ trans_pcie->use_ict = false;
+ spin_unlock_bh(&trans_pcie->irq_lock);
+}
+
+irqreturn_t iwl_pcie_isr(int irq, void *data)
+{
+ struct iwl_trans *trans = data;
+
+ if (!trans)
+ return IRQ_NONE;
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+	 * If we have something to service, the irq thread will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here.
+ */
+ iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+
+ return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
+{
+ struct msix_entry *entry = dev_id;
+ struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
+ struct iwl_trans *trans = trans_pcie->trans;
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+ u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
+ u32 inta_fh, inta_hw;
+ bool polling = false;
+ bool sw_err;
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
+ inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
+ inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;
+
+ lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+ spin_lock_bh(&trans_pcie->irq_lock);
+ inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
+ inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+ /*
+	 * Clear the causes registers to avoid handling the same cause again.
+ */
+ iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
+ iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+
+ trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
+
+ if (unlikely(!(inta_fh | inta_hw))) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_NONE;
+ }
+
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+ entry->entry, inta_fh, trans_pcie->fh_mask,
+ iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
+ if (inta_fh & ~trans_pcie->fh_mask)
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)\n",
+ inta_fh & ~trans_pcie->fh_mask);
+ }
+
+ inta_fh &= trans_pcie->fh_mask;
+
+ if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
+ inta_fh & MSIX_FH_INT_CAUSES_Q0) {
+ local_bh_disable();
+ if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[0].napi);
+ }
+ local_bh_enable();
+ }
+
+ if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
+ inta_fh & MSIX_FH_INT_CAUSES_Q1) {
+ local_bh_disable();
+ if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[1].napi);
+ }
+ local_bh_enable();
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
+ trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
+ isr_stats->tx++;
+
+ /* Wake up IMR routine once write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
+ } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+ IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+ isr_stats->tx++;
+ /*
+ * Wake up uCode load routine,
+ * now that load is complete
+ */
+ trans_pcie->ucode_write_complete = true;
+ wake_up(&trans_pcie->ucode_write_waitq);
+
+ /* Wake up IMR routine once write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
+ }
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
+ else
+ sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
+
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {
+ IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
+ inta_hw);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ trans->request_top_reset = 1;
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_TOP_FATAL_ERROR);
+ iwl_trans_schedule_reset(trans,
+ IWL_ERR_TYPE_TOP_FATAL_ERROR);
+ }
+ }
+
+ /* Error detected by uCode */
+ if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
+ IWL_ERR(trans,
+ "Microcode SW error detected. Restarting 0x%X.\n",
+ inta_fh);
+ isr_stats->sw++;
+		/* during the FW reset flow, report errors from there */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_ERROR;
+ wake_up(&trans_pcie->imr_waitq);
+ } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ trans_pcie->fw_reset_state = FW_RESET_ERROR;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ } else {
+ iwl_pcie_irq_handle_error(trans);
+ }
+ }
+
+ /* After checking FH register check HW register */
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+ entry->entry, inta_hw, trans_pcie->hw_mask,
+ iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
+ if (inta_hw & ~trans_pcie->hw_mask)
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt 0x%08x\n",
+ inta_hw & ~trans_pcie->hw_mask);
+ }
+
+ inta_hw &= trans_pcie->hw_mask;
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
+ IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+ isr_stats->alive++;
+ if (trans->mac_cfg->gen2) {
+ /* We can restock, since firmware configured the RFH */
+ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+ }
+ }
+
+ /*
+ * In some rare cases when the HW is in a bad state, we may
+ * get this interrupt too early, when prph_info is still NULL.
+ * So make sure that it's not NULL to prevent crashing.
+ */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
+ u32 sleep_notif =
+ le32_to_cpu(trans_pcie->prph_info->sleep_notif);
+ if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
+ sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
+ IWL_DEBUG_ISR(trans,
+ "Sx interrupt: sleep notification = 0x%x\n",
+ sleep_notif);
+ trans_pcie->sx_complete = true;
+ wake_up(&trans_pcie->sx_waitq);
+ } else {
+ /* uCode wakes up after power-down sleep */
+ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+ iwl_pcie_rxq_check_wrptr(trans);
+ iwl_pcie_txq_check_wrptrs(trans);
+
+ isr_stats->wakeup++;
+ }
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
+ IWL_ERR(trans, "Microcode CT kill error detected.\n");
+ isr_stats->ctkill++;
+ }
+
+ /* HW RF KILL switch toggled */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
+ iwl_pcie_handle_rfkill_irq(trans, true);
+
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
+ IWL_ERR(trans,
+ "Hardware error detected. Restarting.\n");
+
+ isr_stats->hw++;
+ trans->dbg.hw_error = true;
+ iwl_pcie_irq_handle_error(trans);
+ }
+
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)
+ iwl_trans_pcie_handle_reset_interrupt(trans);
+
+ if (!polling)
+ iwl_pcie_clear_irq(trans, entry->entry);
+
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+
+ return IRQ_HANDLED;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2025 Intel Corporation
+ */
+#include "iwl-trans.h"
+#include "iwl-prph.h"
+#include "pcie/iwl-context-info.h"
+#include "pcie/iwl-context-info-v2.h"
+#include "internal.h"
+#include "fw/dbg.h"
+
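+/* time to wait for the firmware's reset handshake ACK: HZ / 5 = 200 ms */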
+#define FW_RESET_TIMEOUT (HZ / 5)
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
+{
+ int ret = 0;
+
+ IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_HAP_WAKE);
+
+ iwl_pcie_apm_config(trans);
+
+ ret = iwl_finish_nic_init(trans);
+ if (ret)
+ return ret;
+
+ set_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+ return 0;
+}
+
+static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
+{
+ IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+ if (op_mode_leave) {
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ iwl_pcie_gen2_apm_init(trans);
+
+ /* inform ME that we are leaving */
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_WAKE_ME |
+ CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);
+ mdelay(1);
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ mdelay(5);
+ }
+
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+ /* Stop device's DMA activity */
+ iwl_pcie_apm_stop_master(trans);
+
+ iwl_trans_pcie_sw_reset(trans, false);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
+ else
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
+void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ trans_pcie->fw_reset_state = FW_RESET_REQUESTED;
+
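+ /*
+ * Ring the firmware's reset-handshake doorbell; the mechanism
+ * differs by family: an NMI register before AX210, the ISR6
+ * doorbell on AX210, and the doorbell vector CSR on later devices.
+ */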
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
+ UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE);
+ else if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
+ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
+ else
+ iwl_write32(trans, CSR_DOORBELL_VECTOR,
+ UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
+
+ /* wait 200ms */
+ ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
+ trans_pcie->fw_reset_state != FW_RESET_REQUESTED,
+ FW_RESET_TIMEOUT);
+ if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) {
+ bool reset_done;
+ u32 inta_hw;
+
+ if (trans_pcie->msix_enabled) {
+ inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+ reset_done =
+ inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE;
+ } else {
+ inta_hw = iwl_read32(trans, CSR_INT_MASK);
+ reset_done = inta_hw & CSR_INT_BIT_RESET_DONE;
+ }
+
+ IWL_ERR(trans,
+ "timeout waiting for FW reset ACK (inta_hw=0x%x, reset_done %d)\n",
+ inta_hw, reset_done);
+
+ if (!reset_done) {
+ struct iwl_fw_error_dump_mode mode = {
+ .type = IWL_ERR_TYPE_RESET_HS_TIMEOUT,
+ .context = IWL_ERR_CONTEXT_FROM_OPMODE,
+ };
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_RESET_HS_TIMEOUT);
+ iwl_op_mode_dump_error(trans->op_mode, &mode);
+ }
+ }
+
+ trans_pcie->fw_reset_state = FW_RESET_IDLE;
+}
+
+static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->mutex);
+
+ if (trans_pcie->is_down)
+ return;
+
+ if (trans->state >= IWL_TRANS_FW_STARTED &&
+ trans->conf.fw_reset_handshake) {
+ /*
+ * The reset handshake can dump the firmware on timeout; mark
+ * the firmware as dead first so the dump runs under that
+ * assumption.
+ */
+ trans->state = IWL_TRANS_NO_FW;
+ iwl_trans_pcie_fw_reset_handshake(trans);
+ }
+
+ trans_pcie->is_down = true;
+
+ /* tell the device to stop sending interrupts */
+ iwl_disable_interrupts(trans);
+
+ /* device going down, stop using ICT table */
+ iwl_pcie_disable_ict(trans);
+
+ /*
+ * If a HW restart happens during firmware loading,
+ * then the firmware loading might call this function
+ * and later it might be called again due to the
+ * restart. So don't process again if the device is
+ * already dead.
+ */
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans,
+ "DEVICE_ENABLED bit was set and is now cleared\n");
+ iwl_pcie_synchronize_irqs(trans);
+ iwl_pcie_rx_napi_sync(trans);
+ iwl_txq_gen2_tx_free(trans);
+ iwl_pcie_rx_stop(trans);
+ }
+
+ iwl_pcie_ctxt_info_free_paging(trans);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_pcie_ctxt_info_v2_free(trans, false);
+ else
+ iwl_pcie_ctxt_info_free(trans);
+
+ /* Stop the device, and put it in low power state */
+ iwl_pcie_gen2_apm_stop(trans, false);
+
+ /* re-take ownership to prevent other users from stealing the device */
+ iwl_trans_pcie_sw_reset(trans, true);
+
+ /*
+ * Upon stop, the IVAR table gets erased, so msi-x won't
+ * work. This causes a bug in RF-KILL flows, since the interrupt
+ * that enables radio won't fire on the correct irq, and the
+ * driver won't be able to handle the interrupt.
+ * Configure the IVAR table again after reset.
+ */
+ iwl_pcie_conf_msix_hw(trans_pcie);
+
+ /*
+ * Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * This is a bug in certain versions of the hardware.
+ * Certain devices also keep sending the HW RF kill interrupt all
+ * the time unless it is ACKed, even when the interrupt
+ * should be masked. Re-ACK all the interrupts here.
+ */
+ iwl_disable_interrupts(trans);
+
+ /* clear all status bits */
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+
+ /*
+ * Even if we stop the HW, we still want the RF kill
+ * interrupt
+ */
+ iwl_enable_rfkill_int(trans);
+}
+
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool was_in_rfkill;
+
+ iwl_op_mode_time_point(trans->op_mode,
+ IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
+ NULL);
+
+ mutex_lock(&trans_pcie->mutex);
+ trans_pcie->opmode_down = true;
+ was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ _iwl_trans_pcie_gen2_stop_device(trans);
+ iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
+ mutex_unlock(&trans_pcie->mutex);
+}
+
+static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
+ trans->mac_cfg->base->min_txq_size);
+ int ret;
+
+ /* TODO: most of the logic can be removed in A0 - but not in Z0 */
+ spin_lock_bh(&trans_pcie->irq_lock);
+ ret = iwl_pcie_gen2_apm_init(trans);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+ if (ret)
+ return ret;
+
+ iwl_op_mode_nic_config(trans->op_mode);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (iwl_pcie_gen2_rx_init(trans))
+ return -ENOMEM;
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (iwl_txq_gen2_init(trans, trans->conf.cmd_queue, queue_size))
+ return -ENOMEM;
+
+ /* enable shadow regs in HW */
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+ IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+
+ return 0;
+}
+
+static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ char *buf = trans_pcie->rf_name;
+ size_t buflen = sizeof(trans_pcie->rf_name);
+ size_t pos;
+ u32 version;
+
+ if (buf[0])
+ return;
+
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF):
+ pos = scnprintf(buf, buflen, "JF");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF):
+ pos = scnprintf(buf, buflen, "GF");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4):
+ pos = scnprintf(buf, buflen, "GF4");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
+ pos = scnprintf(buf, buflen, "HR");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
+ pos = scnprintf(buf, buflen, "HR1");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
+ pos = scnprintf(buf, buflen, "HRCDB");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_FM):
+ pos = scnprintf(buf, buflen, "FM");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_WP):
+ if (SILICON_Z_STEP ==
+ CSR_HW_RFID_STEP(trans->info.hw_rf_id))
+ pos = scnprintf(buf, buflen, "WHTC");
+ else
+ pos = scnprintf(buf, buflen, "WH");
+ break;
+ default:
+ return;
+ }
+
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
+ version = iwl_read_prph(trans, CNVI_MBOX_C);
+ switch (version) {
+ case 0x20000:
+ pos += scnprintf(buf + pos, buflen - pos, " B3");
+ break;
+ case 0x120000:
+ pos += scnprintf(buf + pos, buflen - pos, " B5");
+ break;
+ default:
+ pos += scnprintf(buf + pos, buflen - pos,
+ " (0x%x)", version);
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x",
+ trans->info.hw_rf_id);
+
+ IWL_INFO(trans, "Detected RF %s\n", buf);
+
+ /*
+ * also add a \n for debugfs - need to do it after printing
+ * since our IWL_INFO machinery wants to see a static \n at
+ * the end of the string
+ */
+ pos += scnprintf(buf + pos, buflen - pos, "\n");
+}
+
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_pcie_reset_ict(trans);
+
+ /* make sure all queue are not stopped/used */
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
+
+ /* now that we got alive we can free the fw image & the context info;
+ * the paging memory cannot be freed, since the FW will still use it
+ */
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_pcie_ctxt_info_v2_free(trans, true);
+ else
+ iwl_pcie_ctxt_info_free(trans);
+
+ /*
+ * Re-enable all the interrupts, including the RF-Kill one, now that
+ * the firmware is alive.
+ */
+ iwl_enable_interrupts(trans);
+ mutex_lock(&trans_pcie->mutex);
+ iwl_pcie_check_hw_rf_kill(trans);
+
+ iwl_pcie_get_rf_name(trans);
+ mutex_unlock(&trans_pcie->mutex);
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ trans->step_urm = !!(iwl_read_umac_prph(trans,
+ CNVI_PMU_STEP_FLOW) &
+ CNVI_PMU_STEP_FLOW_FORCE_URM);
+}
+
+static bool iwl_pcie_set_ltr(struct iwl_trans *trans)
+{
+ u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+ u32_encode_bits(250,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+ CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+ u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
+
+ /*
+ * To workaround hardware latency issues during the boot process,
+ * initialize the LTR to ~250 usec (see ltr_val above).
+ * The firmware initializes this again later (to a smaller value).
+ */
+ if ((trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+ trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+ !trans->mac_cfg->integrated) {
+ iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+ return true;
+ }
+
+ if (trans->mac_cfg->integrated &&
+ trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+ iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+ iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
+ return true;
+ }
+
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
+ /* First clear the interrupt, just in case */
+ iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
+ MSIX_HW_INT_CAUSES_REG_IML);
+ /* In this case, unfortunately the same ROM bug exists in the
+ * device (not setting LTR correctly), but we don't have control
+ * over the settings from the host due to some hardware security
+ * features. The only workaround we've been able to come up with
+ * so far is to try to keep the CPU and device busy by polling
+ * it and the IML (image loader) completed interrupt.
+ */
+ return false;
+ }
+
+ /* nothing needs to be done on other devices */
+ return true;
+}
+
+static void iwl_pcie_spin_for_iml(struct iwl_trans *trans)
+{
+/* in practice, this seems to complete in around 20-30 ms at most, so wait 100 ms */
+#define IML_WAIT_TIMEOUT (HZ / 10)
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long end_time = jiffies + IML_WAIT_TIMEOUT;
+ u32 value, loops = 0;
+ bool irq = false;
+
+ if (WARN_ON(!trans_pcie->iml))
+ return;
+
+ value = iwl_read32(trans, CSR_LTR_LAST_MSG);
+ IWL_DEBUG_INFO(trans, "Polling for IML load - CSR_LTR_LAST_MSG=0x%x\n",
+ value);
+
+ while (time_before(jiffies, end_time)) {
+ if (iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD) &
+ MSIX_HW_INT_CAUSES_REG_IML) {
+ irq = true;
+ break;
+ }
+ /* Keep the CPU and device busy. */
+ value = iwl_read32(trans, CSR_LTR_LAST_MSG);
+ loops++;
+ }
+
+ IWL_DEBUG_INFO(trans,
+ "Polled for IML load: irq=%d, loops=%d, CSR_LTR_LAST_MSG=0x%x\n",
+ irq, loops, value);
+
+ /* We don't fail here even if we timed out - maybe we get lucky and the
+ * interrupt comes in later (and we get alive from firmware) and then
+ * we're all happy - but if not we'll fail on alive timeout or get some
+ * other error out.
+ */
+}
+
+int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill, keep_ram_busy;
+ bool top_reset_done = false;
+ int ret;
+
+ mutex_lock(&trans_pcie->mutex);
+again:
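+ /* restart point: we come back here after a successful TOP reset */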
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_pcie_prepare_card_hw(trans)) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ iwl_enable_rfkill_int(trans);
+
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ /*
+ * We enabled the RF-Kill interrupt and the handler may very
+ * well be running. Disable the interrupts to make sure no other
+ * interrupt can be fired.
+ */
+ iwl_disable_interrupts(trans);
+
+ /* Make sure it finished running */
+ iwl_pcie_synchronize_irqs(trans);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
+ if (hw_rfkill && !run_in_rfkill) {
+ ret = -ERFKILL;
+ goto out;
+ }
+
+ /* Someone called stop_device, don't try to start_fw */
+ if (trans_pcie->is_down) {
+ IWL_WARN(trans,
+ "Can't start_fw since the HW hasn't been started\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ ret = iwl_pcie_gen2_nic_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Unable to init nic\n");
+ goto out;
+ }
+
+ if (WARN_ON(trans->do_top_reset &&
+ trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)) {
+ /* use the common exit path so the mutex is released */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* we need to wait later - set state */
+ if (trans->do_top_reset)
+ trans_pcie->fw_reset_state = FW_RESET_TOP_REQUESTED;
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (!top_reset_done) {
+ ret = iwl_pcie_ctxt_info_v2_alloc(trans, fw, img);
+ if (ret)
+ goto out;
+ }
+
+ iwl_pcie_ctxt_info_v2_kick(trans);
+ } else {
+ ret = iwl_pcie_ctxt_info_init(trans, img);
+ if (ret)
+ goto out;
+ }
+
+ keep_ram_busy = !iwl_pcie_set_ltr(trans);
+
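+ /* kick the ROM into loading the image; the start register differs per family */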
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ IWL_DEBUG_POWER(trans, "function scratch register value is 0x%08x\n",
+ iwl_read32(trans, CSR_FUNC_SCRATCH));
+ iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_ROM_START);
+ } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
+ } else {
+ iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+ }
+
+ if (keep_ram_busy)
+ iwl_pcie_spin_for_iml(trans);
+
+ if (trans->do_top_reset) {
+ trans->do_top_reset = 0;
+
+#define FW_TOP_RESET_TIMEOUT (HZ / 4)
+ ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
+ trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED,
+ FW_TOP_RESET_TIMEOUT);
+
+ if (trans_pcie->fw_reset_state != FW_RESET_OK) {
+ if (trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED)
+ IWL_ERR(trans,
+ "TOP reset interrupted by error (state %d)!\n",
+ trans_pcie->fw_reset_state);
+ else
+ IWL_ERR(trans, "TOP reset timed out!\n");
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_TOP_RESET_FAILED);
+ iwl_trans_schedule_reset(trans,
+ IWL_ERR_TYPE_TOP_RESET_FAILED);
+ ret = -EIO;
+ goto out;
+ }
+
+ msleep(10);
+ IWL_INFO(trans, "TOP reset successful, reinit now\n");
+ /* now load the firmware again properly */
+ trans_pcie->prph_scratch->ctrl_cfg.control.control_flags &=
+ ~cpu_to_le32(IWL_PRPH_SCRATCH_TOP_RESET);
+ top_reset_done = true;
+ goto again;
+ }
+
+ /* re-check RF-Kill state since we may have missed the interrupt */
+ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
+ if (hw_rfkill && !run_in_rfkill)
+ ret = -ERFKILL;
+
+out:
+ mutex_unlock(&trans_pcie->mutex);
+ return ret;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2007-2015, 2018-2024 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/seq_file.h>
+
+#include "iwl-drv.h"
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-scd.h"
+#include "iwl-agn-hw.h"
+#include "fw/error-dump.h"
+#include "fw/dbg.h"
+#include "fw/api/tx.h"
+#include "fw/acpi.h"
+#include "mei/iwl-mei.h"
+#include "internal.h"
+#include "iwl-fh.h"
+#include "pcie/iwl-context-info-v2.h"
+
+/* extended range in FW SRAM */
+#define IWL_FW_MEM_EXTENDED_START 0x40000
+#define IWL_FW_MEM_EXTENDED_END 0x57FFF
+
+void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
+{
+#define PCI_DUMP_SIZE 352
+#define PCI_MEM_DUMP_SIZE 64
+#define PCI_PARENT_DUMP_SIZE 524
+#define PREFIX_LEN 32
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct pci_dev *pdev = trans_pcie->pci_dev;
+ u32 i, pos, alloc_size, *ptr, *buf;
+ char *prefix;
+
+ if (trans_pcie->pcie_dbg_dumped_once)
+ return;
+
+ /* Should be a multiple of 4 */
+ BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
+ BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
+ BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);
+
+ /* Alloc a max size buffer */
+ alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
+ alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
+ alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
+ alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);
+
+ buf = kmalloc(alloc_size, GFP_ATOMIC);
+ if (!buf)
+ return;
+ prefix = (char *)buf + alloc_size - PREFIX_LEN;
+
+ IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");
+
+ /* Print wifi device registers */
+ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+ IWL_ERR(trans, "iwlwifi device config registers:\n");
+ for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+ IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
+ for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
+ *ptr = iwl_read32(trans, i);
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (pos) {
+ IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
+ for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, pos + i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 32, 4, buf, i, 0);
+ }
+
+ /* Print parent device registers next */
+ if (!pdev->bus->self)
+ goto out;
+
+ pdev = pdev->bus->self;
+ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+
+ IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
+ pci_name(pdev));
+ for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+ /* Print root port AER registers */
+ pos = 0;
+ pdev = pcie_find_root_port(pdev);
+ if (pdev)
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (pos) {
+ IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
+ pci_name(pdev));
+ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+ for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, pos + i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
+ 4, buf, i, 0);
+ }
+ goto out;
+
+err_read:
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+ IWL_ERR(trans, "Read failed at 0x%X\n", i);
+out:
+ trans_pcie->pcie_dbg_dumped_once = 1;
+ kfree(buf);
+}
+
+int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
+{
+ /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_SW_RESET);
+ usleep_range(10000, 20000);
+ } else {
+ iwl_set_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_SW_RESET);
+ usleep_range(5000, 6000);
+ }
+
+ if (retake_ownership)
+ return iwl_pcie_prepare_card_hw(trans);
+
+ return 0;
+}
+
+static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
+{
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+
+ if (!fw_mon->size)
+ return;
+
+ dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
+ fw_mon->physical);
+
+ fw_mon->block = NULL;
+ fw_mon->physical = 0;
+ fw_mon->size = 0;
+}
+
+static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
+ u8 max_power)
+{
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+ void *block = NULL;
+ dma_addr_t physical = 0;
+ u32 size = 0;
+ u8 power;
+
+ if (fw_mon->size) {
+ memset(fw_mon->block, 0, fw_mon->size);
+ return;
+ }
+
+ /* need at least 2 KiB, so stop at 11 */
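+ /* try successively smaller power-of-two buffers until one allocates */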
+ for (power = max_power; power >= 11; power--) {
+ size = BIT(power);
+ block = dma_alloc_coherent(trans->dev, size, &physical,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!block)
+ continue;
+
+ IWL_INFO(trans,
+ "Allocated 0x%08x bytes for firmware monitor.\n",
+ size);
+ break;
+ }
+
+ if (WARN_ON_ONCE(!block))
+ return;
+
+ if (power != max_power)
+ IWL_ERR(trans,
+ "Sorry - debug buffer is only %luK while you requested %luK\n",
+ (unsigned long)BIT(power - 10),
+ (unsigned long)BIT(max_power - 10));
+
+ fw_mon->block = block;
+ fw_mon->physical = physical;
+ fw_mon->size = size;
+}
+
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+{
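+ /*
+ * The FW TLV encodes the monitor size as a power-of-two exponent
+ * biased by 11 (2 KiB granularity), with 0 meaning "use the
+ * maximum"; 26 corresponds to the 64 MiB ceiling.
+ */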
+ if (!max_power) {
+ /* default max_power is maximum */
+ max_power = 26;
+ } else {
+ max_power += 11;
+ }
+
+ if (WARN(max_power > 26,
+ "External buffer size for monitor is too big %d, check the FW TLV\n",
+ max_power))
+ return;
+
+ iwl_pcie_alloc_fw_monitor_block(trans, max_power);
+}
+
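+/*
+ * Indirect access to the shared (SHR) block: the control register
+ * takes the target offset in its low 16 bits and an opcode in the top
+ * nibble - 2 for read, 3 for write, judging by the two helpers below.
+ */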
+static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
+{
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (2 << 28)));
+ return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+}
+
+static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
+{
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (3 << 28)));
+}
+
+static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
+{
+ if (trans->mac_cfg->base->apmg_not_supported)
+ return;
+
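+ /* use the Vaux power source only if the device can signal PME from D3cold */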
+ if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ else
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+void iwl_pcie_apm_config(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 lctl;
+ u16 cap;
+
+ /*
+ * L0S states have been found to be unstable with our devices
+ * and in newer hardware they are not officially supported at
+ * all, so we must always set the L0S_DISABLED bit.
+ */
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
+ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
+ trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+
+ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
+ trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
+ IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
+ (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
+ trans->ltr_enabled ? "En" : "Dis");
+}
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int iwl_pcie_apm_init(struct iwl_trans *trans)
+{
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_HAP_WAKE);
+
+ iwl_pcie_apm_config(trans);
+
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (trans->mac_cfg->base->pll_cfg)
+ iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
+
+ ret = iwl_finish_nic_init(trans);
+ if (ret)
+ return ret;
+
+ if (trans->cfg->host_interrupt_operation_mode) {
+ /*
+ * This is a bit of an abuse - the workaround is needed for
+ * 7260 / 3160 only, so we key it off
+ * host_interrupt_operation_mode even though it is not really
+ * related to that mode.
+ *
+ * Enable the oscillator to count wake up time for L1 exit. This
+ * consumes slightly more power (100uA) - but allows to be sure
+ * that we wake up from L1 on time.
+ *
+ * This looks weird: read twice the same register, discard the
+ * value, set a bit, and yet again, read that same register
+ * just to discard the value. But that's the way the hardware
+ * seems to like it.
+ */
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ }
+
+ /*
+ * Enable DMA clock and wait for it to stabilize.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+ * bits do not disable clocks. This preserves any hardware
+ * bits already set by default in "CLK_CTRL_REG" after reset.
+ */
+ if (!trans->mac_cfg->base->apmg_not_supported) {
+ iwl_write_prph(trans, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+ APMG_RTC_INT_STT_RFKILL);
+ }
+
+ set_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+ return 0;
+}
+
+/*
+ * Enable LP XTAL to avoid HW bug where device may consume much power if
+ * FW is not loaded after device reset. LP XTAL is disabled by default
+ * after device HW reset. Do it only if XTAL is fed by internal source.
+ * Configure device's "persistence" mode to avoid resetting XTAL again when
+ * SHRD_HW_RST occurs in S3.
+ */
+static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+{
+ int ret;
+ u32 apmg_gp1_reg;
+ u32 apmg_xtal_cfg_reg;
+ u32 dl_cfg_reg;
+
+ /* Force XTAL ON */
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+
+ ret = iwl_trans_pcie_sw_reset(trans, true);
+
+ if (!ret)
+ ret = iwl_finish_nic_init(trans);
+
+ if (WARN_ON(ret)) {
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ return;
+ }
+
+ /*
+ * Clear "disable persistence" to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+
+ /*
+ * Force APMG XTAL to be active to prevent its disabling by HW
+ * caused by APMG idle state.
+ */
+ apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+ SHR_APMG_XTAL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg |
+ SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+
+ ret = iwl_trans_pcie_sw_reset(trans, true);
+ if (ret)
+ IWL_ERR(trans,
+ "iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");
+
+ /* Enable LP XTAL by indirect access through CSR */
+ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+ SHR_APMG_GP1_WF_XTAL_LP_EN |
+ SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+
+ /* Clear delay line clock power up */
+ dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+ ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+
+ /*
+ * Enable persistence mode to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PERSISTENCE);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /* Activates XTAL resources monitor */
+ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+ CSR_MONITOR_XTAL_RESOURCES);
+
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ udelay(10);
+
+ /* Release APMG XTAL */
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg &
+ ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+}
+
+void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
+{
+ int ret;
+
+ /* stop device's busmaster DMA activity */
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);
+
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
+ CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
+ 100);
+ usleep_range(10000, 20000);
+ } else {
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret = iwl_poll_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ }
+
+ if (ret < 0)
+ IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
+
+ IWL_DEBUG_INFO(trans, "stop master\n");
+}
+
+static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
+{
+ IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+ if (op_mode_leave) {
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ iwl_pcie_apm_init(trans);
+
+ /* inform ME that we are leaving */
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_WAKE_ME);
+ else if (trans->mac_cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000) {
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_WAKE_ME |
+ CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);
+ mdelay(1);
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ }
+ mdelay(5);
+ }
+
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+ /* Stop device's DMA activity */
+ iwl_pcie_apm_stop_master(trans);
+
+ if (trans->cfg->lp_xtal_workaround) {
+ iwl_pcie_apm_lp_xtal_enable(trans);
+ return;
+ }
+
+ iwl_trans_pcie_sw_reset(trans, false);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
+static int iwl_pcie_nic_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ /* nic_init */
+ spin_lock_bh(&trans_pcie->irq_lock);
+ ret = iwl_pcie_apm_init(trans);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+
+ if (ret)
+ return ret;
+
+ iwl_pcie_set_pwr(trans, false);
+
+ iwl_op_mode_nic_config(trans->op_mode);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ ret = iwl_pcie_rx_init(trans);
+ if (ret)
+ return ret;
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (iwl_pcie_tx_init(trans)) {
+ iwl_pcie_rx_free(trans);
+ return -ENOMEM;
+ }
+
+ if (trans->mac_cfg->base->shadow_reg_enable) {
+ /* enable shadow regs in HW */
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+ IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+ }
+
+ return 0;
+}
+
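+/* timeout for the PCI_OWN_SET poll below, in microseconds */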
+#define HW_READY_TIMEOUT (50)
+
+/* Note: returns poll_bit return value, which is >= 0 if success */
+static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
+{
+ int ret;
+
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PCI_OWN_SET);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PCI_OWN_SET,
+ CSR_HW_IF_CONFIG_REG_PCI_OWN_SET,
+ HW_READY_TIMEOUT);
+
+ if (ret >= 0)
+ iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
+
+ IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
+ return ret;
+}
+
+/* Note: returns standard 0/-ERROR code */
+int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+{
+ int ret;
+ int iter;
+
+ IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+
+ ret = iwl_pcie_set_hw_ready(trans);
+ /* If the card is ready, exit 0 */
+ if (ret >= 0) {
+ trans->csme_own = false;
+ return 0;
+ }
+
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ usleep_range(1000, 2000);
+
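+ /*
+ * Up to 10 attempts; each attempt polls for about 150 ms, with t
+ * accumulating the 200 usec minimum sleep of every iteration.
+ */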
+ for (iter = 0; iter < 10; iter++) {
+ int t = 0;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_WAKE_ME);
+
+ do {
+ ret = iwl_pcie_set_hw_ready(trans);
+ if (ret >= 0) {
+ trans->csme_own = false;
+ return 0;
+ }
+
+ if (iwl_mei_is_connected()) {
+ IWL_DEBUG_INFO(trans,
+ "Couldn't prepare the card but SAP is connected\n");
+ trans->csme_own = true;
+ if (trans->mac_cfg->device_family !=
+ IWL_DEVICE_FAMILY_9000)
+ IWL_ERR(trans,
+ "SAP not supported for this NIC family\n");
+
+ return -EBUSY;
+ }
+
+ usleep_range(200, 1000);
+ t += 200;
+ } while (t < 150000);
+ msleep(25);
+ }
+
+ IWL_ERR(trans, "Couldn't prepare the card\n");
+
+ return ret;
+}
+
+/*
+ * ucode
+ */
+static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
+ u32 dst_addr, dma_addr_t phy_addr,
+ u32 byte_cnt)
+{
+ iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+ iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
+ dst_addr);
+
+ iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+ phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+ iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+ (iwl_get_dma_hi_addr(phy_addr)
+ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+
+ iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+ BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
+ BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
+ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+ iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+}
+
+static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
+ u32 dst_addr, dma_addr_t phy_addr,
+ u32 byte_cnt)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ trans_pcie->ucode_write_complete = false;
+
+ if (!iwl_trans_grab_nic_access(trans))
+ return -EIO;
+
+ iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
+ byte_cnt);
+ iwl_trans_release_nic_access(trans);
+
+ ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
+ trans_pcie->ucode_write_complete, 5 * HZ);
+ if (!ret) {
+ IWL_ERR(trans, "Failed to load firmware chunk!\n");
+ iwl_trans_pcie_dump_regs(trans);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
+ const struct fw_desc *section)
+{
+ u8 *v_addr;
+ dma_addr_t p_addr;
+ u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
+ int ret = 0;
+
+ IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+ section_num);
+
+ v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!v_addr) {
+ IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
+ chunk_sz = PAGE_SIZE;
+ v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
+ &p_addr, GFP_KERNEL);
+ if (!v_addr)
+ return -ENOMEM;
+ }
+
+ for (offset = 0; offset < section->len; offset += chunk_sz) {
+ u32 copy_size, dst_addr;
+ bool extended_addr = false;
+
+ copy_size = min_t(u32, chunk_sz, section->len - offset);
+ dst_addr = section->offset + offset;
+
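+ /*
+ * destinations in the extended SRAM range need the LMPM_CHICK
+ * extended-address window enabled around the DMA transfer
+ */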
+ if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
+ dst_addr <= IWL_FW_MEM_EXTENDED_END)
+ extended_addr = true;
+
+ if (extended_addr)
+ iwl_set_bits_prph(trans, LMPM_CHICK,
+ LMPM_CHICK_EXTENDED_ADDR_SPACE);
+
+ memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
+ ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
+ copy_size);
+
+ if (extended_addr)
+ iwl_clear_bits_prph(trans, LMPM_CHICK,
+ LMPM_CHICK_EXTENDED_ADDR_SPACE);
+
+ if (ret) {
+ IWL_ERR(trans,
+ "Could not load the [%d] uCode section\n",
+ section_num);
+ break;
+ }
+ }
+
+ dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
+ return ret;
+}
+
+static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
+{
+ int shift_param;
+ int i, ret = 0, sec_num = 0x1;
+ u32 val, last_read_idx = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
+ }
+
+ for (i = *first_ucode_section; i < image->num_sec; i++) {
+ last_read_idx = i;
+
+ /*
+ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
+ * sections from the CPU2 sections.
+ * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
+ * non-paged sections from the CPU2 paging sections.
+ */
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+ image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+
+ /* Notify ucode of loaded section number and status */
+ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+
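+ /* widen the mask so each loaded section sets one more status bit */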
+ sec_num = (sec_num << 1) | 0x1;
+ }
+
+ *first_ucode_section = last_read_idx;
+
+ iwl_enable_interrupts(trans);
+
+ if (trans->mac_cfg->gen2) {
+ if (cpu == 1)
+ iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+ 0xFFFF);
+ else
+ iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+ 0xFFFFFFFF);
+ } else {
+ if (cpu == 1)
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+ 0xFFFF);
+ else
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+ 0xFFFFFFFF);
+ }
+
+ return 0;
+}
+
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
+{
+ int i, ret = 0;
+ u32 last_read_idx = 0;
+
+ if (cpu == 1)
+ *first_ucode_section = 0;
+ else
+ (*first_ucode_section)++;
+
+ for (i = *first_ucode_section; i < image->num_sec; i++) {
+ last_read_idx = i;
+
+ /*
+ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
+ * sections from the CPU2 sections.
+ * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
+ * non-paged sections from the CPU2 paging sections.
+ */
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+ image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
+
+ *first_ucode_section = last_read_idx;
+
+ return 0;
+}
+
+static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
+{
+ enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
+ &trans->dbg.fw_mon_cfg[alloc_id];
+ struct iwl_dram_data *frag;
+
+ if (!iwl_trans_dbg_ini_valid(trans))
+ return;
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_SRAM_PATH) {
+ IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
+ /* set sram monitor by enabling bit 7 */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
+
+ return;
+ }
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) !=
+ IWL_FW_INI_LOCATION_DRAM_PATH ||
+ !trans->dbg.fw_mon_ini[alloc_id].num_frags)
+ return;
+
+ frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+
+ IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
+ frag->physical >> MON_BUFF_SHIFT_VER2);
+ iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
+ (frag->physical + frag->size - 256) >>
+ MON_BUFF_SHIFT_VER2);
+}
+
+void iwl_pcie_apply_destination(struct iwl_trans *trans)
+{
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
+ const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+ int i;
+
+ if (iwl_trans_dbg_ini_valid(trans)) {
+ iwl_pcie_apply_destination_ini(trans);
+ return;
+ }
+
+ IWL_INFO(trans, "Applying debug destination %s\n",
+ get_fw_dbg_mode_string(dest->monitor_mode));
+
+ if (dest->monitor_mode == EXTERNAL_MODE)
+ iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
+ else
+ IWL_WARN(trans, "PCI should have external buffer debug\n");
+
+ for (i = 0; i < trans->dbg.n_dest_reg; i++) {
+ u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
+ u32 val = le32_to_cpu(dest->reg_ops[i].val);
+
+ switch (dest->reg_ops[i].op) {
+ case CSR_ASSIGN:
+ iwl_write32(trans, addr, val);
+ break;
+ case CSR_SETBIT:
+ iwl_set_bit(trans, addr, BIT(val));
+ break;
+ case CSR_CLEARBIT:
+ iwl_clear_bit(trans, addr, BIT(val));
+ break;
+ case PRPH_ASSIGN:
+ iwl_write_prph(trans, addr, val);
+ break;
+ case PRPH_SETBIT:
+ iwl_set_bits_prph(trans, addr, BIT(val));
+ break;
+ case PRPH_CLEARBIT:
+ iwl_clear_bits_prph(trans, addr, BIT(val));
+ break;
+ case PRPH_BLOCKBIT:
+ if (iwl_read_prph(trans, addr) & BIT(val)) {
+ IWL_ERR(trans,
+ "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
+ val, addr);
+ goto monitor;
+ }
+ break;
+ default:
+ IWL_ERR(trans, "FW debug - unknown OP %d\n",
+ dest->reg_ops[i].op);
+ break;
+ }
+ }
+
+monitor:
+ if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
+ iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
+ fw_mon->physical >> dest->base_shift);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+ (fw_mon->physical + fw_mon->size -
+ 256) >> dest->end_shift);
+ else
+ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+ (fw_mon->physical + fw_mon->size) >>
+ dest->end_shift);
+ }
+}
+
+static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ int ret = 0;
+ int first_ucode_section;
+
+ IWL_DEBUG_FW(trans, "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ /* load to FW the binary non secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
+ if (ret)
+ return ret;
+
+ if (image->is_dual_cpus) {
+ /* set CPU2 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+ LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+
+ /* load to FW the binary sections of CPU2 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+ }
+
+ if (iwl_pcie_dbg_on(trans))
+ iwl_pcie_apply_destination(trans);
+
+ iwl_enable_interrupts(trans);
+
+ /* release CPU reset */
+ iwl_write32(trans, CSR_RESET, 0);
+
+ return 0;
+}
+
+static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ int ret = 0;
+ int first_ucode_section;
+
+ IWL_DEBUG_FW(trans, "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ if (iwl_pcie_dbg_on(trans))
+ iwl_pcie_apply_destination(trans);
+
+ IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
+ iwl_read_prph(trans, WFPM_GP2));
+
+ /*
+ * Set a default value. On resume, reading back the values that
+ * were zeroed can provide debug data on the resume flow.
+ * This is for debugging only and has no functional impact.
+ */
+ iwl_write_prph(trans, WFPM_GP2, 0x01010101);
+
+ /* configure the ucode to be ready to get the secured image */
+ /* release CPU reset */
+ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+
+ /* load to FW the binary Secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+
+ /* load to FW the binary sections of CPU2 */
+ return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
+ &first_ucode_section);
+}
+
+bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill = iwl_is_rfkill_set(trans);
+ bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ bool report;
+
+ if (hw_rfkill) {
+ set_bit(STATUS_RFKILL_HW, &trans->status);
+ set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ } else {
+ clear_bit(STATUS_RFKILL_HW, &trans->status);
+ if (trans_pcie->opmode_down)
+ clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ }
+
+ report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+
+ if (prev != report)
+ iwl_trans_pcie_rf_kill(trans, report, false);
+
+ return hw_rfkill;
+}
+
+struct iwl_causes_list {
+ u16 mask_reg;
+ u8 bit;
+ u8 addr;
+};
+
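+/*
+ * Map a cause to its byte offset in the IVAR table: the FH causes used
+ * here live in bits 16+ of the FH mask register but occupy the first
+ * 16 IVAR entries, while HW causes (bits 0-15) occupy the next 16 -
+ * hence the -16/+16 adjustment. Any other register yields 0xffff,
+ * which overflows the u8 addr field on purpose so that misuse is
+ * caught at build time.
+ */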
+#define IWL_CAUSE(reg, mask) \
+ { \
+ .mask_reg = reg, \
+ .bit = ilog2(mask), \
+ .addr = ilog2(mask) + \
+ ((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 : \
+ (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 : \
+ 0xffff), /* causes overflow warning */ \
+ }
+
+static const struct iwl_causes_list causes_list_common[] = {
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
+ IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
+};
+
+static const struct iwl_causes_list causes_list_pre_bz[] = {
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
+};
+
+static const struct iwl_causes_list causes_list_bz[] = {
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
+};
+
+static void iwl_pcie_map_list(struct iwl_trans *trans,
+ const struct iwl_causes_list *causes,
+ int arr_size, int val)
+{
+ int i;
+
+ for (i = 0; i < arr_size; i++) {
+ iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
+ iwl_clear_bit(trans, causes[i].mask_reg,
+ BIT(causes[i].bit));
+ }
+}
+
+static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
+ /*
+ * Access all non RX causes and map them to the default irq.
+ * In case we are missing at least one interrupt vector,
+ * the first interrupt vector will serve non-RX and FBQ causes.
+ */
+ iwl_pcie_map_list(trans, causes_list_common,
+ ARRAY_SIZE(causes_list_common), val);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_pcie_map_list(trans, causes_list_bz,
+ ARRAY_SIZE(causes_list_bz), val);
+ else
+ iwl_pcie_map_list(trans, causes_list_pre_bz,
+ ARRAY_SIZE(causes_list_pre_bz), val);
+}
+
+static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 offset =
+ trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+ u32 val, idx;
+
+ /*
+ * The first RX queue - the fallback queue, which is designated for
+ * management frames, command responses etc. - is always mapped to the
+ * first interrupt vector. The other RX queues are mapped to
+ * the other (N - 2) interrupt vectors.
+ */
+ val = BIT(MSIX_FH_INT_CAUSES_Q(0));
+ for (idx = 1; idx < trans->info.num_rxqs; idx++) {
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
+ MSIX_FH_INT_CAUSES_Q(idx - offset));
+ val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
+ }
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
+
+ val = MSIX_FH_INT_CAUSES_Q(0);
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
+ val |= MSIX_NON_AUTO_CLEAR_CAUSE;
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
+}
+
+void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
+{
+ struct iwl_trans *trans = trans_pcie->trans;
+
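+ /* no MSI-X: just make sure plain MSI mode is enabled where supported */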
+ if (!trans_pcie->msix_enabled) {
+ if (trans->mac_cfg->mq_rx_supported &&
+ test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ iwl_write_umac_prph(trans, UREG_CHICK,
+ UREG_CHICK_MSI_ENABLE);
+ return;
+ }
+ /*
+ * The IVAR table needs to be configured again after reset,
+ * but if the device is disabled, we can't write to
+ * prph.
+ */
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
+
+ /*
+ * Each cause from the causes list above and the RX causes is
+ * represented as a byte in the IVAR table. The first nibble
+ * represents the bound interrupt vector of the cause, the second
+ * represents no auto clear for this cause. This will be set if its
+ * interrupt vector is bound to serve other causes.
+ */
+ iwl_pcie_map_rx_causes(trans);
+
+ iwl_pcie_map_non_rx_causes(trans);
+}
+
+static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
+{
+ struct iwl_trans *trans = trans_pcie->trans;
+
+ iwl_pcie_conf_msix_hw(trans_pcie);
+
+ if (!trans_pcie->msix_enabled)
+ return;
+
+ trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
+ trans_pcie->fh_mask = trans_pcie->fh_init_mask;
+ trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
+ trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+}
+
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->mutex);
+
+ if (trans_pcie->is_down)
+ return;
+
+ trans_pcie->is_down = true;
+
+ /* tell the device to stop sending interrupts */
+ iwl_disable_interrupts(trans);
+
+ /* device going down, stop using ICT table */
+ iwl_pcie_disable_ict(trans);
+
+ /*
+ * If a HW restart happens during firmware loading,
+ * then the firmware loading might call this function
+ * and later it might be called again due to the
+ * restart. So don't process again if the device is
+ * already dead.
+ */
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans,
+ "DEVICE_ENABLED bit was set and is now cleared\n");
+ if (!from_irq)
+ iwl_pcie_synchronize_irqs(trans);
+ iwl_pcie_rx_napi_sync(trans);
+ iwl_pcie_tx_stop(trans);
+ iwl_pcie_rx_stop(trans);
+
+ /* Power-down device's busmaster DMA clocks */
+ if (!trans->mac_cfg->base->apmg_not_supported) {
+ iwl_write_prph(trans, APMG_CLK_DIS_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+ }
+ }
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ else
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ iwl_pcie_apm_stop(trans, false);
+
+ /* re-take ownership to prevent other users from stealing the device */
+ iwl_trans_pcie_sw_reset(trans, true);
+
+ /*
+ * Upon stop, the IVAR table gets erased, so msi-x won't
+ * work. This causes a bug in RF-KILL flows, since the interrupt
+ * that enables radio won't fire on the correct irq, and the
+ * driver won't be able to handle the interrupt.
+ * Configure the IVAR table again after reset.
+ */
+ iwl_pcie_conf_msix_hw(trans_pcie);
+
+ /*
+ * Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * This is a bug in certain versions of the hardware.
+ * Certain devices also keep sending the HW RF kill interrupt all
+ * the time unless it is ACKed, even when the interrupt
+ * should be masked. Re-ACK all the interrupts here.
+ */
+ iwl_disable_interrupts(trans);
+
+ /* clear all status bits */
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+
+ /*
+ * Even if we stop the HW, we still want the RF kill
+ * interrupt
+ */
+ iwl_enable_rfkill_int(trans);
+}
+
+void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans_pcie->msix_enabled) {
+ int i;
+
+ for (i = 0; i < trans_pcie->alloc_vecs; i++)
+ synchronize_irq(trans_pcie->msix_entries[i].vector);
+ } else {
+ synchronize_irq(trans_pcie->pci_dev->irq);
+ }
+}
+
+int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill;
+ int ret;
+
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_pcie_prepare_card_hw(trans)) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ return -EIO;
+ }
+
+ iwl_enable_rfkill_int(trans);
+
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ /*
+ * We enabled the RF-Kill interrupt and the handler may very
+ * well be running. Disable the interrupts to make sure no other
+ * interrupt can be fired.
+ */
+ iwl_disable_interrupts(trans);
+
+ /* Make sure it finished running */
+ iwl_pcie_synchronize_irqs(trans);
+
+ mutex_lock(&trans_pcie->mutex);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
+ if (hw_rfkill && !run_in_rfkill) {
+ ret = -ERFKILL;
+ goto out;
+ }
+
+ /* Someone called stop_device, don't try to start_fw */
+ if (trans_pcie->is_down) {
+ IWL_WARN(trans,
+ "Can't start_fw since the HW hasn't been started\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ ret = iwl_pcie_nic_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Unable to init nic\n");
+ goto out;
+ }
+
+ /*
+ * Now, we load the firmware and don't want to be interrupted, even
+ * by the RF-Kill interrupt (hence mask all interrupts besides the
+ * FH_TX interrupt, which is needed to load the firmware). If the
+ * RF-Kill switch is toggled, we will find out after having loaded
+ * the firmware and return the proper value to the caller.
+ */
+ iwl_enable_fw_load_int(trans);
+
+ /* really make sure rfkill handshake bits are cleared */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Load the given image to the HW */
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ ret = iwl_pcie_load_given_ucode_8000(trans, img);
+ else
+ ret = iwl_pcie_load_given_ucode(trans, img);
+
+ /* re-check RF-Kill state since we may have missed the interrupt */
+ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
+ if (hw_rfkill && !run_in_rfkill)
+ ret = -ERFKILL;
+
+out:
+ mutex_unlock(&trans_pcie->mutex);
+ return ret;
+}
+
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
+{
+ iwl_pcie_reset_ict(trans);
+ iwl_pcie_tx_start(trans);
+}
+
+void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
+ bool was_in_rfkill)
+{
+ bool hw_rfkill;
+
+ /*
+ * Check again since the RF kill state may have changed while
+ * all the interrupts were disabled, in this case we couldn't
+ * receive the RF kill interrupt and update the state in the
+ * op_mode.
+ * Don't call the op_mode if the rfkill state hasn't changed.
+ * This allows the op_mode to call stop_device from the rfkill
+ * notification without endless recursion. Under very rare
+ * circumstances, we might have a small recursion if the rfkill
+ * state changed exactly now while we were called from stop_device.
+ * This is very unlikely but can happen and is supported.
+ */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill) {
+ set_bit(STATUS_RFKILL_HW, &trans->status);
+ set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ } else {
+ clear_bit(STATUS_RFKILL_HW, &trans->status);
+ clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ }
+ if (hw_rfkill != was_in_rfkill)
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
+}
+
+void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool was_in_rfkill;
+
+ iwl_op_mode_time_point(trans->op_mode,
+ IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
+ NULL);
+
+ mutex_lock(&trans_pcie->mutex);
+ trans_pcie->opmode_down = true;
+ was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ _iwl_trans_pcie_stop_device(trans, false);
+ iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
+ mutex_unlock(&trans_pcie->mutex);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
+{
+ struct iwl_trans_pcie __maybe_unused *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->mutex);
+
+ IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
+ state ? "disabled" : "enabled");
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
+ !WARN_ON(trans->mac_cfg->gen2))
+ _iwl_trans_pcie_stop_device(trans, from_irq);
+}
+
+static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
+ bool test, bool reset)
+{
+ iwl_disable_interrupts(trans);
+
+ /*
+ * in testing mode, the host stays awake and the
+ * hardware won't be reset (not even partially)
+ */
+ if (test)
+ return;
+
+ iwl_pcie_disable_ict(trans);
+
+ iwl_pcie_synchronize_irqs(trans);
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
+ } else {
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ }
+
+ if (reset) {
+ /*
+ * reset TX queues -- some of their registers reset during S3
+ * so if we don't reset everything here the D3 image would try
+ * to execute some invalid memory upon resume
+ */
+ iwl_trans_pcie_tx_reset(trans);
+ }
+
+ iwl_pcie_set_pwr(trans, true);
+}
+
+static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
+ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
+ UREG_DOORBELL_TO_ISR6_RESUME);
+ else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
+ suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
+ CSR_IPC_SLEEP_CONTROL_RESUME);
+ else
+ return 0;
+
+ ret = wait_event_timeout(trans_pcie->sx_waitq,
+ trans_pcie->sx_complete, 2 * HZ);
+
+ /* Invalidate it for the next suspend or resume */
+ trans_pcie->sx_complete = false;
+
+ if (!ret) {
+ IWL_ERR(trans, "Timeout %s D3\n",
+ suspend ? "entering" : "exiting");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
+{
+ int ret;
+
+ if (!reset)
+ /* Enable persistence mode to avoid reset */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PERSISTENCE);
+
+ ret = iwl_pcie_d3_handshake(trans, true);
+ if (ret)
+ return ret;
+
+ iwl_pcie_d3_complete_suspend(trans, test, reset);
+
+ return 0;
+}
+
+int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status,
+ bool test, bool reset)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 val;
+ int ret;
+
+ if (test) {
+ iwl_enable_interrupts(trans);
+ *status = IWL_D3_STATUS_ALIVE;
+ ret = 0;
+ goto out;
+ }
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ else
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ ret = iwl_finish_nic_init(trans);
+ if (ret)
+ return ret;
+
+ /*
+ * Reconfigure the IVAR table in MSI-X mode, or reset the ICT
+ * table in MSI mode, since the HW reset erased it.
+ * Also enable interrupts - none will fire yet, since the
+ * device doesn't know we're waking it up; that only happens
+ * once the opmode actually tells it after this call.
+ */
+ iwl_pcie_conf_msix_hw(trans_pcie);
+ if (!trans_pcie->msix_enabled)
+ iwl_pcie_reset_ict(trans);
+ iwl_enable_interrupts(trans);
+
+ iwl_pcie_set_pwr(trans, false);
+
+ if (!reset) {
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ } else {
+ iwl_trans_pcie_tx_reset(trans);
+
+ ret = iwl_pcie_rx_init(trans);
+ if (ret) {
+ IWL_ERR(trans,
+ "Failed to resume the device (RX reset)\n");
+ return ret;
+ }
+ }
+
+ IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
+ iwl_read_umac_prph(trans, WFPM_GP2));
+
+ val = iwl_read32(trans, CSR_RESET);
+ if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
+ *status = IWL_D3_STATUS_RESET;
+ else
+ *status = IWL_D3_STATUS_ALIVE;
+
+out:
+ if (*status == IWL_D3_STATUS_ALIVE)
+ ret = iwl_pcie_d3_handshake(trans, false);
+ else
+ trans->state = IWL_TRANS_NO_FW;
+
+ return ret;
+}
+
+static void
+iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
+ struct iwl_trans *trans,
+ const struct iwl_mac_cfg *mac_cfg,
+ struct iwl_trans_info *info)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int max_irqs, num_irqs, i, ret;
+ u16 pci_cmd;
+ u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;
+
+ if (!mac_cfg->mq_rx_supported)
+ goto enable_msi;
+
+ if (mac_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
+ max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;
+
+ max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
+ for (i = 0; i < max_irqs; i++)
+ trans_pcie->msix_entries[i].entry = i;
+
+ num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
+ MSIX_MIN_INTERRUPT_VECTORS,
+ max_irqs);
+ if (num_irqs < 0) {
+ IWL_DEBUG_INFO(trans,
+ "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
+ num_irqs);
+ goto enable_msi;
+ }
+ trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
+
+ IWL_DEBUG_INFO(trans,
+ "MSI-X enabled. %d interrupt vectors were allocated\n",
+ num_irqs);
+
+ /*
+ * In case the OS provides fewer interrupts than requested, different
+ * causes will share the same interrupt vector as follows:
+ * One interrupt less: non-RX causes share a vector with the FBQ.
+ * Two interrupts less: non-RX causes share with the FBQ and RSS.
+ * More than two interrupts less: we use fewer RSS queues.
+ */
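+ /*
+ * Worked example (illustrative numbers only): with 6 online
+ * CPUs and a device limit of IWL_MAX_RX_HW_QUEUES = 16,
+ * max_irqs = min(6 + 2, 16) = 8. If the OS grants all 8
+ * vectors, num_rxqs = 7 with no sharing; with 7 vectors,
+ * num_rxqs = 7 and non-RX causes share the FBQ vector; with
+ * 5 vectors, num_rxqs = 6 and non-RX causes share both the
+ * FBQ vector and the first RSS vector.
+ */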
+ if (num_irqs <= max_irqs - 2) {
+ info->num_rxqs = num_irqs + 1;
+ trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
+ IWL_SHARED_IRQ_FIRST_RSS;
+ } else if (num_irqs == max_irqs - 1) {
+ info->num_rxqs = num_irqs;
+ trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
+ } else {
+ info->num_rxqs = num_irqs - 1;
+ }
+
+ IWL_DEBUG_INFO(trans,
+ "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
+ info->num_rxqs, trans_pcie->shared_vec_mask);
+
+ WARN_ON(info->num_rxqs > IWL_MAX_RX_HW_QUEUES);
+
+ trans_pcie->alloc_vecs = num_irqs;
+ trans_pcie->msix_enabled = true;
+ return;
+
+enable_msi:
+ info->num_rxqs = 1;
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ }
+ }
+}
+
+static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans,
+ struct iwl_trans_info *info)
+{
+#if defined(CONFIG_SMP)
+ int iter_rx_q, i, ret, cpu, offset;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
+ iter_rx_q = info->num_rxqs - 1 + i;
+ offset = 1 + i;
+ for (; i < iter_rx_q ; i++) {
+ /*
+ * cpumask_next() returns the first online CPU after
+ * (i - offset), i.e. the CPU whose affinity hint this
+ * RX vector should get.
+ */
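+ /*
+ * Illustrative mapping, assuming CPUs 0..N-1 are online:
+ * without an RSS-first shared vector, i starts at 1 with
+ * offset 2, so vector 1 gets CPU 0, vector 2 gets CPU 1,
+ * and so on; with RSS sharing, i starts at 0 with offset
+ * 1 and yields the same per-vector CPU pairing.
+ */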
+ cpu = cpumask_next(i - offset, cpu_online_mask);
+ cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
+ ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
+ &trans_pcie->affinity_mask[i]);
+ if (ret)
+ IWL_ERR(trans_pcie->trans,
+ "Failed to set affinity mask for IRQ %d\n",
+ trans_pcie->msix_entries[i].vector);
+ }
+#endif
+}
+
+static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
+ struct iwl_trans_pcie *trans_pcie,
+ struct iwl_trans_info *info)
+{
+ int i;
+
+ for (i = 0; i < trans_pcie->alloc_vecs; i++) {
+ int ret;
+ struct msix_entry *msix_entry;
+ const char *qname = queue_name(&pdev->dev, trans_pcie, i);
+
+ if (!qname)
+ return -ENOMEM;
+
+ msix_entry = &trans_pcie->msix_entries[i];
+ ret = devm_request_threaded_irq(&pdev->dev,
+ msix_entry->vector,
+ iwl_pcie_msix_isr,
+ (i == trans_pcie->def_irq) ?
+ iwl_pcie_irq_msix_handler :
+ iwl_pcie_irq_rx_msix_handler,
+ IRQF_SHARED,
+ qname,
+ msix_entry);
+ if (ret) {
+ IWL_ERR(trans_pcie->trans,
+ "Error allocating IRQ %d\n", i);
+
+ return ret;
+ }
+ }
+ iwl_pcie_irq_set_affinity(trans_pcie->trans, info);
+
+ return 0;
+}
+
+static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
+{
+ u32 hpm, wprot;
+
+ switch (trans->mac_cfg->device_family) {
+ case IWL_DEVICE_FAMILY_9000:
+ wprot = PREG_PRPH_WPROT_9000;
+ break;
+ case IWL_DEVICE_FAMILY_22000:
+ wprot = PREG_PRPH_WPROT_22000;
+ break;
+ default:
+ return 0;
+ }
+
+ hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
+ if (!iwl_trans_is_hw_error_value(hpm) && (hpm & PERSISTENCE_BIT)) {
+ u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);
+
+ if (wprot_val & PREG_WFPM_ACCESS) {
+ IWL_ERR(trans,
+ "Error, can not clear persistence bit\n");
+ return -EPERM;
+ }
+ iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
+ hpm & ~PERSISTENCE_BIT);
+ }
+
+ return 0;
+}
+
+static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+{
+ int ret;
+
+ ret = iwl_finish_nic_init(trans);
+ if (ret < 0)
+ return ret;
+
+ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+ udelay(20);
+ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_PG_EN |
+ HPM_HIPM_GEN_CFG_CR_SLP_EN);
+ udelay(20);
+ iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+
+ return iwl_trans_pcie_sw_reset(trans, true);
+}
+
+static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int err;
+
+ lockdep_assert_held(&trans_pcie->mutex);
+
+ err = iwl_pcie_prepare_card_hw(trans);
+ if (err) {
+ IWL_ERR(trans, "Error while preparing HW: %d\n", err);
+ return err;
+ }
+
+ err = iwl_trans_pcie_clear_persistence_bit(trans);
+ if (err)
+ return err;
+
+ err = iwl_trans_pcie_sw_reset(trans, true);
+ if (err)
+ return err;
+
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+ trans->mac_cfg->integrated) {
+ err = iwl_pcie_gen2_force_power_gating(trans);
+ if (err)
+ return err;
+ }
+
+ err = iwl_pcie_apm_init(trans);
+ if (err)
+ return err;
+
+ iwl_pcie_init_msix(trans_pcie);
+
+ /* From now on, the op_mode will be kept updated about RF kill state */
+ iwl_enable_rfkill_int(trans);
+
+ trans_pcie->opmode_down = false;
+
+ /* Set is_down to false here so that...*/
+ trans_pcie->is_down = false;
+
+ /* ...rfkill can call stop_device and set it false if needed */
+ iwl_pcie_check_hw_rf_kill(trans);
+
+ return 0;
+}
+
+int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ mutex_lock(&trans_pcie->mutex);
+ ret = _iwl_trans_pcie_start_hw(trans);
+ mutex_unlock(&trans_pcie->mutex);
+
+ return ret;
+}
+
+void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ mutex_lock(&trans_pcie->mutex);
+
+ /* disable interrupts - don't enable HW RF kill interrupt */
+ iwl_disable_interrupts(trans);
+
+ iwl_pcie_apm_stop(trans, true);
+
+ iwl_disable_interrupts(trans);
+
+ iwl_pcie_disable_ict(trans);
+
+ mutex_unlock(&trans_pcie->mutex);
+
+ iwl_pcie_synchronize_irqs(trans);
+}
+
+void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+ writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
+{
+ return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
+{
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return 0x00FFFFFF;
+ else
+ return 0x000FFFFF;
+}
+
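+/*
+ * Periphery (PRPH) registers are reached indirectly through the
+ * HBUS target-access registers: the address, masked to the device
+ * family's periphery address width, is written to the address
+ * register and the data then moves through the data register. The
+ * (3 << 24) tag in the address write requests a full 4-byte access
+ * (an assumption based on the legacy register layout).
+ */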
+u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
+{
+ u32 mask = iwl_trans_pcie_prph_msk(trans);
+
+ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
+ ((reg & mask) | (3 << 24)));
+ return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
+}
+
+void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
+{
+ u32 mask = iwl_trans_pcie_prph_msk(trans);
+
+ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
+ ((addr & mask) | (3 << 24)));
+ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+}
+
+void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* free all first - we might be reconfigured for a different size */
+ iwl_pcie_free_rbs_pool(trans);
+
+ trans_pcie->rx_page_order =
+ iwl_trans_get_rb_size_order(trans->conf.rx_buf_size);
+ trans_pcie->rx_buf_bytes =
+ iwl_trans_get_rb_size(trans->conf.rx_buf_size);
+}
+
+void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
+ struct device *dev)
+{
+ u8 i;
+ struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
+
+ /* free DRAM payloads */
+ for (i = 0; i < dram_regions->n_regions; i++) {
+ dma_free_coherent(dev, dram_regions->drams[i].size,
+ dram_regions->drams[i].block,
+ dram_regions->drams[i].physical);
+ }
+ dram_regions->n_regions = 0;
+
+ /* free DRAM addresses array */
+ if (desc_dram->block) {
+ dma_free_coherent(dev, desc_dram->size,
+ desc_dram->block,
+ desc_dram->physical);
+ }
+ memset(desc_dram, 0, sizeof(*desc_dram));
+}
+
+static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->invalid_tx_cmd);
+}
+
+static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_cmd_header_wide bad_cmd = {
+ .cmd = INVALID_WR_PTR_CMD,
+ .group_id = DEBUG_GROUP,
+ .sequence = cpu_to_le16(0xffff),
+ .length = cpu_to_le16(0),
+ .version = 0,
+ };
+ int ret;
+
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->invalid_tx_cmd,
+ sizeof(bad_cmd));
+ if (ret)
+ return ret;
+ memcpy(trans_pcie->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
+ return 0;
+}
+
+void iwl_trans_pcie_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ iwl_pcie_synchronize_irqs(trans);
+
+ if (trans->mac_cfg->gen2)
+ iwl_txq_gen2_tx_free(trans);
+ else
+ iwl_pcie_tx_free(trans);
+ iwl_pcie_rx_free(trans);
+
+ if (trans_pcie->rba.alloc_wq) {
+ destroy_workqueue(trans_pcie->rba.alloc_wq);
+ trans_pcie->rba.alloc_wq = NULL;
+ }
+
+ if (trans_pcie->msix_enabled) {
+ for (i = 0; i < trans_pcie->alloc_vecs; i++) {
+ irq_set_affinity_hint(
+ trans_pcie->msix_entries[i].vector,
+ NULL);
+ }
+
+ trans_pcie->msix_enabled = false;
+ } else {
+ iwl_pcie_free_ict(trans);
+ }
+
+ free_netdev(trans_pcie->napi_dev);
+
+ iwl_pcie_free_invalid_tx_cmd(trans);
+
+ iwl_pcie_free_fw_monitor(trans);
+
+ iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
+ trans->dev);
+ iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
+ trans->dev);
+
+ mutex_destroy(&trans_pcie->mutex);
+
+ if (trans_pcie->txqs.tso_hdr_page) {
+ for_each_possible_cpu(i) {
+ struct iwl_tso_hdr_page *p =
+ per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i);
+
+ if (p && p->page)
+ __free_page(p->page);
+ }
+
+ free_percpu(trans_pcie->txqs.tso_hdr_page);
+ }
+
+ iwl_trans_free(trans);
+}
+
+static union acpi_object *
+iwl_trans_pcie_call_prod_reset_dsm(struct pci_dev *pdev, u16 cmd, u16 value)
+{
+#ifdef CONFIG_ACPI
+ struct iwl_dsm_internal_product_reset_cmd pldr_arg = {
+ .cmd = cmd,
+ .value = value,
+ };
+ union acpi_object arg = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = sizeof(pldr_arg),
+ .buffer.pointer = (void *)&pldr_arg,
+ };
+ static const guid_t dsm_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
+ 0x81, 0x4F, 0x75, 0xE4,
+ 0xDD, 0x26, 0xB5, 0xFD);
+
+ if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &dsm_guid, ACPI_DSM_REV,
+ DSM_INTERNAL_FUNC_PRODUCT_RESET))
+ return ERR_PTR(-ENODEV);
+
+ return iwl_acpi_get_dsm_object(&pdev->dev, ACPI_DSM_REV,
+ DSM_INTERNAL_FUNC_PRODUCT_RESET,
+ &arg, &dsm_guid);
+#else
+ return ERR_PTR(-EOPNOTSUPP);
+#endif
+}
+
+void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev)
+{
+ union acpi_object *res;
+
+ res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
+ DSM_INTERNAL_PLDR_CMD_GET_MODE,
+ 0);
+ if (IS_ERR(res))
+ return;
+
+ if (res->type != ACPI_TYPE_INTEGER)
+ IWL_ERR_DEV(&pdev->dev,
+ "unexpected return type from product reset DSM\n");
+ else
+ IWL_DEBUG_DEV_POWER(&pdev->dev,
+ "product reset mode is 0x%llx\n",
+ res->integer.value);
+
+ ACPI_FREE(res);
+}
+
+static void iwl_trans_pcie_set_product_reset(struct pci_dev *pdev, bool enable,
+ bool integrated)
+{
+ union acpi_object *res;
+ u16 mode = enable ? DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET : 0;
+
+ if (!integrated)
+ mode |= DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR |
+ DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON;
+
+ res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
+ DSM_INTERNAL_PLDR_CMD_SET_MODE,
+ mode);
+ if (IS_ERR(res)) {
+ if (enable)
+ IWL_ERR_DEV(&pdev->dev,
+ "ACPI _DSM not available (%d), cannot do product reset\n",
+ (int)PTR_ERR(res));
+ return;
+ }
+
+ ACPI_FREE(res);
+ IWL_DEBUG_DEV_POWER(&pdev->dev, "%sabled product reset via DSM\n",
+ enable ? "En" : "Dis");
+ iwl_trans_pcie_check_product_reset_mode(pdev);
+}
+
+void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev)
+{
+ union acpi_object *res;
+
+ res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
+ DSM_INTERNAL_PLDR_CMD_GET_STATUS,
+ 0);
+ if (IS_ERR(res))
+ return;
+
+ if (res->type != ACPI_TYPE_INTEGER)
+ IWL_ERR_DEV(&pdev->dev,
+ "unexpected return type from product reset DSM\n");
+ else
+ IWL_DEBUG_DEV_POWER(&pdev->dev,
+ "product reset status is 0x%llx\n",
+ res->integer.value);
+
+ ACPI_FREE(res);
+}
+
+static void iwl_trans_pcie_call_reset(struct pci_dev *pdev)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *p, *ref;
+ acpi_status status;
+ int ret = -EINVAL;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&pdev->dev),
+ "_PRR", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_DEV_POWER(&pdev->dev, "No _PRR method found\n");
+ goto out;
+ }
+ p = buffer.pointer;
+
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 1) {
+ pci_err(pdev, "Bad _PRR return type\n");
+ goto out;
+ }
+
+ ref = &p->package.elements[0];
+ if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) {
+ pci_err(pdev, "_PRR wasn't a reference\n");
+ goto out;
+ }
+
+ status = acpi_evaluate_object(ref->reference.handle,
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ pci_err(pdev,
+ "Failed to call _RST on object returned by _PRR (%d)\n",
+ status);
+ goto out;
+ }
+ ret = 0;
+out:
+ kfree(buffer.pointer);
+ if (!ret) {
+ IWL_DEBUG_DEV_POWER(&pdev->dev, "called _RST on _PRR object\n");
+ return;
+ }
+ IWL_DEBUG_DEV_POWER(&pdev->dev,
+ "No BIOS support, using pci_reset_function()\n");
+#endif
+ pci_reset_function(pdev);
+}
+
+struct iwl_trans_pcie_removal {
+ struct pci_dev *pdev;
+ struct work_struct work;
+ enum iwl_reset_mode mode;
+ bool integrated;
+};
+
+static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
+{
+ struct iwl_trans_pcie_removal *removal =
+ container_of(wk, struct iwl_trans_pcie_removal, work);
+ struct pci_dev *pdev = removal->pdev;
+ static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
+ struct pci_bus *bus;
+
+ pci_lock_rescan_remove();
+
+ bus = pdev->bus;
+ /* in this case, something else already removed the device */
+ if (!bus)
+ goto out;
+
+ kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
+
+ if (removal->mode == IWL_RESET_MODE_PROD_RESET) {
+ struct pci_dev *bt = NULL;
+
+ if (!removal->integrated) {
+ /* discrete devices have WiFi/BT at function 0/1 */
+ int slot = PCI_SLOT(pdev->devfn);
+ int func = PCI_FUNC(pdev->devfn);
+
+ if (func == 0)
+ bt = pci_get_slot(bus, PCI_DEVFN(slot, 1));
+ else
+ pci_info(pdev, "Unexpected function %d\n",
+ func);
+ } else {
+ /* on integrated we have to look up by ID (same bus) */
+ static const struct pci_device_id bt_device_ids[] = {
+#define BT_DEV(_id) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, _id) }
+ BT_DEV(0xA876), /* LNL */
+ BT_DEV(0xE476), /* PTL-P */
+ BT_DEV(0xE376), /* PTL-H */
+ BT_DEV(0xD346), /* NVL-H */
+ BT_DEV(0x6E74), /* NVL-S */
+ BT_DEV(0x4D76), /* WCL */
+ BT_DEV(0xD246), /* RZL-H */
+ BT_DEV(0x6C46), /* RZL-M */
+ {}
+ };
+ struct pci_dev *tmp = NULL;
+
+ for_each_pci_dev(tmp) {
+ if (tmp->bus != bus)
+ continue;
+
+ if (pci_match_id(bt_device_ids, tmp)) {
+ bt = tmp;
+ break;
+ }
+ }
+ }
+
+ if (bt) {
+ pci_info(bt, "Removal by WiFi due to product reset\n");
+ pci_stop_and_remove_bus_device(bt);
+ pci_dev_put(bt);
+ }
+ }
+
+ iwl_trans_pcie_set_product_reset(pdev,
+ removal->mode ==
+ IWL_RESET_MODE_PROD_RESET,
+ removal->integrated);
+ if (removal->mode >= IWL_RESET_MODE_FUNC_RESET)
+ iwl_trans_pcie_call_reset(pdev);
+
+ pci_stop_and_remove_bus_device(pdev);
+ pci_dev_put(pdev);
+
+ if (removal->mode >= IWL_RESET_MODE_RESCAN) {
+ if (bus->parent)
+ bus = bus->parent;
+ pci_rescan_bus(bus);
+ }
+
+out:
+ pci_unlock_rescan_remove();
+
+ kfree(removal);
+ module_put(THIS_MODULE);
+}
+
+void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie_removal *removal;
+ char _msg = 0, *msg = &_msg;
+
+ if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY ||
+ mode == IWL_RESET_MODE_BACKOFF))
+ return;
+
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return;
+
+ if (trans_pcie->me_present && mode == IWL_RESET_MODE_PROD_RESET) {
+ mode = IWL_RESET_MODE_FUNC_RESET;
+ if (trans_pcie->me_present < 0)
+ msg = " instead of product reset as ME may be present";
+ else
+ msg = " instead of product reset as ME is present";
+ }
+
+ IWL_INFO(trans, "scheduling reset (mode=%d%s)\n", mode, msg);
+
+ iwl_pcie_dump_csr(trans);
+
+ /*
+ * Take a module reference so that we neither do this
+ * while the module is unloading anyway nor schedule a
+ * work item whose code is about to be removed.
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ IWL_ERR(trans,
+ "Module is being unloaded - abort\n");
+ return;
+ }
+
+ removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
+ if (!removal) {
+ module_put(THIS_MODULE);
+ return;
+ }
+ /*
+ * we don't need to clear this flag, because
+ * the trans will be freed and reallocated.
+ */
+ set_bit(STATUS_TRANS_DEAD, &trans->status);
+
+ removal->pdev = to_pci_dev(trans->dev);
+ removal->mode = mode;
+ removal->integrated = trans->mac_cfg->integrated;
+ INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
+ pci_dev_get(removal->pdev);
+ schedule_work(&removal->work);
+}
+EXPORT_SYMBOL(iwl_trans_pcie_reset);
+
+/*
+ * This version doesn't disable BHs but rather assumes they're
+ * already disabled.
+ */
+bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
+{
+ int ret;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;
+ u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
+ u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;
+
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return false;
+
+ spin_lock(&trans_pcie->reg_lock);
+
+ if (trans_pcie->cmd_hold_nic_awake)
+ goto out;
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
+ mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
+ poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
+ }
+
+ /* this bit wakes up the NIC */
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ /*
+ * These bits say the device is running, and should keep running for
+ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+ * but they do not indicate that embedded SRAM is restored yet;
+ * HW with volatile SRAM must save/restore contents to/from
+ * host DRAM when sleeping/waking for power-saving.
+ * Each direction takes approximately 1/4 millisecond; with this
+ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+ * series of register accesses are expected (e.g. reading Event Log),
+ * to keep device from sleeping.
+ *
+ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+ * SRAM is okay/restored. We don't check that here because this call
+ * is just for hardware register access; but GP1 MAC_SLEEP
+ * check is a good idea before accessing the SRAM of HW with
+ * volatile SRAM (e.g. reading Event Log).
+ *
+ * 5000 series and later (including 1000 series) have non-volatile SRAM,
+ * and do not save/restore SRAM when power cycling.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000);
+ if (unlikely(ret < 0)) {
+ u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
+
+ if (silent) {
+ spin_unlock(&trans_pcie->reg_lock);
+ return false;
+ }
+
+ WARN_ONCE(1,
+ "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
+ cntrl);
+
+ iwl_trans_pcie_dump_regs(trans);
+
+ if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)
+ iwl_trans_pcie_reset(trans,
+ IWL_RESET_MODE_REMOVE_ONLY);
+ else
+ iwl_write32(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_FORCE_NMI);
+
+ spin_unlock(&trans_pcie->reg_lock);
+ return false;
+ }
+
+out:
+ /*
+ * Fool sparse by faking that we release the lock - sparse will
+ * track nic_access anyway.
+ */
+ __release(&trans_pcie->reg_lock);
+ return true;
+}
+
+bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
+{
+ bool ret;
+
+ local_bh_disable();
+ ret = __iwl_trans_pcie_grab_nic_access(trans, false);
+ if (ret) {
+ /* keep BHs disabled until iwl_trans_pcie_release_nic_access */
+ return ret;
+ }
+ local_bh_enable();
+ return false;
+}
+
+void __releases(nic_access_nobh)
+iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->reg_lock);
+
+ /*
+ * Fool sparse by faking that we acquire the lock - sparse will
+ * track nic_access anyway.
+ */
+ __acquire(&trans_pcie->reg_lock);
+
+ if (trans_pcie->cmd_hold_nic_awake)
+ goto out;
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ else
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ /*
+ * Above we read the CSR_GP_CNTRL register, which will flush
+ * any previous writes, but we need the write that clears the
+ * MAC_ACCESS_REQ bit to be performed before any other writes
+ * scheduled on different CPUs (after we drop reg_lock).
+ */
+out:
+ __release(nic_access_nobh);
+ spin_unlock_bh(&trans_pcie->reg_lock);
+}
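+/*
+ * Typical grab/release pairing (illustrative sketch, not a driver
+ * call site): a burst of register accesses holds the NIC awake
+ * across the whole sequence, e.g.
+ *
+ * if (iwl_trans_pcie_grab_nic_access(trans)) {
+ * ...series of iwl_read32()/iwl_write32() calls...
+ * iwl_trans_pcie_release_nic_access(trans);
+ * }
+ *
+ * so MAC_ACCESS_REQ stays set and the device cannot go back to
+ * sleep between the individual accesses.
+ */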
+
+int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+#define IWL_MAX_HW_ERRS 5
+ unsigned int num_consec_hw_errors = 0;
+ int offs = 0;
+ u32 *vals = buf;
+
+ while (offs < dwords) {
+ /* limit the time we spin here under lock to 1/2s */
+ unsigned long end = jiffies + HZ / 2;
+ bool resched = false;
+
+ if (iwl_trans_grab_nic_access(trans)) {
+ iwl_write32(trans, HBUS_TARG_MEM_RADDR,
+ addr + 4 * offs);
+
+ while (offs < dwords) {
+ vals[offs] = iwl_read32(trans,
+ HBUS_TARG_MEM_RDAT);
+
+ if (iwl_trans_is_hw_error_value(vals[offs]))
+ num_consec_hw_errors++;
+ else
+ num_consec_hw_errors = 0;
+
+ if (num_consec_hw_errors >= IWL_MAX_HW_ERRS) {
+ iwl_trans_release_nic_access(trans);
+ return -EIO;
+ }
+
+ offs++;
+
+ if (time_after(jiffies, end)) {
+ resched = true;
+ break;
+ }
+ }
+ iwl_trans_release_nic_access(trans);
+
+ if (resched)
+ cond_resched();
+ } else {
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
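+/*
+ * Usage sketch (illustrative only, not a driver call site): a
+ * caller snapshotting device SRAM might do
+ *
+ * u32 buf[128];
+ *
+ * if (iwl_trans_pcie_read_mem(trans, sram_addr, buf,
+ * ARRAY_SIZE(buf)))
+ * return;
+ *
+ * where sram_addr is a hypothetical device address obtained
+ * elsewhere; -EBUSY means NIC access could not be grabbed and
+ * -EIO means too many consecutive HW error values were read.
+ */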
+
+int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords)
+{
+ int offs, ret = 0;
+ const u32 *vals = buf;
+
+ if (iwl_trans_grab_nic_access(trans)) {
+ iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ iwl_write32(trans, HBUS_TARG_MEM_WDAT,
+ vals ? vals[offs] : 0);
+ iwl_trans_release_nic_access(trans);
+ } else {
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val)
+{
+ return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
+ ofs, val);
+}
+
+#define IWL_FLUSH_WAIT_MS 2000
+
+int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (queue >= trans->info.num_rxqs || !trans_pcie->rxq)
+ return -EINVAL;
+
+ data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
+ data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
+ data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
+ data->fr_bd_wid = 0;
+
+ return 0;
+}
+
+int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq;
+ unsigned long now = jiffies;
+ bool overflow_tx;
+ u8 wr_ptr;
+
+ /* Make sure the NIC is still alive in the bus */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return -ENODEV;
+
+ if (!test_bit(txq_idx, trans_pcie->txqs.queue_used))
+ return -EINVAL;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
+ txq = trans_pcie->txqs.txq[txq_idx];
+
+ spin_lock_bh(&txq->lock);
+ overflow_tx = txq->overflow_tx ||
+ !skb_queue_empty(&txq->overflow_q);
+ spin_unlock_bh(&txq->lock);
+
+ wr_ptr = READ_ONCE(txq->write_ptr);
+
+ while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
+ overflow_tx) &&
+ !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+ u8 write_ptr = READ_ONCE(txq->write_ptr);
+
+ /*
+ * If write pointer moved during the wait, warn only
+ * if the TX came from op mode. In case TX came from
+ * trans layer (overflow TX) don't warn.
+ */
+ if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
+ "WR pointer moved while flushing %d -> %d\n",
+ wr_ptr, write_ptr))
+ return -ETIMEDOUT;
+ wr_ptr = write_ptr;
+
+ usleep_range(1000, 2000);
+
+ spin_lock_bh(&txq->lock);
+ overflow_tx = txq->overflow_tx ||
+ !skb_queue_empty(&txq->overflow_q);
+ spin_unlock_bh(&txq->lock);
+ }
+
+ if (txq->read_ptr != txq->write_ptr) {
+ IWL_ERR(trans,
+ "fail to flush all tx fifo queues Q %d\n", txq_idx);
+ iwl_txq_log_scd_error(trans, txq);
+ return -ETIMEDOUT;
+ }
+
+ IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
+
+ return 0;
+}
+
+int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int cnt;
+ int ret = 0;
+
+ /* waiting for all the tx frames complete might take a while */
+ for (cnt = 0;
+ cnt < trans->mac_cfg->base->num_of_queues;
+ cnt++) {
+
+ if (cnt == trans->conf.cmd_queue)
+ continue;
+ if (!test_bit(cnt, trans_pcie->txqs.queue_used))
+ continue;
+ if (!(BIT(cnt) & txq_bm))
+ continue;
+
+ ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_bh(&trans_pcie->reg_lock);
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+ spin_unlock_bh(&trans_pcie->reg_lock);
+}
+
+static const char *get_csr_string(int cmd)
+{
+#define IWL_CMD(x) case x: return #x
+ switch (cmd) {
+ IWL_CMD(CSR_HW_IF_CONFIG_REG);
+ IWL_CMD(CSR_INT_COALESCING);
+ IWL_CMD(CSR_INT);
+ IWL_CMD(CSR_INT_MASK);
+ IWL_CMD(CSR_FH_INT_STATUS);
+ IWL_CMD(CSR_GPIO_IN);
+ IWL_CMD(CSR_RESET);
+ IWL_CMD(CSR_GP_CNTRL);
+ IWL_CMD(CSR_HW_REV);
+ IWL_CMD(CSR_EEPROM_REG);
+ IWL_CMD(CSR_EEPROM_GP);
+ IWL_CMD(CSR_OTP_GP_REG);
+ IWL_CMD(CSR_GIO_REG);
+ IWL_CMD(CSR_GP_UCODE_REG);
+ IWL_CMD(CSR_GP_DRIVER_REG);
+ IWL_CMD(CSR_UCODE_DRV_GP1);
+ IWL_CMD(CSR_UCODE_DRV_GP2);
+ IWL_CMD(CSR_LED_REG);
+ IWL_CMD(CSR_DRAM_INT_TBL_REG);
+ IWL_CMD(CSR_GIO_CHICKEN_BITS);
+ IWL_CMD(CSR_ANA_PLL_CFG);
+ IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_MONITOR_STATUS_REG);
+ IWL_CMD(CSR_DBG_HPET_MEM_REG);
+ default:
+ return "UNKNOWN";
+ }
+#undef IWL_CMD
+}
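+/*
+ * The IWL_CMD() stringification above expands, for instance,
+ * IWL_CMD(CSR_RESET) into: case CSR_RESET: return "CSR_RESET";
+ */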
+
+void iwl_pcie_dump_csr(struct iwl_trans *trans)
+{
+ int i;
+ static const u32 csr_tbl[] = {
+ CSR_HW_IF_CONFIG_REG,
+ CSR_INT_COALESCING,
+ CSR_INT,
+ CSR_INT_MASK,
+ CSR_FH_INT_STATUS,
+ CSR_GPIO_IN,
+ CSR_RESET,
+ CSR_GP_CNTRL,
+ CSR_HW_REV,
+ CSR_EEPROM_REG,
+ CSR_EEPROM_GP,
+ CSR_OTP_GP_REG,
+ CSR_GIO_REG,
+ CSR_GP_UCODE_REG,
+ CSR_GP_DRIVER_REG,
+ CSR_UCODE_DRV_GP1,
+ CSR_UCODE_DRV_GP2,
+ CSR_LED_REG,
+ CSR_DRAM_INT_TBL_REG,
+ CSR_GIO_CHICKEN_BITS,
+ CSR_ANA_PLL_CFG,
+ CSR_MONITOR_STATUS_REG,
+ CSR_HW_REV_WA_REG,
+ CSR_DBG_HPET_MEM_REG
+ };
+ IWL_ERR(trans, "CSR values:\n");
+ IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
+ "CSR_INT_PERIODIC_REG)\n");
+ for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
+ IWL_ERR(trans, " %25s: 0X%08x\n",
+ get_csr_string(csr_tbl[i]),
+ iwl_read32(trans, csr_tbl[i]));
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ debugfs_create_file(#name, mode, parent, trans, \
+ &iwl_dbgfs_##name##_ops); \
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
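+/*
+ * For example, DEBUGFS_READ_WRITE_FILE_OPS(interrupt) below defines
+ * a file_operations named iwl_dbgfs_interrupt_ops wired to
+ * iwl_dbgfs_interrupt_read/_write, and DEBUGFS_ADD_FILE(interrupt,
+ * dir, 0600) later registers it as the "interrupt" debugfs file.
+ */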
+
+struct iwl_dbgfs_tx_queue_priv {
+ struct iwl_trans *trans;
+};
+
+struct iwl_dbgfs_tx_queue_state {
+ loff_t pos;
+};
+
+static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state;
+
+ if (*pos >= priv->trans->mac_cfg->base->num_of_queues)
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+ state->pos = *pos;
+ return state;
+}
+
+static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
+ void *v, loff_t *pos)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state = v;
+
+ *pos = ++state->pos;
+
+ if (*pos >= priv->trans->mac_cfg->base->num_of_queues)
+ return NULL;
+
+ return state;
+}
+
+static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
+{
+ kfree(v);
+}
+
+static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state = v;
+ struct iwl_trans *trans = priv->trans;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos];
+
+ seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
+ (unsigned int)state->pos,
+ !!test_bit(state->pos, trans_pcie->txqs.queue_used),
+ !!test_bit(state->pos, trans_pcie->txqs.queue_stopped));
+ if (txq)
+ seq_printf(seq,
+ "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
+ txq->read_ptr, txq->write_ptr,
+ txq->need_update, txq->frozen,
+ txq->n_window, txq->ampdu);
+ else
+ seq_puts(seq, "(unallocated)");
+
+ if (state->pos == trans->conf.cmd_queue)
+ seq_puts(seq, " (HCMD)");
+ seq_puts(seq, "\n");
+
+ return 0;
+}
+
+static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
+ .start = iwl_dbgfs_tx_queue_seq_start,
+ .next = iwl_dbgfs_tx_queue_seq_next,
+ .stop = iwl_dbgfs_tx_queue_seq_stop,
+ .show = iwl_dbgfs_tx_queue_seq_show,
+};
+
+static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv;
+
+ priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
+ sizeof(*priv));
+
+ if (!priv)
+ return -ENOMEM;
+
+ priv->trans = inode->i_private;
+ return 0;
+}
+
+static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ char *buf;
+ int pos = 0, i, ret;
+ size_t bufsz;
+
+ bufsz = sizeof(char) * 121 * trans->info.num_rxqs;
+
+ if (!trans_pcie->rxq)
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < trans->info.num_rxqs && pos < bufsz; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ spin_lock_bh(&rxq->lock);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
+ i);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
+ rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
+ rxq->write);
+ pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
+ rxq->write_actual);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
+ rxq->need_update);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ u32 r = iwl_get_closed_rb_stts(trans, rxq);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tclosed_rb_num: %u\n", r);
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tclosed_rb_num: Not Allocated\n");
+ }
+ spin_unlock_bh(&rxq->lock);
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+ int pos = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Interrupt Statistics Report:\n");
+
+ pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ isr_stats->hw);
+ pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ isr_stats->sw);
+ if (isr_stats->sw || isr_stats->hw) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ isr_stats->err_code);
+ }
+#ifdef CONFIG_IWLWIFI_DEBUG
+ pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ isr_stats->sch);
+ pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ isr_stats->alive);
+#endif
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ isr_stats->ctkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ isr_stats->wakeup);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Rx command responses:\t\t %u\n", isr_stats->rx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ isr_stats->tx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ isr_stats->unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+ u32 reset_flag;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
+ if (ret)
+ return ret;
+ if (reset_flag == 0)
+ memset(isr_stats, 0, sizeof(*isr_stats));
+
+ return count;
+}
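+/*
+ * Usage note: the "interrupt" debugfs file parses its input as hex
+ * (kstrtou32_from_user() with base 16); writing 0, e.g.
+ * "echo 0 > interrupt" in the transport's debugfs directory, clears
+ * the accumulated interrupt statistics.
+ */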
+
+static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+
+ iwl_pcie_dump_csr(trans);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ char *buf = NULL;
+ ssize_t ret;
+
+ ret = iwl_dump_fh(trans, &buf);
+ if (ret < 0)
+ return ret;
+ if (!buf)
+ return -EINVAL;
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ char buf[100];
+ int pos;
+
+ pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
+ trans_pcie->debug_rfkill,
+ !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool new_value;
+ int ret;
+
+ ret = kstrtobool_from_user(user_buf, count, &new_value);
+ if (ret)
+ return ret;
+ if (new_value == trans_pcie->debug_rfkill)
+ return count;
+ IWL_WARN(trans, "changing debug rfkill %d->%d\n",
+ trans_pcie->debug_rfkill, new_value);
+ trans_pcie->debug_rfkill = new_value;
+ iwl_pcie_handle_rfkill_irq(trans, false);
+
+ return count;
+}
+
+static int iwl_dbgfs_monitor_data_open(struct inode *inode,
+ struct file *file)
+{
+ struct iwl_trans *trans = inode->i_private;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans->dbg.dest_tlv ||
+ trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
+ IWL_ERR(trans, "Debug destination is not set to DRAM\n");
+ return -ENOENT;
+ }
+
+ if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
+ return -EBUSY;
+
+ trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
+ return simple_open(inode, file);
+}
+
+static int iwl_dbgfs_monitor_data_release(struct inode *inode,
+ struct file *file)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
+
+ if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
+ trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
+ return 0;
+}
+
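+/*
+ * Copy up to *size bytes from buf to user_buf, first rounding the
+ * space left in the user buffer down to a whole number of dwords.
+ * On return, *size holds the number of bytes actually copied and
+ * *bytes_copied is advanced by that amount; the return value is
+ * true once the (dword-rounded) user buffer space is exhausted.
+ */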
+static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
+ void *buf, ssize_t *size,
+ ssize_t *bytes_copied)
+{
+ ssize_t buf_size_left = count - *bytes_copied;
+
+ buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
+ if (*size > buf_size_left)
+ *size = buf_size_left;
+
+ *size -= copy_to_user(user_buf, buf, *size);
+ *bytes_copied += *size;
+
+ if (buf_size_left == *size)
+ return true;
+ return false;
+}
+
+static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
+ struct cont_rec *data = &trans_pcie->fw_mon_data;
+ u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
+ ssize_t size, bytes_copied = 0;
+ bool b_full;
+
+ if (trans->dbg.dest_tlv) {
+ write_ptr_addr =
+ le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
+ wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
+ } else {
+ write_ptr_addr = MON_BUFF_WRPTR;
+ wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
+ }
+
+ if (unlikely(!trans->dbg.rec_on))
+ return 0;
+
+ mutex_lock(&data->mutex);
+ if (data->state ==
+ IWL_FW_MON_DBGFS_STATE_DISABLED) {
+ mutex_unlock(&data->mutex);
+ return 0;
+ }
+
+ /* write_ptr position in bytes rather than DW */
+ write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
+ wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
+
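+ /*
+ * Three cases follow: if the wrap counter is unchanged, copy
+ * only the linear delta since the last read; if the buffer
+ * wrapped exactly once past our position, copy the tail (old
+ * pointer to end of buffer) and then the head (buffer start
+ * to the current write pointer); anything else means we lost
+ * sync, so restart copying from the beginning of the buffer.
+ */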
+ if (data->prev_wrap_cnt == wrap_cnt) {
+ size = write_ptr - data->prev_wr_ptr;
+ curr_buf = cpu_addr + data->prev_wr_ptr;
+ b_full = iwl_write_to_user_buf(user_buf, count,
+ curr_buf, &size,
+ &bytes_copied);
+ data->prev_wr_ptr += size;
+
+ } else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
+ write_ptr < data->prev_wr_ptr) {
+ size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
+ curr_buf = cpu_addr + data->prev_wr_ptr;
+ b_full = iwl_write_to_user_buf(user_buf, count,
+ curr_buf, &size,
+ &bytes_copied);
+ data->prev_wr_ptr += size;
+
+ if (!b_full) {
+ size = write_ptr;
+ b_full = iwl_write_to_user_buf(user_buf, count,
+ cpu_addr, &size,
+ &bytes_copied);
+ data->prev_wr_ptr = size;
+ data->prev_wrap_cnt++;
+ }
+ } else {
+ if (data->prev_wrap_cnt == wrap_cnt - 1 &&
+ write_ptr > data->prev_wr_ptr)
+ IWL_WARN(trans,
+ "write pointer passed previous write pointer, start copying from the beginning\n");
+ else if (!unlikely(data->prev_wrap_cnt == 0 &&
+ data->prev_wr_ptr == 0))
+ IWL_WARN(trans,
+ "monitor data is out of sync, start copying from the beginning\n");
+
+ size = write_ptr;
+ b_full = iwl_write_to_user_buf(user_buf, count,
+ cpu_addr, &size,
+ &bytes_copied);
+ data->prev_wr_ptr = size;
+ data->prev_wrap_cnt = wrap_cnt;
+ }
+
+ mutex_unlock(&data->mutex);
+
+ return bytes_copied;
+}
+
+static ssize_t iwl_dbgfs_rf_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->rf_name[0])
+ return -ENODEV;
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ trans_pcie->rf_name,
+ strlen(trans_pcie->rf_name));
+}
+
+static ssize_t iwl_dbgfs_reset_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ static const char * const modes[] = {
+ [IWL_RESET_MODE_SW_RESET] = "sw",
+ [IWL_RESET_MODE_REPROBE] = "reprobe",
+ [IWL_RESET_MODE_TOP_RESET] = "top",
+ [IWL_RESET_MODE_REMOVE_ONLY] = "remove",
+ [IWL_RESET_MODE_RESCAN] = "rescan",
+ [IWL_RESET_MODE_FUNC_RESET] = "function",
+ [IWL_RESET_MODE_PROD_RESET] = "product",
+ };
+ char buf[10] = {};
+ int mode;
+
+ if (count > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ mode = sysfs_match_string(modes, buf);
+ if (mode < 0)
+ return mode;
+
+ if (mode < IWL_RESET_MODE_REMOVE_ONLY) {
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return -EINVAL;
+ if (mode == IWL_RESET_MODE_TOP_RESET) {
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)
+ return -EINVAL;
+ trans->request_top_reset = 1;
+ }
+ iwl_op_mode_nic_error(trans->op_mode, IWL_ERR_TYPE_DEBUGFS);
+ iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_DEBUGFS);
+ return count;
+ }
+
+ iwl_trans_pcie_reset(trans, mode);
+
+ return count;
+}
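+/*
+ * Usage sketch (illustrative; the exact debugfs path varies): write
+ * one of the mode names above to the "reset" debugfs file, e.g.
+ * "echo reprobe > .../iwlwifi/reset". Modes below "remove" go
+ * through the normal error-handling path; "remove", "rescan",
+ * "function" and "product" schedule a PCI-level removal/reset.
+ */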
+
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_WRITE_FILE_OPS(csr);
+DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
+DEBUGFS_READ_FILE_OPS(rf);
+DEBUGFS_WRITE_FILE_OPS(reset);
+
+static const struct file_operations iwl_dbgfs_tx_queue_ops = {
+ .owner = THIS_MODULE,
+ .open = iwl_dbgfs_tx_queue_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+static const struct file_operations iwl_dbgfs_monitor_data_ops = {
+ .read = iwl_dbgfs_monitor_data_read,
+ .open = iwl_dbgfs_monitor_data_open,
+ .release = iwl_dbgfs_monitor_data_release,
+};
+
+/* Create the debugfs files and directories */
+void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
+{
+ struct dentry *dir = trans->dbgfs_dir;
+
+ DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
+ DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
+ DEBUGFS_ADD_FILE(interrupt, dir, 0600);
+ DEBUGFS_ADD_FILE(csr, dir, 0200);
+ DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
+ DEBUGFS_ADD_FILE(rfkill, dir, 0600);
+ DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
+ DEBUGFS_ADD_FILE(rf, dir, 0400);
+ DEBUGFS_ADD_FILE(reset, dir, 0200);
+}
+
+void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct cont_rec *data = &trans_pcie->fw_mon_data;
+
+ mutex_lock(&data->mutex);
+ data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
+ mutex_unlock(&data->mutex);
+}
+#endif /*CONFIG_IWLWIFI_DEBUGFS */
+
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 cmdlen = 0;
+ int i;
+
+ for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++)
+ cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
+
+ return cmdlen;
+}
+
+static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data,
+ int allocated_rb_nums)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int max_len = trans_pcie->rx_buf_bytes;
+ /* Dump RBs is supported only for pre-9000 devices (1 queue) */
+ struct iwl_rxq *rxq = &trans_pcie->rxq[0];
+ u32 i, r, j, rb_len = 0;
+
+ spin_lock_bh(&rxq->lock);
+
+ r = iwl_get_closed_rb_stts(trans, rxq);
+
+ for (i = rxq->read, j = 0;
+ i != r && j < allocated_rb_nums;
+ i = (i + 1) & RX_QUEUE_MASK, j++) {
+ struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
+ struct iwl_fw_error_dump_rb *rb;
+
+ dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
+ max_len, DMA_FROM_DEVICE);
+
+ rb_len += sizeof(**data) + sizeof(*rb) + max_len;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
+ (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
+ rb = (void *)(*data)->data;
+ rb->index = cpu_to_le32(i);
+ memcpy(rb->data, page_address(rxb->page), max_len);
+
+ *data = iwl_fw_error_next_data(*data);
+ }
+
+ spin_unlock_bh(&rxq->lock);
+
+ return rb_len;
+}
+#define IWL_CSR_TO_DUMP (0x250)
+
+static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data)
+{
+ u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
+ __le32 *val;
+ int i;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
+ (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
+ val = (void *)(*data)->data;
+
+ for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
+ *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+
+ *data = iwl_fw_error_next_data(*data);
+
+ return csr_len;
+}
+
+static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data)
+{
+ u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
+ __le32 *val;
+ int i;
+
+ if (!iwl_trans_grab_nic_access(trans))
+ return 0;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
+ (*data)->len = cpu_to_le32(fh_regs_len);
+ val = (void *)(*data)->data;
+
+ if (!trans->mac_cfg->gen2)
+ for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
+ i += sizeof(u32))
+ *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+ else
+ for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
+ i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
+ i += sizeof(u32))
+ *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
+ i));
+
+ iwl_trans_release_nic_access(trans);
+
+ *data = iwl_fw_error_next_data(*data);
+
+ return sizeof(**data) + fh_regs_len;
+}
+
+static u32
+iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_fw_mon *fw_mon_data,
+ u32 monitor_len)
+{
+ u32 buf_size_in_dwords = (monitor_len >> 2);
+ u32 *buffer = (u32 *)fw_mon_data->data;
+ u32 i;
+
+ if (!iwl_trans_grab_nic_access(trans))
+ return 0;
+
+ iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
+ for (i = 0; i < buf_size_in_dwords; i++)
+ buffer[i] = iwl_read_umac_prph_no_grab(trans,
+ MON_DMARB_RD_DATA_ADDR);
+ iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
+
+ iwl_trans_release_nic_access(trans);
+
+ return monitor_len;
+}
+
+static void
+iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_fw_mon *fw_mon_data)
+{
+ u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
+ base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
+ write_ptr = DBGC_CUR_DBGBUF_STATUS;
+ wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
+ } else if (trans->dbg.dest_tlv) {
+ write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
+ wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
+ base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
+ } else {
+ base = MON_BUFF_BASE_ADDR;
+ write_ptr = MON_BUFF_WRPTR;
+ wrap_cnt = MON_BUFF_CYCLE_CNT;
+ }
+
+ write_ptr_val = iwl_read_prph(trans, write_ptr);
+ fw_mon_data->fw_mon_cycle_cnt =
+ cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+ fw_mon_data->fw_mon_base_ptr =
+ cpu_to_le32(iwl_read_prph(trans, base));
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ fw_mon_data->fw_mon_base_high_ptr =
+ cpu_to_le32(iwl_read_prph(trans, base_high));
+ write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
+ /* convert wrtPtr to DWs, to align with all HWs */
+ write_ptr_val >>= 2;
+ }
+ fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
+}
+
+static u32
+iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data,
+ u32 monitor_len)
+{
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+ u32 len = 0;
+
+ if (trans->dbg.dest_tlv ||
+ (fw_mon->size &&
+ (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
+ trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
+ struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+ fw_mon_data = (void *)(*data)->data;
+
+ iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
+
+ len += sizeof(**data) + sizeof(*fw_mon_data);
+ if (fw_mon->size) {
+ memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
+ monitor_len = fw_mon->size;
+ } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
+ u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
+ /*
+ * Update pointers to reflect actual values after
+ * shifting
+ */
+ if (trans->dbg.dest_tlv->version) {
+ base = (iwl_read_prph(trans, base) &
+ IWL_LDBG_M2S_BUF_BA_MSK) <<
+ trans->dbg.dest_tlv->base_shift;
+ base *= IWL_M2S_UNIT_SIZE;
+ base += trans->mac_cfg->base->smem_offset;
+ } else {
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg.dest_tlv->base_shift;
+ }
+
+ iwl_trans_pcie_read_mem(trans, base, fw_mon_data->data,
+ monitor_len / sizeof(u32));
+ } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
+ monitor_len =
+ iwl_trans_pci_dump_marbh_monitor(trans,
+ fw_mon_data,
+ monitor_len);
+ } else {
+ /* Didn't match anything - output no monitor data */
+ monitor_len = 0;
+ }
+
+ len += monitor_len;
+ (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
+ }
+
+ return len;
+}
+
+static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
+{
+ if (trans->dbg.fw_mon.size) {
+ *len += sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_fw_mon) +
+ trans->dbg.fw_mon.size;
+ return trans->dbg.fw_mon.size;
+ } else if (trans->dbg.dest_tlv) {
+ u32 base, end, cfg_reg, monitor_len;
+
+ if (trans->dbg.dest_tlv->version == 1) {
+ cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
+ cfg_reg = iwl_read_prph(trans, cfg_reg);
+ base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
+ trans->dbg.dest_tlv->base_shift;
+ base *= IWL_M2S_UNIT_SIZE;
+ base += trans->mac_cfg->base->smem_offset;
+
+ monitor_len =
+ (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
+ trans->dbg.dest_tlv->end_shift;
+ monitor_len *= IWL_M2S_UNIT_SIZE;
+ } else {
+ base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
+ end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
+
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg.dest_tlv->base_shift;
+ end = iwl_read_prph(trans, end) <<
+ trans->dbg.dest_tlv->end_shift;
+
+ /* Make "end" point to the actual end */
+ if (trans->mac_cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000 ||
+ trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
+ end += (1 << trans->dbg.dest_tlv->end_shift);
+ monitor_len = end - base;
+ }
+ *len += sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_fw_mon) +
+ monitor_len;
+ return monitor_len;
+ }
+ return 0;
+}
+
+struct iwl_trans_dump_data *
+iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_fw_error_dump_data *data;
+ struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
+ struct iwl_fw_error_dump_txcmd *txcmd;
+ struct iwl_trans_dump_data *dump_data;
+ u32 len, num_rbs = 0, monitor_len = 0;
+ int i, ptr;
+ bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
+ !trans->mac_cfg->mq_rx_supported &&
+ dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
+
+ if (!dump_mask)
+ return NULL;
+
+ /* transport dump header */
+ len = sizeof(*dump_data);
+
+ /* host commands */
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
+ len += sizeof(*data) +
+ cmdq->n_window * (sizeof(*txcmd) +
+ TFD_MAX_PAYLOAD_SIZE);
+
+ /* FW monitor */
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+ monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
+
+ /* CSR registers */
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+ /* FH registers */
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
+ if (trans->mac_cfg->gen2)
+ len += sizeof(*data) +
+ (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
+ iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
+ else
+ len += sizeof(*data) +
+ (FH_MEM_UPPER_BOUND -
+ FH_MEM_LOWER_BOUND);
+ }
+
+ if (dump_rbs) {
+ /* Dumping RBs is supported only for pre-9000 devices (1 queue) */
+ struct iwl_rxq *rxq = &trans_pcie->rxq[0];
+ /* RBs */
+ spin_lock_bh(&rxq->lock);
+ num_rbs = iwl_get_closed_rb_stts(trans, rxq);
+ num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
+ spin_unlock_bh(&rxq->lock);
+
+ len += num_rbs * (sizeof(*data) +
+ sizeof(struct iwl_fw_error_dump_rb) +
+ (PAGE_SIZE << trans_pcie->rx_page_order));
+ }
+
+ /* Paged memory for gen2 HW */
+ if (trans->mac_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
+ for (i = 0; i < trans->init_dram.paging_cnt; i++)
+ len += sizeof(*data) +
+ sizeof(struct iwl_fw_error_dump_paging) +
+ trans->init_dram.paging[i].size;
+
+ dump_data = vzalloc(len);
+ if (!dump_data)
+ return NULL;
+
+ len = 0;
+ data = (void *)dump_data->data;
+
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
+ u16 tfd_size = trans_pcie->txqs.tfd.size;
+
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+ txcmd = (void *)data->data;
+ spin_lock_bh(&cmdq->lock);
+ ptr = cmdq->write_ptr;
+ for (i = 0; i < cmdq->n_window; i++) {
+ u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
+ u8 tfdidx;
+ u32 caplen, cmdlen;
+
+ if (trans->mac_cfg->gen2)
+ tfdidx = idx;
+ else
+ tfdidx = ptr;
+
+ cmdlen = iwl_trans_pcie_get_cmdlen(trans,
+ (u8 *)cmdq->tfds +
+ tfd_size * tfdidx);
+ caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+ if (cmdlen) {
+ len += sizeof(*txcmd) + caplen;
+ txcmd->cmdlen = cpu_to_le32(cmdlen);
+ txcmd->caplen = cpu_to_le32(caplen);
+ memcpy(txcmd->data, cmdq->entries[idx].cmd,
+ caplen);
+ if (sanitize_ops && sanitize_ops->frob_hcmd)
+ sanitize_ops->frob_hcmd(sanitize_ctx,
+ txcmd->data,
+ caplen);
+ txcmd = (void *)((u8 *)txcmd->data + caplen);
+ }
+
+ ptr = iwl_txq_dec_wrap(trans, ptr);
+ }
+ spin_unlock_bh(&cmdq->lock);
+
+ data->len = cpu_to_le32(len);
+ len += sizeof(*data);
+ data = iwl_fw_error_next_data(data);
+ }
+
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ len += iwl_trans_pcie_dump_csr(trans, &data);
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
+ len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+ if (dump_rbs)
+ len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
+
+ /* Paged memory for gen2 HW */
+ if (trans->mac_cfg->gen2 &&
+ dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
+ for (i = 0; i < trans->init_dram.paging_cnt; i++) {
+ struct iwl_fw_error_dump_paging *paging;
+ u32 page_len = trans->init_dram.paging[i].size;
+
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
+ data->len = cpu_to_le32(sizeof(*paging) + page_len);
+ paging = (void *)data->data;
+ paging->index = cpu_to_le32(i);
+ memcpy(paging->data,
+ trans->init_dram.paging[i].block, page_len);
+ data = iwl_fw_error_next_data(data);
+
+ len += sizeof(*data) + sizeof(*paging) + page_len;
+ }
+ }
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+ len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+
+ dump_data->len = len;
+
+ return dump_data;
+}
+
+void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
+{
+ if (enable)
+ iwl_enable_interrupts(trans);
+ else
+ iwl_disable_interrupts(trans);
+}
+
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
+{
+ u32 inta_addr, sw_err_bit;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
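+ /* Pick the interrupt-cause register and SW-error bit that match
+ * the interrupt mode (MSI-X vs. legacy).
+ */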
+ if (trans_pcie->msix_enabled) {
+ inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
+ else
+ sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
+ } else {
+ inta_addr = CSR_INT;
+ sw_err_bit = CSR_INT_BIT_SW_ERR;
+ }
+
+ iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
+}
+
+struct iwl_trans *
+iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ const struct iwl_mac_cfg *mac_cfg,
+ struct iwl_trans_info *info)
+{
+ struct iwl_trans_pcie *trans_pcie, **priv;
+ struct iwl_trans *trans;
+ unsigned int bc_tbl_n_entries;
+ int ret, addr_size;
+ u32 bar0;
+
+ /* reassign our BAR 0 if invalid due to possible runtime PM races */
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0);
+ if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ ret = pci_assign_resource(pdev, 0);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
+ mac_cfg);
+ if (!trans)
+ return ERR_PTR(-ENOMEM);
+
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* Initialize the wait queue for commands */
+ init_waitqueue_head(&trans_pcie->wait_command_queue);
+
+ if (trans->mac_cfg->gen2) {
+ trans_pcie->txqs.tfd.addr_size = 64;
+ trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
+ trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
+ } else {
+ trans_pcie->txqs.tfd.addr_size = 36;
+ trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
+ trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
+ }
+
+ trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(12);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(11);
+
+ info->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
+
+ trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
+ if (!trans_pcie->txqs.tso_hdr_page) {
+ ret = -ENOMEM;
+ goto out_free_trans;
+ }
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_BZ;
+ else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_AX210;
+ else
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE;
+
+ trans_pcie->txqs.bc_tbl_size =
+ sizeof(struct iwl_bc_tbl_entry) * bc_tbl_n_entries;
+ /*
+ * For gen2 devices, we use a single allocation for each byte-count
+ * table, but they're pretty small (1k) so use a DMA pool that we
+ * allocate here.
+ */
+ if (trans->mac_cfg->gen2) {
+ trans_pcie->txqs.bc_pool =
+ dmam_pool_create("iwlwifi:bc", trans->dev,
+ trans_pcie->txqs.bc_tbl_size,
+ 256, 0);
+ if (!trans_pcie->txqs.bc_pool) {
+ ret = -ENOMEM;
+ goto out_free_tso;
+ }
+ }
+
+ /* Some things must not change even if the config does */
+ WARN_ON(trans_pcie->txqs.tfd.addr_size !=
+ (trans->mac_cfg->gen2 ? 64 : 36));
+
+ /* Initialize NAPI here - it should be before registering to mac80211
+ * in the opmode but after the HW struct is allocated.
+ */
+ trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
+ if (!trans_pcie->napi_dev) {
+ ret = -ENOMEM;
+ goto out_free_tso;
+ }
+ /* The private struct in netdev is a pointer to struct iwl_trans_pcie */
+ priv = netdev_priv(trans_pcie->napi_dev);
+ *priv = trans_pcie;
+
+ trans_pcie->trans = trans;
+ trans_pcie->opmode_down = true;
+ spin_lock_init(&trans_pcie->irq_lock);
+ spin_lock_init(&trans_pcie->reg_lock);
+ spin_lock_init(&trans_pcie->alloc_page_lock);
+ mutex_init(&trans_pcie->mutex);
+ init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+ init_waitqueue_head(&trans_pcie->fw_reset_waitq);
+ init_waitqueue_head(&trans_pcie->imr_waitq);
+
+ trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+ WQ_HIGHPRI | WQ_UNBOUND, 0);
+ if (!trans_pcie->rba.alloc_wq) {
+ ret = -ENOMEM;
+ goto out_free_ndev;
+ }
+ INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
+ trans_pcie->debug_rfkill = -1;
+
+ if (!mac_cfg->base->pcie_l1_allowed) {
+ /*
+ * W/A - seems to solve weird behavior. We need to remove this
+ * if we don't want to stay in L1 all the time. This wastes a
+ * lot of power.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+ }
+
+ pci_set_master(pdev);
+
+ addr_size = trans_pcie->txqs.tfd.addr_size;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (ret) {
+ dev_err(&pdev->dev, "No suitable DMA available\n");
+ goto out_no_pci;
+ }
+ }
+
+ ret = pcim_request_all_regions(pdev, DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Requesting all PCI BARs failed.\n");
+ goto out_no_pci;
+ }
+
+ trans_pcie->hw_base = pcim_iomap(pdev, 0, 0);
+ if (!trans_pcie->hw_base) {
+ dev_err(&pdev->dev, "Could not ioremap PCI BAR 0.\n");
+ ret = -ENODEV;
+ goto out_no_pci;
+ }
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ trans_pcie->pci_dev = pdev;
+ iwl_disable_interrupts(trans);
+
+ info->hw_rev = iwl_read32(trans, CSR_HW_REV);
+ if (info->hw_rev == 0xffffffff) {
+ dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
+ ret = -EIO;
+ goto out_no_pci;
+ }
+
+ /*
+ * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
+ * changed, and now the revision step also includes bits 0-1 (no more
+ * "dash" value). To keep hw_rev backwards compatible - we'll store it
+ * in the old format.
+ */
+ if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ info->hw_rev_step = info->hw_rev & 0xF;
+ else
+ info->hw_rev_step = (info->hw_rev & 0xC) >> 2;
+
+ IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", info->hw_rev);
+
+ iwl_pcie_set_interrupt_capa(pdev, trans, mac_cfg, info);
+
+ init_waitqueue_head(&trans_pcie->sx_waitq);
+
+ ret = iwl_pcie_alloc_invalid_tx_cmd(trans);
+ if (ret)
+ goto out_no_pci;
+
+ if (trans_pcie->msix_enabled) {
+ ret = iwl_pcie_init_msix_handler(pdev, trans_pcie, info);
+ if (ret)
+ goto out_no_pci;
+ } else {
+ ret = iwl_pcie_alloc_ict(trans);
+ if (ret)
+ goto out_no_pci;
+
+ ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ iwl_pcie_isr,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans);
+ if (ret) {
+ IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
+ goto out_free_ict;
+ }
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
+ mutex_init(&trans_pcie->fw_mon_data.mutex);
+#endif
+
+ iwl_dbg_tlv_init(trans);
+
+ return trans;
+
+out_free_ict:
+ iwl_pcie_free_ict(trans);
+out_no_pci:
+ destroy_workqueue(trans_pcie->rba.alloc_wq);
+out_free_ndev:
+ free_netdev(trans_pcie->napi_dev);
+out_free_tso:
+ free_percpu(trans_pcie->txqs.tso_hdr_page);
+out_free_trans:
+ iwl_trans_free(trans);
+ return ERR_PTR(ret);
+}
+
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
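+ /* Halt the UMAC, then program IMR DMA channel 0: SRAM destination
+ * address, 64-bit DRAM source address split into LSB/MSB, the byte
+ * count, and finally the control word that starts the DRAM-to-SRAM
+ * transfer.
+ */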
+ iwl_write_prph(trans, IMR_UREG_CHICK,
+ iwl_read_prph(trans, IMR_UREG_CHICK) |
+ IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
+ (u32)(src_addr & 0xFFFFFFFF));
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
+ iwl_get_dma_hi_addr(src_addr));
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
+}
+
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = -1;
+
+ trans_pcie->imr_status = IMR_D2S_REQUESTED;
+ iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
+ ret = wait_event_timeout(trans_pcie->imr_waitq,
+ trans_pcie->imr_status !=
+ IMR_D2S_REQUESTED, 5 * HZ);
+ if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
+ IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
+ iwl_trans_pcie_dump_regs(trans);
+ return -ETIMEDOUT;
+ }
+ trans_pcie->imr_status = IMR_D2S_IDLE;
+ return 0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2020, 2023-2025 Intel Corporation
+ */
+#include <net/tso.h>
+#include <linux/tcp.h>
+
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "internal.h"
+#include "fw/api/tx.h"
+#include "fw/api/commands.h"
+#include "fw/api/datapath.h"
+#include "iwl-scd.h"
+
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct iwl_tso_page_info *info;
+ struct page **page_ptr;
+ struct page *ret;
+ dma_addr_t phys;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ info = IWL_TSO_PAGE_INFO(page_address(ret));
+
+ /* Create a DMA mapping for the page */
+ phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(trans->dev, phys))) {
+ __free_page(ret);
+ return NULL;
+ }
+
+ /* Store physical address and set use count */
+ info->dma_addr = phys;
+ refcount_set(&info->use_count, 1);
+
+ /* set the chaining pointer to the previous page if there */
+ info->next = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta,
+ bool unmap)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+ * Work around a hardware bug. If (as expressed in the
+ * condition above) the TB ends on a 32-bit boundary,
+ * then the next TB may be accessed with the wrong
+ * address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ /*
+ * This is a bit odd, but performance does not matter here; what
+ * matters are the expectations of the calling code and the TB
+ * cleanup function.
+ *
+ * As such, if unmap is set, then create another mapping for the TB
+ * entry as it will be unmapped later. On the other hand, if it is not
+ * set, then the TB entry will not be unmapped and instead we simply
+ * reference and sync the mapping that get_workaround_page() created.
+ */
+ if (unmap) {
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ } else {
+ phys = iwl_pcie_get_tso_page_phys(page_address(page));
+ dma_sync_single_for_device(trans->dev, phys, len,
+ DMA_TO_DEVICE);
+ }
+
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+
+ IWL_DEBUG_TX(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys,
+ (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (!unmap)
+ goto trace;
+
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
+
+static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ struct iwl_cmd_meta *out_meta,
+ int start_len,
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+#ifdef CONFIG_INET
+ struct iwl_tx_cmd_v9 *tx_cmd = (void *)dev_cmd->payload;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
+ dma_addr_t start_hdr_phys;
+ u16 length, amsdu_pad;
+ u8 *start_hdr;
+ struct sg_table *sgt;
+ struct tso_t tso;
+
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
+ &dev_cmd->hdr, start_len, 0);
+
+ ip_hdrlen = skb_network_header_len(skb);
+ snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
+ amsdu_pad = 0;
+
+ /* total amount of header we may need for this A-MSDU */
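+ /* (per subframe: up to 3 bytes of pad, an ethernet-style subframe
+ * header and the duplicated SNAP/IP/TCP headers)
+ */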
+ hdr_room = DIV_ROUND_UP(total_len, mss) *
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
+
+ /* Our device supports 9 segments at most; they will fit in one page */
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
+ snap_ip_tcp_hdrlen + hdr_len);
+ if (!sgt)
+ return -ENOMEM;
+
+ start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
+ /*
+ * Pull the ieee80211 header to be able to use TSO core,
+ * we will restore it for the tx_status flow.
+ */
+ skb_pull(skb, hdr_len);
+
+ /*
+ * Remove the length of all the headers that we don't actually
+ * have in the MPDU by themselves, but that we duplicate into
+ * all the different MSDUs inside the A-MSDU.
+ */
+ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
+ tso_start(skb, &tso);
+
+ while (total_len) {
+ /* this is the data left for this subframe */
+ unsigned int data_left = min_t(unsigned int, mss, total_len);
+ unsigned int tb_len;
+ dma_addr_t tb_phys;
+ u8 *pos_hdr = start_hdr;
+
+ total_len -= data_left;
+
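+ /*
+ * Each subframe header must start on a 4-byte boundary; amsdu_pad
+ * holds the padding owed from the previous subframe and is
+ * recomputed below for the next one.
+ */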
+ memset(pos_hdr, 0, amsdu_pad);
+ pos_hdr += amsdu_pad;
+ amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+ data_left)) & 0x3;
+ ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
+ pos_hdr += ETH_ALEN;
+ ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
+ pos_hdr += ETH_ALEN;
+
+ length = snap_ip_tcp_hdrlen + data_left;
+ *((__be16 *)pos_hdr) = cpu_to_be16(length);
+ pos_hdr += sizeof(length);
+
+ /*
+ * This will copy the SNAP as well which will be considered
+ * as MAC header.
+ */
+ tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
+
+ pos_hdr += snap_ip_tcp_hdrlen;
+
+ tb_len = pos_hdr - start_hdr;
+ tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it so we can't hit
+ * the buggy scenario.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ tb_phys, tb_len);
+ /* add this subframe's headers' length to the tx_cmd */
+ le16_add_cpu(&tx_cmd->len, tb_len);
+
+ /* prepare the start_hdr for the next subframe */
+ start_hdr = pos_hdr;
+
+ /* put the payload */
+ while (data_left) {
+ int ret;
+
+ tb_len = min_t(unsigned int, tso.size, data_left);
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset,
+ tb_len);
+ /* Not a real mapping error, use direct comparison */
+ if (unlikely(tb_phys == DMA_MAPPING_ERROR))
+ goto out_err;
+
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL, false);
+ if (ret)
+ goto out_err;
+
+ data_left -= tb_len;
+ data_offset += tb_len;
+ tso_build_data(skb, &tso, tb_len);
+ }
+ }
+
+ dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
+ DMA_TO_DEVICE);
+
+ /* re-add the WiFi header */
+ skb_push(skb, hdr_len);
+
+ return 0;
+
+out_err:
+#endif
+ return -EINVAL;
+}
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len;
+ void *tb1_addr;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
+
+ if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, out_meta,
+ len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd))
+ goto out_err;
+
+ /* building the A-MSDU might have changed this data, memcpy it now */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ return tfd;
+
+out_err:
+ iwl_pcie_free_tso_pages(trans, skb, out_meta);
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ struct iwl_cmd_meta *out_meta)
+{
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t tb_phys;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
+
+ if (!fragsz)
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len,
+ bool pad)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len, tb1_len, tb2_len;
+ void *tb1_addr;
+ struct sk_buff *frag;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /* The first TB points to bi-directional DMA data */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ if (pad)
+ tb1_len = ALIGN(len, 4);
+ else
+ tb1_len = len;
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
+
+ /* set up TFD's third entry to point to remainder of skb's head */
+ tb2_len = skb_headlen(skb) - hdr_len;
+
+ if (tb2_len > 0) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
+ tb2_len, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL, true);
+ if (ret)
+ goto out_err;
+ }
+
+ if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
+ goto out_err;
+
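+ /* Also walk the frag_list: map each chained skb's linear data and
+ * then its page fragments, with the boundary workaround applied.
+ */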
+ skb_walk_frags(skb, frag) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, frag->data,
+ skb_headlen(frag), DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL,
+ true);
+ if (ret)
+ goto out_err;
+ if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
+ goto out_err;
+ }
+
+ return tfd;
+
+out_err:
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static
+struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ int len, hdr_len;
+ bool amsdu;
+
+ /* There must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v9) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
+ offsetofend(struct iwl_tx_cmd_v9, dram_info) >
+ IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
+ offsetofend(struct iwl_tx_cmd, dram_info) >
+ IWL_FIRST_TB_SIZE);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ len = sizeof(struct iwl_tx_cmd_v9);
+ else
+ len = sizeof(struct iwl_tx_cmd);
+
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ /*
+ * Only build A-MSDUs here if doing so by GSO, otherwise it may be
+ * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
+ * built in the higher layers already.
+ */
+ if (amsdu && skb_shinfo(skb)->gso_size)
+ return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+ out_meta, hdr_len, len);
+ return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+ hdr_len, len, !amsdu);
+}
+
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
+{
+ unsigned int max;
+ unsigned int used;
+
+ /*
+ * To avoid ambiguity between empty and completely full queues, there
+ * should always be less than max_tfd_queue_size elements in the queue.
+ * If q->n_window is smaller than max_tfd_queue_size, there is no need
+ * to reserve any queue entries for this purpose.
+ */
+ if (q->n_window < trans->mac_cfg->base->max_tfd_queue_size)
+ max = q->n_window;
+ else
+ max = trans->mac_cfg->base->max_tfd_queue_size - 1;
+
+ /*
+ * max_tfd_queue_size is a power of 2, so the following is equivalent to
+ * modulo by max_tfd_queue_size and is well defined.
+ */
+ used = (q->write_ptr - q->read_ptr) &
+ (trans->mac_cfg->base->max_tfd_queue_size - 1);
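+ /* e.g. with max_tfd_queue_size 256, write_ptr 5 and read_ptr 250:
+ * used = (5 - 250) & 255 = 11 entries currently in use.
+ */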
+
+ if (WARN_ON(used > max))
+ return 0;
+
+ return max - used;
+}
+
+/*
+ * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
+ u8 filled_tfd_size, num_fetch_chunks;
+ u16 len = byte_cnt;
+ __le16 bc_ent;
+
+ if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
+ return;
+
+ filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+ * filled_tfd_size contains the number of filled bytes in the TFD.
+ * Dividing it by 64 gives the number of chunks to fetch to
+ * SRAM - 0 for one chunk, 1 for two, and so on.
+ * If, for example, the TFD contains only 3 TBs, then 32 bytes of
+ * the TFD are used and only one 64-byte chunk needs to be fetched.
+ */
+ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
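+ /*
+ * Byte-count table entry layout: AX210 and later store the byte
+ * count in bits 0-13 and the fetch-chunk count in bits 14-15;
+ * earlier gen2 devices store the length in dwords in bits 0-11
+ * and the fetch-chunk count in bits 12-15.
+ */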
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ WARN_ON(len > 0x3FFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
+ } else {
+ len = DIV_ROUND_UP(len, 4);
+ WARN_ON(len > 0xFFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ }
+
+ scd_bc_tbl[idx].tfd_offset = bc_ent;
+}
+
+static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd)
+{
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+}
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
+ dma_addr_t addr, u16 len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int idx = iwl_txq_gen2_get_num_tbs(tfd);
+ struct iwl_tfh_tb *tb;
+
+ /* Only WARN here so we know about the issue, but we mess up our
+ * unmap path because not every place currently checks for errors
+ * returned from this function - it can only return an error if
+ * there's no more space, and so when we know there is enough we
+ * don't always check ...
+ */
+ WARN(iwl_txq_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
+ if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
+ return -EINVAL;
+ tb = &tfd->tbs[idx];
+
+ /* Each TFD can point to a maximum of max_tbs Tx buffers */
+ if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ trans_pcie->txqs.tfd.max_tbs);
+ return -EINVAL;
+ }
+
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
+
+ tfd->num_tbs = cpu_to_le16(idx + 1);
+
+ return idx;
+}
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen2_get_num_tbs(tfd);
+
+ if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ return;
+ }
+
+ /* TB1 is mapped directly, the rest is the TSO page and SG list. */
+ if (meta->sg_offset)
+ num_tbs = 2;
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ }
+
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
+}
+
+static void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&txq->lock);
+
+ if (!txq->entries)
+ return;
+
+ iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_txq_get_tfd(trans, txq, idx));
+
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+}
+
+/*
+ * iwl_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
+}
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+ u16 cmd_len;
+ int idx;
+ void *tfd;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return -EINVAL;
+
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
+ "TX on unused queue %d\n", txq_id))
+ return -EINVAL;
+
+ if (skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
+ __skb_linearize(skb))
+ return -ENOMEM;
+
+ spin_lock(&txq->lock);
+
+ if (iwl_txq_space(trans, txq) < txq->high_mark) {
+ iwl_txq_stop(trans, txq);
+
+ /* don't put the packet on the ring if there is no room */
+ if (unlikely(iwl_txq_space(trans, txq) < 3)) {
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
+
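+ /* Stash the dev_cmd pointer in skb->cb and park the frame
+ * on the overflow queue; it is re-queued once TFDs are
+ * reclaimed and space frees up.
+ */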
+ dev_cmd_ptr = (void *)((u8 *)skb->cb +
+ trans->conf.cb_data_offs +
+ sizeof(void *));
+
+ *dev_cmd_ptr = dev_cmd;
+ __skb_queue_tail(&txq->overflow_q, skb);
+ spin_unlock(&txq->lock);
+ return 0;
+ }
+ }
+
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+
+ /* Set up driver data for this TFD */
+ txq->entries[idx].skb = skb;
+ txq->entries[idx].cmd = dev_cmd;
+
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(idx)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[idx].meta;
+ memset(out_meta, 0, sizeof(*out_meta));
+
+ tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
+ if (!tfd) {
+ spin_unlock(&txq->lock);
+ return -1;
+ }
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_tx_cmd *tx_cmd =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd->len);
+ } else {
+ struct iwl_tx_cmd_v9 *tx_cmd_v9 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_v9->len);
+ }
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
+ iwl_txq_gen2_get_num_tbs(tfd));
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ spin_unlock(&txq->lock);
+ return 0;
+}
+
+/*
+ * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->reclaim_lock);
+ spin_lock(&txq->lock);
+ while (txq->write_ptr != txq->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, txq->read_ptr);
+
+ if (txq_id != trans->conf.cmd_queue) {
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;
+ struct sk_buff *skb = txq->entries[idx].skb;
+
+ if (!WARN_ON_ONCE(!skb))
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
+ }
+ iwl_txq_gen2_free_tfd(trans, txq);
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+ }
+
+ while (!skb_queue_empty(&txq->overflow_q)) {
+ struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ }
+
+ spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->reclaim_lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_trans_pcie_wake_queue(trans, txq);
+}
+
+static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ trans_pcie->txqs.tfd.size * txq->n_window,
+ txq->tfds, txq->dma_addr);
+ dma_free_coherent(dev,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
+ }
+
+ kfree(txq->entries);
+ if (txq->bc_tbl.addr)
+ dma_pool_free(trans_pcie->txqs.bc_pool,
+ txq->bc_tbl.addr, txq->bc_tbl.dma);
+ kfree(txq);
+}
+
+/*
+ * iwl_pcie_txq_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq;
+ int i;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return;
+
+ txq = trans_pcie->txqs.txq[txq_id];
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_txq_gen2_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans->conf.cmd_queue)
+ for (i = 0; i < txq->n_window; i++) {
+ kfree_sensitive(txq->entries[i].cmd);
+ kfree_sensitive(txq->entries[i].free_buf);
+ }
+ timer_delete_sync(&txq->stuck_timer);
+
+ iwl_txq_gen2_free_memory(trans, txq);
+
+ trans_pcie->txqs.txq[txq_id] = NULL;
+
+ clear_bit(txq_id, trans_pcie->txqs.queue_used);
+}
+
+static struct iwl_txq *
+iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t bc_tbl_size, bc_tbl_entries;
+ struct iwl_txq *txq;
+ int ret;
+
+ WARN_ON(!trans_pcie->txqs.bc_tbl_size);
+
+ bc_tbl_size = trans_pcie->txqs.bc_tbl_size;
+ bc_tbl_entries = bc_tbl_size / sizeof(u16);
+
+ if (WARN_ON(size > bc_tbl_entries))
+ return ERR_PTR(-EINVAL);
+
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq)
+ return ERR_PTR(-ENOMEM);
+
+ txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->txqs.bc_pool, GFP_KERNEL,
+ &txq->bc_tbl.dma);
+ if (!txq->bc_tbl.addr) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ kfree(txq);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = iwl_pcie_txq_alloc(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue alloc failed\n");
+ goto error;
+ }
+ ret = iwl_txq_init(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue init failed\n");
+ goto error;
+ }
+
+ txq->wd_timeout = msecs_to_jiffies(timeout);
+
+ return txq;
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ERR_PTR(ret);
+}
+
+static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_host_cmd *hcmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue_cfg_rsp *rsp;
+ int ret, qid;
+ u32 wr_ptr;
+
+ if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
+ sizeof(*rsp))) {
+ ret = -EINVAL;
+ goto error_free_resp;
+ }
+
+ rsp = (void *)hcmd->resp_pkt->data;
+ qid = le16_to_cpu(rsp->queue_number);
+ wr_ptr = le16_to_cpu(rsp->write_pointer);
+
+ if (qid >= ARRAY_SIZE(trans_pcie->txqs.txq)) {
+ WARN_ONCE(1, "queue index %d unsupported", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ if (test_and_set_bit(qid, trans_pcie->txqs.queue_used)) {
+ WARN_ONCE(1, "queue %d already used", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ if (WARN_ONCE(trans_pcie->txqs.txq[qid],
+ "queue %d already allocated\n", qid)) {
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ txq->id = qid;
+ trans_pcie->txqs.txq[qid] = txq;
+ wr_ptr &= (trans->mac_cfg->base->max_tfd_queue_size - 1);
+
+ /* Place first TFD at index corresponding to start sequence number */
+ txq->read_ptr = wr_ptr;
+ txq->write_ptr = wr_ptr;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+
+ iwl_free_resp(hcmd);
+ return qid;
+
+error_free_resp:
+ iwl_free_resp(hcmd);
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq;
+ union {
+ struct iwl_tx_queue_cfg_cmd old;
+ struct iwl_scd_queue_cfg_cmd new;
+ } cmd;
+ struct iwl_host_cmd hcmd = {
+ .flags = CMD_WANT_SKB,
+ };
+ int ret;
+
+ /* take the min with bytecount table entries allowed */
+ size = min_t(u32, size, trans_pcie->txqs.bc_tbl_size / sizeof(u16));
+ /* but must be power of 2 values for calculating read/write pointers */
+ size = rounddown_pow_of_two(size);
+
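+ /*
+ * BZ A-step hardware uses a fixed 4096-entry queue; on other
+ * devices, retry with progressively smaller power-of-two sizes
+ * (down to 16) until the allocation succeeds.
+ */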
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
+ trans->info.hw_rev_step == SILICON_A_STEP) {
+ size = 4096;
+ txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+ } else {
+ do {
+ txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+ if (!IS_ERR(txq))
+ break;
+
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %ld\n",
+ size, sta_mask, tid,
+ PTR_ERR(txq));
+ size /= 2;
+ } while (size >= 16);
+ }
+
+ if (IS_ERR(txq))
+ return PTR_ERR(txq);
+
+ if (trans->conf.queue_alloc_cmd_ver == 0) {
+ memset(&cmd.old, 0, sizeof(cmd.old));
+ cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
+ cmd.old.tid = tid;
+
+ if (hweight32(sta_mask) != 1) {
+ ret = -EINVAL;
+ goto error;
+ }
+ cmd.old.sta_id = ffs(sta_mask) - 1;
+
+ hcmd.id = SCD_QUEUE_CFG;
+ hcmd.len[0] = sizeof(cmd.old);
+ hcmd.data[0] = &cmd.old;
+ } else if (trans->conf.queue_alloc_cmd_ver == 3) {
+ memset(&cmd.new, 0, sizeof(cmd.new));
+ cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
+ cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
+ cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.new.u.add.flags = cpu_to_le32(flags);
+ cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
+ cmd.new.u.add.tid = tid;
+
+ hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
+ hcmd.len[0] = sizeof(cmd.new);
+ hcmd.data[0] = &cmd.new;
+ } else {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret)
+ goto error;
+
+ return iwl_pcie_txq_alloc_response(trans, txq, &hcmd);
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", queue))
+ return;
+
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+ * in the op_mode. To keep the op_mode simple, allow it to call
+ * txq_disable after it has already called stop_device.
+ */
+ if (!test_and_clear_bit(queue, trans_pcie->txqs.queue_used)) {
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", queue);
+ return;
+ }
+
+ iwl_txq_gen2_free(trans, queue);
+
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
+}
+
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
+
+ /* Free all TX queues */
+ for (i = 0; i < ARRAY_SIZE(trans_pcie->txqs.txq); i++) {
+ if (!trans_pcie->txqs.txq[i])
+ continue;
+
+ iwl_txq_gen2_free(trans, i);
+ }
+}
+
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *queue;
+ int ret;
+
+ /* alloc and init the tx queue */
+ if (!trans_pcie->txqs.txq[txq_id]) {
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) {
+ IWL_ERR(trans, "Not enough memory for tx queue\n");
+ return -ENOMEM;
+ }
+ trans_pcie->txqs.txq[txq_id] = queue;
+ ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ } else {
+ queue = trans_pcie->txqs.txq[txq_id];
+ }
+
+ ret = iwl_txq_init(trans, queue, queue_size,
+ (txq_id == trans->conf.cmd_queue));
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ trans_pcie->txqs.txq[txq_id]->id = txq_id;
+ set_bit(txq_id, trans_pcie->txqs.queue_used);
+
+ return 0;
+
+error:
+ iwl_txq_gen2_tx_free(trans);
+ return ret;
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
+ * @trans: the transport
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns a negative value on failure. On success, it
+ * returns the index (>= 0) of the command in the command queue.
+ */
+int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ void *dup_buf = NULL;
+ dma_addr_t phys_addr;
+ int i, cmd_pos, idx;
+ u16 copy_size, cmd_size, tb0_size;
+ bool had_nocopy = false;
+ u8 group_id = iwl_cmd_groupid(cmd->id);
+ const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+ u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+ struct iwl_tfh_tfd *tfd;
+ unsigned long flags;
+
+ if (WARN_ON(cmd->flags & CMD_BLOCK_TXQS))
+ return -EINVAL;
+
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+ cmd_size = sizeof(struct iwl_cmd_header_wide);
+
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ cmddata[i] = cmd->data[i];
+ cmdlen[i] = cmd->len[i];
+
+ if (!cmd->len[i])
+ continue;
+
+ /* need at least IWL_FIRST_TB_SIZE copied */
+ if (copy_size < IWL_FIRST_TB_SIZE) {
+ int copy = IWL_FIRST_TB_SIZE - copy_size;
+
+ if (copy > cmdlen[i])
+ copy = cmdlen[i];
+ cmdlen[i] -= copy;
+ cmddata[i] += copy;
+ copy_size += copy;
+ }
+
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+ had_nocopy = true;
+ if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+ /*
+ * This is also a chunk that isn't copied
+ * to the static buffer so set had_nocopy.
+ */
+ had_nocopy = true;
+
+ /* only allowed once */
+ if (WARN_ON(dup_buf)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ dup_buf = kmemdup(cmddata[i], cmdlen[i],
+ GFP_ATOMIC);
+ if (!dup_buf)
+ return -ENOMEM;
+ } else {
+ /* NOCOPY must not be followed by normal! */
+ if (WARN_ON(had_nocopy)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ copy_size += cmdlen[i];
+ }
+ cmd_size += cmd->len[i];
+ }
+
+ /*
+ * If any of the command structures end up being larger than the
+ * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
+ * separate TFDs, then we will need to increase the size of the buffers
+ */
+ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+ "Command %s (%#x) is too large (%d bytes)\n",
+ iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ spin_lock_irqsave(&txq->lock, flags);
+
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
+ memset(tfd, 0, sizeof(*tfd));
+
+ if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ spin_unlock_irqrestore(&txq->lock, flags);
+
+ IWL_ERR(trans, "No space in command queue\n");
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_CMD_QUEUE_FULL);
+ iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);
+ idx = -ENOSPC;
+ goto free_dup_buf;
+ }
+
+ out_cmd = txq->entries[idx].cmd;
+ out_meta = &txq->entries[idx].meta;
+
+ /* re-initialize, this also marks the SG list as unused */
+ memset(out_meta, 0, sizeof(*out_meta));
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+
+ /* set up the header */
+ out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+ out_cmd->hdr_wide.group_id = group_id;
+ out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+ out_cmd->hdr_wide.length =
+ cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
+ out_cmd->hdr_wide.reserved = 0;
+ out_cmd->hdr_wide.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
+ INDEX_TO_SEQ(txq->write_ptr));
+
+ cmd_pos = sizeof(struct iwl_cmd_header_wide);
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+
+ /* and copy the data that needs to be copied */
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ int copy;
+
+ if (!cmd->len[i])
+ continue;
+
+ /* copy everything if not nocopy/dup */
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP))) {
+ copy = cmd->len[i];
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+ copy_size += copy;
+ continue;
+ }
+
+ /*
+ * Otherwise we need at least IWL_FIRST_TB_SIZE copied
+ * in total (for bi-directional DMA), but copy up to what
+ * we can fit into the payload for debug dump purposes.
+ */
+ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+
+ /* However, keep copy_size accurate - we need it below */
+ if (copy_size < IWL_FIRST_TB_SIZE) {
+ copy = IWL_FIRST_TB_SIZE - copy_size;
+
+ if (copy > cmd->len[i])
+ copy = cmd->len[i];
+ copy_size += copy;
+ }
+ }
+
+ IWL_DEBUG_HC(trans,
+ "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+ iwl_get_cmd_string(trans, cmd->id), group_id,
+ out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+ cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
+
+ /* start the TFD with the minimum copy bytes */
+ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
+ iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
+ tb0_size);
+
+ /* map first command fragment, if any remains */
+ if (copy_size > tb0_size) {
+ phys_addr = dma_map_single(trans->dev,
+ (u8 *)out_cmd + tb0_size,
+ copy_size - tb0_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(trans->dev, phys_addr)) {
+ idx = -ENOMEM;
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ goto out;
+ }
+ iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
+ copy_size - tb0_size);
+ }
+
+ /* map the remaining (adjusted) nocopy/dup fragments */
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ void *data = (void *)(uintptr_t)cmddata[i];
+
+ if (!cmdlen[i])
+ continue;
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP)))
+ continue;
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+ data = dup_buf;
+ phys_addr = dma_map_single(trans->dev, data,
+ cmdlen[i], DMA_TO_DEVICE);
+ if (dma_mapping_error(trans->dev, phys_addr)) {
+ idx = -ENOMEM;
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ goto out;
+ }
+ iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
+ }
+
+ BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
+ out_meta->flags = cmd->flags;
+ if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+ kfree_sensitive(txq->entries[idx].free_buf);
+ txq->entries[idx].free_buf = dup_buf;
+
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ spin_lock(&trans_pcie->reg_lock);
+ /* Increment and update queue's write index */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
+ spin_unlock(&trans_pcie->reg_lock);
+
+out:
+ spin_unlock_irqrestore(&txq->lock, flags);
+free_dup_buf:
+ if (idx < 0)
+ kfree(dup_buf);
+ return idx;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <linux/etherdevice.h>
+#include <linux/ieee80211.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/tcp.h>
+#include <net/ip6_checksum.h>
+#include <net/tso.h>
+
+#include "fw/api/commands.h"
+#include "fw/api/datapath.h"
+#include "fw/api/debug.h"
+#include "iwl-fh.h"
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "iwl-scd.h"
+#include "iwl-op-mode.h"
+#include "internal.h"
+#include "fw/api/tx.h"
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers
+ * for the device to read from or fill. Driver and device exchange status of
+ * each queue via "read" and "write" pointers. The driver keeps a minimum of
+ * 2 empty entries in each circular buffer, to protect against confusing
+ * empty and full queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For Tx queues there are low-mark and high-mark limits. If, after queuing
+ * a packet for Tx, the free space becomes smaller than the low mark, the Tx
+ * queue is stopped. When reclaiming packets (on the 'tx done' IRQ), if the
+ * free space becomes larger than the high mark, the Tx queue is resumed.
+ *
+ ***************************************************/
+
+
+int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ if (WARN_ON(ptr->addr))
+ return -EINVAL;
+
+ ptr->addr = dma_alloc_coherent(trans->dev, size,
+ &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+/*
+ * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ u32 reg = 0;
+ int txq_id = txq->id;
+
+ lockdep_assert_held(&txq->lock);
+
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+ * 2. NIC is woken up for CMD regardless of shadow outside this function
+ * 3. there is a chance that the NIC is asleep
+ */
+ if (!trans->mac_cfg->base->shadow_reg_enable &&
+ txq_id != trans->conf.cmd_queue &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+ /*
+ * wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part.
+ */
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+ txq_id, reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ txq->need_update = true;
+ return;
+ }
+ }
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
+ if (!txq->block)
+ iwl_write32(trans, HBUS_TARG_WRPTR,
+ txq->write_ptr | (txq_id << 8));
+}
+
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
+ struct iwl_txq *txq = trans_pcie->txqs.txq[i];
+
+ if (!test_bit(i, trans_pcie->txqs.queue_used))
+ continue;
+
+ spin_lock_bh(&txq->lock);
+ if (txq->need_update) {
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ txq->need_update = false;
+ }
+ spin_unlock_bh(&txq->lock);
+ }
+}
+
+static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd,
+ u8 idx, dma_addr_t addr, u16 len)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ hi_n_len |= iwl_get_dma_hi_addr(addr);
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
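+
+/*
+ * Resulting TB layout (derived from the packing above): tb->lo holds the
+ * low 32 address bits; the low nibble of tb->hi_n_len holds address bits
+ * 32-35 and its upper 12 bits hold the length. For example, addr =
+ * 0x9_2345_6780 with len = 20 gives lo = 0x23456780 and
+ * hi_n_len = (20 << 4) | 0x9 = 0x149.
+ */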
+
+static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
+
+static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+ dma_addr_t addr, u16 len, bool reset)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ void *tfd;
+ u32 num_tbs;
+
+ tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr;
+
+ if (reset)
+ memset(tfd, 0, trans_pcie->txqs.tfd.size);
+
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
+
+ /* Each TFD can point to a maximum of max_tbs Tx buffers */
+ if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ trans_pcie->txqs.tfd.max_tbs);
+ return -EINVAL;
+ }
+
+ if (WARN(addr & ~IWL_TX_DMA_MASK,
+ "Unaligned address = %llx\n", (unsigned long long)addr))
+ return -EINVAL;
+
+ iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len);
+
+ return num_tbs;
+}
+
+static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans->mac_cfg->base->apmg_wake_up_wa)
+ return;
+
+ spin_lock(&trans_pcie->reg_lock);
+
+ if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
+ spin_unlock(&trans_pcie->reg_lock);
+ return;
+ }
+
+ trans_pcie->cmd_hold_nic_awake = false;
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock(&trans_pcie->reg_lock);
+}
+
+static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
+ struct page *page)
+{
+ struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));
+
+ /* Decrease internal use count and unmap/free page if needed */
+ if (refcount_dec_and_test(&info->use_count)) {
+ dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
+ DMA_TO_DEVICE);
+
+ __free_page(page);
+ }
+}
+
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta)
+{
+ struct page **page_ptr;
+ struct page *next;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
+
+ while (next) {
+ struct iwl_tso_page_info *info;
+ struct page *tmp = next;
+
+ info = IWL_TSO_PAGE_INFO(page_address(next));
+ next = info->next;
+
+ /* Unmap the scatter gather list that is on the last page */
+ if (!next && cmd_meta->sg_offset) {
+ struct sg_table *sgt;
+
+ sgt = (void *)((u8 *)page_address(tmp) +
+ cmd_meta->sg_offset);
+
+ dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
+ }
+
+ iwl_pcie_free_and_unmap_tso_page(trans, tmp);
+ }
+}
+
+static inline dma_addr_t
+iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ dma_addr_t addr;
+ dma_addr_t hi_len;
+
+ addr = get_unaligned_le32(&tb->lo);
+
+ if (sizeof(dma_addr_t) <= sizeof(u32))
+ return addr;
+
+ hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+ /*
+ * shift by 16 twice to avoid warnings on 32-bit
+ * (where this code never runs anyway due to the
+ * if statement above)
+ */
+ return addr | ((hi_len << 16) << 16);
+}
+
+static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
+ struct iwl_tfd *tfd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ tfd->num_tbs = 0;
+
+ iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans_pcie->invalid_tx_cmd.dma,
+ trans_pcie->invalid_tx_cmd.size);
+}
+
+static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_txq *txq, int index)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+ struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
+
+ if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ /* @todo issue fatal error, it is quite a serious situation */
+ return;
+ }
+
+ /* TB1 is mapped directly, the rest is the TSO page and SG list. */
+ if (meta->sg_offset)
+ num_tbs = 2;
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ }
+
+ meta->tbs = 0;
+
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
+}
+
+/**
+ * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @trans: transport private data
+ * @txq: tx queue
+ * @read_ptr: the TXQ read_ptr to free
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+ int read_ptr)
+{
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int idx = iwl_txq_get_cmd_index(txq, read_ptr);
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&txq->reclaim_lock);
+
+ if (!txq->entries)
+ return;
+
+ /* We have only q->n_window txq->entries, but we use
+ * TFD_QUEUE_SIZE_MAX tfds
+ */
+ if (trans->mac_cfg->gen2)
+ iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_txq_get_tfd(trans, txq, read_ptr));
+ else
+ iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
+ txq, read_ptr);
+
+ /* free SKB */
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context.
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+}
+
+/*
+ * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ if (!txq) {
+ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
+ return;
+ }
+
+ spin_lock_bh(&txq->reclaim_lock);
+ spin_lock(&txq->lock);
+ while (txq->write_ptr != txq->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, txq->read_ptr);
+
+ if (txq_id != trans->conf.cmd_queue) {
+ struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
+ struct iwl_cmd_meta *cmd_meta =
+ &txq->entries[txq->read_ptr].meta;
+
+ if (WARN_ON_ONCE(!skb))
+ continue;
+
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
+ }
+ iwl_txq_free_tfd(trans, txq, txq->read_ptr);
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+
+ if (txq->read_ptr == txq->write_ptr &&
+ txq_id == trans->conf.cmd_queue)
+ iwl_pcie_clear_cmd_in_flight(trans);
+ }
+
+ while (!skb_queue_empty(&txq->overflow_q)) {
+ struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ }
+
+ spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->reclaim_lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_trans_pcie_wake_queue(trans, txq);
+}
+
+/*
+ * iwl_pcie_txq_free - Deallocate DMA queue.
+ * @txq_id: index of the transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+ struct device *dev = trans->dev;
+ int i;
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_pcie_txq_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans->conf.cmd_queue)
+ for (i = 0; i < txq->n_window; i++) {
+ kfree_sensitive(txq->entries[i].cmd);
+ kfree_sensitive(txq->entries[i].free_buf);
+ }
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ trans_pcie->txqs.tfd.size *
+ trans->mac_cfg->base->max_tfd_queue_size,
+ txq->tfds, txq->dma_addr);
+ txq->dma_addr = 0;
+ txq->tfds = NULL;
+
+ dma_free_coherent(dev,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
+ }
+
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ timer_delete_sync(&txq->stuck_timer);
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+
+void iwl_pcie_tx_start(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int nq = trans->mac_cfg->base->num_of_queues;
+ int chan;
+ u32 reg_val;
+ int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
+ SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
+
+ /* make sure all queues are not stopped/used */
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
+
+ trans_pcie->scd_base_addr =
+ iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
+
+ /* reset context data, TX status and translation data */
+ iwl_trans_pcie_write_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_MEM_LOWER_BOUND,
+ NULL, clear_dwords);
+
+ iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
+ trans_pcie->txqs.scd_bc_tbls.dma >> 10);
+
+ /* The chain extension of the SCD doesn't work well. This feature is
+ * enabled by default by the HW, so we need to disable it manually.
+ */
+ if (trans->mac_cfg->base->scd_chain_ext_wa)
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
+ iwl_trans_ac_txq_enable(trans, trans->conf.cmd_queue,
+ trans->conf.cmd_fifo,
+ IWL_DEF_WD_TIMEOUT);
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl_scd_activate_fifos(trans);
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
+ iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ /* Enable L1-Active */
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+}
+
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ /*
+ * we should never get here in gen2 trans mode; return early to avoid
+ * invalid accesses
+ */
+ if (WARN_ON_ONCE(trans->mac_cfg->gen2))
+ return;
+
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
+ txq_id++) {
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ if (trans->mac_cfg->gen2)
+ iwl_write_direct64(trans,
+ FH_MEM_CBBC_QUEUE(trans, txq_id),
+ txq->dma_addr);
+ else
+ iwl_write_direct32(trans,
+ FH_MEM_CBBC_QUEUE(trans, txq_id),
+ txq->dma_addr >> 8);
+ iwl_pcie_txq_unmap(trans, txq_id);
+ txq->read_ptr = 0;
+ txq->write_ptr = 0;
+ }
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ /*
+ * Send 0 as the scd_base_addr since the device may have been reset
+ * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
+ * contain garbage.
+ */
+ iwl_pcie_tx_start(trans);
+}
+
+static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ch, ret;
+ u32 mask = 0;
+
+ spin_lock_bh(&trans_pcie->irq_lock);
+
+ if (!iwl_trans_grab_nic_access(trans))
+ goto out;
+
+ /* Stop each Tx DMA channel */
+ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+ iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
+ }
+
+ /* Wait for DMA channels to be idle */
+ ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
+ if (ret < 0)
+ IWL_ERR(trans,
+ "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
+ ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
+
+ iwl_trans_release_nic_access(trans);
+
+out:
+ spin_unlock_bh(&trans_pcie->irq_lock);
+}
+
+/*
+ * iwl_pcie_tx_stop - Stop all Tx DMA channels
+ */
+int iwl_pcie_tx_stop(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ /* Turn off all Tx DMA fifos */
+ iwl_scd_deactivate_fifos(trans);
+
+ /* Turn off all Tx DMA channels */
+ iwl_pcie_tx_stop_fh(trans);
+
+ /*
+ * This function can be called before the op_mode disabled the
+ * queues. This happens when we have an rfkill interrupt.
+ * Since we stop Tx altogether - mark the queues as stopped.
+ */
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
+
+ /* This can happen: start_hw, stop_device */
+ if (!trans_pcie->txq_memory)
+ return 0;
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
+ txq_id++)
+ iwl_pcie_txq_unmap(trans, txq_id);
+
+ return 0;
+}
+
+/*
+ * iwl_pcie_tx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl_pcie_tx_free(struct iwl_trans *trans)
+{
+ int txq_id;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
+
+ /* Tx queues */
+ if (trans_pcie->txq_memory) {
+ for (txq_id = 0;
+ txq_id < trans->mac_cfg->base->num_of_queues;
+ txq_id++) {
+ iwl_pcie_txq_free(trans, txq_id);
+ trans_pcie->txqs.txq[txq_id] = NULL;
+ }
+ }
+
+ kfree(trans_pcie->txq_memory);
+ trans_pcie->txq_memory = NULL;
+
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
+
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls);
+}
+
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ u32 txq_id = txq->id;
+ u32 status;
+ bool active;
+ u8 fifo;
+
+ if (trans->mac_cfg->gen2) {
+ IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
+ txq->read_ptr, txq->write_ptr);
+ /* TODO: access new SCD registers and dump them */
+ return;
+ }
+
+ status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
+ fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+
+ IWL_ERR(trans,
+ "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
+ txq_id, active ? "" : "in", fifo,
+ jiffies_to_msecs(txq->wd_timeout),
+ txq->read_ptr, txq->write_ptr,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
+ (trans->mac_cfg->base->max_tfd_queue_size - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
+ (trans->mac_cfg->base->max_tfd_queue_size - 1),
+ iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
+}
+
+static void iwl_txq_stuck_timer(struct timer_list *t)
+{
+ struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
+ struct iwl_trans *trans = txq->trans;
+
+ spin_lock(&txq->lock);
+ /* check if triggered erroneously */
+ if (txq->read_ptr == txq->write_ptr) {
+ spin_unlock(&txq->lock);
+ return;
+ }
+ spin_unlock(&txq->lock);
+
+ iwl_txq_log_scd_error(trans, txq);
+
+ iwl_force_nmi(trans);
+}
+
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t num_entries = trans->mac_cfg->gen2 ?
+ slots_num : trans->mac_cfg->base->max_tfd_queue_size;
+ size_t tfd_sz;
+ size_t tb0_buf_sz;
+ int i;
+
+ if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
+ return -EINVAL;
+
+ if (WARN_ON(txq->entries || txq->tfds))
+ return -EINVAL;
+
+ tfd_sz = trans_pcie->txqs.tfd.size * num_entries;
+
+ timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
+ txq->trans = trans;
+
+ txq->n_window = slots_num;
+
+ txq->entries = kcalloc(slots_num,
+ sizeof(struct iwl_pcie_txq_entry),
+ GFP_KERNEL);
+
+ if (!txq->entries)
+ goto error;
+
+ if (cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->entries[i].cmd =
+ kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->entries[i].cmd)
+ goto error;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device
+ */
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+ &txq->dma_addr, GFP_KERNEL);
+ if (!txq->tfds)
+ goto error;
+
+ BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
+
+ tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
+
+ txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
+ &txq->first_tb_dma,
+ GFP_KERNEL);
+ if (!txq->first_tb_bufs)
+ goto err_free_tfds;
+
+ for (i = 0; i < num_entries; i++) {
+ void *tfd = iwl_txq_get_tfd(trans, txq, i);
+
+ if (trans->mac_cfg->gen2)
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
+ else
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
+ }
+
+ return 0;
+err_free_tfds:
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
+ txq->tfds = NULL;
+error:
+ if (txq->entries && cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ return -ENOMEM;
+}
+
+#define BC_TABLE_SIZE (sizeof(struct iwl_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
+
+/*
+ * iwl_pcie_tx_alloc - allocate TX context
+ * Allocate all Tx DMA structures and initialize them
+ */
+static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
+{
+ int ret;
+ int txq_id, slots_num;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 bc_tbls_size = trans->mac_cfg->base->num_of_queues;
+
+ if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
+ return -EINVAL;
+
+ bc_tbls_size *= BC_TABLE_SIZE;
+
+ /* It is not allowed to alloc twice, so warn when this happens.
+ * We cannot rely on the previous allocation, so free and fail. */
+ if (WARN_ON(trans_pcie->txq_memory)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls,
+ bc_tbls_size);
+ if (ret) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ goto error;
+ }
+
+ /* Alloc keep-warm buffer */
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(trans, "Keep Warm allocation failed\n");
+ goto error;
+ }
+
+ trans_pcie->txq_memory =
+ kcalloc(trans->mac_cfg->base->num_of_queues,
+ sizeof(struct iwl_txq), GFP_KERNEL);
+ if (!trans_pcie->txq_memory) {
+ IWL_ERR(trans, "Not enough memory for txq\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
+ txq_id++) {
+ bool cmd_queue = (txq_id == trans->conf.cmd_queue);
+
+ if (cmd_queue)
+ slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
+ trans->mac_cfg->base->min_txq_size);
+ else
+ slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
+ trans->mac_cfg->base->min_ba_txq_size);
+ trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+ ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
+ slots_num, cmd_queue);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ trans_pcie->txqs.txq[txq_id]->id = txq_id;
+ }
+
+ return 0;
+
+error:
+ iwl_pcie_tx_free(trans);
+
+ return ret;
+}
+
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_txq *q, int slots_num)
+{
+ q->n_window = slots_num;
+
+ /* slots_num must be power-of-two size, otherwise
+ * iwl_txq_get_cmd_index is broken.
+ */
+ if (WARN_ON(!is_power_of_2(slots_num)))
+ return -EINVAL;
+
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = 0;
+ q->read_ptr = 0;
+
+ return 0;
+}
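+
+/*
+ * Worked example for the marks above: n_window = 32 gives low_mark = 8
+ * and high_mark = 4, while a small n_window = 8 queue is clamped to
+ * low_mark = 4 and high_mark = 2.
+ */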
+
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
+{
+ u32 tfd_queue_max_size =
+ trans->mac_cfg->base->max_tfd_queue_size;
+ int ret;
+
+ txq->need_update = false;
+
+ /* max_tfd_queue_size must be power-of-two size, otherwise
+ * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
+ */
+ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+ "Max tfd queue size must be a power of two, but is %d",
+ tfd_queue_max_size))
+ return -EINVAL;
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(txq, slots_num);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&txq->lock);
+ spin_lock_init(&txq->reclaim_lock);
+
+ if (cmd_queue) {
+ static struct lock_class_key iwl_txq_cmd_queue_lock_class;
+
+ lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
+ }
+
+ __skb_queue_head_init(&txq->overflow_q);
+
+ return 0;
+}
+
+int iwl_pcie_tx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+ int txq_id, slots_num;
+ bool alloc = false;
+
+ if (!trans_pcie->txq_memory) {
+ ret = iwl_pcie_tx_alloc(trans);
+ if (ret)
+ goto error;
+ alloc = true;
+ }
+
+ spin_lock_bh(&trans_pcie->irq_lock);
+
+ /* Turn off all Tx DMA fifos */
+ iwl_scd_deactivate_fifos(trans);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ spin_unlock_bh(&trans_pcie->irq_lock);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
+ txq_id++) {
+ bool cmd_queue = (txq_id == trans->conf.cmd_queue);
+
+ if (cmd_queue)
+ slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
+ trans->mac_cfg->base->min_txq_size);
+ else
+ slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
+ trans->mac_cfg->base->min_ba_txq_size);
+ ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
+ cmd_queue);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+
+ /*
+ * Tell nic where to find circular buffer of TFDs for a
+ * given Tx queue, and enable the DMA channel used for that
+ * queue.
+ * Circular buffer (TFD queue in DRAM) physical base address
+ */
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
+ trans_pcie->txqs.txq[txq_id]->dma_addr >> 8);
+ }
+
+ iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
+ if (trans->mac_cfg->base->num_of_queues > 20)
+ iwl_set_bits_prph(trans, SCD_GP_CTRL,
+ SCD_GP_CTRL_ENABLE_31_QUEUES);
+
+ return 0;
+error:
+ /* Upon error, free only if we allocated something */
+ if (alloc)
+ iwl_pcie_tx_free(trans);
+ return ret;
+}
+
+static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
+ const struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* Make sure the NIC is still alive in the bus */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return -ENODEV;
+
+ if (!trans->mac_cfg->base->apmg_wake_up_wa)
+ return 0;
+
+ /*
+ * wake up the NIC to make sure that the firmware will see the host
+ * command - we will let the NIC sleep once all the host commands have
+ * returned. This needs to be done only on NICs that have
+ * apmg_wake_up_wa set (see above).
+ */
+ if (!_iwl_trans_pcie_grab_nic_access(trans, false))
+ return -EIO;
+
+ /*
+ * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
+ * There, we also returned immediately if cmd_hold_nic_awake is
+ * already true, so it's OK to unconditionally set it to true.
+ */
+ trans_pcie->cmd_hold_nic_awake = true;
+ spin_unlock(&trans_pcie->reg_lock);
+
+ return 0;
+}
+
+static void iwl_txq_progress(struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ if (!txq->wd_timeout)
+ return;
+
+ /*
+ * station is asleep and we send data - that must
+ * be uAPSD or PS-Poll. Don't rearm the timer.
+ */
+ if (txq->frozen)
+ return;
+
+ /*
+ * if empty delete timer, otherwise move timer forward
+ * since we're making progress on this queue
+ */
+ if (txq->read_ptr == txq->write_ptr)
+ timer_delete(&txq->stuck_timer);
+ else
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+}
+
+static inline bool iwl_txq_used(const struct iwl_txq *q, int i,
+ int read_ptr, int write_ptr)
+{
+ int index = iwl_txq_get_cmd_index(q, i);
+ int r = iwl_txq_get_cmd_index(q, read_ptr);
+ int w = iwl_txq_get_cmd_index(q, write_ptr);
+
+ return w >= r ?
+ (index >= r && index < w) :
+ !(index < r && index >= w);
+}
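+
+/*
+ * Example of the wrapped case above: with r = 6 and w = 2 the used range
+ * wraps past the end, so index 7 is used (!(7 < 6 && 7 >= 2) is true)
+ * while index 3 is free (!(3 < 6 && 3 >= 2) is false).
+ */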
+
+/*
+ * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' index, all entries between old and new 'R' index
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+ int nfreed = 0;
+ u16 r;
+
+ lockdep_assert_held(&txq->lock);
+
+ idx = iwl_txq_get_cmd_index(txq, idx);
+ r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+
+ if (idx >= trans->mac_cfg->base->max_tfd_queue_size ||
+ (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
+ WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
+ "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, idx,
+ trans->mac_cfg->base->max_tfd_queue_size,
+ txq->write_ptr, txq->read_ptr);
+ return;
+ }
+
+ for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
+ r = iwl_txq_inc_wrap(trans, r)) {
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+
+ if (nfreed++ > 0) {
+ IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
+ idx, txq->write_ptr, r);
+ iwl_force_nmi(trans);
+ }
+ }
+
+ if (txq->read_ptr == txq->write_ptr)
+ iwl_pcie_clear_cmd_in_flight(trans);
+
+ iwl_txq_progress(txq);
+}
+
+static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
+ u16 txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr = trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
+
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
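+
+/*
+ * Each dword of the translation table thus holds the RA/TID mapping for
+ * two queues: even queue IDs occupy the low halfword and odd queue IDs
+ * the high halfword, which is why the read-modify-write above preserves
+ * the other half.
+ */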
+
+/* Receiver address (actually, Rx station's index into station table),
+ * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
+#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
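+
+/* e.g. BUILD_RAxTID(5, 2) == 0x52: station table index 5, TID 2 */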
+
+bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+ int fifo = -1;
+ bool scd_bug = false;
+
+ if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used))
+ WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
+
+ txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
+
+ if (cfg) {
+ fifo = cfg->fifo;
+
+ /* Disable the scheduler prior configuring the cmd queue */
+ if (txq_id == trans->conf.cmd_queue &&
+ trans->conf.scd_set_active)
+ iwl_scd_enable_set_active(trans, 0);
+
+ /* Stop this Tx queue before configuring it */
+ iwl_scd_txq_set_inactive(trans, txq_id);
+
+ /* Set this queue as a chain-building queue unless it is CMD */
+ if (txq_id != trans->conf.cmd_queue)
+ iwl_scd_txq_set_chain(trans, txq_id);
+
+ if (cfg->aggregate) {
+ u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
+
+ /* enable aggregations for the queue */
+ iwl_scd_txq_enable_agg(trans, txq_id);
+ txq->ampdu = true;
+ } else {
+ /*
+ * disable aggregations for the queue, this will also
+ * make the ra_tid mapping configuration irrelevant
+ * since it is now a non-AGG queue.
+ */
+ iwl_scd_txq_disable_agg(trans, txq_id);
+
+ ssn = txq->read_ptr;
+ }
+ } else {
+ /*
+ * If we need to move the SCD write pointer by steps of
+ * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
+ * the op_mode know by returning true later.
+ * Do this only in case cfg is NULL since this trick can
+ * be done only if we have DQA enabled which is true for mvm
+ * only. And mvm never sets a cfg pointer.
+ * This is really ugly, but this is the easiest way out for
+ * this sad hardware issue.
+ * This bug has been fixed on devices 9000 and up.
+ */
+ scd_bug = !trans->mac_cfg->mq_rx_supported &&
+ !((ssn - txq->write_ptr) & 0x3f) &&
+ (ssn != txq->write_ptr);
+ if (scd_bug)
+ ssn++;
+ }
+
+ /* Place first TFD at index corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ txq->read_ptr = (ssn & 0xff);
+ txq->write_ptr = (ssn & 0xff);
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+ (ssn & 0xff) | (txq_id << 8));
+
+ if (cfg) {
+ u8 frame_limit = cfg->frame_limit;
+
+ iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
+
+ /* Set up Tx window size and frame limit for this queue */
+ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
+ iwl_trans_write_mem32(trans,
+ trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+ SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
+ SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));
+
+ /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
+ iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+ (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+ SCD_QUEUE_STTS_REG_MSK);
+
+ /* enable the scheduler for this queue (only) */
+ if (txq_id == trans->conf.cmd_queue &&
+ trans->conf.scd_set_active)
+ iwl_scd_enable_set_active(trans, BIT(txq_id));
+
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Activate queue %d on FIFO %d WrPtr: %d\n",
+ txq_id, fifo, ssn & 0xff);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Activate queue %d WrPtr: %d\n",
+ txq_id, ssn & 0xff);
+ }
+
+ return scd_bug;
+}
+
+void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
+ bool shared_mode)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ txq->ampdu = !shared_mode;
+}
+
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
+ bool configure_scd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 stts_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq_id);
+ static const u32 zero_val[4] = {};
+
+ trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
+ trans_pcie->txqs.txq[txq_id]->frozen = false;
+
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+ * in the op_mode. Just for the sake of the simplicity of the op_mode,
+ * allow the op_mode to call txq_disable after it already called
+ * stop_device.
+ */
+ if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) {
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", txq_id);
+ return;
+ }
+
+ if (configure_scd) {
+ iwl_scd_txq_set_inactive(trans, txq_id);
+
+ iwl_trans_pcie_write_mem(trans, stts_addr,
+ (const void *)zero_val,
+ ARRAY_SIZE(zero_val));
+ }
+
+ iwl_pcie_txq_unmap(trans, txq_id);
+ trans_pcie->txqs.txq[txq_id]->ampdu = false;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
+ struct iwl_txq *txq = trans_pcie->txqs.txq[i];
+
+ if (i == trans->conf.cmd_queue)
+ continue;
+
+ /* we skip the command queue (obviously) so it's OK to nest */
+ spin_lock_nested(&txq->lock, 1);
+
+ if (!block && !(WARN_ON_ONCE(!txq->block))) {
+ txq->block--;
+ if (!txq->block) {
+ iwl_write32(trans, HBUS_TARG_WRPTR,
+ txq->write_ptr | (i << 8));
+ }
+ } else if (block) {
+ txq->block++;
+ }
+
+ spin_unlock(&txq->lock);
+ }
+}
+
+/*
+ * iwl_pcie_enqueue_hcmd - enqueue a uCode command
+ * @trans: transport private data
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of command in the
+ * command queue.
+ */
+int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ void *dup_buf = NULL;
+ dma_addr_t phys_addr;
+ int idx;
+ u16 copy_size, cmd_size, tb0_size;
+ bool had_nocopy = false;
+ u8 group_id = iwl_cmd_groupid(cmd->id);
+ int i, ret;
+ u32 cmd_pos;
+ const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+ u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+ unsigned long flags;
+
+ if (WARN(!trans->conf.wide_cmd_header &&
+ group_id > IWL_ALWAYS_LONG_GROUP,
+ "unsupported wide command %#x\n", cmd->id))
+ return -EINVAL;
+
+ if (group_id != 0) {
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+ cmd_size = sizeof(struct iwl_cmd_header_wide);
+ } else {
+ copy_size = sizeof(struct iwl_cmd_header);
+ cmd_size = sizeof(struct iwl_cmd_header);
+ }
+
+ /* need one for the header if the first is NOCOPY */
+ BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
+
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ cmddata[i] = cmd->data[i];
+ cmdlen[i] = cmd->len[i];
+
+ if (!cmd->len[i])
+ continue;
+
+ /* need at least IWL_FIRST_TB_SIZE copied */
+ if (copy_size < IWL_FIRST_TB_SIZE) {
+ int copy = IWL_FIRST_TB_SIZE - copy_size;
+
+ if (copy > cmdlen[i])
+ copy = cmdlen[i];
+ cmdlen[i] -= copy;
+ cmddata[i] += copy;
+ copy_size += copy;
+ }
+
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+ had_nocopy = true;
+ if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+ /*
+ * This is also a chunk that isn't copied
+ * to the static buffer so set had_nocopy.
+ */
+ had_nocopy = true;
+
+ /* only allowed once */
+ if (WARN_ON(dup_buf)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ dup_buf = kmemdup(cmddata[i], cmdlen[i],
+ GFP_ATOMIC);
+ if (!dup_buf)
+ return -ENOMEM;
+ } else {
+ /* NOCOPY must not be followed by normal! */
+ if (WARN_ON(had_nocopy)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ copy_size += cmdlen[i];
+ }
+ cmd_size += cmd->len[i];
+ }
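+
+ /*
+ * Worked example (sizes for illustration only): with a wide header and
+ * a single 100-byte NOCOPY chunk, the loop above trims the head of the
+ * chunk into the copy region until copy_size reaches IWL_FIRST_TB_SIZE,
+ * so those first bytes ride in TB0 while only the remainder stays
+ * mapped in place, and cmd_size still accounts for the whole chunk.
+ */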
+
+ /*
+ * If any of the command structures end up being larger than
+ * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
+ * allocated into separate TFDs, then we will need to
+ * increase the size of the buffers.
+ */
+ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+ "Command %s (%#x) is too large (%d bytes)\n",
+ iwl_get_cmd_string(trans, cmd->id),
+ cmd->id, copy_size)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ spin_lock_irqsave(&txq->lock, flags);
+
+ if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ spin_unlock_irqrestore(&txq->lock, flags);
+
+ IWL_ERR(trans, "No space in command queue\n");
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_CMD_QUEUE_FULL);
+ iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);
+ idx = -ENOSPC;
+ goto free_dup_buf;
+ }
+
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ out_cmd = txq->entries[idx].cmd;
+ out_meta = &txq->entries[idx].meta;
+
+ /* re-initialize, this also marks the SG list as unused */
+ memset(out_meta, 0, sizeof(*out_meta));
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+
+ /* set up the header */
+ if (group_id != 0) {
+ out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+ out_cmd->hdr_wide.group_id = group_id;
+ out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+ out_cmd->hdr_wide.length =
+ cpu_to_le16(cmd_size -
+ sizeof(struct iwl_cmd_header_wide));
+ out_cmd->hdr_wide.reserved = 0;
+ out_cmd->hdr_wide.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
+ INDEX_TO_SEQ(txq->write_ptr));
+
+ cmd_pos = sizeof(struct iwl_cmd_header_wide);
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+ } else {
+ out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
+ out_cmd->hdr.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
+ INDEX_TO_SEQ(txq->write_ptr));
+ out_cmd->hdr.group_id = 0;
+
+ cmd_pos = sizeof(struct iwl_cmd_header);
+ copy_size = sizeof(struct iwl_cmd_header);
+ }
+
+ /* and copy the data that needs to be copied */
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ int copy;
+
+ if (!cmd->len[i])
+ continue;
+
+ /* copy everything if not nocopy/dup */
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP))) {
+ copy = cmd->len[i];
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+ copy_size += copy;
+ continue;
+ }
+
+ /*
+ * Otherwise we need at least IWL_FIRST_TB_SIZE copied
+ * in total (for bi-directional DMA), but copy up to what
+ * we can fit into the payload for debug dump purposes.
+ */
+ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+
+ /* However, keep track of copy_size properly, as we need it below */
+ if (copy_size < IWL_FIRST_TB_SIZE) {
+ copy = IWL_FIRST_TB_SIZE - copy_size;
+
+ if (copy > cmd->len[i])
+ copy = cmd->len[i];
+ copy_size += copy;
+ }
+ }
+
+ IWL_DEBUG_HC(trans,
+ "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+ iwl_get_cmd_string(trans, cmd->id),
+ group_id, out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence),
+ cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
+
+ /* start the TFD with the minimum copy bytes */
+ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
+ iwl_pcie_txq_build_tfd(trans, txq,
+ iwl_txq_get_first_tb_dma(txq, idx),
+ tb0_size, true);
+
+ /* map first command fragment, if any remains */
+ if (copy_size > tb0_size) {
+ phys_addr = dma_map_single(trans->dev,
+ ((u8 *)&out_cmd->hdr) + tb0_size,
+ copy_size - tb0_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(trans->dev, phys_addr)) {
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
+ idx = -ENOMEM;
+ goto out;
+ }
+
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
+ copy_size - tb0_size, false);
+ }
+
+ /* map the remaining (adjusted) nocopy/dup fragments */
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ void *data = (void *)(uintptr_t)cmddata[i];
+
+ if (!cmdlen[i])
+ continue;
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP)))
+ continue;
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+ data = dup_buf;
+ phys_addr = dma_map_single(trans->dev, data,
+ cmdlen[i], DMA_TO_DEVICE);
+ if (dma_mapping_error(trans->dev, phys_addr)) {
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
+ idx = -ENOMEM;
+ goto out;
+ }
+
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
+ }
+
+ BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
+ out_meta->flags = cmd->flags;
+ if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+ kfree_sensitive(txq->entries[idx].free_buf);
+ txq->entries[idx].free_buf = dup_buf;
+
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
+ if (ret < 0) {
+ idx = ret;
+ goto out;
+ }
+
+ if (cmd->flags & CMD_BLOCK_TXQS)
+ iwl_trans_pcie_block_txq_ptrs(trans, true);
+
+ /* Increment and update queue's write index */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+
+ out:
+ spin_unlock_irqrestore(&txq->lock, flags);
+ free_dup_buf:
+ if (idx < 0)
+ kfree(dup_buf);
+ return idx;
+}
+
+/*
+ * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ */
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ u8 group_id;
+ u32 cmd_id;
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int index = SEQ_TO_INDEX(sequence);
+ int cmd_index;
+ struct iwl_device_cmd *cmd;
+ struct iwl_cmd_meta *meta;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
+
+ /* If a Tx command is being handled and it isn't in the actual
+ * command queue then a command routing bug has been introduced
+ * in the queue management code. */
+ if (WARN(txq_id != trans->conf.cmd_queue,
+ "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+ txq_id, trans->conf.cmd_queue, sequence, txq->read_ptr,
+ txq->write_ptr)) {
+ iwl_print_hex_error(trans, pkt, 32);
+ return;
+ }
+
+ spin_lock_bh(&txq->lock);
+
+ cmd_index = iwl_txq_get_cmd_index(txq, index);
+ cmd = txq->entries[cmd_index].cmd;
+ meta = &txq->entries[cmd_index].meta;
+ group_id = cmd->hdr.group_id;
+ cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
+
+ if (trans->mac_cfg->gen2)
+ iwl_txq_gen2_tfd_unmap(trans, meta,
+ iwl_txq_get_tfd(trans, txq, index));
+ else
+ iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
+
+ /* Input error checking is done when commands are added to queue. */
+ if (meta->flags & CMD_WANT_SKB) {
+ struct page *p = rxb_steal_page(rxb);
+
+ meta->source->resp_pkt = pkt;
+ meta->source->_rx_page_addr = (unsigned long)page_address(p);
+ meta->source->_rx_page_order = trans_pcie->rx_page_order;
+ }
+
+ if (meta->flags & CMD_BLOCK_TXQS)
+ iwl_trans_pcie_block_txq_ptrs(trans, false);
+
+ iwl_pcie_cmdq_reclaim(trans, txq_id, index);
+
+ if (!(meta->flags & CMD_ASYNC)) {
+ if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
+ IWL_WARN(trans,
+ "HCMD_ACTIVE already clear for command %s\n",
+ iwl_get_cmd_string(trans, cmd_id));
+ }
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ iwl_get_cmd_string(trans, cmd_id));
+ wake_up(&trans_pcie->wait_command_queue);
+ }
+
+ meta->flags = 0;
+
+ spin_unlock_bh(&txq->lock);
+}
+
+static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_txq *txq, u8 hdr_len,
+ struct iwl_cmd_meta *out_meta)
+{
+ u16 head_tb_len;
+ int i;
+
+ /*
+ * Set up TFD's third entry to point directly to remainder
+ * of skb's head, if any
+ */
+ head_tb_len = skb_headlen(skb) - hdr_len;
+
+ if (head_tb_len > 0) {
+ dma_addr_t tb_phys = dma_map_single(trans->dev,
+ skb->data + hdr_len,
+ head_tb_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ return -EINVAL;
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
+ tb_phys, head_tb_len);
+ iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
+ }
+
+ /* set up the remaining entries to point to the data */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t tb_phys;
+ int tb_idx;
+
+ if (!skb_frag_size(frag))
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ return -EINVAL;
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
+ tb_phys, skb_frag_size(frag));
+ tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+ skb_frag_size(frag), false);
+ if (tb_idx < 0)
+ return tb_idx;
+
+ out_meta->tbs |= BIT(tb_idx);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_INET
+static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
+ size_t len, struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);
+ struct iwl_tso_page_info *info;
+ struct page **page_ptr;
+ dma_addr_t phys;
+ void *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
+
+ if (!p->page)
+ goto alloc;
+
+ /*
+ * Check if there's enough room on this page
+ *
+ * Note that we put a page chaining pointer *last* in the
+ * page - we need it somewhere, and if it's there then we
+ * avoid DMA mapping the last bits of the page which may
+ * trigger the 32-bit boundary hardware bug.
+ *
+ * (see also get_workaround_page() in tx-gen2.c)
+ */
+ if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {
+ info = IWL_TSO_PAGE_INFO(page_address(p->page));
+ goto out;
+ }
+
+ /* We don't have enough room on this page, get a new one. */
+ iwl_pcie_free_and_unmap_tso_page(trans, p->page);
+
+alloc:
+ p->page = alloc_page(GFP_ATOMIC);
+ if (!p->page)
+ return NULL;
+ p->pos = page_address(p->page);
+
+ info = IWL_TSO_PAGE_INFO(page_address(p->page));
+
+ /* set the chaining pointer to NULL */
+ info->next = NULL;
+
+ /* Create a DMA mapping for the page */
+ phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(trans->dev, phys))) {
+ __free_page(p->page);
+ p->page = NULL;
+
+ return NULL;
+ }
+
+ /* Store physical address and set use count */
+ info->dma_addr = phys;
+ refcount_set(&info->use_count, 1);
+out:
+ *page_ptr = p->page;
+ /* Return an internal reference for the caller */
+ refcount_inc(&info->use_count);
+ ret = p->pos;
+ p->pos += len;
+
+ return ret;
+}
+
+/**
+ * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
+ * @sgt: scatter gather table
+ * @offset: Offset into the mapped memory (i.e. SKB payload data)
+ * @len: Length of the area
+ *
+ * Find the DMA address that corresponds to the SKB payload data at the
+ * position given by @offset.
+ *
+ * Returns: Address for TB entry
+ */
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len)
+{
+ struct scatterlist *sg;
+ unsigned int sg_offset = 0;
+ int i;
+
+ /*
+ * Search the mapped DMA areas in the SG for the area that contains the
+ * data at offset with the given length.
+ */
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ if (offset >= sg_offset &&
+ offset + len <= sg_offset + sg_dma_len(sg))
+ return sg_dma_address(sg) + offset - sg_offset;
+
+ sg_offset += sg_dma_len(sg);
+ }
+
+ WARN_ON_ONCE(1);
+
+ return DMA_MAPPING_ERROR;
+}
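+
+/*
+ * Example lookup (illustrative numbers): with two mapped segments of
+ * 4096 bytes each, offset = 5000 and len = 1000 fall entirely inside the
+ * second segment, so the function returns its DMA address plus 904.
+ */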
+
+/**
+ * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
+ * @trans: transport private data
+ * @skb: the SKB to map
+ * @cmd_meta: command meta to store the scatter list information for unmapping
+ * @hdr: output argument for TSO headers
+ * @hdr_room: requested length for TSO headers
+ * @offset: offset into the data from which mapping should start
+ *
+ * Allocate space for a scatter gather list and TSO headers and map the SKB
+ * using the scatter gather list. The SKB is unmapped again when the page
+ * is freed at the end of the operation.
+ *
+ * Returns: newly allocated and mapped scatter gather table with list
+ */
+struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta,
+ u8 **hdr, unsigned int hdr_room,
+ unsigned int offset)
+{
+ struct sg_table *sgt;
+ unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1;
+ int orig_nents;
+
+ if (WARN_ON_ONCE(skb_has_frag_list(skb)))
+ return NULL;
+
+ *hdr = iwl_pcie_get_page_hdr(trans,
+ hdr_room + __alignof__(struct sg_table) +
+ sizeof(struct sg_table) +
+ n_segments * sizeof(struct scatterlist),
+ skb);
+ if (!*hdr)
+ return NULL;
+
+ sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
+ sgt->sgl = (void *)(sgt + 1);
+
+ sg_init_table(sgt->sgl, n_segments);
+
+ /* Only map the data, not the header (it is copied to the TSO page) */
+ orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset);
+ if (WARN_ON_ONCE(orig_nents <= 0))
+ return NULL;
+
+ sgt->orig_nents = orig_nents;
+
+ /* And map the entire SKB */
+ if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
+ return NULL;
+
+ /* Store non-zero (i.e. valid) offset for unmapping */
+ cmd_meta->sg_offset = (unsigned long)sgt & ~PAGE_MASK;
+
+ return sgt;
+}
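+
+/*
+ * Resulting page layout (a sketch of what the allocation above sets up):
+ * [TSO headers, hdr_room bytes][padding up to the sg_table alignment]
+ * [struct sg_table][scatterlist array], with the iwl_tso_page_info
+ * bookkeeping kept at the very end of the page by
+ * iwl_pcie_get_page_hdr().
+ */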
+
+static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_txq *txq, u8 hdr_len,
+ struct iwl_cmd_meta *out_meta,
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
+ u16 length, iv_len, amsdu_pad;
+ dma_addr_t start_hdr_phys;
+ u8 *start_hdr, *pos_hdr;
+ struct sg_table *sgt;
+ struct tso_t tso;
+
+ /* if the packet is protected, then it must be CCMP or GCMP */
+ BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
+ iv_len = ieee80211_has_protected(hdr->frame_control) ?
+ IEEE80211_CCMP_HDR_LEN : 0;
+
+ trace_iwlwifi_dev_tx(trans->dev, skb,
+ iwl_txq_get_tfd(trans, txq, txq->write_ptr),
+ trans_pcie->txqs.tfd.size,
+ &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
+
+ ip_hdrlen = skb_network_header_len(skb);
+ snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+ amsdu_pad = 0;
+
+ /* total amount of header we may need for this A-MSDU */
+ hdr_room = DIV_ROUND_UP(total_len, mss) *
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+
+ /* Our device supports 9 segments at most; the headers will fit in one page */
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
+ snap_ip_tcp_hdrlen + hdr_len + iv_len);
+ if (!sgt)
+ return -ENOMEM;
+
+ start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+ pos_hdr = start_hdr;
+ memcpy(pos_hdr, skb->data + hdr_len, iv_len);
+ pos_hdr += iv_len;
+
+ /*
+ * Pull the ieee80211 header + IV to be able to use the TSO core;
+ * we will restore it for the tx_status flow.
+ */
+ skb_pull(skb, hdr_len + iv_len);
+
+ /*
+ * Remove the length of all the headers that we don't actually
+ * have in the MPDU by themselves, but that we duplicate into
+ * all the different MSDUs inside the A-MSDU.
+ */
+ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
+ tso_start(skb, &tso);
+
+ while (total_len) {
+ /* this is the data left for this subframe */
+ unsigned int data_left =
+ min_t(unsigned int, mss, total_len);
+ unsigned int hdr_tb_len;
+ dma_addr_t hdr_tb_phys;
+ u8 *subf_hdrs_start = pos_hdr;
+
+ total_len -= data_left;
+
+ memset(pos_hdr, 0, amsdu_pad);
+ pos_hdr += amsdu_pad;
+ amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+ data_left)) & 0x3;
+ ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
+ pos_hdr += ETH_ALEN;
+ ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
+ pos_hdr += ETH_ALEN;
+
+ length = snap_ip_tcp_hdrlen + data_left;
+ *((__be16 *)pos_hdr) = cpu_to_be16(length);
+ pos_hdr += sizeof(length);
+
+ /*
+ * This will copy the SNAP as well, which will be considered
+ * part of the MAC header.
+ */
+ tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
+
+ pos_hdr += snap_ip_tcp_hdrlen;
+
+ hdr_tb_len = pos_hdr - start_hdr;
+ hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
+ iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
+ hdr_tb_len, false);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ hdr_tb_phys, hdr_tb_len);
+ /* add this subframe's headers' length to the tx_cmd */
+ le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start);
+
+ /* prepare the start_hdr for the next subframe */
+ start_hdr = pos_hdr;
+
+ /* put the payload */
+ while (data_left) {
+ unsigned int size = min_t(unsigned int, tso.size,
+ data_left);
+ dma_addr_t tb_phys;
+
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);
+ /* Not a real mapping error, use direct comparison */
+ if (unlikely(tb_phys == DMA_MAPPING_ERROR))
+ return -EINVAL;
+
+ iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+ size, false);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
+ tb_phys, size);
+
+ data_left -= size;
+ data_offset += size;
+ tso_build_data(skb, &tso, size);
+ }
+ }
+
+ dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
+ DMA_TO_DEVICE);
+
+ /* re-add the WiFi header and IV */
+ skb_push(skb, hdr_len + iv_len);
+
+ return 0;
+}
+#else /* CONFIG_INET */
+static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_txq *txq, u8 hdr_len,
+ struct iwl_cmd_meta *out_meta,
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
+{
+ /* No A-MSDU without CONFIG_INET */
+ WARN_ON(1);
+
+ return -1;
+}
+#endif /* CONFIG_INET */
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+/*
+ * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_bc_tbl_entry *scd_bc_tbl;
+ int write_ptr = txq->write_ptr;
+ int txq_id = txq->id;
+ u8 sec_ctl = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
+ u8 sta_id = tx_cmd->sta_id;
+
+ scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+
+ sec_ctl = tx_cmd->sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += IEEE80211_CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += IEEE80211_TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
+ break;
+ }
+
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ len = DIV_ROUND_UP(len, 4);
+
+ if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+ return;
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
+
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + write_ptr].tfd_offset = bc_ent;
+
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
+ bc_ent;
+}
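+
+/*
+ * Example entry (illustrative values): a 1200-byte MPDU with CCMP gives
+ * len = 1200 + 4 + 4 + 8 = 1216 bytes = 304 dwords, so with sta_id = 3
+ * the entry is 0x3130. The duplicate write above lets the scheduler read
+ * a full window past the wrap point without extra logic.
+ */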
+
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct ieee80211_hdr *hdr;
+ struct iwl_tx_cmd_v6 *tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq;
+ dma_addr_t tb0_phys, tb1_phys, scratch_phys;
+ void *tb1_addr;
+ void *tfd;
+ u16 len, tb1_len;
+ bool wait_write_ptr;
+ __le16 fc;
+ u8 hdr_len;
+ u16 wifi_seq;
+ bool amsdu;
+
+ txq = trans_pcie->txqs.txq[txq_id];
+
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
+ "TX on unused queue %d\n", txq_id))
+ return -EINVAL;
+
+ if (skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
+ __skb_linearize(skb))
+ return -ENOMEM;
+
+ /* mac80211 always puts the full header into the SKB's head,
+ * so there's no need to check if it's readable there
+ */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ hdr_len = ieee80211_hdrlen(fc);
+
+ spin_lock(&txq->lock);
+
+ if (iwl_txq_space(trans, txq) < txq->high_mark) {
+ iwl_txq_stop(trans, txq);
+
+		/* don't put the packet on the ring if there is no room */
+ if (unlikely(iwl_txq_space(trans, txq) < 3)) {
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
+
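+			/*
+			 * Stash the dev_cmd pointer in the skb's control
+			 * buffer; iwl_pcie_reclaim() retrieves it when
+			 * draining the overflow queue.
+			 */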
+ dev_cmd_ptr = (void *)((u8 *)skb->cb +
+ trans->conf.cb_data_offs +
+ sizeof(void *));
+
+ *dev_cmd_ptr = dev_cmd;
+ __skb_queue_tail(&txq->overflow_q, skb);
+
+ spin_unlock(&txq->lock);
+ return 0;
+ }
+ }
+
+	/* In AGG mode, the index in the ring must correspond to the WiFi
+	 * sequence number. This is a HW requirement that helps the SCD
+	 * parse the BA.
+	 * Check here that the packets are in the right place on the ring.
+	 */
+ wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ WARN_ONCE(txq->ampdu &&
+ (wifi_seq & 0xff) != txq->write_ptr,
+ "Q: %d WiFi Seq %d tfdNum %d",
+ txq_id, wifi_seq, txq->write_ptr);
+
+ /* Set up driver data for this TFD */
+ txq->entries[txq->write_ptr].skb = skb;
+ txq->entries[txq->write_ptr].cmd = dev_cmd;
+
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(txq->write_ptr)));
+
+ tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
+ scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd_v6, scratch);
+
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[txq->write_ptr].meta;
+ memset(out_meta, 0, sizeof(*out_meta));
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = sizeof(struct iwl_tx_cmd_v6) + sizeof(struct iwl_cmd_header) +
+ hdr_len - IWL_FIRST_TB_SIZE;
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+ amsdu = ieee80211_is_data_qos(fc) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+ if (!amsdu) {
+ tb1_len = ALIGN(len, 4);
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (tb1_len != len)
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
+ } else {
+ tb1_len = len;
+ }
+
+	/*
+	 * The first TB points to bi-directional DMA data; we'll
+	 * memcpy the data into it later.
+	 */
+ iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
+ IWL_FIRST_TB_SIZE, true);
+
+ /* there must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v6) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
+ offsetofend(struct iwl_tx_cmd_v6, scratch) >
+ IWL_FIRST_TB_SIZE);
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
+ goto out_err;
+ iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
+
+ trace_iwlwifi_dev_tx(trans->dev, skb,
+ iwl_txq_get_tfd(trans, txq, txq->write_ptr),
+ trans_pcie->txqs.tfd.size,
+ &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
+ hdr_len);
+
+ /*
+ * If gso_size wasn't set, don't give the frame "amsdu treatment"
+ * (adding subframes, etc.).
+ * This can happen in some testing flows when the amsdu was already
+ * pre-built, and we just need to send the resulting skb.
+ */
+ if (amsdu && skb_shinfo(skb)->gso_size) {
+ if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
+ out_meta, dev_cmd,
+ tb1_len)))
+ goto out_err;
+ } else {
+ struct sk_buff *frag;
+
+ if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
+ out_meta)))
+ goto out_err;
+
+ skb_walk_frags(skb, frag) {
+ if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
+ out_meta)))
+ goto out_err;
+ }
+ }
+
+ /* building the A-MSDU might have changed this data, so memcpy it now */
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
+
+ tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
+ iwl_txq_gen1_tfd_get_num_tbs(tfd));
+
+ wait_write_ptr = ieee80211_has_morefrags(fc);
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
+		/*
+		 * If the TXQ is active, arm the timer now; if not, store
+		 * the remaining time so that the timer can be armed with
+		 * the right value once the station wakes up.
+		 */
+ if (!txq->frozen)
+ mod_timer(&txq->stuck_timer,
+ jiffies + txq->wd_timeout);
+ else
+ txq->frozen_expiry_remainder = txq->wd_timeout;
+ }
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ if (!wait_write_ptr)
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ spin_unlock(&txq->lock);
+ return 0;
+out_err:
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
+ spin_unlock(&txq->lock);
+ return -1;
+}
+
+static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ int read_ptr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+ int txq_id = txq->id;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != trans->conf.cmd_queue)
+ sta_id = tx_cmd->sta_id;
+
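+	/* reset the entry to the minimal byte count of 1 so that a stale
+	 * length is never used once the TFD has been reclaimed
+	 */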
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + read_ptr].tfd_offset = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
+ bc_ent;
+}
+
+/* Frees buffers until index _not_ inclusive */
+void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs, bool is_flush)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+ int tfd_num, read_ptr, last_to_free;
+ int txq_read_ptr, txq_write_ptr;
+
+	/* This function is not meant to release the cmd queue */
+ if (WARN_ON(txq_id == trans->conf.cmd_queue))
+ return;
+
+ if (WARN_ON(!txq))
+ return;
+
+ tfd_num = iwl_txq_get_cmd_index(txq, ssn);
+
+ spin_lock_bh(&txq->reclaim_lock);
+
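+	/*
+	 * Snapshot the pointers under txq->lock; holding reclaim_lock
+	 * serializes reclaim callers, so the snapshot stays valid while
+	 * we work without txq->lock held.
+	 */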
+ spin_lock(&txq->lock);
+ txq_read_ptr = txq->read_ptr;
+ txq_write_ptr = txq->write_ptr;
+ spin_unlock(&txq->lock);
+
+ /* There is nothing to do if we are flushing an empty queue */
+ if (is_flush && txq_write_ptr == txq_read_ptr)
+ goto out;
+
+ read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr);
+
+ if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
+ txq_id, ssn);
+ goto out;
+ }
+
+ if (read_ptr == tfd_num)
+ goto out;
+
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
+ txq_id, read_ptr, txq_read_ptr, tfd_num, ssn);
+
+	/* Since we free up to, but not including, the index, the entry
+	 * before it is the last one we will free; it must be in use.
+	 */
+ last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
+
+ if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) {
+ IWL_ERR(trans,
+ "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, last_to_free,
+ trans->mac_cfg->base->max_tfd_queue_size,
+ txq_write_ptr, txq_read_ptr);
+
+ iwl_op_mode_time_point(trans->op_mode,
+ IWL_FW_INI_TIME_POINT_FAKE_TX,
+ NULL);
+ goto out;
+ }
+
+ if (WARN_ON(!skb_queue_empty(skbs)))
+ goto out;
+
+ for (;
+ read_ptr != tfd_num;
+ txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
+ read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) {
+ struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
+ struct sk_buff *skb = txq->entries[read_ptr].skb;
+
+ if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
+ read_ptr, txq_read_ptr, txq_id))
+ continue;
+
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
+
+ __skb_queue_tail(skbs, skb);
+
+ txq->entries[read_ptr].skb = NULL;
+
+ if (!trans->mac_cfg->gen2)
+ iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
+ txq_read_ptr);
+
+ iwl_txq_free_tfd(trans, txq, txq_read_ptr);
+ }
+
+ spin_lock(&txq->lock);
+ txq->read_ptr = txq_read_ptr;
+
+ iwl_txq_progress(txq);
+
+ if (iwl_txq_space(trans, txq) > txq->low_mark &&
+ test_bit(txq_id, trans_pcie->txqs.queue_stopped)) {
+ struct sk_buff_head overflow_skbs;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&overflow_skbs);
+ skb_queue_splice_init(&txq->overflow_q,
+ is_flush ? skbs : &overflow_skbs);
+
+ /*
+ * We are going to transmit from the overflow queue.
+ * Remember this state so that wait_for_txq_empty will know we
+ * are adding more packets to the TFD queue. It cannot rely on
+ * the state of &txq->overflow_q, as we just emptied it, but
+ * haven't TXed the content yet.
+ */
+ txq->overflow_tx = true;
+
+		/*
+		 * This is tricky: we are in the reclaim path and hold
+		 * reclaim_lock, so no one else will access the txq data
+		 * from that path. TX is stopped as well, so nothing is
+		 * being transmitted concurrently.
+		 * Bottom line: we can safely unlock and re-lock later.
+		 */
+ spin_unlock(&txq->lock);
+
+ while ((skb = __skb_dequeue(&overflow_skbs))) {
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
+
+ dev_cmd_ptr = *(void **)((u8 *)skb->cb +
+ trans->conf.cb_data_offs +
+ sizeof(void *));
+
+ /*
+ * Note that we can very well be overflowing again.
+ * In that case, iwl_txq_space will be small again
+ * and we won't wake mac80211's queue.
+ */
+ iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
+ }
+
+ if (iwl_txq_space(trans, txq) > txq->low_mark)
+ iwl_trans_pcie_wake_queue(trans, txq);
+
+ spin_lock(&txq->lock);
+ txq->overflow_tx = false;
+ }
+
+ spin_unlock(&txq->lock);
+out:
+ spin_unlock_bh(&txq->reclaim_lock);
+}
+
+/* Set wr_ptr of specific device and txq */
+void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->lock);
+
+ txq->write_ptr = ptr;
+ txq->read_ptr = txq->write_ptr;
+
+ spin_unlock_bh(&txq->lock);
+}
+
+void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int queue;
+
+ for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
+ struct iwl_txq *txq = trans_pcie->txqs.txq[queue];
+ unsigned long now;
+
+ spin_lock_bh(&txq->lock);
+
+ now = jiffies;
+
+ if (txq->frozen == freeze)
+ goto next_queue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
+ freeze ? "Freezing" : "Waking", queue);
+
+ txq->frozen = freeze;
+
+ if (txq->read_ptr == txq->write_ptr)
+ goto next_queue;
+
+ if (freeze) {
+ if (unlikely(time_after(now,
+ txq->stuck_timer.expires))) {
+ /*
+ * The timer should have fired, maybe it is
+ * spinning right now on the lock.
+ */
+ goto next_queue;
+ }
+ /* remember how long until the timer fires */
+ txq->frozen_expiry_remainder =
+ txq->stuck_timer.expires - now;
+ timer_delete(&txq->stuck_timer);
+ goto next_queue;
+ }
+
+ /*
+ * Wake a non-empty queue -> arm timer with the
+ * remainder before it froze
+ */
+ mod_timer(&txq->stuck_timer,
+ now + txq->frozen_expiry_remainder);
+
+next_queue:
+ spin_unlock_bh(&txq->lock);
+ }
+}
+
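+/* how long to wait for a synchronous host command to complete */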
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+
+static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd,
+ const char *cmd_str)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
+ int cmd_idx;
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
+
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ "Command %s: a command is already active!\n", cmd_str))
+ return -EIO;
+
+ IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
+
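+	/* gen2 devices use a different queue/TFD layout and have their
+	 * own enqueue path
+	 */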
+ if (trans->mac_cfg->gen2)
+ cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ else
+ cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
+
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ cmd_str, ret);
+ return ret;
+ }
+
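+	/*
+	 * STATUS_SYNC_HCMD_ACTIVE is cleared by the response handler
+	 * (iwl_pcie_hcmd_complete) when the answer arrives, waking
+	 * this wait queue.
+	 */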
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ txq->read_ptr, txq->write_ptr);
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ cmd_str);
+ ret = -ETIMEDOUT;
+
+ iwl_trans_pcie_sync_nmi(trans);
+ goto cancel;
+ }
+
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+ if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
+ &trans->status)) {
+ IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+ dump_stack();
+ }
+ ret = -EIO;
+ goto cancel;
+ }
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+ ret = -ERFKILL;
+ goto cancel;
+ }
+
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+ IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
+ ret = -EIO;
+ goto cancel;
+ }
+
+ return 0;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+		/*
+		 * Cancel the CMD_WANT_SKB flag for the cmd in the
+		 * TX cmd queue. Otherwise, if the response comes
+		 * in later, it could write through an invalid
+		 * address (cmd->meta.source).
+		 */
+ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ }
+
+ if (cmd->resp_pkt) {
+ iwl_free_resp(cmd);
+ cmd->resp_pkt = NULL;
+ }
+
+ return ret;
+}
+
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+
+	/* Make sure the NIC is still alive on the bus */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return -ENODEV;
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+ cmd->id);
+ return -ERFKILL;
+ }
+
+ if (cmd->flags & CMD_ASYNC) {
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "Sending async command %s\n", cmd_str);
+
+		/* An asynchronous command cannot expect an SKB to be set. */
+ if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+ return -EINVAL;
+
+ if (trans->mac_cfg->gen2)
+ ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ else
+ ret = iwl_pcie_enqueue_hcmd(trans, cmd);
+
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
+ iwl_get_cmd_string(trans, cmd->id), ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ return iwl_trans_pcie_send_hcmd_sync(trans, cmd, cmd_str);
+}
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/*
- * Copyright (C) 2003-2015, 2018-2025 Intel Corporation
- * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2016-2017 Intel Deutschland GmbH
- */
-#ifndef __iwl_trans_int_pcie_h__
-#define __iwl_trans_int_pcie_h__
-
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/skbuff.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/timer.h>
-#include <linux/cpu.h>
-
-#include "iwl-fh.h"
-#include "iwl-csr.h"
-#include "iwl-trans.h"
-#include "iwl-debug.h"
-#include "iwl-io.h"
-#include "iwl-op-mode.h"
-#include "iwl-drv.h"
-#include "iwl-context-info.h"
-
-/*
- * RX related structures and functions
- */
-#define RX_NUM_QUEUES 1
-#define RX_POST_REQ_ALLOC 2
-#define RX_CLAIM_REQ_ALLOC 8
-#define RX_PENDING_WATERMARK 16
-#define FIRST_RX_QUEUE 512
-
-struct iwl_host_cmd;
-
-/*This file includes the declaration that are internal to the
- * trans_pcie layer */
-
-/**
- * struct iwl_rx_mem_buffer
- * @page_dma: bus address of rxb page
- * @page: driver's pointer to the rxb page
- * @list: list entry for the membuffer
- * @invalid: rxb is in driver ownership - not owned by HW
- * @vid: index of this rxb in the global table
- * @offset: indicates which offset of the page (in bytes)
- * this buffer uses (if multiple RBs fit into one page)
- */
-struct iwl_rx_mem_buffer {
- dma_addr_t page_dma;
- struct page *page;
- struct list_head list;
- u32 offset;
- u16 vid;
- bool invalid;
-};
-
-/* interrupt statistics */
-struct isr_statistics {
- u32 hw;
- u32 sw;
- u32 err_code;
- u32 sch;
- u32 alive;
- u32 rfkill;
- u32 ctkill;
- u32 wakeup;
- u32 rx;
- u32 tx;
- u32 unhandled;
-};
-
-/**
- * struct iwl_rx_transfer_desc - transfer descriptor
- * @addr: ptr to free buffer start address
- * @rbid: unique tag of the buffer
- * @reserved: reserved
- */
-struct iwl_rx_transfer_desc {
- __le16 rbid;
- __le16 reserved[3];
- __le64 addr;
-} __packed;
-
-#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0)
-
-/**
- * struct iwl_rx_completion_desc - completion descriptor
- * @reserved1: reserved
- * @rbid: unique tag of the received buffer
- * @flags: flags (0: fragmented, all others: reserved)
- * @reserved2: reserved
- */
-struct iwl_rx_completion_desc {
- __le32 reserved1;
- __le16 rbid;
- u8 flags;
- u8 reserved2[25];
-} __packed;
-
-/**
- * struct iwl_rx_completion_desc_bz - Bz completion descriptor
- * @rbid: unique tag of the received buffer
- * @flags: flags (0: fragmented, all others: reserved)
- * @reserved: reserved
- */
-struct iwl_rx_completion_desc_bz {
- __le16 rbid;
- u8 flags;
- u8 reserved[1];
-} __packed;
-
-/**
- * struct iwl_rxq - Rx queue
- * @id: queue index
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
- * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
- * In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
- * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
- * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
- * @read: Shared index to newest available Rx buffer
- * @write: Shared index to oldest written Rx packet
- * @write_actual: actual write pointer written to device, since we update in
- * blocks of 8 only
- * @free_count: Number of pre-allocated buffers in rx_free
- * @used_count: Number of RBDs handled to allocator to use for allocation
- * @write_actual:
- * @rx_free: list of RBDs with allocated RB ready for use
- * @rx_used: list of RBDs with no RB attached
- * @need_update: flag to indicate we need to update read/write index
- * @rb_stts: driver's pointer to receive buffer status
- * @rb_stts_dma: bus address of receive buffer status
- * @lock: per-queue lock
- * @queue: actual rx queue. Not used for multi-rx queue.
- * @next_rb_is_fragment: indicates that the previous RB that we handled set
- * the fragmented flag, so the next one is still another fragment
- * @napi: NAPI struct for this queue
- * @queue_size: size of this queue
- *
- * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
- */
-struct iwl_rxq {
- int id;
- void *bd;
- dma_addr_t bd_dma;
- void *used_bd;
- dma_addr_t used_bd_dma;
- u32 read;
- u32 write;
- u32 free_count;
- u32 used_count;
- u32 write_actual;
- u32 queue_size;
- struct list_head rx_free;
- struct list_head rx_used;
- bool need_update, next_rb_is_fragment;
- void *rb_stts;
- dma_addr_t rb_stts_dma;
- spinlock_t lock;
- struct napi_struct napi;
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-};
-
-/**
- * struct iwl_rb_allocator - Rx allocator
- * @req_pending: number of requests the allcator had not processed yet
- * @req_ready: number of requests honored and ready for claiming
- * @rbd_allocated: RBDs with pages allocated and ready to be handled to
- * the queue. This is a list of &struct iwl_rx_mem_buffer
- * @rbd_empty: RBDs with no page attached for allocator use. This is a list
- * of &struct iwl_rx_mem_buffer
- * @lock: protects the rbd_allocated and rbd_empty lists
- * @alloc_wq: work queue for background calls
- * @rx_alloc: work struct for background calls
- */
-struct iwl_rb_allocator {
- atomic_t req_pending;
- atomic_t req_ready;
- struct list_head rbd_allocated;
- struct list_head rbd_empty;
- spinlock_t lock;
- struct workqueue_struct *alloc_wq;
- struct work_struct rx_alloc;
-};
-
-/**
- * iwl_get_closed_rb_stts - get closed rb stts from different structs
- * @trans: transport pointer (for configuration)
- * @rxq: the rxq to get the rb stts from
- */
-static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
-{
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- __le16 *rb_stts = rxq->rb_stts;
-
- return le16_to_cpu(READ_ONCE(*rb_stts));
- } else {
- struct iwl_rb_status *rb_stts = rxq->rb_stts;
-
- return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF;
- }
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-/**
- * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
- * debugfs file
- *
- * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
- * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
- * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
- * set the file can no longer be used.
- */
-enum iwl_fw_mon_dbgfs_state {
- IWL_FW_MON_DBGFS_STATE_CLOSED,
- IWL_FW_MON_DBGFS_STATE_OPEN,
- IWL_FW_MON_DBGFS_STATE_DISABLED,
-};
-#endif
-
-/**
- * enum iwl_shared_irq_flags - level of sharing for irq
- * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
- * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
- */
-enum iwl_shared_irq_flags {
- IWL_SHARED_IRQ_NON_RX = BIT(0),
- IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
-};
-
-/**
- * enum iwl_image_response_code - image response values
- * @IWL_IMAGE_RESP_DEF: the default value of the register
- * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
- * @IWL_IMAGE_RESP_FAIL: iml reading failed
- */
-enum iwl_image_response_code {
- IWL_IMAGE_RESP_DEF = 0,
- IWL_IMAGE_RESP_SUCCESS = 1,
- IWL_IMAGE_RESP_FAIL = 2,
-};
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-/**
- * struct cont_rec: continuous recording data structure
- * @prev_wr_ptr: the last address that was read in monitor_data
- * debugfs file
- * @prev_wrap_cnt: the wrap count that was used during the last read in
- * monitor_data debugfs file
- * @state: the state of monitor_data debugfs file as described
- * in &iwl_fw_mon_dbgfs_state enum
- * @mutex: locked while reading from monitor_data debugfs file
- */
-struct cont_rec {
- u32 prev_wr_ptr;
- u32 prev_wrap_cnt;
- u8 state;
- /* Used to sync monitor_data debugfs file with driver unload flow */
- struct mutex mutex;
-};
-#endif
-
-enum iwl_pcie_fw_reset_state {
- FW_RESET_IDLE,
- FW_RESET_REQUESTED,
- FW_RESET_OK,
- FW_RESET_ERROR,
- FW_RESET_TOP_REQUESTED,
-};
-
-/**
- * enum iwl_pcie_imr_status - imr dma transfer state
- * @IMR_D2S_IDLE: default value of the dma transfer
- * @IMR_D2S_REQUESTED: dma transfer requested
- * @IMR_D2S_COMPLETED: dma transfer completed
- * @IMR_D2S_ERROR: dma transfer error
- */
-enum iwl_pcie_imr_status {
- IMR_D2S_IDLE,
- IMR_D2S_REQUESTED,
- IMR_D2S_COMPLETED,
- IMR_D2S_ERROR,
-};
-
-/**
- * struct iwl_pcie_txqs - TX queues data
- *
- * @queue_used: bit mask of used queues
- * @queue_stopped: bit mask of stopped queues
- * @txq: array of TXQ data structures representing the TXQs
- * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
- * @bc_pool: bytecount DMA allocations pool
- * @bc_tbl_size: bytecount table size
- * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
- * (and similar usage)
- * @tfd: TFD data
- * @tfd.max_tbs: max number of buffers per TFD
- * @tfd.size: TFD size
- * @tfd.addr_size: TFD/TB address size
- */
-struct iwl_pcie_txqs {
- unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
- struct dma_pool *bc_pool;
- size_t bc_tbl_size;
- struct iwl_tso_hdr_page __percpu *tso_hdr_page;
-
- struct {
- u8 max_tbs;
- u16 size;
- u8 addr_size;
- } tfd;
-
- struct iwl_dma_ptr scd_bc_tbls;
-};
-
-/**
- * struct iwl_trans_pcie - PCIe transport specific data
- * @rxq: all the RX queue data
- * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
- * @global_table: table mapping received VID from hw to rxb
- * @rba: allocator for RX replenishing
- * @ctxt_info: context information for FW self init
- * @ctxt_info_v2: context information for v1 devices
- * @prph_info: prph info for self init
- * @prph_scratch: prph scratch for self init
- * @ctxt_info_dma_addr: dma addr of context information
- * @prph_info_dma_addr: dma addr of prph info
- * @prph_scratch_dma_addr: dma addr of prph scratch
- * @ctxt_info_dma_addr: dma addr of context information
- * @iml: image loader image virtual address
- * @iml_len: image loader image size
- * @iml_dma_addr: image loader image DMA address
- * @trans: pointer to the generic transport area
- * @scd_base_addr: scheduler sram base address in SRAM
- * @kw: keep warm address
- * @pnvm_data: holds info about pnvm payloads allocated in DRAM
- * @reduced_tables_data: holds info about power reduced tablse
- * payloads allocated in DRAM
- * @pci_dev: basic pci-network driver stuff
- * @hw_base: pci hardware address support
- * @ucode_write_complete: indicates that the ucode has been copied.
- * @ucode_write_waitq: wait queue for uCode load
- * @rx_page_order: page order for receive buffer size
- * @rx_buf_bytes: RX buffer (RB) size in bytes
- * @reg_lock: protect hw register access
- * @mutex: to protect stop_device / start_fw / start_hw
- * @fw_mon_data: fw continuous recording data
- * @cmd_hold_nic_awake: indicates NIC is held awake for APMG workaround
- * during commands in flight
- * @msix_entries: array of MSI-X entries
- * @msix_enabled: true if managed to enable MSI-X
- * @shared_vec_mask: the type of causes the shared vector handles
- * (see iwl_shared_irq_flags).
- * @alloc_vecs: the number of interrupt vectors allocated by the OS
- * @def_irq: default irq for non rx causes
- * @fh_init_mask: initial unmasked fh causes
- * @hw_init_mask: initial unmasked hw causes
- * @fh_mask: current unmasked fh causes
- * @hw_mask: current unmasked hw causes
- * @in_rescan: true if we have triggered a device rescan
- * @base_rb_stts: base virtual address of receive buffer status for all queues
- * @base_rb_stts_dma: base physical address of receive buffer status
- * @supported_dma_mask: DMA mask to validate the actual address against,
- * will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
- * @alloc_page_lock: spinlock for the page allocator
- * @alloc_page: allocated page to still use parts of
- * @alloc_page_used: how much of the allocated page was already used (bytes)
- * @imr_status: imr dma state machine
- * @imr_waitq: imr wait queue for dma completion
- * @rf_name: name/version of the CRF, if any
- * @use_ict: whether or not ICT (interrupt table) is used
- * @ict_index: current ICT read index
- * @ict_tbl: ICT table pointer
- * @ict_tbl_dma: ICT table DMA address
- * @inta_mask: interrupt (INT-A) mask
- * @irq_lock: lock to synchronize IRQ handling
- * @txq_memory: TXQ allocation array
- * @sx_waitq: waitqueue for Sx transitions
- * @sx_complete: completion for Sx transitions
- * @pcie_dbg_dumped_once: indicates PCIe regs were dumped already
- * @opmode_down: indicates opmode went away
- * @num_rx_bufs: number of RX buffers to allocate/use
- * @affinity_mask: IRQ affinity mask for each RX queue
- * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
- * enable/disable
- * @fw_reset_state: state of FW reset handshake
- * @fw_reset_waitq: waitqueue for FW reset handshake
- * @is_down: indicates the NIC is down
- * @isr_stats: interrupt statistics
- * @napi_dev: (fake) netdev for NAPI registration
- * @txqs: transport tx queues data.
- * @me_present: WiAMT/CSME is detected as present (1), not present (0)
- * or unknown (-1, so can still use it as a boolean safely)
- * @me_recheck_wk: worker to recheck WiAMT/CSME presence
- * @invalid_tx_cmd: invalid TX command buffer
- * @wait_command_queue: wait queue for sync commands
- */
-struct iwl_trans_pcie {
- struct iwl_rxq *rxq;
- struct iwl_rx_mem_buffer *rx_pool;
- struct iwl_rx_mem_buffer **global_table;
- struct iwl_rb_allocator rba;
- union {
- struct iwl_context_info *ctxt_info;
- struct iwl_context_info_v2 *ctxt_info_v2;
- };
- struct iwl_prph_info *prph_info;
- struct iwl_prph_scratch *prph_scratch;
- void *iml;
- size_t iml_len;
- dma_addr_t ctxt_info_dma_addr;
- dma_addr_t prph_info_dma_addr;
- dma_addr_t prph_scratch_dma_addr;
- dma_addr_t iml_dma_addr;
- struct iwl_trans *trans;
-
- struct net_device *napi_dev;
-
- /* INT ICT Table */
- __le32 *ict_tbl;
- dma_addr_t ict_tbl_dma;
- int ict_index;
- bool use_ict;
- bool is_down, opmode_down;
- s8 debug_rfkill;
- struct isr_statistics isr_stats;
-
- spinlock_t irq_lock;
- struct mutex mutex;
- u32 inta_mask;
- u32 scd_base_addr;
- struct iwl_dma_ptr kw;
-
- /* pnvm data */
- struct iwl_dram_regions pnvm_data;
- struct iwl_dram_regions reduced_tables_data;
-
- struct iwl_txq *txq_memory;
-
- /* PCI bus related data */
- struct pci_dev *pci_dev;
- u8 __iomem *hw_base;
-
- bool ucode_write_complete;
- bool sx_complete;
- wait_queue_head_t ucode_write_waitq;
- wait_queue_head_t sx_waitq;
-
- u16 num_rx_bufs;
-
- bool pcie_dbg_dumped_once;
- u32 rx_page_order;
- u32 rx_buf_bytes;
- u32 supported_dma_mask;
-
- /* allocator lock for the two values below */
- spinlock_t alloc_page_lock;
- struct page *alloc_page;
- u32 alloc_page_used;
-
- /*protect hw register */
- spinlock_t reg_lock;
- bool cmd_hold_nic_awake;
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- struct cont_rec fw_mon_data;
-#endif
-
- struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
- bool msix_enabled;
- u8 shared_vec_mask;
- u32 alloc_vecs;
- u32 def_irq;
- u32 fh_init_mask;
- u32 hw_init_mask;
- u32 fh_mask;
- u32 hw_mask;
- cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
- u16 tx_cmd_queue_size;
- bool in_rescan;
-
- void *base_rb_stts;
- dma_addr_t base_rb_stts_dma;
-
- enum iwl_pcie_fw_reset_state fw_reset_state;
- wait_queue_head_t fw_reset_waitq;
- enum iwl_pcie_imr_status imr_status;
- wait_queue_head_t imr_waitq;
- char rf_name[32];
-
- struct iwl_pcie_txqs txqs;
-
- s8 me_present;
- struct delayed_work me_recheck_wk;
-
- struct iwl_dma_ptr invalid_tx_cmd;
-
- wait_queue_head_t wait_command_queue;
-};
-
-static inline struct iwl_trans_pcie *
-IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
-{
- return (void *)trans->trans_specific;
-}
-
-static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
-{
- /*
- * Before sending the interrupt the HW disables it to prevent
- * a nested interrupt. This is done by writing 1 to the corresponding
- * bit in the mask register. After handling the interrupt, it should be
- * re-enabled by clearing this bit. This register is defined as
- * write 1 clear (W1C) register, meaning that it's being clear
- * by writing 1 to the bit.
- */
- iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
-}
-
-static inline struct iwl_trans *
-iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
-{
- return container_of((void *)trans_pcie, struct iwl_trans,
- trans_specific);
-}
-
-/*
- * Convention: trans API functions: iwl_trans_pcie_XXX
- * Other functions: iwl_pcie_XXX
- */
-struct iwl_trans
-*iwl_trans_pcie_alloc(struct pci_dev *pdev,
- const struct iwl_mac_cfg *mac_cfg,
- struct iwl_trans_info *info);
-void iwl_trans_pcie_free(struct iwl_trans *trans);
-void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
- struct device *dev);
-
-bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent);
-#define _iwl_trans_pcie_grab_nic_access(trans, silent) \
- __cond_lock(nic_access_nobh, \
- likely(__iwl_trans_pcie_grab_nic_access(trans, silent)))
-
-void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev);
-void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev);
-
-/*****************************************************
-* RX
-******************************************************/
-int iwl_pcie_rx_init(struct iwl_trans *trans);
-int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
-irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
-irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
-irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
-irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
-int iwl_pcie_rx_stop(struct iwl_trans *trans);
-void iwl_pcie_rx_free(struct iwl_trans *trans);
-void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
-void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
-void iwl_pcie_rx_napi_sync(struct iwl_trans *trans);
-void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
- struct iwl_rxq *rxq);
-
-/*****************************************************
-* ICT - interrupt handling
-******************************************************/
-irqreturn_t iwl_pcie_isr(int irq, void *data);
-int iwl_pcie_alloc_ict(struct iwl_trans *trans);
-void iwl_pcie_free_ict(struct iwl_trans *trans);
-void iwl_pcie_reset_ict(struct iwl_trans *trans);
-void iwl_pcie_disable_ict(struct iwl_trans *trans);
-
-/*****************************************************
-* TX / HCMD
-******************************************************/
-/* We need 2 entries for the TX command and header, and another one might
- * be needed for potential data in the SKB's head. The remaining ones can
- * be used for frags.
- */
-#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)
-
-struct iwl_tso_hdr_page {
- struct page *page;
- u8 *pos;
-};
-
-/*
- * Note that we put this struct *last* in the page. By doing that, we ensure
- * that no TB referencing this page can trigger the 32-bit boundary hardware
- * bug.
- */
-struct iwl_tso_page_info {
- dma_addr_t dma_addr;
- struct page *next;
- refcount_t use_count;
-};
-
-#define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info))
-#define IWL_TSO_PAGE_INFO(addr) \
- ((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) + \
- IWL_TSO_PAGE_DATA_SIZE))
-
-int iwl_pcie_tx_init(struct iwl_trans *trans);
-void iwl_pcie_tx_start(struct iwl_trans *trans);
-int iwl_pcie_tx_stop(struct iwl_trans *trans);
-void iwl_pcie_tx_free(struct iwl_trans *trans);
-bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int wdg_timeout);
-void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
- bool configure_scd);
-void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
- bool shared_mode);
-int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id);
-void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
-void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
- struct iwl_rx_cmd_buffer *rxb);
-void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, bool cmd_queue);
-
-dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
- unsigned int len);
-struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_cmd_meta *cmd_meta,
- u8 **hdr, unsigned int hdr_room,
- unsigned int offset);
-
-void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_cmd_meta *cmd_meta);
-
-static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr)
-{
- dma_addr_t res;
-
- res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
- res += (unsigned long)addr & ~PAGE_MASK;
-
- return res;
-}
-
-static inline dma_addr_t
-iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
-{
- return txq->first_tb_dma +
- sizeof(struct iwl_pcie_first_tb_buf) * idx;
-}
-
-static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
-{
- return index & (q->n_window - 1);
-}
-
-static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq, int idx)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (trans->mac_cfg->gen2)
- idx = iwl_txq_get_cmd_index(txq, idx);
-
- return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
-}
-
-/*
- * We need this inline in case dma_addr_t is only 32-bits - since the
- * hardware is always 64-bit, the issue can still occur in that case,
- * so use u64 for 'phys' here to force the addition in 64-bit.
- */
-static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
-{
- return upper_32_bits(phys) != upper_32_bits(phys + len);
-}
-
-int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
-
-static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
- } else {
- IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->id);
- }
-}
-
-/**
- * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
- * @trans: the transport (for configuration data)
- * @index: current index
- */
-static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
-{
- return ++index &
- (trans->mac_cfg->base->max_tfd_queue_size - 1);
-}
-
-/**
- * iwl_txq_dec_wrap - decrement queue index, wrap back to end
- * @trans: the transport (for configuration data)
- * @index: current index
- */
-static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
-{
- return --index &
- (trans->mac_cfg->base->max_tfd_queue_size - 1);
-}
-
-void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
-
-static inline void
-iwl_trans_pcie_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
- }
-}
-
-int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd, dma_addr_t addr,
- u16 len);
-
-static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- tfd->num_tbs = 0;
-
- iwl_txq_gen2_set_tb(trans, tfd, trans_pcie->invalid_tx_cmd.dma,
- trans_pcie->invalid_tx_cmd.size);
-}
-
-void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_tfh_tfd *tfd);
-
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
- u32 sta_mask, u8 tid,
- int size, unsigned int timeout);
-
-int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id);
-
-void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
-void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
-int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, bool cmd_queue);
-int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id,
- int queue_size);
-
-static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
- void *_tfd, u8 idx)
-{
- struct iwl_tfd *tfd;
- struct iwl_tfd_tb *tb;
-
- if (trans->mac_cfg->gen2) {
- struct iwl_tfh_tfd *tfh_tfd = _tfd;
- struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
-
- return le16_to_cpu(tfh_tb->tb_len);
- }
-
- tfd = (struct iwl_tfd *)_tfd;
- tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- struct sk_buff_head *skbs, bool is_flush);
-void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
-void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
- unsigned long txqs, bool freeze);
-int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx);
-int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm);
-
-/*****************************************************
-* Error handling
-******************************************************/
-void iwl_pcie_dump_csr(struct iwl_trans *trans);
-
-/*****************************************************
-* Helpers
-******************************************************/
-static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- clear_bit(STATUS_INT_ENABLED, &trans->status);
- if (!trans_pcie->msix_enabled) {
- /* disable interrupts from uCode/NIC to host */
- iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
- /* acknowledge/clear/reset any interrupts still pending
- * from uCode or flow handler (Rx/Tx DMA) */
- iwl_write32(trans, CSR_INT, 0xffffffff);
- iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
- } else {
- /* disable all the interrupt we might use */
- iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
- trans_pcie->fh_init_mask);
- iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
- trans_pcie->hw_init_mask);
- }
- IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
-}
-
-static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
- int start)
-{
- int i = 0;
-
- while (start < fw->num_sec &&
- fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
- fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
- start++;
- i++;
- }
-
- return i;
-}
-
-static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
-{
- struct iwl_self_init_dram *dram = &trans->init_dram;
- int i;
-
- if (!dram->fw) {
- WARN_ON(dram->fw_cnt);
- return;
- }
-
- for (i = 0; i < dram->fw_cnt; i++)
- dma_free_coherent(trans->dev, dram->fw[i].size,
- dram->fw[i].block, dram->fw[i].physical);
-
- kfree(dram->fw);
- dram->fw_cnt = 0;
- dram->fw = NULL;
-}
-
-static inline void iwl_disable_interrupts(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_bh(&trans_pcie->irq_lock);
- _iwl_disable_interrupts(trans);
- spin_unlock_bh(&trans_pcie->irq_lock);
-}
-
-static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &trans->status);
- if (!trans_pcie->msix_enabled) {
- trans_pcie->inta_mask = CSR_INI_SET_MASK;
- iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
- } else {
- /*
- * fh/hw_mask keeps all the unmasked causes.
- * Unlike msi, in msix cause is enabled when it is unset.
- */
- trans_pcie->hw_mask = trans_pcie->hw_init_mask;
- trans_pcie->fh_mask = trans_pcie->fh_init_mask;
- iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
- ~trans_pcie->fh_mask);
- iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
- ~trans_pcie->hw_mask);
- }
-}
-
-static inline void iwl_enable_interrupts(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_bh(&trans_pcie->irq_lock);
- _iwl_enable_interrupts(trans);
- spin_unlock_bh(&trans_pcie->irq_lock);
-}
-static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
- trans_pcie->hw_mask = msk;
-}
-
-static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
- trans_pcie->fh_mask = msk;
-}
-
-static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
- if (!trans_pcie->msix_enabled) {
- trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
- iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
- } else {
- iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
- trans_pcie->hw_init_mask);
- iwl_enable_fh_int_msk_msix(trans,
- MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
- }
-}
-
-static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans,
- bool top_reset)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- IWL_DEBUG_ISR(trans, "Enabling %s interrupt only\n",
- top_reset ? "RESET" : "ALIVE");
-
- if (!trans_pcie->msix_enabled) {
- /*
- * When we'll receive the ALIVE interrupt, the ISR will call
- * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
- * interrupt (which is not really needed anymore) but also the
- * RX interrupt which will allow us to receive the ALIVE
- * notification (which is Rx) and continue the flow.
- */
- if (top_reset)
- trans_pcie->inta_mask = CSR_INT_BIT_RESET_DONE;
- else
- trans_pcie->inta_mask = CSR_INT_BIT_ALIVE |
- CSR_INT_BIT_FH_RX;
- iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
- } else {
- u32 val = top_reset ? MSIX_HW_INT_CAUSES_REG_RESET_DONE
- : MSIX_HW_INT_CAUSES_REG_ALIVE;
-
- iwl_enable_hw_int_msk_msix(trans, val);
-
- if (top_reset)
- return;
- /*
- * Leave all the FH causes enabled to get the ALIVE
- * notification.
- */
- iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
- }
-}
-
-static inline const char *queue_name(struct device *dev,
- struct iwl_trans_pcie *trans_p, int i)
-{
- if (trans_p->shared_vec_mask) {
- int vec = trans_p->shared_vec_mask &
- IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
-
- if (i == 0)
- return DRV_NAME ":shared_IRQ";
-
- return devm_kasprintf(dev, GFP_KERNEL,
- DRV_NAME ":queue_%d", i + vec);
- }
- if (i == 0)
- return DRV_NAME ":default_queue";
-
- if (i == trans_p->alloc_vecs - 1)
- return DRV_NAME ":exception";
-
- return devm_kasprintf(dev, GFP_KERNEL,
- DRV_NAME ":queue_%d", i);
-}
-
-static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
- if (!trans_pcie->msix_enabled) {
- trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
- iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
- } else {
- iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
- trans_pcie->fh_init_mask);
- iwl_enable_hw_int_msk_msix(trans,
- MSIX_HW_INT_CAUSES_REG_RF_KILL);
- }
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
- /*
- * On 9000-series devices this bit isn't enabled by default, so
- * when we power down the device we need set the bit to allow it
- * to wake up the PCI-E bus for RF-kill interrupts.
- */
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
- }
-}
-
-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq);
-
-static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- lockdep_assert_held(&trans_pcie->mutex);
-
- if (trans_pcie->debug_rfkill == 1)
- return true;
-
- return !(iwl_read32(trans, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
-}
-
-static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
- u32 reg, u32 mask, u32 value)
-{
- u32 v;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- WARN_ON_ONCE(value & ~mask);
-#endif
-
- v = iwl_read32(trans, reg);
- v &= ~mask;
- v |= value;
- iwl_write32(trans, reg, v);
-}
-
-static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
-{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
-}
-
-static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
-{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
-}
-
-static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
-{
- return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
-}
-
-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);
-void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
-void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans);
-#else
-static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
-#endif
-
-void iwl_pcie_rx_allocator_work(struct work_struct *data);
-
-/* common trans ops for all generations transports */
-void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans);
-int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
-void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
-void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
-void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val);
-u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs);
-u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg);
-void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
-int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords);
-int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords);
-int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership);
-struct iwl_trans_dump_data *
-iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
- const struct iwl_dump_sanitize_ops *sanitize_ops,
- void *sanitize_ctx);
-int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset);
-int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
-void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable);
-void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
-void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
- u32 mask, u32 value);
-int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
- u32 *val);
-bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
-void __releases(nic_access_nobh)
-iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
-void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
-
-/* transport gen 1 exported functions */
-void iwl_trans_pcie_fw_alive(struct iwl_trans *trans);
-int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct iwl_fw *fw,
- const struct fw_img *img,
- bool run_in_rfkill);
-void iwl_trans_pcie_stop_device(struct iwl_trans *trans);
-
-/* common functions that are used by gen2 transport */
-int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
-void iwl_pcie_apm_config(struct iwl_trans *trans);
-int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
-void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
-bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
-void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
- bool was_in_rfkill);
-void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
-void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
-int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr, size_t size);
-void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
-void iwl_pcie_apply_destination(struct iwl_trans *trans);
-
-/* transport gen 2 exported functions */
-int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
- const struct iwl_fw *fw,
- const struct fw_img *img,
- bool run_in_rfkill);
-void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
-void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
-int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd);
-int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd);
-void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
- u32 dst_addr, u64 src_addr, u32 byte_cnt);
-int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
- u32 dst_addr, u64 src_addr, u32 byte_cnt);
-int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
- struct iwl_trans_rxq_dma_data *data);
-
-#endif /* __iwl_trans_int_pcie_h__ */
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2003-2014, 2018-2024 Intel Corporation
- * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2016-2017 Intel Deutschland GmbH
- */
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/gfp.h>
-
-#include "iwl-prph.h"
-#include "iwl-io.h"
-#include "internal.h"
-#include "iwl-op-mode.h"
-#include "iwl-context-info-v2.h"
-#include "fw/dbg.h"
-
-/******************************************************************************
- *
- * RX path functions
- *
- ******************************************************************************/
-
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by the NIC. These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC. The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
- * When the interrupt handler is called, the request is processed.
- * The page is either stolen - transferred to the upper layer
- * or reused - added immediately to the iwl->rxq->rx_free list.
- * + When the page is stolen - the driver updates the matching queue's used
- * count, detaches the RBD and transfers it to the queue used list.
- * When there are two used RBDs - they are transferred to the allocator empty
- * list. Work is then scheduled for the allocator to start allocating
- * eight buffers.
- * When there are another 6 used RBDs - they are transferred to the allocator
- * empty list and the driver tries to claim the pre-allocated buffers and
- * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
- * until ready.
- * When there are 8+ buffers in the free list - either from allocation or from
- * 8 reused unstolen pages - restock is called to update the FW and indexes.
- * + In order to make sure the allocator always has RBDs to use for allocation
- * the allocator has initial pool in the size of num_queues*(8-2) - the
- * maximum missing RBDs per allocation request (request posted with 2
- * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
- * The queues supplies the recycle of the rest of the RBDs.
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + If there are no allocated buffers in iwl->rxq->rx_free,
- * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
- * If there were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_rxq_alloc() Allocates rx_free
- * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_pcie_rxq_restock.
- * Used only during initialization.
- * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index.
- * iwl_pcie_rx_allocator() Background work for allocating pages.
- *
- * -- enable interrupts --
- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Posts and claims requests to the allocator.
- * Calls iwl_pcie_rxq_restock to refill any empty
- * slots.
- *
- * RBD life-cycle:
- *
- * Init:
- * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
- *
- * Regular Receive interrupt:
- * Page Stolen:
- * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
- * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
- * Page not Stolen:
- * rxq.queue -> rxq.rx_free -> rxq.queue
- * ...
- *
- */
-
-/*
- * iwl_rxq_space - Return number of free slots available in queue.
- */
-static int iwl_rxq_space(const struct iwl_rxq *rxq)
-{
- /* Make sure rx queue size is a power of 2 */
- WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
-
- /*
- * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
- * between empty and completely full queues.
- * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
- * defined for negative dividends.
- */
- return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
-}
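-
-/*
- * A minimal illustrative sketch of the index arithmetic above;
- * demo_rxq_space() is a hypothetical helper, not part of the driver.
- * With queue_size = 256, read = 0 and write = 255, the expression
- * (0 - 255 - 1) & 255 evaluates to 0 free slots even though the
- * subtraction is negative; with read = 12 and write = 4 it yields
- * (12 - 4 - 1) & 255 = 7 slots available for restocking.
- */
-static inline u32 demo_rxq_space(u32 read, u32 write, u32 queue_size)
-{
- /* queue_size must be a power of 2 for the mask to act as a modulo */
- return (read - write - 1) & (queue_size - 1);
-}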
-
-/*
- * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
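-
-/*
- * Illustrative example (hypothetical values): a DMA address of
- * 0x12345600 becomes the RBD pointer 0x00123456; the device shifts it
- * back left by 8, so the encoding is lossless only for buffer
- * addresses that are 256-byte aligned.
- */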
-
-/*
- * iwl_pcie_rx_stop - stops the Rx DMA
- */
-int iwl_pcie_rx_stop(struct iwl_trans *trans)
-{
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- /* TODO: remove this once fw does it */
- iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_AX210, 0);
- return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_AX210,
- RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
- } else if (trans->mac_cfg->mq_rx_supported) {
- iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
- return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
- RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
- } else {
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
- 1000);
- }
-}
-
-/*
- * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
- */
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
-{
- u32 reg;
-
- lockdep_assert_held(&rxq->lock);
-
- /*
- * explicitly wake up the NIC if:
- * 1. shadow registers aren't enabled
- * 2. there is a chance that the NIC is asleep
- */
- if (!trans->mac_cfg->base->shadow_reg_enable &&
- test_bit(STATUS_TPOWER_PMI, &trans->status)) {
- reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
- reg);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- rxq->need_update = true;
- return;
- }
- }
-
- rxq->write_actual = round_down(rxq->write, 8);
- if (!trans->mac_cfg->mq_rx_supported)
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
- else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
- HBUS_TARG_WRPTR_RX_Q(rxq->id));
- else
- iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
- rxq->write_actual);
-}
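-
-/*
- * Illustrative example (hypothetical values): round_down(write, 8)
- * means the device is only told about complete groups of 8 RBDs. With
- * write = 13, write_actual = 8; the remaining 5 freshly stocked RBDs
- * are advertised to the device only once write reaches 16.
- */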
-
-static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i;
-
- for (i = 0; i < trans->info.num_rxqs; i++) {
- struct iwl_rxq *rxq = &trans_pcie->rxq[i];
-
- if (!rxq->need_update)
- continue;
- spin_lock_bh(&rxq->lock);
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- rxq->need_update = false;
- spin_unlock_bh(&rxq->lock);
- }
-}
-
-static void iwl_pcie_restock_bd(struct iwl_trans *trans,
- struct iwl_rxq *rxq,
- struct iwl_rx_mem_buffer *rxb)
-{
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_rx_transfer_desc *bd = rxq->bd;
-
- BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
-
- bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
- bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
- } else {
- __le64 *bd = rxq->bd;
-
- bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
- }
-
- IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
- (u32)rxb->vid, rxq->id, rxq->write);
-}
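-
-/*
- * Illustrative example (hypothetical values) for the pre-AX210 branch
- * above: the 12-bit vid is packed into the low bits of the DMA address,
- * e.g. page_dma = 0x7f321000 (page-aligned, low bits clear) and
- * vid = 0x2a3 yield bd = 0x7f3212a3. The WARN_ON against
- * supported_dma_mask in the caller checks that those low bits are
- * indeed unused by the address.
- */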
-
-/*
- * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
- */
-static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_mem_buffer *rxb;
-
- /*
- * If the device isn't enabled - no need to try to add buffers...
- * This can happen when we stop the device and still have an interrupt
- * pending. We stop the APM before we sync the interrupts because we
- * have to (see comment there). On the other hand, since the APM is
- * stopped, we cannot access the HW (in particular not prph).
- * So don't try to restock if the APM has been already stopped.
- */
- if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
- return;
-
- spin_lock_bh(&rxq->lock);
- while (rxq->free_count) {
- /* Get next free Rx buffer, remove from free list */
- rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
- list);
- list_del(&rxb->list);
- rxb->invalid = false;
- /* some low bits are expected to be unset (depending on hw) */
- WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
- /* Point to Rx buffer via next RBD in circular buffer */
- iwl_pcie_restock_bd(trans, rxq, rxb);
- rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
- rxq->free_count--;
- }
- spin_unlock_bh(&rxq->lock);
-
- /*
- * If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8.
- */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_bh(&rxq->lock);
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock_bh(&rxq->lock);
- }
-}
-
-/*
- * iwl_pcie_rxsq_restock - restock implementation for single queue rx
- */
-static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
-{
- struct iwl_rx_mem_buffer *rxb;
-
- /*
- * If the device isn't enabled - no need to try to add buffers...
- * This can happen when we stop the device and still have an interrupt
- * pending. We stop the APM before we sync the interrupts because we
- * have to (see comment there). On the other hand, since the APM is
- * stopped, we cannot access the HW (in particular not prph).
- * So don't try to restock if the APM has been already stopped.
- */
- if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
- return;
-
- spin_lock_bh(&rxq->lock);
- while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
- __le32 *bd = (__le32 *)rxq->bd;
- /* The overwritten rxb must be a used one */
- rxb = rxq->queue[rxq->write];
- BUG_ON(rxb && rxb->page);
-
- /* Get next free Rx buffer, remove from free list */
- rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
- list);
- list_del(&rxb->list);
- rxb->invalid = false;
-
- /* Point to Rx buffer via next RBD in circular buffer */
- bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_bh(&rxq->lock);
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_bh(&rxq->lock);
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock_bh(&rxq->lock);
- }
-}
-
-/*
- * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-static
-void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
-{
- if (trans->mac_cfg->mq_rx_supported)
- iwl_pcie_rxmq_restock(trans, rxq);
- else
- iwl_pcie_rxsq_restock(trans, rxq);
-}
-
-/*
- * iwl_pcie_rx_alloc_page - allocates and returns a page.
- */
-static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
- u32 *offset, gfp_t priority)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
- unsigned int rbsize = trans_pcie->rx_buf_bytes;
- struct page *page;
- gfp_t gfp_mask = priority;
-
- if (trans_pcie->rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- if (trans_pcie->alloc_page) {
- spin_lock_bh(&trans_pcie->alloc_page_lock);
- /* recheck */
- if (trans_pcie->alloc_page) {
- *offset = trans_pcie->alloc_page_used;
- page = trans_pcie->alloc_page;
- trans_pcie->alloc_page_used += rbsize;
- if (trans_pcie->alloc_page_used >= allocsize)
- trans_pcie->alloc_page = NULL;
- else
- get_page(page);
- spin_unlock_bh(&trans_pcie->alloc_page_lock);
- return page;
- }
- spin_unlock_bh(&trans_pcie->alloc_page_lock);
- }
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
- trans_pcie->rx_page_order);
- /*
- * Issue an error if we don't have enough pre-allocated
- * buffers.
- */
- if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
- IWL_CRIT(trans,
- "Failed to alloc_pages\n");
- return NULL;
- }
-
- if (2 * rbsize <= allocsize) {
- spin_lock_bh(&trans_pcie->alloc_page_lock);
- if (!trans_pcie->alloc_page) {
- get_page(page);
- trans_pcie->alloc_page = page;
- trans_pcie->alloc_page_used = rbsize;
- }
- spin_unlock_bh(&trans_pcie->alloc_page_lock);
- }
-
- *offset = 0;
- return page;
-}
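-
-/*
- * Illustrative example, assuming PAGE_SIZE = 4096, rx_page_order = 0
- * and rx_buf_bytes = 2048 (hypothetical values): the check
- * 2 * rbsize <= allocsize passes, so one page backs two RBs. The first
- * caller returns offset 0 and caches the page in alloc_page, the second
- * caller is handed offset 2048, and get_page() keeps one page reference
- * per user so the page is only freed after both RBs are released.
- */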
-
-/*
- * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
- *
- * A used RBD is an Rx buffer that has been given to the stack. To use it again
- * a page must be allocated and the RBD must point to the page. This function
- * doesn't change the HW pointer but handles the list of pages that is used by
- * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
- * allocated buffers.
- */
-void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
- struct iwl_rxq *rxq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
-
- while (1) {
- unsigned int offset;
-
- spin_lock_bh(&rxq->lock);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_bh(&rxq->lock);
- return;
- }
- spin_unlock_bh(&rxq->lock);
-
- page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
- if (!page)
- return;
-
- spin_lock_bh(&rxq->lock);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_bh(&rxq->lock);
- __free_pages(page, trans_pcie->rx_page_order);
- return;
- }
- rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
- list);
- list_del(&rxb->list);
- spin_unlock_bh(&rxq->lock);
-
- BUG_ON(rxb->page);
- rxb->page = page;
- rxb->offset = offset;
- /* Get physical address of the RB */
- rxb->page_dma =
- dma_map_page(trans->dev, page, rxb->offset,
- trans_pcie->rx_buf_bytes,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(trans->dev, rxb->page_dma)) {
- rxb->page = NULL;
- spin_lock_bh(&rxq->lock);
- list_add(&rxb->list, &rxq->rx_used);
- spin_unlock_bh(&rxq->lock);
- __free_pages(page, trans_pcie->rx_page_order);
- return;
- }
-
- spin_lock_bh(&rxq->lock);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
-
- spin_unlock_bh(&rxq->lock);
- }
-}
-
-void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i;
-
- if (!trans_pcie->rx_pool)
- return;
-
- for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
- if (!trans_pcie->rx_pool[i].page)
- continue;
- dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
- trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
- __free_pages(trans_pcie->rx_pool[i].page,
- trans_pcie->rx_page_order);
- trans_pcie->rx_pool[i].page = NULL;
- }
-}
-
-/*
- * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
- *
- * Allocates 8 pages for each received request.
- * Called as a scheduled work item.
- */
-static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- struct list_head local_empty;
- int pending = atomic_read(&rba->req_pending);
-
- IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
-
- /* If we were scheduled - there is at least one request */
- spin_lock_bh(&rba->lock);
- /* swap out the rba->rbd_empty to a local list */
- list_replace_init(&rba->rbd_empty, &local_empty);
- spin_unlock_bh(&rba->lock);
-
- while (pending) {
- int i;
- LIST_HEAD(local_allocated);
- gfp_t gfp_mask = GFP_KERNEL;
-
- /* Do not post a warning if there are only a few requests */
- if (pending < RX_PENDING_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
-
- /* List should never be empty - each reused RBD is
- * returned to the list, and initial pool covers any
- * possible gap between the time the page is allocated
- * and the time the RBD is added.
- */
- BUG_ON(list_empty(&local_empty));
- /* Get the first rxb from the rbd list */
- rxb = list_first_entry(&local_empty,
- struct iwl_rx_mem_buffer, list);
- BUG_ON(rxb->page);
-
- /* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
- gfp_mask);
- if (!page)
- continue;
- rxb->page = page;
-
- /* Get physical address of the RB */
- rxb->page_dma = dma_map_page(trans->dev, page,
- rxb->offset,
- trans_pcie->rx_buf_bytes,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(trans->dev, rxb->page_dma)) {
- rxb->page = NULL;
- __free_pages(page, trans_pcie->rx_page_order);
- continue;
- }
-
- /* move the allocated entry to the out list */
- list_move(&rxb->list, &local_allocated);
- i++;
- }
-
- atomic_dec(&rba->req_pending);
- pending--;
-
- if (!pending) {
- pending = atomic_read(&rba->req_pending);
- if (pending)
- IWL_DEBUG_TPT(trans,
- "Got more pending allocation requests = %d\n",
- pending);
- }
-
- spin_lock_bh(&rba->lock);
- /* add the allocated rbds to the allocator allocated list */
- list_splice_tail(&local_allocated, &rba->rbd_allocated);
- /* get more empty RBDs for current pending requests */
- list_splice_tail_init(&rba->rbd_empty, &local_empty);
- spin_unlock_bh(&rba->lock);
-
- atomic_inc(&rba->req_ready);
-
- }
-
- spin_lock_bh(&rba->lock);
- /* return unused rbds to the allocator empty list */
- list_splice_tail(&local_empty, &rba->rbd_empty);
- spin_unlock_bh(&rba->lock);
-
- IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
-}
-
-/*
- * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
- *
- * Called by a queue when it has posted an allocation request and
- * has freed 8 RBDs in order to restock itself.
- * This function directly moves the allocated RBs to the queue's ownership
- * and updates the relevant counters.
- */
-static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int i;
-
- lockdep_assert_held(&rxq->lock);
-
- /*
- * atomic_dec_if_positive returns req_ready - 1 for any scenario.
- * If req_ready is 0 atomic_dec_if_positive will return -1 and this
- * function will return early, as there are no ready requests.
- * atomic_dec_if_positive will perform the *actual* decrement only if
- * req_ready > 0, i.e. - there are ready requests and the function
- * hands one request to the caller.
- */
- if (atomic_dec_if_positive(&rba->req_ready) < 0)
- return;
-
- spin_lock(&rba->lock);
- for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
- /* Get next free Rx buffer, remove it from free list */
- struct iwl_rx_mem_buffer *rxb =
- list_first_entry(&rba->rbd_allocated,
- struct iwl_rx_mem_buffer, list);
-
- list_move(&rxb->list, &rxq->rx_free);
- }
- spin_unlock(&rba->lock);
-
- rxq->used_count -= RX_CLAIM_REQ_ALLOC;
- rxq->free_count += RX_CLAIM_REQ_ALLOC;
-}
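-
-/*
- * A sketch of the request/ready counter flow, as reconstructed from the
- * code above (not an authoritative description): a queue posts a request
- * by incrementing rba->req_pending (iwl_pcie_rx_reuse_rbd); the allocator
- * work decrements req_pending, allocates RX_CLAIM_REQ_ALLOC (8) pages and
- * increments rba->req_ready; the queue then claims a completed request
- * here via atomic_dec_if_positive(&rba->req_ready), which decrements only
- * when req_ready > 0 and otherwise returns -1 so the claim is retried on
- * a later call.
- */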
-
-void iwl_pcie_rx_allocator_work(struct work_struct *data)
-{
- struct iwl_rb_allocator *rba_p =
- container_of(data, struct iwl_rb_allocator, rx_alloc);
- struct iwl_trans_pcie *trans_pcie =
- container_of(rba_p, struct iwl_trans_pcie, rba);
-
- iwl_pcie_rx_allocator(trans_pcie->trans);
-}
-
-static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
-{
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- return sizeof(struct iwl_rx_transfer_desc);
-
- return trans->mac_cfg->mq_rx_supported ?
- sizeof(__le64) : sizeof(__le32);
-}
-
-static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
-{
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- return sizeof(struct iwl_rx_completion_desc_bz);
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- return sizeof(struct iwl_rx_completion_desc);
-
- return sizeof(__le32);
-}
-
-static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
-{
- int free_size = iwl_pcie_free_bd_size(trans);
-
- if (rxq->bd)
- dma_free_coherent(trans->dev,
- free_size * rxq->queue_size,
- rxq->bd, rxq->bd_dma);
- rxq->bd_dma = 0;
- rxq->bd = NULL;
-
- rxq->rb_stts_dma = 0;
- rxq->rb_stts = NULL;
-
- if (rxq->used_bd)
- dma_free_coherent(trans->dev,
- iwl_pcie_used_bd_size(trans) *
- rxq->queue_size,
- rxq->used_bd, rxq->used_bd_dma);
- rxq->used_bd_dma = 0;
- rxq->used_bd = NULL;
-}
-
-static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)
-{
- bool use_rx_td = (trans->mac_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210);
-
- if (use_rx_td)
- return sizeof(__le16);
-
- return sizeof(struct iwl_rb_status);
-}
-
-static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
- struct device *dev = trans->dev;
- int i;
- int free_size;
-
- spin_lock_init(&rxq->lock);
- if (trans->mac_cfg->mq_rx_supported)
- rxq->queue_size = iwl_trans_get_num_rbds(trans);
- else
- rxq->queue_size = RX_QUEUE_SIZE;
-
- free_size = iwl_pcie_free_bd_size(trans);
-
- /*
- * Allocate the circular buffer of Read Buffer Descriptors
- * (RBDs)
- */
- rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
- &rxq->bd_dma, GFP_KERNEL);
- if (!rxq->bd)
- goto err;
-
- if (trans->mac_cfg->mq_rx_supported) {
- rxq->used_bd = dma_alloc_coherent(dev,
- iwl_pcie_used_bd_size(trans) *
- rxq->queue_size,
- &rxq->used_bd_dma,
- GFP_KERNEL);
- if (!rxq->used_bd)
- goto err;
- }
-
- rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
- rxq->rb_stts_dma =
- trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
-
- return 0;
-
-err:
- for (i = 0; i < trans->info.num_rxqs; i++) {
- struct iwl_rxq *rxq = &trans_pcie->rxq[i];
-
- iwl_pcie_free_rxq_dma(trans, rxq);
- }
-
- return -ENOMEM;
-}
-
-static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int i, ret;
-
- if (WARN_ON(trans_pcie->rxq))
- return -EINVAL;
-
- trans_pcie->rxq = kcalloc(trans->info.num_rxqs, sizeof(struct iwl_rxq),
- GFP_KERNEL);
- trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
- sizeof(trans_pcie->rx_pool[0]),
- GFP_KERNEL);
- trans_pcie->global_table =
- kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
- sizeof(trans_pcie->global_table[0]),
- GFP_KERNEL);
- if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
- !trans_pcie->global_table) {
- ret = -ENOMEM;
- goto err;
- }
-
- spin_lock_init(&rba->lock);
-
- /*
- * Allocate the driver's pointer to receive buffer status.
- * Allocate for all queues contiguously (HW requirement).
- */
- trans_pcie->base_rb_stts =
- dma_alloc_coherent(trans->dev,
- rb_stts_size * trans->info.num_rxqs,
- &trans_pcie->base_rb_stts_dma,
- GFP_KERNEL);
- if (!trans_pcie->base_rb_stts) {
- ret = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < trans->info.num_rxqs; i++) {
- struct iwl_rxq *rxq = &trans_pcie->rxq[i];
-
- rxq->id = i;
- ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
- if (ret)
- goto err;
- }
- return 0;
-
-err:
- if (trans_pcie->base_rb_stts) {
- dma_free_coherent(trans->dev,
- rb_stts_size * trans->info.num_rxqs,
- trans_pcie->base_rb_stts,
- trans_pcie->base_rb_stts_dma);
- trans_pcie->base_rb_stts = NULL;
- trans_pcie->base_rb_stts_dma = 0;
- }
- kfree(trans_pcie->rx_pool);
- trans_pcie->rx_pool = NULL;
- kfree(trans_pcie->global_table);
- trans_pcie->global_table = NULL;
- kfree(trans_pcie->rxq);
- trans_pcie->rxq = NULL;
-
- return ret;
-}
-
-static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
-
- switch (trans->conf.rx_buf_size) {
- case IWL_AMSDU_4K:
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
- break;
- case IWL_AMSDU_8K:
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- break;
- case IWL_AMSDU_12K:
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
- break;
- default:
- WARN_ON(1);
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
- }
-
- if (!iwl_trans_grab_nic_access(trans))
- return;
-
- /* Stop Rx DMA */
- iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- /* reset and flush pointers */
- iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
- iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
- iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
- * the credit mechanism in 5000 HW RX FIFO
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k or 12k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- rb_size |
- (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- iwl_trans_release_nic_access(trans);
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- /* W/A for interrupt coalescing bug in 7260 and 3160 */
- if (trans->cfg->host_interrupt_operation_mode)
- iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
-}
-
-static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 rb_size, enabled = 0;
- int i;
-
- switch (trans->conf.rx_buf_size) {
- case IWL_AMSDU_2K:
- rb_size = RFH_RXF_DMA_RB_SIZE_2K;
- break;
- case IWL_AMSDU_4K:
- rb_size = RFH_RXF_DMA_RB_SIZE_4K;
- break;
- case IWL_AMSDU_8K:
- rb_size = RFH_RXF_DMA_RB_SIZE_8K;
- break;
- case IWL_AMSDU_12K:
- rb_size = RFH_RXF_DMA_RB_SIZE_12K;
- break;
- default:
- WARN_ON(1);
- rb_size = RFH_RXF_DMA_RB_SIZE_4K;
- }
-
- if (!iwl_trans_grab_nic_access(trans))
- return;
-
- /* Stop Rx DMA */
- iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
- /* disable free and used rx queue operation */
- iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
-
- for (i = 0; i < trans->info.num_rxqs; i++) {
- /* Tell device where to find RBD free table in DRAM */
- iwl_write_prph64_no_grab(trans,
- RFH_Q_FRBDCB_BA_LSB(i),
- trans_pcie->rxq[i].bd_dma);
- /* Tell device where to find RBD used table in DRAM */
- iwl_write_prph64_no_grab(trans,
- RFH_Q_URBDCB_BA_LSB(i),
- trans_pcie->rxq[i].used_bd_dma);
- /* Tell device where in DRAM to update its Rx status */
- iwl_write_prph64_no_grab(trans,
- RFH_Q_URBD_STTS_WPTR_LSB(i),
- trans_pcie->rxq[i].rb_stts_dma);
- /* Reset the device's index tables */
- iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
- iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
- iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
-
- enabled |= BIT(i) | BIT(i + 16);
- }
-
- /*
- * Enable Rx DMA
- * Rx buffer size 4 or 8k or 12k
- * Min RB size 4 or 8
- * Drop frames that exceed RB size
- * 512 RBDs
- */
- iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
- RFH_DMA_EN_ENABLE_VAL | rb_size |
- RFH_RXF_DMA_MIN_RB_4_8 |
- RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
- RFH_RXF_DMA_RBDCB_SIZE_512);
-
- /*
- * Activate DMA snooping.
- * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
- * Default queue is 0
- */
- iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
- RFH_GEN_CFG_RFH_DMA_SNOOP |
- RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
- RFH_GEN_CFG_SERVICE_DMA_SNOOP |
- RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
- trans->mac_cfg->integrated ?
- RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
- RFH_GEN_CFG_RB_CHUNK_SIZE_128));
- /* Enable the relevant rx queues */
- iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
-
- iwl_trans_release_nic_access(trans);
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-}
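-
-/*
- * Illustrative example (hypothetical values): with num_rxqs = 2, the
- * loop above computes enabled = BIT(0) | BIT(16) | BIT(1) | BIT(17) =
- * 0x00030003 before writing it to RFH_RXF_RXQ_ACTIVE; presumably one
- * half of the register activates the free-RBD side and the other the
- * used-RBD side of each queue.
- */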
-
-void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
-{
- lockdep_assert_held(&rxq->lock);
-
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- rxq->free_count = 0;
- rxq->used_count = 0;
-}
-
-static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
-
-static inline struct iwl_trans_pcie *iwl_netdev_to_trans_pcie(struct net_device *dev)
-{
- return *(struct iwl_trans_pcie **)netdev_priv(dev);
-}
-
-static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
-{
- struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
- struct iwl_trans_pcie *trans_pcie;
- struct iwl_trans *trans;
- int ret;
-
- trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
- trans = trans_pcie->trans;
-
- ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
-
- IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
- rxq->id, ret, budget);
-
- if (ret < budget) {
- spin_lock(&trans_pcie->irq_lock);
- if (test_bit(STATUS_INT_ENABLED, &trans->status))
- _iwl_enable_interrupts(trans);
- spin_unlock(&trans_pcie->irq_lock);
-
- napi_complete_done(&rxq->napi, ret);
- }
-
- return ret;
-}
-
-static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
-{
- struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
- struct iwl_trans_pcie *trans_pcie;
- struct iwl_trans *trans;
- int ret;
-
- trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
- trans = trans_pcie->trans;
-
- ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
- IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
- budget);
-
- if (ret < budget) {
- int irq_line = rxq->id;
-
- /* FIRST_RSS is shared with line 0 */
- if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
- rxq->id == 1)
- irq_line = 0;
-
- spin_lock(&trans_pcie->irq_lock);
- iwl_pcie_clear_irq(trans, irq_line);
- spin_unlock(&trans_pcie->irq_lock);
-
- napi_complete_done(&rxq->napi, ret);
- }
-
- return ret;
-}
-
-void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i;
-
- if (unlikely(!trans_pcie->rxq))
- return;
-
- for (i = 0; i < trans->info.num_rxqs; i++) {
- struct iwl_rxq *rxq = &trans_pcie->rxq[i];
-
- if (rxq && rxq->napi.poll)
- napi_synchronize(&rxq->napi);
- }
-}
-
-static int _iwl_pcie_rx_init(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *def_rxq;
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int i, err, queue_size, allocator_pool_size, num_alloc;
-
- if (!trans_pcie->rxq) {
- err = iwl_pcie_rx_alloc(trans);
- if (err)
- return err;
- }
- def_rxq = trans_pcie->rxq;
-
- cancel_work_sync(&rba->rx_alloc);
-
- spin_lock_bh(&rba->lock);
- atomic_set(&rba->req_pending, 0);
- atomic_set(&rba->req_ready, 0);
- INIT_LIST_HEAD(&rba->rbd_allocated);
- INIT_LIST_HEAD(&rba->rbd_empty);
- spin_unlock_bh(&rba->lock);
-
- /* free all first - we overwrite everything here */
- iwl_pcie_free_rbs_pool(trans);
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- def_rxq->queue[i] = NULL;
-
- for (i = 0; i < trans->info.num_rxqs; i++) {
- struct iwl_rxq *rxq = &trans_pcie->rxq[i];
-
- spin_lock_bh(&rxq->lock);
- /*
- * Set the read/write pointers to reflect that we have processed
- * and used all buffers, but have not restocked the Rx queue
- * with fresh buffers
- */
- rxq->read = 0;
- rxq->write = 0;
- rxq->write_actual = 0;
- memset(rxq->rb_stts, 0,
- (trans->mac_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210) ?
- sizeof(__le16) : sizeof(struct iwl_rb_status));
-
- iwl_pcie_rx_init_rxb_lists(rxq);
-
- spin_unlock_bh(&rxq->lock);
-
- if (!rxq->napi.poll) {
- int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
-
- if (trans_pcie->msix_enabled)
- poll = iwl_pcie_napi_poll_msix;
-
- netif_napi_add(trans_pcie->napi_dev, &rxq->napi,
- poll);
- napi_enable(&rxq->napi);
- }
-
- }
-
- /* move the pool to the default queue and allocator ownerships */
- queue_size = trans->mac_cfg->mq_rx_supported ?
- trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
- allocator_pool_size = trans->info.num_rxqs *
- (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
- num_alloc = queue_size + allocator_pool_size;
-
- for (i = 0; i < num_alloc; i++) {
- struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
-
- if (i < allocator_pool_size)
- list_add(&rxb->list, &rba->rbd_empty);
- else
- list_add(&rxb->list, &def_rxq->rx_used);
- trans_pcie->global_table[i] = rxb;
- rxb->vid = (u16)(i + 1);
- rxb->invalid = true;
- }
-
- iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
-
- return 0;
-}
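-
-/*
- * A worked example of the pool split above, assuming an mq device with
- * num_rx_bufs = 512 and num_rxqs = 1 (hypothetical values):
- * queue_size = 511 and allocator_pool_size = 1 * (8 - 2) = 6, so
- * num_alloc = 517; the first 6 rx_pool entries seed rba->rbd_empty and
- * the remaining 511 go to def_rxq->rx_used. Since vid = i + 1, vid 0
- * never maps to a buffer and can be rejected as invalid.
- */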
-
-int iwl_pcie_rx_init(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret = _iwl_pcie_rx_init(trans);
-
- if (ret)
- return ret;
-
- if (trans->mac_cfg->mq_rx_supported)
- iwl_pcie_rx_mq_hw_init(trans);
- else
- iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
-
- iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
-
- spin_lock_bh(&trans_pcie->rxq->lock);
- iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
- spin_unlock_bh(&trans_pcie->rxq->lock);
-
- return 0;
-}
-
-int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
-{
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- /*
- * We don't configure the RFH.
- * Restock will be done at alive, after firmware configured the RFH.
- */
- return _iwl_pcie_rx_init(trans);
-}
-
-void iwl_pcie_rx_free(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int i;
-
- /*
- * if rxq is NULL, it means that nothing has been allocated,
- * exit now
- */
- if (!trans_pcie->rxq) {
- IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
- return;
- }
-
- cancel_work_sync(&rba->rx_alloc);
-
- iwl_pcie_free_rbs_pool(trans);
-
- if (trans_pcie->base_rb_stts) {
- dma_free_coherent(trans->dev,
- rb_stts_size * trans->info.num_rxqs,
- trans_pcie->base_rb_stts,
- trans_pcie->base_rb_stts_dma);
- trans_pcie->base_rb_stts = NULL;
- trans_pcie->base_rb_stts_dma = 0;
- }
-
- for (i = 0; i < trans->info.num_rxqs; i++) {
- struct iwl_rxq *rxq = &trans_pcie->rxq[i];
-
- iwl_pcie_free_rxq_dma(trans, rxq);
-
- if (rxq->napi.poll) {
- napi_disable(&rxq->napi);
- netif_napi_del(&rxq->napi);
- }
- }
- kfree(trans_pcie->rx_pool);
- kfree(trans_pcie->global_table);
- kfree(trans_pcie->rxq);
-
- if (trans_pcie->alloc_page)
- __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
-}
-
-static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
- struct iwl_rb_allocator *rba)
-{
- spin_lock(&rba->lock);
- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
- spin_unlock(&rba->lock);
-}
-
-/*
- * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
- *
- * Called when an RBD can be reused. The RBD is transferred to the allocator.
- * When there are 2 empty RBDs - a request for allocation is posted.
- */
-static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
- struct iwl_rx_mem_buffer *rxb,
- struct iwl_rxq *rxq, bool emergency)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
- /* Move the RBD to the used list; it will be moved to the allocator
- * in batches before claiming or posting a request.
- */
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- if (unlikely(emergency))
- return;
-
- /* Count the allocator owned RBDs */
- rxq->used_count++;
-
- /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
- * issue a request to the allocator. The modulo by RX_CLAIM_REQ_ALLOC
- * handles the case where we failed to claim RX_CLAIM_REQ_ALLOC
- * buffers earlier but still need to post another request.
- */
- if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
- /* Move the 2 RBDs to the allocator's ownership.
- * The allocator has another 6 from the pool for the request completion.
- */
- iwl_pcie_rx_move_to_allocator(rxq, rba);
-
- atomic_inc(&rba->req_pending);
- queue_work(rba->alloc_wq, &rba->rx_alloc);
- }
-}
-
-static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
- struct iwl_rxq *rxq,
- struct iwl_rx_mem_buffer *rxb,
- bool emergency,
- int i)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
- bool page_stolen = false;
- int max_len = trans_pcie->rx_buf_bytes;
- u32 offset = 0;
-
- if (WARN_ON(!rxb))
- return;
-
- dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
-
- while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
- struct iwl_rx_packet *pkt;
- bool reclaim;
- int len;
- struct iwl_rx_cmd_buffer rxcb = {
- ._offset = rxb->offset + offset,
- ._rx_page_order = trans_pcie->rx_page_order,
- ._page = rxb->page,
- ._page_stolen = false,
- .truesize = max_len,
- };
-
- pkt = rxb_addr(&rxcb);
-
- if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
- IWL_DEBUG_RX(trans,
- "Q %d: RB end marker at offset %d\n",
- rxq->id, offset);
- break;
- }
-
- WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
- FH_RSCSR_RXQ_POS != rxq->id,
- "frame on invalid queue - is on %d and indicates %d\n",
- rxq->id,
- (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
- FH_RSCSR_RXQ_POS);
-
- IWL_DEBUG_RX(trans,
- "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
- rxq->id, offset,
- iwl_get_cmd_string(trans,
- WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
- pkt->hdr.group_id, pkt->hdr.cmd,
- le16_to_cpu(pkt->hdr.sequence));
-
- len = iwl_rx_packet_len(pkt);
- len += sizeof(u32); /* account for status word */
-
- offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
-
- /* check that what the device tells us made sense */
- if (len < sizeof(*pkt) || offset > max_len)
- break;
-
- maybe_trace_iwlwifi_dev_rx(trans, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
- if (reclaim && !pkt->hdr.group_id) {
- int i;
-
- for (i = 0; i < trans->conf.n_no_reclaim_cmds; i++) {
- if (trans->conf.no_reclaim_cmds[i] ==
- pkt->hdr.cmd) {
- reclaim = false;
- break;
- }
- }
- }
-
- if (rxq->id == IWL_DEFAULT_RX_QUEUE)
- iwl_op_mode_rx(trans->op_mode, &rxq->napi,
- &rxcb);
- else
- iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
- &rxcb, rxq->id);
-
- /*
- * After here, we should always check rxcb._page_stolen,
- * if it is true then one of the handlers took the page.
- */
-
- if (reclaim && txq) {
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int index = SEQ_TO_INDEX(sequence);
- int cmd_index = iwl_txq_get_cmd_index(txq, index);
-
- kfree_sensitive(txq->entries[cmd_index].free_buf);
- txq->entries[cmd_index].free_buf = NULL;
-
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking
- * iwl_trans_send_cmd()
- * as we reclaim the driver command queue */
- if (!rxcb._page_stolen)
- iwl_pcie_hcmd_complete(trans, &rxcb);
- else
- IWL_WARN(trans, "Claim null rxb?\n");
- }
-
- page_stolen |= rxcb._page_stolen;
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- break;
- }
-
- /* page was stolen from us -- free our reference */
- if (page_stolen) {
- __free_pages(rxb->page, trans_pcie->rx_page_order);
- rxb->page = NULL;
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- if (rxb->page != NULL) {
- rxb->page_dma =
- dma_map_page(trans->dev, rxb->page, rxb->offset,
- trans_pcie->rx_buf_bytes,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(trans->dev, rxb->page_dma)) {
- /*
- * free the page(s) as well to not break
- * the invariant that the items on the used
- * list have no page(s)
- */
- __free_pages(rxb->page, trans_pcie->rx_page_order);
- rxb->page = NULL;
- iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
- } else {
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- }
- } else
- iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
-}
-
-static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
- struct iwl_rxq *rxq, int i,
- bool *join)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_mem_buffer *rxb;
- u16 vid;
-
- BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
- BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
-
- if (!trans->mac_cfg->mq_rx_supported) {
- rxb = rxq->queue[i];
- rxq->queue[i] = NULL;
- return rxb;
- }
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
-
- vid = le16_to_cpu(cd[i].rbid);
- *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
- } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_rx_completion_desc *cd = rxq->used_bd;
-
- vid = le16_to_cpu(cd[i].rbid);
- *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
- } else {
- __le32 *cd = rxq->used_bd;
-
- vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
- }
-
- if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
- goto out_err;
-
- rxb = trans_pcie->global_table[vid - 1];
- if (rxb->invalid)
- goto out_err;
-
- IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
-
- rxb->invalid = true;
-
- return rxb;
-
-out_err:
- WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
- iwl_force_nmi(trans);
- return NULL;
-}
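-
-/*
- * Illustrative example (hypothetical values): on pre-AX210 mq hardware,
- * a completion word of cpu_to_le32(0x00000a03) decodes to vid = 0xa03;
- * after the bounds check against RX_POOL_SIZE() the buffer is looked up
- * as global_table[0xa02], since vids are 1-based.
- */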
-
-/*
- * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
- */
-static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq;
- u32 r, i, count = 0, handled = 0;
- bool emergency = false;
-
- if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
- return budget;
-
- rxq = &trans_pcie->rxq[queue];
-
-restart:
- spin_lock(&rxq->lock);
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = iwl_get_closed_rb_stts(trans, rxq);
- i = rxq->read;
-
- /* W/A 9000 device step A0 wrap-around bug */
- r &= (rxq->queue_size - 1);
-
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
-
- while (i != r && ++handled < budget) {
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- struct iwl_rx_mem_buffer *rxb;
- /* number of RBDs still waiting for page allocation */
- u32 rb_pending_alloc =
- atomic_read(&trans_pcie->rba.req_pending) *
- RX_CLAIM_REQ_ALLOC;
- bool join = false;
-
- if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
- !emergency)) {
- iwl_pcie_rx_move_to_allocator(rxq, rba);
- emergency = true;
- IWL_DEBUG_TPT(trans,
- "RX path is in emergency. Pending allocations %d\n",
- rb_pending_alloc);
- }
-
- IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
-
- rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
- if (!rxb)
- goto out;
-
- if (unlikely(join || rxq->next_rb_is_fragment)) {
- rxq->next_rb_is_fragment = join;
- /*
- * We can only get a multi-RB in the following cases:
- * - firmware issue, sending a too big notification
- * - sniffer mode with a large A-MSDU
- * - large MTU frames (>2k)
- * since the multi-RB functionality is limited to newer
- * hardware that cannot put multiple entries into a
- * single RB.
- *
- * Right now, the higher layers aren't set up to deal
- * with that, so discard all of these.
- */
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else {
- iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
- }
-
- i = (i + 1) & (rxq->queue_size - 1);
-
- /*
- * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
- * try to claim the pre-allocated buffers from the allocator.
- * If not ready - will try to reclaim next time.
- * There is no need to reschedule work - allocator exits only
- * on success
- */
- if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
- iwl_pcie_rx_allocator_get(trans, rxq);
-
- if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
- /* Add the remaining empty RBDs for allocator use */
- iwl_pcie_rx_move_to_allocator(rxq, rba);
- } else if (emergency) {
- count++;
- if (count == 8) {
- count = 0;
- if (rb_pending_alloc < rxq->queue_size / 3) {
- IWL_DEBUG_TPT(trans,
- "RX path exited emergency. Pending allocations %d\n",
- rb_pending_alloc);
- emergency = false;
- }
-
- rxq->read = i;
- spin_unlock(&rxq->lock);
- iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
- iwl_pcie_rxq_restock(trans, rxq);
- goto restart;
- }
- }
- }
-out:
- /* Backtrack one entry */
- rxq->read = i;
- spin_unlock(&rxq->lock);
-
- /*
- * Handle the case where, in emergency mode, there are some unallocated
- * RBDs. Those RBDs are on the used list, but are not tracked by the
- * queue's used_count, which only counts allocator-owned RBDs.
- * Unallocated emergency RBDs must be allocated on exit; otherwise, when
- * this function is called again it may not be in emergency mode and
- * they would be handed to the allocator with no tracking in the RBD
- * allocator counters, leading to them never being claimed back by the
- * queue.
- * By allocating them here, they are placed on the queue's free list and
- * will be restocked by the next call of iwl_pcie_rxq_restock.
- */
- if (unlikely(emergency && count))
- iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
-
- iwl_pcie_rxq_restock(trans, rxq);
-
- return handled;
-}
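-
-/*
- * A worked example of the emergency thresholds above (hypothetical
- * values): rb_pending_alloc = req_pending * RX_CLAIM_REQ_ALLOC, so with
- * queue_size = 512 the RX path enters emergency once 32 requests
- * (256 RBDs) are pending (>= 512 / 2) and, re-checked every 8 handled
- * buffers, leaves it again once fewer than 512 / 3 (~170) RBDs are
- * pending.
- */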
-
-static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
-{
- u8 queue = entry->entry;
- struct msix_entry *entries = entry - queue;
-
- return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
-}
-
-/*
- * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
- * This interrupt handler should be used with RSS queues only.
- */
-irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
-{
- struct msix_entry *entry = dev_id;
- struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
- struct iwl_trans *trans = trans_pcie->trans;
- struct iwl_rxq *rxq;
-
- trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
-
- if (WARN_ON(entry->entry >= trans->info.num_rxqs))
- return IRQ_NONE;
-
- if (!trans_pcie->rxq) {
- if (net_ratelimit())
- IWL_ERR(trans,
- "[%d] Got MSI-X interrupt before we have Rx queues\n",
- entry->entry);
- return IRQ_NONE;
- }
-
- rxq = &trans_pcie->rxq[entry->entry];
- lock_map_acquire(&trans->sync_cmd_lockdep_map);
- IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
-
- local_bh_disable();
- if (!napi_schedule(&rxq->napi))
- iwl_pcie_clear_irq(trans, entry->entry);
- local_bh_enable();
-
- lock_map_release(&trans->sync_cmd_lockdep_map);
-
- return IRQ_HANDLED;
-}
-
-/*
- * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
- */
-static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i;
-
- /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
- if (trans->cfg->internal_wimax_coex &&
- !trans->mac_cfg->base->apmg_not_supported &&
- (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
- APMS_CLK_VAL_MRB_FUNC_MODE) ||
- (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
- APMG_PS_CTRL_VAL_RESET_REQ))) {
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- iwl_op_mode_wimax_active(trans->op_mode);
- wake_up(&trans_pcie->wait_command_queue);
- return;
- }
-
- for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
- if (!trans_pcie->txqs.txq[i])
- continue;
- timer_delete(&trans_pcie->txqs.txq[i]->stuck_timer);
- }
-
- /* The STATUS_FW_ERROR bit is set in this function. This must happen
- * before we wake up the command caller, to ensure a proper cleanup. */
- iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ);
-
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- wake_up(&trans_pcie->wait_command_queue);
-}
-
-static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
-{
- u32 inta;
-
- lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
-
- trace_iwlwifi_dev_irq(trans->dev);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(trans, CSR_INT);
-
- /* the thread will service interrupts and re-enable them */
- return inta;
-}
-
-/* a device (PCI-E) page is 4096 bytes long */
-#define ICT_SHIFT 12
-#define ICT_SIZE (1 << ICT_SHIFT)
-#define ICT_COUNT (ICT_SIZE / sizeof(u32))
-
-/* Interrupt handler using the ICT table. With this mechanism the driver
- * stops using the INTA register to discover the device's interrupts, since
- * reading that register is expensive. Instead, the device writes interrupt
- * causes into the ICT table in DRAM, increments its index and then fires an
- * interrupt to the driver. The driver ORs all ICT table entries from the
- * current index up to the first entry with a 0 value; the result is the
- * interrupt cause we need to service. The driver then sets the consumed
- * entries back to 0 and updates the index.
- */
-static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 inta;
- u32 val = 0;
- u32 read;
-
- trace_iwlwifi_dev_irq(trans->dev);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
- if (!read)
- return 0;
-
- /*
- * Collect all entries up to the first 0, starting from ict_index;
- * note we already read at ict_index.
- */
- do {
- val |= read;
- IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
- trans_pcie->ict_index, read);
- trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
- trans_pcie->ict_index =
- ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
-
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
- read);
- } while (read);
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
- * so we use them to decide on the real state of the Rx bit.
- * In other words, bit 15 is set if bit 18 or bit 19 is set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- return inta;
-}
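-
-/*
- * A worked example of the ICT decompression above (hypothetical value):
- * the table stores CSR_INT bits 0-7 in the low byte and bits 24-31 in
- * the second byte, so val = 0x8042 reconstructs as
- * inta = 0x42 | (0x8000 << 16) = 0x80000042. The 0xC0000 check applies
- * the h/w bug w/a: if bit 18 or 19 of val is set, the Rx bit (0x8000,
- * i.e. bit 31 after the shift) is forced back on.
- */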
-
-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
- bool hw_rfkill, prev, report;
-
- mutex_lock(&trans_pcie->mutex);
- prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill) {
- set_bit(STATUS_RFKILL_OPMODE, &trans->status);
- set_bit(STATUS_RFKILL_HW, &trans->status);
- }
- if (trans_pcie->opmode_down)
- report = hw_rfkill;
- else
- report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
-
- IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
- hw_rfkill ? "disable radio" : "enable radio");
-
- isr_stats->rfkill++;
-
- if (prev != report)
- iwl_trans_pcie_rf_kill(trans, report, from_irq);
- mutex_unlock(&trans_pcie->mutex);
-
- if (hw_rfkill) {
- if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status))
- IWL_DEBUG_RF_KILL(trans,
- "Rfkill while SYNC HCMD in flight\n");
- wake_up(&trans_pcie->wait_command_queue);
- } else {
- clear_bit(STATUS_RFKILL_HW, &trans->status);
- if (trans_pcie->opmode_down)
- clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
- }
-}
-
-static void iwl_trans_pcie_handle_reset_interrupt(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 state;
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
- u32 val = iwl_read32(trans, CSR_IPC_STATE);
-
- state = u32_get_bits(val, CSR_IPC_STATE_RESET);
- IWL_DEBUG_ISR(trans, "IPC state = 0x%x/%d\n", val, state);
- } else {
- state = CSR_IPC_STATE_RESET_SW_READY;
- }
-
- switch (state) {
- case CSR_IPC_STATE_RESET_SW_READY:
- if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
- IWL_DEBUG_ISR(trans, "Reset flow completed\n");
- trans_pcie->fw_reset_state = FW_RESET_OK;
- wake_up(&trans_pcie->fw_reset_waitq);
- break;
- }
- fallthrough;
- case CSR_IPC_STATE_RESET_TOP_READY:
- if (trans_pcie->fw_reset_state == FW_RESET_TOP_REQUESTED) {
- IWL_DEBUG_ISR(trans, "TOP Reset continues\n");
- trans_pcie->fw_reset_state = FW_RESET_OK;
- wake_up(&trans_pcie->fw_reset_waitq);
- break;
- }
- fallthrough;
- case CSR_IPC_STATE_RESET_NONE:
- IWL_FW_CHECK_FAILED(trans,
- "Invalid reset interrupt (state=%d)!\n",
- state);
- break;
- case CSR_IPC_STATE_RESET_TOP_FOLLOWER:
- if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
- /* if we were in reset, wake that up */
- IWL_INFO(trans,
- "TOP reset from BT while doing reset\n");
- trans_pcie->fw_reset_state = FW_RESET_OK;
- wake_up(&trans_pcie->fw_reset_waitq);
- } else {
- IWL_INFO(trans, "TOP reset from BT\n");
- trans->state = IWL_TRANS_NO_FW;
- iwl_trans_schedule_reset(trans,
- IWL_ERR_TYPE_TOP_RESET_BY_BT);
- }
- break;
- }
-}
-
-irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
-{
- struct iwl_trans *trans = dev_id;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
- u32 inta = 0;
- u32 handled = 0;
- bool polling = false;
-
- lock_map_acquire(&trans->sync_cmd_lockdep_map);
-
- spin_lock_bh(&trans_pcie->irq_lock);
-
- /* if the dram interrupt table is not set yet,
- * use the legacy INTA register.
- */
- if (likely(trans_pcie->use_ict))
- inta = iwl_pcie_int_cause_ict(trans);
- else
- inta = iwl_pcie_int_cause_non_ict(trans);
-
- if (iwl_have_debug_level(IWL_DL_ISR)) {
- IWL_DEBUG_ISR(trans,
- "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
- inta, trans_pcie->inta_mask,
- iwl_read32(trans, CSR_INT_MASK),
- iwl_read32(trans, CSR_FH_INT_STATUS));
- if (inta & (~trans_pcie->inta_mask))
- IWL_DEBUG_ISR(trans,
- "We got a masked interrupt (0x%08x)\n",
- inta & (~trans_pcie->inta_mask));
- }
-
- inta &= trans_pcie->inta_mask;
-
- /*
- * Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC.
- */
- if (unlikely(!inta)) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- /*
- * Re-enable interrupts here since we don't
- * have anything to service
- */
- if (test_bit(STATUS_INT_ENABLED, &trans->status))
- _iwl_enable_interrupts(trans);
- spin_unlock_bh(&trans_pcie->irq_lock);
- lock_map_release(&trans->sync_cmd_lockdep_map);
- return IRQ_NONE;
- }
-
- if (unlikely(inta == 0xFFFFFFFF || iwl_trans_is_hw_error_value(inta))) {
- /*
- * Hardware disappeared. It might have
- * already raised an interrupt.
- */
- IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- spin_unlock_bh(&trans_pcie->irq_lock);
- goto out;
- }
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
- */
- /* There is a hardware bug in the interrupt mask function that some
- * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
- * they are disabled in the CSR_INT_MASK register. Furthermore the
- * ICT interrupt handling mechanism has another bug that might cause
- * these unmasked interrupts to fail to be detected. We work around the
- * hardware bugs here by ACKing all the possible interrupts so that
- * interrupt coalescing can still be achieved.
- */
- iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
-
- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
- inta, iwl_read32(trans, CSR_INT_MASK));
-
- spin_unlock_bh(&trans_pcie->irq_lock);
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(trans, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_disable_interrupts(trans);
-
- isr_stats->hw++;
- iwl_pcie_irq_handle_error(trans);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- goto out;
- }
-
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(trans,
- "Scheduler finished to transmit the frame/frames.\n");
- isr_stats->sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(trans, "Alive interrupt\n");
- isr_stats->alive++;
- if (trans->mac_cfg->gen2) {
- /*
- * We can restock, since firmware configured
- * the RFH
- */
- iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
- }
-
- handled |= CSR_INT_BIT_ALIVE;
- }
-
- if (inta & CSR_INT_BIT_RESET_DONE) {
- iwl_trans_pcie_handle_reset_interrupt(trans);
- handled |= CSR_INT_BIT_RESET_DONE;
- }
-
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* HW RF KILL switch toggled */
- if (inta & CSR_INT_BIT_RF_KILL) {
- iwl_pcie_handle_rfkill_irq(trans, true);
- handled |= CSR_INT_BIT_RF_KILL;
- }
-
- /* Chip got too hot and stopped itself */
- if (inta & CSR_INT_BIT_CT_KILL) {
- IWL_ERR(trans, "Microcode CT kill error detected.\n");
- isr_stats->ctkill++;
- handled |= CSR_INT_BIT_CT_KILL;
- }
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
- IWL_ERR(trans, "Microcode SW error detected. "
- " Restarting 0x%X.\n", inta);
- isr_stats->sw++;
- if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
- trans_pcie->fw_reset_state = FW_RESET_ERROR;
- wake_up(&trans_pcie->fw_reset_waitq);
- } else {
- iwl_pcie_irq_handle_error(trans);
- }
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /* uCode wakes up after power-down sleep */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
- iwl_pcie_rxq_check_wrptr(trans);
- iwl_pcie_txq_check_wrptrs(trans);
-
- isr_stats->wakeup++;
-
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
- * notifications from uCode come through here. */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
- CSR_INT_BIT_RX_PERIODIC)) {
- IWL_DEBUG_ISR(trans, "Rx interrupt\n");
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- iwl_write32(trans, CSR_FH_INT_STATUS,
- CSR_FH_INT_RX_MASK);
- }
- if (inta & CSR_INT_BIT_RX_PERIODIC) {
- handled |= CSR_INT_BIT_RX_PERIODIC;
- iwl_write32(trans,
- CSR_INT, CSR_INT_BIT_RX_PERIODIC);
- }
- /* Sending an RX interrupt requires many steps to be done in the
- * device:
- * 1- write interrupt to current index in ICT table.
- * 2- dma RX frame.
- * 3- update RX shared data to indicate last write index.
- * 4- send interrupt.
- * This could lead to an RX race: the driver could receive an RX
- * interrupt before the shared data changes reflect it; the
- * periodic interrupt will detect any dangling Rx activity.
- */
-
- /* Disable periodic interrupt; we use it as just a one-shot. */
- iwl_write8(trans, CSR_INT_PERIODIC_REG,
- CSR_INT_PERIODIC_DIS);
-
- /*
- * Enable periodic interrupt in 8 msec only if we received
- * real RX interrupt (instead of just periodic int), to catch
- * any dangling Rx interrupt. If it was just the periodic
- * interrupt, there was no dangling Rx activity, and no need
- * to extend the periodic interrupt; one-shot is enough.
- */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
- iwl_write8(trans, CSR_INT_PERIODIC_REG,
- CSR_INT_PERIODIC_ENA);
-
- isr_stats->rx++;
-
- local_bh_disable();
- if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
- polling = true;
- __napi_schedule(&trans_pcie->rxq[0].napi);
- }
- local_bh_enable();
- }
-
- /* This "Tx" DMA channel is used only for loading uCode */
- if (inta & CSR_INT_BIT_FH_TX) {
- iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
- IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
- isr_stats->tx++;
- handled |= CSR_INT_BIT_FH_TX;
- /* Wake up uCode load routine, now that load is complete */
- trans_pcie->ucode_write_complete = true;
- wake_up(&trans_pcie->ucode_write_waitq);
- /* Wake up IMR write routine, now that write to SRAM is complete */
- if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
- trans_pcie->imr_status = IMR_D2S_COMPLETED;
- wake_up(&trans_pcie->ucode_write_waitq);
- }
- }
-
- if (inta & ~handled) {
- IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- isr_stats->unhandled++;
- }
-
- if (inta & ~(trans_pcie->inta_mask)) {
- IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~trans_pcie->inta_mask);
- }
-
- if (!polling) {
- spin_lock_bh(&trans_pcie->irq_lock);
- /* only re-enable all interrupts if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &trans->status))
- _iwl_enable_interrupts(trans);
- /* we are loading the firmware, enable FH_TX interrupt only */
- else if (handled & CSR_INT_BIT_FH_TX)
- iwl_enable_fw_load_int(trans);
- /* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_enable_rfkill_int(trans);
- /* Re-enable the ALIVE / Rx interrupt if it occurred */
- else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
- iwl_enable_fw_load_int_ctx_info(trans, false);
- spin_unlock_bh(&trans_pcie->irq_lock);
- }
-
-out:
- lock_map_release(&trans->sync_cmd_lockdep_map);
- return IRQ_HANDLED;
-}
-
-/******************************************************************************
- *
- * ICT functions
- *
- ******************************************************************************/
-
-/* Free dram table */
-void iwl_pcie_free_ict(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (trans_pcie->ict_tbl) {
- dma_free_coherent(trans->dev, ICT_SIZE,
- trans_pcie->ict_tbl,
- trans_pcie->ict_tbl_dma);
- trans_pcie->ict_tbl = NULL;
- trans_pcie->ict_tbl_dma = 0;
- }
-}
-
-/*
- * Allocate the DRAM-shared table: an aligned memory block of
- * ICT_SIZE bytes.
- * Also reset all data related to ICT table interrupts.
- */
-int iwl_pcie_alloc_ict(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- trans_pcie->ict_tbl =
- dma_alloc_coherent(trans->dev, ICT_SIZE,
- &trans_pcie->ict_tbl_dma, GFP_KERNEL);
- if (!trans_pcie->ict_tbl)
- return -ENOMEM;
-
- /* just an API sanity check ... it is guaranteed to be aligned */
- if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
- iwl_pcie_free_ict(trans);
- return -EINVAL;
- }
-
- return 0;
-}
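-
-/*
- * Illustrative sketch (hypothetical, for clarity): the ICT table is an
- * array of ICT_COUNT little-endian 32-bit slots that the hardware fills
- * in order and the driver drains, zeroing each slot to hand it back:
- *
- *	u32 val = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- *
- *	trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
- *	trans_pcie->ict_index = (trans_pcie->ict_index + 1) & (ICT_COUNT - 1);
- */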
-
-/* The device is going up: inform it that we are using the ICT
- * interrupt table, and tell the driver to start using ICT interrupts.
- */
-void iwl_pcie_reset_ict(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 val;
-
- if (!trans_pcie->ict_tbl)
- return;
-
- spin_lock_bh(&trans_pcie->irq_lock);
- _iwl_disable_interrupts(trans);
-
- memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
-
- val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
-
- val |= CSR_DRAM_INT_TBL_ENABLE |
- CSR_DRAM_INIT_TBL_WRAP_CHECK |
- CSR_DRAM_INIT_TBL_WRITE_POINTER;
-
- IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
-
- iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
- trans_pcie->use_ict = true;
- trans_pcie->ict_index = 0;
- iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
- _iwl_enable_interrupts(trans);
- spin_unlock_bh(&trans_pcie->irq_lock);
-}
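-
-/*
- * Worked example of the value programmed above (numbers assumed): with a
- * 4 KiB-aligned table at DMA address 0x1234000 and ICT_SHIFT == 12,
- *
- *	val = (0x1234000 >> 12) | CSR_DRAM_INT_TBL_ENABLE |
- *	      CSR_DRAM_INIT_TBL_WRAP_CHECK | CSR_DRAM_INIT_TBL_WRITE_POINTER;
- *
- * i.e. the page number of the table plus the enable/behaviour flags.
- */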
-
-/* Device is going down; disable ICT interrupt usage */
-void iwl_pcie_disable_ict(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_bh(&trans_pcie->irq_lock);
- trans_pcie->use_ict = false;
- spin_unlock_bh(&trans_pcie->irq_lock);
-}
-
-irqreturn_t iwl_pcie_isr(int irq, void *data)
-{
- struct iwl_trans *trans = data;
-
- if (!trans)
- return IRQ_NONE;
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here.
- */
- iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
- return IRQ_WAKE_THREAD;
-}
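-
-/*
- * For context, a sketch of how this pairs with the threaded handler at
- * request_irq time (call site assumed, following the driver's pattern):
- *
- *	ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
- *					iwl_pcie_isr, iwl_pcie_irq_handler,
- *					IRQF_SHARED, DRV_NAME, trans);
- *
- * iwl_pcie_isr() masks CSR_INT_MASK and returns IRQ_WAKE_THREAD; the
- * thread re-enables interrupts once the pending causes are serviced.
- */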
-
-irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
-{
- return IRQ_WAKE_THREAD;
-}
-
-irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
-{
- struct msix_entry *entry = dev_id;
- struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
- struct iwl_trans *trans = trans_pcie->trans;
- struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
- u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
- u32 inta_fh, inta_hw;
- bool polling = false;
- bool sw_err;
-
- if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
- inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;
-
- if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
- inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;
-
- lock_map_acquire(&trans->sync_cmd_lockdep_map);
-
- spin_lock_bh(&trans_pcie->irq_lock);
- inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
- inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
- /*
- * Clear the causes registers to avoid handling the same cause twice.
- */
- iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
- iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
- spin_unlock_bh(&trans_pcie->irq_lock);
-
- trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
-
- if (unlikely(!(inta_fh | inta_hw))) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- lock_map_release(&trans->sync_cmd_lockdep_map);
- return IRQ_NONE;
- }
-
- if (iwl_have_debug_level(IWL_DL_ISR)) {
- IWL_DEBUG_ISR(trans,
- "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
- entry->entry, inta_fh, trans_pcie->fh_mask,
- iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
- if (inta_fh & ~trans_pcie->fh_mask)
- IWL_DEBUG_ISR(trans,
- "We got a masked interrupt (0x%08x)\n",
- inta_fh & ~trans_pcie->fh_mask);
- }
-
- inta_fh &= trans_pcie->fh_mask;
-
- if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
- inta_fh & MSIX_FH_INT_CAUSES_Q0) {
- local_bh_disable();
- if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
- polling = true;
- __napi_schedule(&trans_pcie->rxq[0].napi);
- }
- local_bh_enable();
- }
-
- if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
- inta_fh & MSIX_FH_INT_CAUSES_Q1) {
- local_bh_disable();
- if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
- polling = true;
- __napi_schedule(&trans_pcie->rxq[1].napi);
- }
- local_bh_enable();
- }
-
- /* This "Tx" DMA channel is used only for loading uCode */
- if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
- trans_pcie->imr_status == IMR_D2S_REQUESTED) {
- IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
- isr_stats->tx++;
-
- /* Wake up IMR routine once write to SRAM is complete;
- * IMR_D2S_REQUESTED was already checked in the condition above.
- */
- trans_pcie->imr_status = IMR_D2S_COMPLETED;
- wake_up(&trans_pcie->ucode_write_waitq);
- } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
- IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
- isr_stats->tx++;
- /*
- * Wake up uCode load routine,
- * now that load is complete
- */
- trans_pcie->ucode_write_complete = true;
- wake_up(&trans_pcie->ucode_write_waitq);
-
- /* Wake up IMR routine once write to SRAM is complete */
- if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
- trans_pcie->imr_status = IMR_D2S_COMPLETED;
- wake_up(&trans_pcie->ucode_write_waitq);
- }
- }
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
- else
- sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
-
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {
- IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
- inta_hw);
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- trans->request_top_reset = 1;
- iwl_op_mode_nic_error(trans->op_mode,
- IWL_ERR_TYPE_TOP_FATAL_ERROR);
- iwl_trans_schedule_reset(trans,
- IWL_ERR_TYPE_TOP_FATAL_ERROR);
- }
- }
-
- /* Error detected by uCode */
- if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
- IWL_ERR(trans,
- "Microcode SW error detected. Restarting 0x%X.\n",
- inta_fh);
- isr_stats->sw++;
- /* during FW reset flow report errors from there */
- if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
- trans_pcie->imr_status = IMR_D2S_ERROR;
- wake_up(&trans_pcie->imr_waitq);
- } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
- trans_pcie->fw_reset_state = FW_RESET_ERROR;
- wake_up(&trans_pcie->fw_reset_waitq);
- } else {
- iwl_pcie_irq_handle_error(trans);
- }
- }
-
- /* After checking FH register check HW register */
- if (iwl_have_debug_level(IWL_DL_ISR)) {
- IWL_DEBUG_ISR(trans,
- "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
- entry->entry, inta_hw, trans_pcie->hw_mask,
- iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
- if (inta_hw & ~trans_pcie->hw_mask)
- IWL_DEBUG_ISR(trans,
- "We got a masked interrupt 0x%08x\n",
- inta_hw & ~trans_pcie->hw_mask);
- }
-
- inta_hw &= trans_pcie->hw_mask;
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
- IWL_DEBUG_ISR(trans, "Alive interrupt\n");
- isr_stats->alive++;
- if (trans->mac_cfg->gen2) {
- /* We can restock, since firmware configured the RFH */
- iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
- }
- }
-
- /*
- * In some rare cases when the HW is in a bad state, we may
- * get this interrupt too early, when prph_info is still NULL.
- * So make sure that it's not NULL to prevent crashing.
- */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
- u32 sleep_notif =
- le32_to_cpu(trans_pcie->prph_info->sleep_notif);
- if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
- sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
- IWL_DEBUG_ISR(trans,
- "Sx interrupt: sleep notification = 0x%x\n",
- sleep_notif);
- trans_pcie->sx_complete = true;
- wake_up(&trans_pcie->sx_waitq);
- } else {
- /* uCode wakes up after power-down sleep */
- IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
- iwl_pcie_rxq_check_wrptr(trans);
- iwl_pcie_txq_check_wrptrs(trans);
-
- isr_stats->wakeup++;
- }
- }
-
- /* Chip got too hot and stopped itself */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
- IWL_ERR(trans, "Microcode CT kill error detected.\n");
- isr_stats->ctkill++;
- }
-
- /* HW RF KILL switch toggled */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
- iwl_pcie_handle_rfkill_irq(trans, true);
-
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
- IWL_ERR(trans,
- "Hardware error detected. Restarting.\n");
-
- isr_stats->hw++;
- trans->dbg.hw_error = true;
- iwl_pcie_irq_handle_error(trans);
- }
-
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)
- iwl_trans_pcie_handle_reset_interrupt(trans);
-
- if (!polling)
- iwl_pcie_clear_irq(trans, entry->entry);
-
- lock_map_release(&trans->sync_cmd_lockdep_map);
-
- return IRQ_HANDLED;
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2025 Intel Corporation
- */
-#include "iwl-trans.h"
-#include "iwl-prph.h"
-#include "iwl-context-info.h"
-#include "iwl-context-info-v2.h"
-#include "internal.h"
-#include "fw/dbg.h"
-
-#define FW_RESET_TIMEOUT (HZ / 5)
-
-/*
- * Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
- * NOTE: This does not load uCode nor start the embedded processor
- */
-int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
-{
- int ret = 0;
-
- IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
-
- /*
- * Use "set_bit" below rather than "write", to preserve any hardware
- * bits already set by default after reset.
- */
-
- /*
- * Disable L0s without affecting L1;
- * don't wait for ICH L0s (ICH bug W/A)
- */
- iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
- /* Set FH wait threshold to maximum (HW error during stress W/A) */
- iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
-
- /*
- * Enable HAP INTA (interrupt from management bus) to
- * wake device's PCI Express link L1a -> L0s
- */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_HAP_WAKE);
-
- iwl_pcie_apm_config(trans);
-
- ret = iwl_finish_nic_init(trans);
- if (ret)
- return ret;
-
- set_bit(STATUS_DEVICE_ENABLED, &trans->status);
-
- return 0;
-}
-
-static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
-{
- IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
-
- if (op_mode_leave) {
- if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
- iwl_pcie_gen2_apm_init(trans);
-
- /* inform ME that we are leaving */
- iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
- CSR_RESET_LINK_PWR_MGMT_DISABLED);
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_WAKE_ME |
- CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);
- mdelay(1);
- iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
- CSR_RESET_LINK_PWR_MGMT_DISABLED);
- mdelay(5);
- }
-
- clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
-
- /* Stop device's DMA activity */
- iwl_pcie_apm_stop_master(trans);
-
- iwl_trans_pcie_sw_reset(trans, false);
-
- /*
- * Clear "initialization complete" bit to move adapter from
- * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
- */
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
- else
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-}
-
-void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
-
- trans_pcie->fw_reset_state = FW_RESET_REQUESTED;
-
- if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
- UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE);
- else if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
- iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
- UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
- else
- iwl_write32(trans, CSR_DOORBELL_VECTOR,
- UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
-
- /* wait 200ms */
- ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
- trans_pcie->fw_reset_state != FW_RESET_REQUESTED,
- FW_RESET_TIMEOUT);
- if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) {
- bool reset_done;
- u32 inta_hw;
-
- if (trans_pcie->msix_enabled) {
- inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
- reset_done =
- inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE;
- } else {
- inta_hw = iwl_read32(trans, CSR_INT_MASK);
- reset_done = inta_hw & CSR_INT_BIT_RESET_DONE;
- }
-
- IWL_ERR(trans,
- "timeout waiting for FW reset ACK (inta_hw=0x%x, reset_done %d)\n",
- inta_hw, reset_done);
-
- if (!reset_done) {
- struct iwl_fw_error_dump_mode mode = {
- .type = IWL_ERR_TYPE_RESET_HS_TIMEOUT,
- .context = IWL_ERR_CONTEXT_FROM_OPMODE,
- };
- iwl_op_mode_nic_error(trans->op_mode,
- IWL_ERR_TYPE_RESET_HS_TIMEOUT);
- iwl_op_mode_dump_error(trans->op_mode, &mode);
- }
- }
-
- trans_pcie->fw_reset_state = FW_RESET_IDLE;
-}
-
-static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- lockdep_assert_held(&trans_pcie->mutex);
-
- if (trans_pcie->is_down)
- return;
-
- if (trans->state >= IWL_TRANS_FW_STARTED &&
- trans->conf.fw_reset_handshake) {
- /*
- * The reset handshake can dump firmware on timeout, but in
- * that case it should assume the firmware is already dead.
- */
- trans->state = IWL_TRANS_NO_FW;
- iwl_trans_pcie_fw_reset_handshake(trans);
- }
-
- trans_pcie->is_down = true;
-
- /* tell the device to stop sending interrupts */
- iwl_disable_interrupts(trans);
-
- /* device going down, Stop using ICT table */
- iwl_pcie_disable_ict(trans);
-
- /*
- * If a HW restart happens during firmware loading,
- * then the firmware loading might call this function
- * and later it might be called again due to the
- * restart. So don't process again if the device is
- * already dead.
- */
- if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
- IWL_DEBUG_INFO(trans,
- "DEVICE_ENABLED bit was set and is now cleared\n");
- iwl_pcie_synchronize_irqs(trans);
- iwl_pcie_rx_napi_sync(trans);
- iwl_txq_gen2_tx_free(trans);
- iwl_pcie_rx_stop(trans);
- }
-
- iwl_pcie_ctxt_info_free_paging(trans);
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_pcie_ctxt_info_v2_free(trans, false);
- else
- iwl_pcie_ctxt_info_free(trans);
-
- /* Stop the device, and put it in low power state */
- iwl_pcie_gen2_apm_stop(trans, false);
-
- /* re-take ownership to prevent other users from stealing the device */
- iwl_trans_pcie_sw_reset(trans, true);
-
- /*
- * Upon stop, the IVAR table gets erased, so msi-x won't
- * work. This causes a bug in RF-KILL flows, since the interrupt
- * that enables radio won't fire on the correct irq, and the
- * driver won't be able to handle the interrupt.
- * Configure the IVAR table again after reset.
- */
- iwl_pcie_conf_msix_hw(trans_pcie);
-
- /*
- * Upon stop, the APM issues an interrupt if HW RF kill is set.
- * This is a bug in certain versions of the hardware.
- * Certain devices also keep sending the HW RF kill interrupt all
- * the time unless it is ACKed, even when the interrupt should be
- * masked. Re-ACK all the interrupts here.
- */
- iwl_disable_interrupts(trans);
-
- /* clear all status bits */
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- clear_bit(STATUS_INT_ENABLED, &trans->status);
- clear_bit(STATUS_TPOWER_PMI, &trans->status);
-
- /*
- * Even if we stop the HW, we still want the RF kill
- * interrupt
- */
- iwl_enable_rfkill_int(trans);
-}
-
-void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool was_in_rfkill;
-
- iwl_op_mode_time_point(trans->op_mode,
- IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
- NULL);
-
- mutex_lock(&trans_pcie->mutex);
- trans_pcie->opmode_down = true;
- was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
- _iwl_trans_pcie_gen2_stop_device(trans);
- iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
- mutex_unlock(&trans_pcie->mutex);
-}
-
-static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->mac_cfg->base->min_txq_size);
- int ret;
-
- /* TODO: most of the logic can be removed in A0 - but not in Z0 */
- spin_lock_bh(&trans_pcie->irq_lock);
- ret = iwl_pcie_gen2_apm_init(trans);
- spin_unlock_bh(&trans_pcie->irq_lock);
- if (ret)
- return ret;
-
- iwl_op_mode_nic_config(trans->op_mode);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (iwl_pcie_gen2_rx_init(trans))
- return -ENOMEM;
-
- /* Allocate or reset and init all Tx and Command queues */
- if (iwl_txq_gen2_init(trans, trans->conf.cmd_queue, queue_size))
- return -ENOMEM;
-
- /* enable shadow regs in HW */
- iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
- IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
-
- return 0;
-}
-
-static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- char *buf = trans_pcie->rf_name;
- size_t buflen = sizeof(trans_pcie->rf_name);
- size_t pos;
- u32 version;
-
- if (buf[0])
- return;
-
- switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF):
- pos = scnprintf(buf, buflen, "JF");
- break;
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF):
- pos = scnprintf(buf, buflen, "GF");
- break;
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4):
- pos = scnprintf(buf, buflen, "GF4");
- break;
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
- pos = scnprintf(buf, buflen, "HR");
- break;
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
- pos = scnprintf(buf, buflen, "HR1");
- break;
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
- pos = scnprintf(buf, buflen, "HRCDB");
- break;
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_FM):
- pos = scnprintf(buf, buflen, "FM");
- break;
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_WP):
- if (SILICON_Z_STEP ==
- CSR_HW_RFID_STEP(trans->info.hw_rf_id))
- pos = scnprintf(buf, buflen, "WHTC");
- else
- pos = scnprintf(buf, buflen, "WH");
- break;
- default:
- return;
- }
-
- switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
- case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
- version = iwl_read_prph(trans, CNVI_MBOX_C);
- switch (version) {
- case 0x20000:
- pos += scnprintf(buf + pos, buflen - pos, " B3");
- break;
- case 0x120000:
- pos += scnprintf(buf + pos, buflen - pos, " B5");
- break;
- default:
- pos += scnprintf(buf + pos, buflen - pos,
- " (0x%x)", version);
- break;
- }
- break;
- default:
- break;
- }
-
- pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x",
- trans->info.hw_rf_id);
-
- IWL_INFO(trans, "Detected RF %s\n", buf);
-
- /*
- * also add a \n for debugfs - need to do it after printing
- * since our IWL_INFO machinery wants to see a static \n at
- * the end of the string
- */
- pos += scnprintf(buf + pos, buflen - pos, "\n");
-}
-
-void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_pcie_reset_ict(trans);
-
- /* make sure all queues are not stopped/used */
- memset(trans_pcie->txqs.queue_stopped, 0,
- sizeof(trans_pcie->txqs.queue_stopped));
- memset(trans_pcie->txqs.queue_used, 0,
- sizeof(trans_pcie->txqs.queue_used));
-
- /* Now that we got the alive notification we can free the fw image &
- * the context info. Paging memory cannot be freed, since the FW will
- * still use it.
- */
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_pcie_ctxt_info_v2_free(trans, true);
- else
- iwl_pcie_ctxt_info_free(trans);
-
- /*
- * Re-enable all the interrupts, including the RF-Kill one, now that
- * the firmware is alive.
- */
- iwl_enable_interrupts(trans);
- mutex_lock(&trans_pcie->mutex);
- iwl_pcie_check_hw_rf_kill(trans);
-
- iwl_pcie_get_rf_name(trans);
- mutex_unlock(&trans_pcie->mutex);
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- trans->step_urm = !!(iwl_read_umac_prph(trans,
- CNVI_PMU_STEP_FLOW) &
- CNVI_PMU_STEP_FLOW_FORCE_URM);
-}
-
-static bool iwl_pcie_set_ltr(struct iwl_trans *trans)
-{
- u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
- u32_encode_bits(250,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
- CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
- u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
-
- /*
- * To workaround hardware latency issues during the boot process,
- * initialize the LTR to ~250 usec (see ltr_val above).
- * The firmware initializes this again later (to a smaller value).
- */
- if ((trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
- trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
- !trans->mac_cfg->integrated) {
- iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
- return true;
- }
-
- if (trans->mac_cfg->integrated &&
- trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
- iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
- iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
- return true;
- }
-
- if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
- /* First clear the interrupt, just in case */
- iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
- MSIX_HW_INT_CAUSES_REG_IML);
- /* In this case, unfortunately the same ROM bug exists in the
- * device (not setting LTR correctly), but we don't have control
- * over the settings from the host due to some hardware security
- * features. The only workaround we've been able to come up with
- * so far is to try to keep the CPU and device busy by polling
- * it and the IML (image loader) completed interrupt.
- */
- return false;
- }
-
- /* nothing needs to be done on other devices */
- return true;
-}
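-
-/*
- * Worked example of the encoding above: SCALE_USEC with a value of 250
- * in both the snoop and no-snoop fields requests a latency tolerance of
- * roughly 250 usec; the firmware later replaces this with a smaller
- * value once boot completes.
- */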
-
-static void iwl_pcie_spin_for_iml(struct iwl_trans *trans)
-{
-/* in practice, this seems to complete in around 20-30ms at most, wait 100 */
-#define IML_WAIT_TIMEOUT (HZ / 10)
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long end_time = jiffies + IML_WAIT_TIMEOUT;
- u32 value, loops = 0;
- bool irq = false;
-
- if (WARN_ON(!trans_pcie->iml))
- return;
-
- value = iwl_read32(trans, CSR_LTR_LAST_MSG);
- IWL_DEBUG_INFO(trans, "Polling for IML load - CSR_LTR_LAST_MSG=0x%x\n",
- value);
-
- while (time_before(jiffies, end_time)) {
- if (iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD) &
- MSIX_HW_INT_CAUSES_REG_IML) {
- irq = true;
- break;
- }
- /* Keep the CPU and device busy. */
- value = iwl_read32(trans, CSR_LTR_LAST_MSG);
- loops++;
- }
-
- IWL_DEBUG_INFO(trans,
- "Polled for IML load: irq=%d, loops=%d, CSR_LTR_LAST_MSG=0x%x\n",
- irq, loops, value);
-
- /* We don't fail here even if we timed out - maybe we get lucky and the
- * interrupt comes in later (and we get alive from firmware) and then
- * we're all happy - but if not we'll fail on alive timeout or get some
- * other error out.
- */
-}
-
-int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
- const struct iwl_fw *fw,
- const struct fw_img *img,
- bool run_in_rfkill)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill, keep_ram_busy;
- bool top_reset_done = false;
- int ret;
-
- mutex_lock(&trans_pcie->mutex);
-again:
- /* This may fail if AMT took ownership of the device */
- if (iwl_pcie_prepare_card_hw(trans)) {
- IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
- }
-
- iwl_enable_rfkill_int(trans);
-
- iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
-
- /*
- * We enabled the RF-Kill interrupt and the handler may very
- * well be running. Disable the interrupts to make sure no other
- * interrupt can be fired.
- */
- iwl_disable_interrupts(trans);
-
- /* Make sure it finished running */
- iwl_pcie_synchronize_irqs(trans);
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
- if (hw_rfkill && !run_in_rfkill) {
- ret = -ERFKILL;
- goto out;
- }
-
- /* Someone called stop_device, don't try to start_fw */
- if (trans_pcie->is_down) {
- IWL_WARN(trans,
- "Can't start_fw since the HW hasn't been started\n");
- ret = -EIO;
- goto out;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
-
- ret = iwl_pcie_gen2_nic_init(trans);
- if (ret) {
- IWL_ERR(trans, "Unable to init nic\n");
- goto out;
- }
-
- if (WARN_ON(trans->do_top_reset &&
- trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)) {
- /* don't leak trans_pcie->mutex, which is held here */
- ret = -EINVAL;
- goto out;
- }
-
- /* we need to wait later - set state */
- if (trans->do_top_reset)
- trans_pcie->fw_reset_state = FW_RESET_TOP_REQUESTED;
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- if (!top_reset_done) {
- ret = iwl_pcie_ctxt_info_v2_alloc(trans, fw, img);
- if (ret)
- goto out;
- }
-
- iwl_pcie_ctxt_info_v2_kick(trans);
- } else {
- ret = iwl_pcie_ctxt_info_init(trans, img);
- if (ret)
- goto out;
- }
-
- keep_ram_busy = !iwl_pcie_set_ltr(trans);
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- IWL_DEBUG_POWER(trans, "function scratch register value is 0x%08x\n",
- iwl_read32(trans, CSR_FUNC_SCRATCH));
- iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_ROM_START);
- } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
- } else {
- iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
- }
-
- if (keep_ram_busy)
- iwl_pcie_spin_for_iml(trans);
-
- if (trans->do_top_reset) {
- trans->do_top_reset = 0;
-
-#define FW_TOP_RESET_TIMEOUT (HZ / 4)
- ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
- trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED,
- FW_TOP_RESET_TIMEOUT);
-
- if (trans_pcie->fw_reset_state != FW_RESET_OK) {
- if (trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED)
- IWL_ERR(trans,
- "TOP reset interrupted by error (state %d)!\n",
- trans_pcie->fw_reset_state);
- else
- IWL_ERR(trans, "TOP reset timed out!\n");
- iwl_op_mode_nic_error(trans->op_mode,
- IWL_ERR_TYPE_TOP_RESET_FAILED);
- iwl_trans_schedule_reset(trans,
- IWL_ERR_TYPE_TOP_RESET_FAILED);
- ret = -EIO;
- goto out;
- }
-
- msleep(10);
- IWL_INFO(trans, "TOP reset successful, reinit now\n");
- /* now load the firmware again properly */
- trans_pcie->prph_scratch->ctrl_cfg.control.control_flags &=
- ~cpu_to_le32(IWL_PRPH_SCRATCH_TOP_RESET);
- top_reset_done = true;
- goto again;
- }
-
- /* re-check RF-Kill state since we may have missed the interrupt */
- hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
- if (hw_rfkill && !run_in_rfkill)
- ret = -ERFKILL;
-
-out:
- mutex_unlock(&trans_pcie->mutex);
- return ret;
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2007-2015, 2018-2024 Intel Corporation
- * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2016-2017 Intel Deutschland GmbH
- */
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/debugfs.h>
-#include <linux/sched.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/wait.h>
-#include <linux/seq_file.h>
-
-#include "iwl-drv.h"
-#include "iwl-trans.h"
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-scd.h"
-#include "iwl-agn-hw.h"
-#include "fw/error-dump.h"
-#include "fw/dbg.h"
-#include "fw/api/tx.h"
-#include "fw/acpi.h"
-#include "mei/iwl-mei.h"
-#include "internal.h"
-#include "iwl-fh.h"
-#include "iwl-context-info-v2.h"
-
-/* extended range in FW SRAM */
-#define IWL_FW_MEM_EXTENDED_START 0x40000
-#define IWL_FW_MEM_EXTENDED_END 0x57FFF
-
-void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
-{
-#define PCI_DUMP_SIZE 352
-#define PCI_MEM_DUMP_SIZE 64
-#define PCI_PARENT_DUMP_SIZE 524
-#define PREFIX_LEN 32
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct pci_dev *pdev = trans_pcie->pci_dev;
- u32 i, pos, alloc_size, *ptr, *buf;
- char *prefix;
-
- if (trans_pcie->pcie_dbg_dumped_once)
- return;
-
- /* Should be a multiple of 4 */
- BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
- BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
- BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);
-
- /* Alloc a max size buffer */
- alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
- alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
- alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
- alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);
-
- buf = kmalloc(alloc_size, GFP_ATOMIC);
- if (!buf)
- return;
- prefix = (char *)buf + alloc_size - PREFIX_LEN;
-
- IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");
-
- /* Print wifi device registers */
- sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
- IWL_ERR(trans, "iwlwifi device config registers:\n");
- for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
- if (pci_read_config_dword(pdev, i, ptr))
- goto err_read;
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
-
- IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
- for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
- *ptr = iwl_read32(trans, i);
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- if (pos) {
- IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
- for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
- if (pci_read_config_dword(pdev, pos + i, ptr))
- goto err_read;
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
- 32, 4, buf, i, 0);
- }
-
- /* Print parent device registers next */
- if (!pdev->bus->self)
- goto out;
-
- pdev = pdev->bus->self;
- sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
-
- IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
- pci_name(pdev));
- for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
- if (pci_read_config_dword(pdev, i, ptr))
- goto err_read;
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
-
- /* Print root port AER registers */
- pos = 0;
- pdev = pcie_find_root_port(pdev);
- if (pdev)
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- if (pos) {
- IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
- pci_name(pdev));
- sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
- for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
- if (pci_read_config_dword(pdev, pos + i, ptr))
- goto err_read;
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
- 4, buf, i, 0);
- }
- goto out;
-
-err_read:
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
- IWL_ERR(trans, "Read failed at 0x%X\n", i);
-out:
- trans_pcie->pcie_dbg_dumped_once = 1;
- kfree(buf);
-}
-
-int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
-{
- /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_SW_RESET);
- usleep_range(10000, 20000);
- } else {
- iwl_set_bit(trans, CSR_RESET,
- CSR_RESET_REG_FLAG_SW_RESET);
- usleep_range(5000, 6000);
- }
-
- if (retake_ownership)
- return iwl_pcie_prepare_card_hw(trans);
-
- return 0;
-}
-
-static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
-{
- struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
-
- if (!fw_mon->size)
- return;
-
- dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
- fw_mon->physical);
-
- fw_mon->block = NULL;
- fw_mon->physical = 0;
- fw_mon->size = 0;
-}
-
-static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
- u8 max_power)
-{
- struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
- void *block = NULL;
- dma_addr_t physical = 0;
- u32 size = 0;
- u8 power;
-
- if (fw_mon->size) {
- memset(fw_mon->block, 0, fw_mon->size);
- return;
- }
-
- /* need at least 2 KiB, so stop at 11 */
- for (power = max_power; power >= 11; power--) {
- size = BIT(power);
- block = dma_alloc_coherent(trans->dev, size, &physical,
- GFP_KERNEL | __GFP_NOWARN);
- if (!block)
- continue;
-
- IWL_INFO(trans,
- "Allocated 0x%08x bytes for firmware monitor.\n",
- size);
- break;
- }
-
- if (WARN_ON_ONCE(!block))
- return;
-
- if (power != max_power)
- IWL_ERR(trans,
- "Sorry - debug buffer is only %luK while you requested %luK\n",
- (unsigned long)BIT(power - 10),
- (unsigned long)BIT(max_power - 10));
-
- fw_mon->block = block;
- fw_mon->physical = physical;
- fw_mon->size = size;
-}
-
-void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
-{
- if (!max_power) {
- /* default max_power is maximum */
- max_power = 26;
- } else {
- max_power += 11;
- }
-
- if (WARN(max_power > 26,
- "External buffer size for monitor is too big %d, check the FW TLV\n",
- max_power))
- return;
-
- iwl_pcie_alloc_fw_monitor_block(trans, max_power);
-}
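-
-/*
- * Sizing example: iwl_pcie_alloc_fw_monitor(trans, 0) requests the
- * maximum 2^26 = 64 MiB block, and on allocation failure the helper
- * above falls back by powers of two down to 2^11 = 2 KiB.
- */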
-
-static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
-{
- iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
- ((reg & 0x0000ffff) | (2 << 28)));
- return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
-}
-
-static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
-{
- iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
- iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
- ((reg & 0x0000ffff) | (3 << 28)));
-}
-
-static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
-{
- if (trans->mac_cfg->base->apmg_not_supported)
- return;
-
- if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
- iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- else
- iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT 0x041
-
-void iwl_pcie_apm_config(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 lctl;
- u16 cap;
-
- /*
- * L0S states have been found to be unstable with our devices
- * and in newer hardware they are not officially supported at
- * all, so we must always set the L0S_DISABLED bit.
- */
- iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
-
- pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
- trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
-
- pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
- trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
- IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
- (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
- trans->ltr_enabled ? "En" : "Dis");
-}
-
-/*
- * Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
- * NOTE: This does not load uCode nor start the embedded processor
- */
-static int iwl_pcie_apm_init(struct iwl_trans *trans)
-{
- int ret;
-
- IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
-
- /*
- * Use "set_bit" below rather than "write", to preserve any hardware
- * bits already set by default after reset.
- */
-
- /* Disable L0S exit timer (platform NMI Work/Around) */
- if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
- iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
- /*
- * Disable L0s without affecting L1;
- * don't wait for ICH L0s (ICH bug W/A)
- */
- iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
- /* Set FH wait threshold to maximum (HW error during stress W/A) */
- iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
-
- /*
- * Enable HAP INTA (interrupt from management bus) to
- * wake device's PCI Express link L1a -> L0s
- */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_HAP_WAKE);
-
- iwl_pcie_apm_config(trans);
-
- /* Configure analog phase-lock-loop before activating to D0A */
- if (trans->mac_cfg->base->pll_cfg)
- iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
-
- ret = iwl_finish_nic_init(trans);
- if (ret)
- return ret;
-
- if (trans->cfg->host_interrupt_operation_mode) {
- /*
- * This is a bit of an abuse: this is needed for 7260 / 3160
- * only, so we check host_interrupt_operation_mode even though
- * the oscillator is not otherwise related to it.
- *
- * Enable the oscillator to count wake up time for L1 exit. This
- * consumes slightly more power (100uA) - but allows us to be
- * sure that we wake up from L1 on time.
- *
- * This looks weird: read the same register twice, discard the
- * value, set a bit, and yet again, read that same register
- * just to discard the value. But that's the way the hardware
- * seems to like it.
- */
- iwl_read_prph(trans, OSC_CLK);
- iwl_read_prph(trans, OSC_CLK);
- iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
- iwl_read_prph(trans, OSC_CLK);
- iwl_read_prph(trans, OSC_CLK);
- }
-
- /*
- * Enable DMA clock and wait for it to stabilize.
- *
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
- * bits do not disable clocks. This preserves any hardware
- * bits already set by default in "CLK_CTRL_REG" after reset.
- */
- if (!trans->mac_cfg->base->apmg_not_supported) {
- iwl_write_prph(trans, APMG_CLK_EN_REG,
- APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
-
- /* Disable L1-Active */
- iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
- /* Clear the interrupt in APMG if the NIC is in RFKILL */
- iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
- APMG_RTC_INT_STT_RFKILL);
- }
-
- set_bit(STATUS_DEVICE_ENABLED, &trans->status);
-
- return 0;
-}
-
-/*
- * Enable LP XTAL to avoid HW bug where device may consume much power if
- * FW is not loaded after device reset. LP XTAL is disabled by default
- * after device HW reset. Do it only if XTAL is fed by internal source.
- * Configure device's "persistence" mode to avoid resetting XTAL again when
- * SHRD_HW_RST occurs in S3.
- */
-static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
-{
- int ret;
- u32 apmg_gp1_reg;
- u32 apmg_xtal_cfg_reg;
- u32 dl_cfg_reg;
-
- /* Force XTAL ON */
- __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
-
- ret = iwl_trans_pcie_sw_reset(trans, true);
-
- if (!ret)
- ret = iwl_finish_nic_init(trans);
-
- if (WARN_ON(ret)) {
- /* Release XTAL ON request */
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
- return;
- }
-
- /*
- * Clear "disable persistence" to avoid LP XTAL resetting when
- * SHRD_HW_RST is applied in S3.
- */
- iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_PERSIST_DIS);
-
- /*
- * Force APMG XTAL to be active to prevent its disabling by HW
- * caused by APMG idle state.
- */
- apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
- SHR_APMG_XTAL_CFG_REG);
- iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
- apmg_xtal_cfg_reg |
- SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
-
- ret = iwl_trans_pcie_sw_reset(trans, true);
- if (ret)
- IWL_ERR(trans,
- "iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");
-
- /* Enable LP XTAL by indirect access through CSR */
- apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
- iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
- SHR_APMG_GP1_WF_XTAL_LP_EN |
- SHR_APMG_GP1_CHICKEN_BIT_SELECT);
-
- /* Clear delay line clock power up */
- dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
- iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
- ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
-
- /*
- * Enable persistence mode to avoid LP XTAL resetting when
- * SHRD_HW_RST is applied in S3.
- */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PERSISTENCE);
-
- /*
- * Clear "initialization complete" bit to move adapter from
- * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
- */
- iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
- /* Activates XTAL resources monitor */
- __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
- CSR_MONITOR_XTAL_RESOURCES);
-
- /* Release XTAL ON request */
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
- udelay(10);
-
- /* Release APMG XTAL */
- iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
- apmg_xtal_cfg_reg &
- ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
-}
-
-void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
-{
- int ret;
-
- /* stop device's busmaster DMA activity */
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);
-
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
- CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
- 100);
- usleep_range(10000, 20000);
- } else {
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
-
- ret = iwl_poll_bit(trans, CSR_RESET,
- CSR_RESET_REG_FLAG_MASTER_DISABLED,
- CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
- }
-
- if (ret < 0)
- IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
-
- IWL_DEBUG_INFO(trans, "stop master\n");
-}
-
-static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
-{
- IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
-
- if (op_mode_leave) {
- if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
- iwl_pcie_apm_init(trans);
-
- /* inform ME that we are leaving */
- if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000)
- iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_WAKE_ME);
- else if (trans->mac_cfg->device_family >=
- IWL_DEVICE_FAMILY_8000) {
- iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
- CSR_RESET_LINK_PWR_MGMT_DISABLED);
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_WAKE_ME |
- CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);
- mdelay(1);
- iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
- CSR_RESET_LINK_PWR_MGMT_DISABLED);
- }
- mdelay(5);
- }
-
- clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
-
- /* Stop device's DMA activity */
- iwl_pcie_apm_stop_master(trans);
-
- if (trans->cfg->lp_xtal_workaround) {
- iwl_pcie_apm_lp_xtal_enable(trans);
- return;
- }
-
- iwl_trans_pcie_sw_reset(trans, false);
-
- /*
- * Clear "initialization complete" bit to move adapter from
- * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
- */
- iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-}
-
-static int iwl_pcie_nic_init(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
-
- /* nic_init */
- spin_lock_bh(&trans_pcie->irq_lock);
- ret = iwl_pcie_apm_init(trans);
- spin_unlock_bh(&trans_pcie->irq_lock);
-
- if (ret)
- return ret;
-
- iwl_pcie_set_pwr(trans, false);
-
- iwl_op_mode_nic_config(trans->op_mode);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- ret = iwl_pcie_rx_init(trans);
- if (ret)
- return ret;
-
- /* Allocate or reset and init all Tx and Command queues */
- if (iwl_pcie_tx_init(trans)) {
- iwl_pcie_rx_free(trans);
- return -ENOMEM;
- }
-
- if (trans->mac_cfg->base->shadow_reg_enable) {
- /* enable shadow regs in HW */
- iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
- IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
- }
-
- return 0;
-}
-
-#define HW_READY_TIMEOUT (50)
-
-/* Note: returns poll_bit return value, which is >= 0 if success */
-static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
-{
- int ret;
-
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PCI_OWN_SET);
-
- /* See if we got it */
- ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PCI_OWN_SET,
- CSR_HW_IF_CONFIG_REG_PCI_OWN_SET,
- HW_READY_TIMEOUT);
-
- if (ret >= 0)
- iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
-
- IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
- return ret;
-}
-
-/* Note: returns standard 0/-ERROR code */
-int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
-{
- int ret;
- int iter;
-
- IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
-
- ret = iwl_pcie_set_hw_ready(trans);
- /* If the card is ready, exit 0 */
- if (ret >= 0) {
- trans->csme_own = false;
- return 0;
- }
-
- iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
- CSR_RESET_LINK_PWR_MGMT_DISABLED);
- usleep_range(1000, 2000);
-
- for (iter = 0; iter < 10; iter++) {
- int t = 0;
-
- /* If HW is not ready, prepare the conditions to check again */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_WAKE_ME);
-
- do {
- ret = iwl_pcie_set_hw_ready(trans);
- if (ret >= 0) {
- trans->csme_own = false;
- return 0;
- }
-
- if (iwl_mei_is_connected()) {
- IWL_DEBUG_INFO(trans,
- "Couldn't prepare the card but SAP is connected\n");
- trans->csme_own = true;
- if (trans->mac_cfg->device_family !=
- IWL_DEVICE_FAMILY_9000)
- IWL_ERR(trans,
- "SAP not supported for this NIC family\n");
-
- return -EBUSY;
- }
-
- usleep_range(200, 1000);
- t += 200;
- } while (t < 150000);
- msleep(25);
- }
-
- IWL_ERR(trans, "Couldn't prepare the card\n");
-
- return ret;
-}
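-
-/*
- * Timing note derived from the loops above: each of the 10 outer
- * attempts makes up to 750 polls of 200-1000 usec each, so the
- * handshake is retried for roughly 1.5 to 7.5 seconds in total (plus
- * 25 msec between attempts) before giving up.
- */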
-
-/*
- * ucode
- */
-static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
- u32 dst_addr, dma_addr_t phy_addr,
- u32 byte_cnt)
-{
- iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
-
- iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
- dst_addr);
-
- iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
- phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
-
- iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
- (iwl_get_dma_hi_addr(phy_addr)
- << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
-
- iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
- BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
- BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
- FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
-
- iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
-}
-
-static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
- u32 dst_addr, dma_addr_t phy_addr,
- u32 byte_cnt)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
-
- trans_pcie->ucode_write_complete = false;
-
- if (!iwl_trans_grab_nic_access(trans))
- return -EIO;
-
- iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
- byte_cnt);
- iwl_trans_release_nic_access(trans);
-
- ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
- trans_pcie->ucode_write_complete, 5 * HZ);
- if (!ret) {
- IWL_ERR(trans, "Failed to load firmware chunk!\n");
- iwl_trans_pcie_dump_regs(trans);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
- const struct fw_desc *section)
-{
- u8 *v_addr;
- dma_addr_t p_addr;
- u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
- int ret = 0;
-
- IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
- section_num);
-
- v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
- GFP_KERNEL | __GFP_NOWARN);
- if (!v_addr) {
- IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
- chunk_sz = PAGE_SIZE;
- v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
- &p_addr, GFP_KERNEL);
- if (!v_addr)
- return -ENOMEM;
- }
-
- for (offset = 0; offset < section->len; offset += chunk_sz) {
- u32 copy_size, dst_addr;
- bool extended_addr = false;
-
- copy_size = min_t(u32, chunk_sz, section->len - offset);
- dst_addr = section->offset + offset;
-
- if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
- dst_addr <= IWL_FW_MEM_EXTENDED_END)
- extended_addr = true;
-
- if (extended_addr)
- iwl_set_bits_prph(trans, LMPM_CHICK,
- LMPM_CHICK_EXTENDED_ADDR_SPACE);
-
- memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
- ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
- copy_size);
-
- if (extended_addr)
- iwl_clear_bits_prph(trans, LMPM_CHICK,
- LMPM_CHICK_EXTENDED_ADDR_SPACE);
-
- if (ret) {
- IWL_ERR(trans,
- "Could not load the [%d] uCode section\n",
- section_num);
- break;
- }
- }
-
- dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
- return ret;
-}
-
-static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
- const struct fw_img *image,
- int cpu,
- int *first_ucode_section)
-{
- int shift_param;
- int i, ret = 0, sec_num = 0x1;
- u32 val, last_read_idx = 0;
-
- if (cpu == 1) {
- shift_param = 0;
- *first_ucode_section = 0;
- } else {
- shift_param = 16;
- (*first_ucode_section)++;
- }
-
- for (i = *first_ucode_section; i < image->num_sec; i++) {
- last_read_idx = i;
-
- /*
- * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
- * sections from the CPU2 sections.
- * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
- * non-paged sections from the CPU2 paging sections.
- */
- if (!image->sec[i].data ||
- image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
- image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
- IWL_DEBUG_FW(trans,
- "Break since Data not valid or Empty section, sec = %d\n",
- i);
- break;
- }
-
- ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
- if (ret)
- return ret;
-
- /* Notify ucode of loaded section number and status */
- val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
- val = val | (sec_num << shift_param);
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
-
- sec_num = (sec_num << 1) | 0x1;
- }
-
- *first_ucode_section = last_read_idx;
-
- iwl_enable_interrupts(trans);
-
- if (trans->mac_cfg->gen2) {
- if (cpu == 1)
- iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
- 0xFFFF);
- else
- iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
- 0xFFFFFFFF);
- } else {
- if (cpu == 1)
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
- 0xFFFF);
- else
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
- 0xFFFFFFFF);
- }
-
- return 0;
-}
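-
-/*
- * Worked example of the sec_num bookkeeping above: after loading three
- * CPU1 sections, the values OR'ed into FH_UCODE_LOAD_STATUS are 0x1,
- * 0x3 and 0x7 (sec_num shifts left and sets the low bit after each
- * section); for CPU2 the same pattern lands in the upper 16 bits via
- * shift_param == 16.
- */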
-
-static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
- const struct fw_img *image,
- int cpu,
- int *first_ucode_section)
-{
- int i, ret = 0;
- u32 last_read_idx = 0;
-
- if (cpu == 1)
- *first_ucode_section = 0;
- else
- (*first_ucode_section)++;
-
- for (i = *first_ucode_section; i < image->num_sec; i++) {
- last_read_idx = i;
-
- /*
- * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
- * sections from the CPU2 sections.
- * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
- * non-paged sections from the CPU2 paging sections.
- */
- if (!image->sec[i].data ||
- image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
- image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
- IWL_DEBUG_FW(trans,
- "Break since Data not valid or Empty section, sec = %d\n",
- i);
- break;
- }
-
- ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
- if (ret)
- return ret;
- }
-
- *first_ucode_section = last_read_idx;
-
- return 0;
-}
-
-static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
-{
- enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
- struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
- &trans->dbg.fw_mon_cfg[alloc_id];
- struct iwl_dram_data *frag;
-
- if (!iwl_trans_dbg_ini_valid(trans))
- return;
-
- if (le32_to_cpu(fw_mon_cfg->buf_location) ==
- IWL_FW_INI_LOCATION_SRAM_PATH) {
- IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
- /* set sram monitor by enabling bit 7 */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
-
- return;
- }
-
- if (le32_to_cpu(fw_mon_cfg->buf_location) !=
- IWL_FW_INI_LOCATION_DRAM_PATH ||
- !trans->dbg.fw_mon_ini[alloc_id].num_frags)
- return;
-
- frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];
-
- IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
- alloc_id);
-
- iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
- frag->physical >> MON_BUFF_SHIFT_VER2);
- iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
- (frag->physical + frag->size - 256) >>
- MON_BUFF_SHIFT_VER2);
-}
-
-void iwl_pcie_apply_destination(struct iwl_trans *trans)
-{
- const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
- const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
- int i;
-
- if (iwl_trans_dbg_ini_valid(trans)) {
- iwl_pcie_apply_destination_ini(trans);
- return;
- }
-
- IWL_INFO(trans, "Applying debug destination %s\n",
- get_fw_dbg_mode_string(dest->monitor_mode));
-
- if (dest->monitor_mode == EXTERNAL_MODE)
- iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
- else
- IWL_WARN(trans, "PCI should have external buffer debug\n");
-
- for (i = 0; i < trans->dbg.n_dest_reg; i++) {
- u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
- u32 val = le32_to_cpu(dest->reg_ops[i].val);
-
- switch (dest->reg_ops[i].op) {
- case CSR_ASSIGN:
- iwl_write32(trans, addr, val);
- break;
- case CSR_SETBIT:
- iwl_set_bit(trans, addr, BIT(val));
- break;
- case CSR_CLEARBIT:
- iwl_clear_bit(trans, addr, BIT(val));
- break;
- case PRPH_ASSIGN:
- iwl_write_prph(trans, addr, val);
- break;
- case PRPH_SETBIT:
- iwl_set_bits_prph(trans, addr, BIT(val));
- break;
- case PRPH_CLEARBIT:
- iwl_clear_bits_prph(trans, addr, BIT(val));
- break;
- case PRPH_BLOCKBIT:
- if (iwl_read_prph(trans, addr) & BIT(val)) {
- IWL_ERR(trans,
- "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
- val, addr);
- goto monitor;
- }
- break;
- default:
- IWL_ERR(trans, "FW debug - unknown OP %d\n",
- dest->reg_ops[i].op);
- break;
- }
- }
-
-monitor:
- if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
- iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
- fw_mon->physical >> dest->base_shift);
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
- iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (fw_mon->physical + fw_mon->size -
- 256) >> dest->end_shift);
- else
- iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (fw_mon->physical + fw_mon->size) >>
- dest->end_shift);
- }
-}
-
-static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
- const struct fw_img *image)
-{
- int ret = 0;
- int first_ucode_section;
-
- IWL_DEBUG_FW(trans, "working with %s CPU\n",
- image->is_dual_cpus ? "Dual" : "Single");
-
- /* load to FW the binary non secured sections of CPU1 */
- ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
- if (ret)
- return ret;
-
- if (image->is_dual_cpus) {
- /* set CPU2 header address */
- iwl_write_prph(trans,
- LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
- LMPM_SECURE_CPU2_HDR_MEM_SPACE);
-
- /* load to FW the binary sections of CPU2 */
- ret = iwl_pcie_load_cpu_sections(trans, image, 2,
- &first_ucode_section);
- if (ret)
- return ret;
- }
-
- if (iwl_pcie_dbg_on(trans))
- iwl_pcie_apply_destination(trans);
-
- iwl_enable_interrupts(trans);
-
- /* release CPU reset */
- iwl_write32(trans, CSR_RESET, 0);
-
- return 0;
-}
-
-static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
- const struct fw_img *image)
-{
- int ret = 0;
- int first_ucode_section;
-
- IWL_DEBUG_FW(trans, "working with %s CPU\n",
- image->is_dual_cpus ? "Dual" : "Single");
-
- if (iwl_pcie_dbg_on(trans))
- iwl_pcie_apply_destination(trans);
-
- IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
- iwl_read_prph(trans, WFPM_GP2));
-
- /*
- * Set a default value. On resume, reading the values that were
- * zeroed can provide debug data on the resume flow.
- * This is for debugging only and has no functional impact.
- */
- iwl_write_prph(trans, WFPM_GP2, 0x01010101);
-
- /* configure the ucode to be ready to get the secured image */
- /* release CPU reset */
- iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
-
- /* load to FW the binary Secured sections of CPU1 */
- ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
- &first_ucode_section);
- if (ret)
- return ret;
-
- /* load to FW the binary sections of CPU2 */
- return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
- &first_ucode_section);
-}
-
-bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill = iwl_is_rfkill_set(trans);
- bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
- bool report;
-
- if (hw_rfkill) {
- set_bit(STATUS_RFKILL_HW, &trans->status);
- set_bit(STATUS_RFKILL_OPMODE, &trans->status);
- } else {
- clear_bit(STATUS_RFKILL_HW, &trans->status);
- if (trans_pcie->opmode_down)
- clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
- }
-
- report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
-
- if (prev != report)
- iwl_trans_pcie_rf_kill(trans, report, false);
-
- return hw_rfkill;
-}
-
-struct iwl_causes_list {
- u16 mask_reg;
- u8 bit;
- u8 addr;
-};
-
-#define IWL_CAUSE(reg, mask) \
- { \
- .mask_reg = reg, \
- .bit = ilog2(mask), \
- .addr = ilog2(mask) + \
- ((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 : \
- (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 : \
- 0xffff), /* causes overflow warning */ \
- }
-
-static const struct iwl_causes_list causes_list_common[] = {
- IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
- IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
- IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
- IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR),
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
-};
-
-static const struct iwl_causes_list causes_list_pre_bz[] = {
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
-};
-
-static const struct iwl_causes_list causes_list_bz[] = {
- IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
-};
-
-static void iwl_pcie_map_list(struct iwl_trans *trans,
- const struct iwl_causes_list *causes,
- int arr_size, int val)
-{
- int i;
-
- for (i = 0; i < arr_size; i++) {
- iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
- iwl_clear_bit(trans, causes[i].mask_reg,
- BIT(causes[i].bit));
- }
-}
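The IWL_CAUSE() macro above folds the register and the cause's bit position into a single IVAR entry index: FH causes land 16 entries below their bit, HW causes 16 above. A standalone sketch of the same arithmetic (illustrative C, not part of the patch; the register constants here are placeholders, the real ones live in iwl-csr.h):

#include <stdio.h>
#include <stdint.h>

#define FH_MASK_REG 0x804 /* placeholder for CSR_MSIX_FH_INT_MASK_AD */
#define HW_MASK_REG 0x80C /* placeholder for CSR_MSIX_HW_INT_MASK_AD */

static int ivar_entry(uint32_t reg, uint32_t mask)
{
	int bit = __builtin_ctz(mask); /* ilog2() of a single-bit mask */

	return bit + (reg == FH_MASK_REG ? -16 :
		      reg == HW_MASK_REG ? 16 : 0xffff);
}

int main(void)
{
	/* an FH cause at bit 16 maps to IVAR entry 0 ... */
	printf("%d\n", ivar_entry(FH_MASK_REG, 1u << 16));
	/* ... and a HW cause at bit 0 maps to IVAR entry 16 */
	printf("%d\n", ivar_entry(HW_MASK_REG, 1u << 0));
	return 0;
}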
-
-static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
- /*
- * Access all non-RX causes and map them to the default irq.
- * In case we are missing at least one interrupt vector,
- * the first interrupt vector will serve the non-RX and FBQ causes.
- */
- iwl_pcie_map_list(trans, causes_list_common,
- ARRAY_SIZE(causes_list_common), val);
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- iwl_pcie_map_list(trans, causes_list_bz,
- ARRAY_SIZE(causes_list_bz), val);
- else
- iwl_pcie_map_list(trans, causes_list_pre_bz,
- ARRAY_SIZE(causes_list_pre_bz), val);
-}
-
-static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 offset =
- trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
- u32 val, idx;
-
- /*
- * The first RX queue - the fallback queue, which is designated for
- * management frames, command responses, etc. - is always mapped to the
- * first interrupt vector. The other RX queues are mapped to
- * the other (N - 2) interrupt vectors.
- */
- val = BIT(MSIX_FH_INT_CAUSES_Q(0));
- for (idx = 1; idx < trans->info.num_rxqs; idx++) {
- iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
- MSIX_FH_INT_CAUSES_Q(idx - offset));
- val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
- }
- iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
-
- val = MSIX_FH_INT_CAUSES_Q(0);
- if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
- val |= MSIX_NON_AUTO_CLEAR_CAUSE;
- iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
-
- if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
- iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
-}
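The offset logic above is subtle: when the first vector also serves the first RSS queue, RX queue 1 is folded onto cause Q(0) together with the fallback queue. A hedged sketch of the resulting queue-to-cause mapping (standalone C, illustrative only, not part of the patch):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int num_rxqs = 4;
	bool first_rss_shared = true; /* IWL_SHARED_IRQ_FIRST_RSS, assumed set */
	int offset = first_rss_shared ? 1 : 0;

	/* queue 0 (fallback) always uses cause Q(0) */
	printf("rxq 0 -> cause Q(0)\n");
	for (int idx = 1; idx < num_rxqs; idx++)
		printf("rxq %d -> cause Q(%d)\n", idx, idx - offset);
	return 0;
}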
-
-void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
-{
- struct iwl_trans *trans = trans_pcie->trans;
-
- if (!trans_pcie->msix_enabled) {
- if (trans->mac_cfg->mq_rx_supported &&
- test_bit(STATUS_DEVICE_ENABLED, &trans->status))
- iwl_write_umac_prph(trans, UREG_CHICK,
- UREG_CHICK_MSI_ENABLE);
- return;
- }
- /*
- * The IVAR table needs to be configured again after reset,
- * but if the device is disabled, we can't write to
- * prph.
- */
- if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
- iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
-
- /*
- * Each cause from the causes list above, as well as each RX cause,
- * is represented as a byte in the IVAR table. The first nibble
- * holds the interrupt vector the cause is bound to; the second
- * marks the cause as non-auto-clear, which is set if its
- * interrupt vector is bound to serve other causes.
- */
- iwl_pcie_map_rx_causes(trans);
-
- iwl_pcie_map_non_rx_causes(trans);
-}
-
-static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
-{
- struct iwl_trans *trans = trans_pcie->trans;
-
- iwl_pcie_conf_msix_hw(trans_pcie);
-
- if (!trans_pcie->msix_enabled)
- return;
-
- trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
- trans_pcie->fh_mask = trans_pcie->fh_init_mask;
- trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
- trans_pcie->hw_mask = trans_pcie->hw_init_mask;
-}
-
-static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- lockdep_assert_held(&trans_pcie->mutex);
-
- if (trans_pcie->is_down)
- return;
-
- trans_pcie->is_down = true;
-
- /* tell the device to stop sending interrupts */
- iwl_disable_interrupts(trans);
-
- /* device going down, stop using ICT table */
- iwl_pcie_disable_ict(trans);
-
- /*
- * If a HW restart happens during firmware loading,
- * then the firmware loading might call this function
- * and later it might be called again due to the
- * restart. So don't process again if the device is
- * already dead.
- */
- if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
- IWL_DEBUG_INFO(trans,
- "DEVICE_ENABLED bit was set and is now cleared\n");
- if (!from_irq)
- iwl_pcie_synchronize_irqs(trans);
- iwl_pcie_rx_napi_sync(trans);
- iwl_pcie_tx_stop(trans);
- iwl_pcie_rx_stop(trans);
-
- /* Power-down device's busmaster DMA clocks */
- if (!trans->mac_cfg->base->apmg_not_supported) {
- iwl_write_prph(trans, APMG_CLK_DIS_REG,
- APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
- }
- }
-
- /* Make sure (redundant) we've released our request to stay awake */
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
- else
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /* Stop the device, and put it in low power state */
- iwl_pcie_apm_stop(trans, false);
-
- /* re-take ownership to prevent other users from stealing the device */
- iwl_trans_pcie_sw_reset(trans, true);
-
- /*
- * Upon stop, the IVAR table gets erased, so msi-x won't
- * work. This causes a bug in RF-KILL flows, since the interrupt
- * that enables radio won't fire on the correct irq, and the
- * driver won't be able to handle the interrupt.
- * Configure the IVAR table again after reset.
- */
- iwl_pcie_conf_msix_hw(trans_pcie);
-
- /*
- * Upon stop, the APM issues an interrupt if HW RF kill is set.
- * This is a bug in certain versions of the hardware.
- * Certain devices also keep sending the HW RF kill interrupt all
- * the time unless it is ACKed, even when the interrupt should be
- * masked. Re-ACK all the interrupts here.
- */
- iwl_disable_interrupts(trans);
-
- /* clear all status bits */
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- clear_bit(STATUS_INT_ENABLED, &trans->status);
- clear_bit(STATUS_TPOWER_PMI, &trans->status);
-
- /*
- * Even if we stop the HW, we still want the RF kill
- * interrupt
- */
- iwl_enable_rfkill_int(trans);
-}
-
-void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (trans_pcie->msix_enabled) {
- int i;
-
- for (i = 0; i < trans_pcie->alloc_vecs; i++)
- synchronize_irq(trans_pcie->msix_entries[i].vector);
- } else {
- synchronize_irq(trans_pcie->pci_dev->irq);
- }
-}
-
-int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct iwl_fw *fw,
- const struct fw_img *img,
- bool run_in_rfkill)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill;
- int ret;
-
- /* This may fail if AMT took ownership of the device */
- if (iwl_pcie_prepare_card_hw(trans)) {
- IWL_WARN(trans, "Exit HW not ready\n");
- return -EIO;
- }
-
- iwl_enable_rfkill_int(trans);
-
- iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
-
- /*
- * We enabled the RF-Kill interrupt and the handler may very
- * well be running. Disable the interrupts to make sure no other
- * interrupt can be fired.
- */
- iwl_disable_interrupts(trans);
-
- /* Make sure it finished running */
- iwl_pcie_synchronize_irqs(trans);
-
- mutex_lock(&trans_pcie->mutex);
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
- if (hw_rfkill && !run_in_rfkill) {
- ret = -ERFKILL;
- goto out;
- }
-
- /* Someone called stop_device, don't try to start_fw */
- if (trans_pcie->is_down) {
- IWL_WARN(trans,
- "Can't start_fw since the HW hasn't been started\n");
- ret = -EIO;
- goto out;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
-
- ret = iwl_pcie_nic_init(trans);
- if (ret) {
- IWL_ERR(trans, "Unable to init nic\n");
- goto out;
- }
-
- /*
- * Now, we load the firmware and don't want to be interrupted, even
- * by the RF-Kill interrupt (hence mask all the interrupts besides the
- * FH_TX interrupt, which is needed to load the firmware). If the
- * RF-Kill switch is toggled, we will find out after having loaded
- * the firmware and return the proper value to the caller.
- */
- iwl_enable_fw_load_int(trans);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- /* Load the given image to the HW */
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
- ret = iwl_pcie_load_given_ucode_8000(trans, img);
- else
- ret = iwl_pcie_load_given_ucode(trans, img);
-
- /* re-check RF-Kill state since we may have missed the interrupt */
- hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
- if (hw_rfkill && !run_in_rfkill)
- ret = -ERFKILL;
-
-out:
- mutex_unlock(&trans_pcie->mutex);
- return ret;
-}
-
-void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
-{
- iwl_pcie_reset_ict(trans);
- iwl_pcie_tx_start(trans);
-}
-
-void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
- bool was_in_rfkill)
-{
- bool hw_rfkill;
-
- /*
- * Check again since the RF kill state may have changed while
- * all the interrupts were disabled, in this case we couldn't
- * receive the RF kill interrupt and update the state in the
- * op_mode.
- * Don't call the op_mode if the rfkill state hasn't changed.
- * This allows the op_mode to call stop_device from the rfkill
- * notification without endless recursion. Under very rare
- * circumstances, we might have a small recursion if the rfkill
- * state changed exactly now while we were called from stop_device.
- * This is very unlikely but can happen and is supported.
- */
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill) {
- set_bit(STATUS_RFKILL_HW, &trans->status);
- set_bit(STATUS_RFKILL_OPMODE, &trans->status);
- } else {
- clear_bit(STATUS_RFKILL_HW, &trans->status);
- clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
- }
- if (hw_rfkill != was_in_rfkill)
- iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
-}
-
-void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool was_in_rfkill;
-
- iwl_op_mode_time_point(trans->op_mode,
- IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
- NULL);
-
- mutex_lock(&trans_pcie->mutex);
- trans_pcie->opmode_down = true;
- was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
- _iwl_trans_pcie_stop_device(trans, false);
- iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
- mutex_unlock(&trans_pcie->mutex);
-}
-
-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
-{
- struct iwl_trans_pcie __maybe_unused *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
- lockdep_assert_held(&trans_pcie->mutex);
-
- IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
- state ? "disabled" : "enabled");
- if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
- !WARN_ON(trans->mac_cfg->gen2))
- _iwl_trans_pcie_stop_device(trans, from_irq);
-}
-
-static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
- bool test, bool reset)
-{
- iwl_disable_interrupts(trans);
-
- /*
- * in testing mode, the host stays awake and the
- * hardware won't be reset (not even partially)
- */
- if (test)
- return;
-
- iwl_pcie_disable_ict(trans);
-
- iwl_pcie_synchronize_irqs(trans);
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
- } else {
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
- }
-
- if (reset) {
- /*
- * reset TX queues -- some of their registers reset during S3
- * so if we don't reset everything here, the D3 image would try
- * to execute from invalid memory upon resume
- */
- iwl_trans_pcie_tx_reset(trans);
- }
-
- iwl_pcie_set_pwr(trans, true);
-}
-
-static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
-
- if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
- iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
- suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
- UREG_DOORBELL_TO_ISR6_RESUME);
- else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
- suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
- CSR_IPC_SLEEP_CONTROL_RESUME);
- else
- return 0;
-
- ret = wait_event_timeout(trans_pcie->sx_waitq,
- trans_pcie->sx_complete, 2 * HZ);
-
- /* Invalidate it toward next suspend or resume */
- trans_pcie->sx_complete = false;
-
- if (!ret) {
- IWL_ERR(trans, "Timeout %s D3\n",
- suspend ? "entering" : "exiting");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
-{
- int ret;
-
- if (!reset)
- /* Enable persistence mode to avoid reset */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PERSISTENCE);
-
- ret = iwl_pcie_d3_handshake(trans, true);
- if (ret)
- return ret;
-
- iwl_pcie_d3_complete_suspend(trans, test, reset);
-
- return 0;
-}
-
-int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 val;
- int ret;
-
- if (test) {
- iwl_enable_interrupts(trans);
- *status = IWL_D3_STATUS_ALIVE;
- ret = 0;
- goto out;
- }
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
- else
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- ret = iwl_finish_nic_init(trans);
- if (ret)
- return ret;
-
- /*
- * Reconfigure the IVAR table in case of MSI-X, or reset the ICT
- * table in MSI mode, since the HW reset erased it.
- * This also enables interrupts - none will fire, though, as the
- * device doesn't know we're waking it up; that only happens once
- * the opmode actually tells it, after this call.
- */
- iwl_pcie_conf_msix_hw(trans_pcie);
- if (!trans_pcie->msix_enabled)
- iwl_pcie_reset_ict(trans);
- iwl_enable_interrupts(trans);
-
- iwl_pcie_set_pwr(trans, false);
-
- if (!reset) {
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- } else {
- iwl_trans_pcie_tx_reset(trans);
-
- ret = iwl_pcie_rx_init(trans);
- if (ret) {
- IWL_ERR(trans,
- "Failed to resume the device (RX reset)\n");
- return ret;
- }
- }
-
- IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
- iwl_read_umac_prph(trans, WFPM_GP2));
-
- val = iwl_read32(trans, CSR_RESET);
- if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
- *status = IWL_D3_STATUS_RESET;
- else
- *status = IWL_D3_STATUS_ALIVE;
-
-out:
- if (*status == IWL_D3_STATUS_ALIVE)
- ret = iwl_pcie_d3_handshake(trans, false);
- else
- trans->state = IWL_TRANS_NO_FW;
-
- return ret;
-}
-
-static void
-iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
- struct iwl_trans *trans,
- const struct iwl_mac_cfg *mac_cfg,
- struct iwl_trans_info *info)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int max_irqs, num_irqs, i, ret;
- u16 pci_cmd;
- u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;
-
- if (!mac_cfg->mq_rx_supported)
- goto enable_msi;
-
- if (mac_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
- max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;
-
- max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
- for (i = 0; i < max_irqs; i++)
- trans_pcie->msix_entries[i].entry = i;
-
- num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
- MSIX_MIN_INTERRUPT_VECTORS,
- max_irqs);
- if (num_irqs < 0) {
- IWL_DEBUG_INFO(trans,
- "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
- num_irqs);
- goto enable_msi;
- }
- trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
-
- IWL_DEBUG_INFO(trans,
- "MSI-X enabled. %d interrupt vectors were allocated\n",
- num_irqs);
-
- /*
- * In case the OS provides fewer interrupts than requested, different
- * causes will share the same interrupt vector as follows:
- * One interrupt less: non-RX causes shared with FBQ.
- * Two interrupts less: non-RX causes shared with FBQ and RSS.
- * More than two interrupts: we will use fewer RSS queues.
- */
- if (num_irqs <= max_irqs - 2) {
- info->num_rxqs = num_irqs + 1;
- trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
- IWL_SHARED_IRQ_FIRST_RSS;
- } else if (num_irqs == max_irqs - 1) {
- info->num_rxqs = num_irqs;
- trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
- } else {
- info->num_rxqs = num_irqs - 1;
- }
-
- IWL_DEBUG_INFO(trans,
- "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
- info->num_rxqs, trans_pcie->shared_vec_mask);
-
- WARN_ON(info->num_rxqs > IWL_MAX_RX_HW_QUEUES);
-
- trans_pcie->alloc_vecs = num_irqs;
- trans_pcie->msix_enabled = true;
- return;
-
-enable_msi:
- info->num_rxqs = 1;
- ret = pci_enable_msi(pdev);
- if (ret) {
- dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
- /* enable rfkill interrupt: hw bug w/a */
- pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
- if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
- pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- }
- }
-}
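To see the three sharing cases described in the comment above play out, here is a small standalone model of that branch (illustrative C, not part of the patch; the flag values are assumed):

#include <stdio.h>

#define IWL_SHARED_IRQ_NON_RX    0x1 /* assumed flag values */
#define IWL_SHARED_IRQ_FIRST_RSS 0x2

static void plan(int max_irqs, int num_irqs)
{
	int num_rxqs, mask = 0;

	if (num_irqs <= max_irqs - 2) {
		num_rxqs = num_irqs + 1;
		mask = IWL_SHARED_IRQ_NON_RX | IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		num_rxqs = num_irqs;
		mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		num_rxqs = num_irqs - 1;
	}
	printf("max=%d got=%d -> rxqs=%d shared=0x%x\n",
	       max_irqs, num_irqs, num_rxqs, mask);
}

int main(void)
{
	plan(10, 10); /* full allocation: one vector dedicated to non-RX */
	plan(10, 9);  /* one short: non-RX shares the first vector */
	plan(10, 8);  /* two short: non-RX and the first RSS queue share it */
	return 0;
}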
-
-static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans,
- struct iwl_trans_info *info)
-{
-#if defined(CONFIG_SMP)
- int iter_rx_q, i, ret, cpu, offset;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
- iter_rx_q = info->num_rxqs - 1 + i;
- offset = 1 + i;
- for (; i < iter_rx_q ; i++) {
- /*
- * Start the search just before the target position
- * (i.e. cpumask_next() returns a CPU > i - offset).
- */
- cpu = cpumask_next(i - offset, cpu_online_mask);
- cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
- ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
- &trans_pcie->affinity_mask[i]);
- if (ret)
- IWL_ERR(trans_pcie->trans,
- "Failed to set affinity mask for IRQ %d\n",
- trans_pcie->msix_entries[i].vector);
- }
-#endif
-}
-
-static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
- struct iwl_trans_pcie *trans_pcie,
- struct iwl_trans_info *info)
-{
- int i;
-
- for (i = 0; i < trans_pcie->alloc_vecs; i++) {
- int ret;
- struct msix_entry *msix_entry;
- const char *qname = queue_name(&pdev->dev, trans_pcie, i);
-
- if (!qname)
- return -ENOMEM;
-
- msix_entry = &trans_pcie->msix_entries[i];
- ret = devm_request_threaded_irq(&pdev->dev,
- msix_entry->vector,
- iwl_pcie_msix_isr,
- (i == trans_pcie->def_irq) ?
- iwl_pcie_irq_msix_handler :
- iwl_pcie_irq_rx_msix_handler,
- IRQF_SHARED,
- qname,
- msix_entry);
- if (ret) {
- IWL_ERR(trans_pcie->trans,
- "Error allocating IRQ %d\n", i);
-
- return ret;
- }
- }
- iwl_pcie_irq_set_affinity(trans_pcie->trans, info);
-
- return 0;
-}
-
-static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
-{
- u32 hpm, wprot;
-
- switch (trans->mac_cfg->device_family) {
- case IWL_DEVICE_FAMILY_9000:
- wprot = PREG_PRPH_WPROT_9000;
- break;
- case IWL_DEVICE_FAMILY_22000:
- wprot = PREG_PRPH_WPROT_22000;
- break;
- default:
- return 0;
- }
-
- hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
- if (!iwl_trans_is_hw_error_value(hpm) && (hpm & PERSISTENCE_BIT)) {
- u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);
-
- if (wprot_val & PREG_WFPM_ACCESS) {
- IWL_ERR(trans,
- "Error, can not clear persistence bit\n");
- return -EPERM;
- }
- iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
- hpm & ~PERSISTENCE_BIT);
- }
-
- return 0;
-}
-
-static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
-{
- int ret;
-
- ret = iwl_finish_nic_init(trans);
- if (ret < 0)
- return ret;
-
- iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
- HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
- udelay(20);
- iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
- HPM_HIPM_GEN_CFG_CR_PG_EN |
- HPM_HIPM_GEN_CFG_CR_SLP_EN);
- udelay(20);
- iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
- HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
-
- return iwl_trans_pcie_sw_reset(trans, true);
-}
-
-static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int err;
-
- lockdep_assert_held(&trans_pcie->mutex);
-
- err = iwl_pcie_prepare_card_hw(trans);
- if (err) {
- IWL_ERR(trans, "Error while preparing HW: %d\n", err);
- return err;
- }
-
- err = iwl_trans_pcie_clear_persistence_bit(trans);
- if (err)
- return err;
-
- err = iwl_trans_pcie_sw_reset(trans, true);
- if (err)
- return err;
-
- if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
- trans->mac_cfg->integrated) {
- err = iwl_pcie_gen2_force_power_gating(trans);
- if (err)
- return err;
- }
-
- err = iwl_pcie_apm_init(trans);
- if (err)
- return err;
-
- iwl_pcie_init_msix(trans_pcie);
-
- /* From now on, the op_mode will be kept updated about RF kill state */
- iwl_enable_rfkill_int(trans);
-
- trans_pcie->opmode_down = false;
-
- /* Set is_down to false here so that... */
- trans_pcie->is_down = false;
-
- /* ...rfkill can call stop_device and set it false if needed */
- iwl_pcie_check_hw_rf_kill(trans);
-
- return 0;
-}
-
-int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
-
- mutex_lock(&trans_pcie->mutex);
- ret = _iwl_trans_pcie_start_hw(trans);
- mutex_unlock(&trans_pcie->mutex);
-
- return ret;
-}
-
-void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- mutex_lock(&trans_pcie->mutex);
-
- /* disable interrupts - don't enable HW RF kill interrupt */
- iwl_disable_interrupts(trans);
-
- iwl_pcie_apm_stop(trans, true);
-
- iwl_disable_interrupts(trans);
-
- iwl_pcie_disable_ict(trans);
-
- mutex_unlock(&trans_pcie->mutex);
-
- iwl_pcie_synchronize_irqs(trans);
-}
-
-void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
-{
- writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
-}
-
-void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
-{
- writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
-}
-
-u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
-{
- return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
-}
-
-static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
-{
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- return 0x00FFFFFF;
- else
- return 0x000FFFFF;
-}
-
-u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
-{
- u32 mask = iwl_trans_pcie_prph_msk(trans);
-
- iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
- ((reg & mask) | (3 << 24)));
- return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
-}
-
-void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
-{
- u32 mask = iwl_trans_pcie_prph_msk(trans);
-
- iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
- ((addr & mask) | (3 << 24)));
- iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
-}
-
-void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- /* free all first - we might be reconfigured for a different size */
- iwl_pcie_free_rbs_pool(trans);
-
- trans_pcie->rx_page_order =
- iwl_trans_get_rb_size_order(trans->conf.rx_buf_size);
- trans_pcie->rx_buf_bytes =
- iwl_trans_get_rb_size(trans->conf.rx_buf_size);
-}
-
-void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
- struct device *dev)
-{
- u8 i;
- struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
-
- /* free DRAM payloads */
- for (i = 0; i < dram_regions->n_regions; i++) {
- dma_free_coherent(dev, dram_regions->drams[i].size,
- dram_regions->drams[i].block,
- dram_regions->drams[i].physical);
- }
- dram_regions->n_regions = 0;
-
- /* free DRAM addresses array */
- if (desc_dram->block) {
- dma_free_coherent(dev, desc_dram->size,
- desc_dram->block,
- desc_dram->physical);
- }
- memset(desc_dram, 0, sizeof(*desc_dram));
-}
-
-static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_pcie_free_dma_ptr(trans, &trans_pcie->invalid_tx_cmd);
-}
-
-static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_cmd_header_wide bad_cmd = {
- .cmd = INVALID_WR_PTR_CMD,
- .group_id = DEBUG_GROUP,
- .sequence = cpu_to_le16(0xffff),
- .length = cpu_to_le16(0),
- .version = 0,
- };
- int ret;
-
- ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->invalid_tx_cmd,
- sizeof(bad_cmd));
- if (ret)
- return ret;
- memcpy(trans_pcie->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
- return 0;
-}
-
-void iwl_trans_pcie_free(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i;
-
- iwl_pcie_synchronize_irqs(trans);
-
- if (trans->mac_cfg->gen2)
- iwl_txq_gen2_tx_free(trans);
- else
- iwl_pcie_tx_free(trans);
- iwl_pcie_rx_free(trans);
-
- if (trans_pcie->rba.alloc_wq) {
- destroy_workqueue(trans_pcie->rba.alloc_wq);
- trans_pcie->rba.alloc_wq = NULL;
- }
-
- if (trans_pcie->msix_enabled) {
- for (i = 0; i < trans_pcie->alloc_vecs; i++) {
- irq_set_affinity_hint(
- trans_pcie->msix_entries[i].vector,
- NULL);
- }
-
- trans_pcie->msix_enabled = false;
- } else {
- iwl_pcie_free_ict(trans);
- }
-
- free_netdev(trans_pcie->napi_dev);
-
- iwl_pcie_free_invalid_tx_cmd(trans);
-
- iwl_pcie_free_fw_monitor(trans);
-
- iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
- trans->dev);
- iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
- trans->dev);
-
- mutex_destroy(&trans_pcie->mutex);
-
- if (trans_pcie->txqs.tso_hdr_page) {
- for_each_possible_cpu(i) {
- struct iwl_tso_hdr_page *p =
- per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i);
-
- if (p && p->page)
- __free_page(p->page);
- }
-
- free_percpu(trans_pcie->txqs.tso_hdr_page);
- }
-
- iwl_trans_free(trans);
-}
-
-static union acpi_object *
-iwl_trans_pcie_call_prod_reset_dsm(struct pci_dev *pdev, u16 cmd, u16 value)
-{
-#ifdef CONFIG_ACPI
- struct iwl_dsm_internal_product_reset_cmd pldr_arg = {
- .cmd = cmd,
- .value = value,
- };
- union acpi_object arg = {
- .buffer.type = ACPI_TYPE_BUFFER,
- .buffer.length = sizeof(pldr_arg),
- .buffer.pointer = (void *)&pldr_arg,
- };
- static const guid_t dsm_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
- 0x81, 0x4F, 0x75, 0xE4,
- 0xDD, 0x26, 0xB5, 0xFD);
-
- if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &dsm_guid, ACPI_DSM_REV,
- DSM_INTERNAL_FUNC_PRODUCT_RESET))
- return ERR_PTR(-ENODEV);
-
- return iwl_acpi_get_dsm_object(&pdev->dev, ACPI_DSM_REV,
- DSM_INTERNAL_FUNC_PRODUCT_RESET,
- &arg, &dsm_guid);
-#else
- return ERR_PTR(-EOPNOTSUPP);
-#endif
-}
-
-void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev)
-{
- union acpi_object *res;
-
- res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
- DSM_INTERNAL_PLDR_CMD_GET_MODE,
- 0);
- if (IS_ERR(res))
- return;
-
- if (res->type != ACPI_TYPE_INTEGER)
- IWL_ERR_DEV(&pdev->dev,
- "unexpected return type from product reset DSM\n");
- else
- IWL_DEBUG_DEV_POWER(&pdev->dev,
- "product reset mode is 0x%llx\n",
- res->integer.value);
-
- ACPI_FREE(res);
-}
-
-static void iwl_trans_pcie_set_product_reset(struct pci_dev *pdev, bool enable,
- bool integrated)
-{
- union acpi_object *res;
- u16 mode = enable ? DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET : 0;
-
- if (!integrated)
- mode |= DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR |
- DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON;
-
- res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
- DSM_INTERNAL_PLDR_CMD_SET_MODE,
- mode);
- if (IS_ERR(res)) {
- if (enable)
- IWL_ERR_DEV(&pdev->dev,
- "ACPI _DSM not available (%d), cannot do product reset\n",
- (int)PTR_ERR(res));
- return;
- }
-
- ACPI_FREE(res);
- IWL_DEBUG_DEV_POWER(&pdev->dev, "%sabled product reset via DSM\n",
- enable ? "En" : "Dis");
- iwl_trans_pcie_check_product_reset_mode(pdev);
-}
-
-void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev)
-{
- union acpi_object *res;
-
- res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
- DSM_INTERNAL_PLDR_CMD_GET_STATUS,
- 0);
- if (IS_ERR(res))
- return;
-
- if (res->type != ACPI_TYPE_INTEGER)
- IWL_ERR_DEV(&pdev->dev,
- "unexpected return type from product reset DSM\n");
- else
- IWL_DEBUG_DEV_POWER(&pdev->dev,
- "product reset status is 0x%llx\n",
- res->integer.value);
-
- ACPI_FREE(res);
-}
-
-static void iwl_trans_pcie_call_reset(struct pci_dev *pdev)
-{
-#ifdef CONFIG_ACPI
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *p, *ref;
- acpi_status status;
- int ret = -EINVAL;
-
- status = acpi_evaluate_object(ACPI_HANDLE(&pdev->dev),
- "_PRR", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_DEV_POWER(&pdev->dev, "No _PRR method found\n");
- goto out;
- }
- p = buffer.pointer;
-
- if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 1) {
- pci_err(pdev, "Bad _PRR return type\n");
- goto out;
- }
-
- ref = &p->package.elements[0];
- if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) {
- pci_err(pdev, "_PRR wasn't a reference\n");
- goto out;
- }
-
- status = acpi_evaluate_object(ref->reference.handle,
- "_RST", NULL, NULL);
- if (ACPI_FAILURE(status)) {
- pci_err(pdev,
- "Failed to call _RST on object returned by _PRR (%d)\n",
- status);
- goto out;
- }
- ret = 0;
-out:
- kfree(buffer.pointer);
- if (!ret) {
- IWL_DEBUG_DEV_POWER(&pdev->dev, "called _RST on _PRR object\n");
- return;
- }
- IWL_DEBUG_DEV_POWER(&pdev->dev,
- "No BIOS support, using pci_reset_function()\n");
-#endif
- pci_reset_function(pdev);
-}
-
-struct iwl_trans_pcie_removal {
- struct pci_dev *pdev;
- struct work_struct work;
- enum iwl_reset_mode mode;
- bool integrated;
-};
-
-static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
-{
- struct iwl_trans_pcie_removal *removal =
- container_of(wk, struct iwl_trans_pcie_removal, work);
- struct pci_dev *pdev = removal->pdev;
- static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
- struct pci_bus *bus;
-
- pci_lock_rescan_remove();
-
- bus = pdev->bus;
- /* in this case, something else already removed the device */
- if (!bus)
- goto out;
-
- kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
-
- if (removal->mode == IWL_RESET_MODE_PROD_RESET) {
- struct pci_dev *bt = NULL;
-
- if (!removal->integrated) {
- /* discrete devices have WiFi/BT at function 0/1 */
- int slot = PCI_SLOT(pdev->devfn);
- int func = PCI_FUNC(pdev->devfn);
-
- if (func == 0)
- bt = pci_get_slot(bus, PCI_DEVFN(slot, 1));
- else
- pci_info(pdev, "Unexpected function %d\n",
- func);
- } else {
- /* on integrated we have to look up by ID (same bus) */
- static const struct pci_device_id bt_device_ids[] = {
-#define BT_DEV(_id) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, _id) }
- BT_DEV(0xA876), /* LNL */
- BT_DEV(0xE476), /* PTL-P */
- BT_DEV(0xE376), /* PTL-H */
- BT_DEV(0xD346), /* NVL-H */
- BT_DEV(0x6E74), /* NVL-S */
- BT_DEV(0x4D76), /* WCL */
- BT_DEV(0xD246), /* RZL-H */
- BT_DEV(0x6C46), /* RZL-M */
- {}
- };
- struct pci_dev *tmp = NULL;
-
- for_each_pci_dev(tmp) {
- if (tmp->bus != bus)
- continue;
-
- if (pci_match_id(bt_device_ids, tmp)) {
- bt = tmp;
- break;
- }
- }
- }
-
- if (bt) {
- pci_info(bt, "Removal by WiFi due to product reset\n");
- pci_stop_and_remove_bus_device(bt);
- pci_dev_put(bt);
- }
- }
-
- iwl_trans_pcie_set_product_reset(pdev,
- removal->mode ==
- IWL_RESET_MODE_PROD_RESET,
- removal->integrated);
- if (removal->mode >= IWL_RESET_MODE_FUNC_RESET)
- iwl_trans_pcie_call_reset(pdev);
-
- pci_stop_and_remove_bus_device(pdev);
- pci_dev_put(pdev);
-
- if (removal->mode >= IWL_RESET_MODE_RESCAN) {
- if (bus->parent)
- bus = bus->parent;
- pci_rescan_bus(bus);
- }
-
-out:
- pci_unlock_rescan_remove();
-
- kfree(removal);
- module_put(THIS_MODULE);
-}
-
-void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_trans_pcie_removal *removal;
- char _msg = 0, *msg = &_msg;
-
- if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY ||
- mode == IWL_RESET_MODE_BACKOFF))
- return;
-
- if (test_bit(STATUS_TRANS_DEAD, &trans->status))
- return;
-
- if (trans_pcie->me_present && mode == IWL_RESET_MODE_PROD_RESET) {
- mode = IWL_RESET_MODE_FUNC_RESET;
- if (trans_pcie->me_present < 0)
- msg = " instead of product reset as ME may be present";
- else
- msg = " instead of product reset as ME is present";
- }
-
- IWL_INFO(trans, "scheduling reset (mode=%d%s)\n", mode, msg);
-
- iwl_pcie_dump_csr(trans);
-
- /*
- * Take a module reference to avoid doing this while
- * the module is unloading, and to avoid scheduling a
- * work item whose code is being removed.
- */
- if (!try_module_get(THIS_MODULE)) {
- IWL_ERR(trans,
- "Module is being unloaded - abort\n");
- return;
- }
-
- removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
- if (!removal) {
- module_put(THIS_MODULE);
- return;
- }
- /*
- * we don't need to clear this flag, because
- * the trans will be freed and reallocated.
- */
- set_bit(STATUS_TRANS_DEAD, &trans->status);
-
- removal->pdev = to_pci_dev(trans->dev);
- removal->mode = mode;
- removal->integrated = trans->mac_cfg->integrated;
- INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
- pci_dev_get(removal->pdev);
- schedule_work(&removal->work);
-}
-EXPORT_SYMBOL(iwl_trans_pcie_reset);
-
-/*
- * This version doesn't disable BHs but rather assumes they're
- * already disabled.
- */
-bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
-{
- int ret;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;
- u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
- CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
- u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;
-
- if (test_bit(STATUS_TRANS_DEAD, &trans->status))
- return false;
-
- spin_lock(&trans_pcie->reg_lock);
-
- if (trans_pcie->cmd_hold_nic_awake)
- goto out;
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
- mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
- poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
- }
-
- /* this bit wakes up the NIC */
- __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
- udelay(2);
-
- /*
- * These bits say the device is running, and should keep running for
- * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
- * but they do not indicate that embedded SRAM is restored yet;
- * HW with volatile SRAM must save/restore contents to/from
- * host DRAM when sleeping/waking for power-saving.
- * Each direction takes approximately 1/4 millisecond; with this
- * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
- * series of register accesses are expected (e.g. reading Event Log),
- * to keep device from sleeping.
- *
- * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
- * SRAM is okay/restored. We don't check that here because this call
- * is just for hardware register access; but GP1 MAC_SLEEP
- * check is a good idea before accessing the SRAM of HW with
- * volatile SRAM (e.g. reading Event Log).
- *
- * 5000 series and later (including 1000 series) have non-volatile SRAM,
- * and do not save/restore SRAM when power cycling.
- */
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000);
- if (unlikely(ret < 0)) {
- u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
-
- if (silent) {
- spin_unlock(&trans_pcie->reg_lock);
- return false;
- }
-
- WARN_ONCE(1,
- "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
- cntrl);
-
- iwl_trans_pcie_dump_regs(trans);
-
- if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)
- iwl_trans_pcie_reset(trans,
- IWL_RESET_MODE_REMOVE_ONLY);
- else
- iwl_write32(trans, CSR_RESET,
- CSR_RESET_REG_FLAG_FORCE_NMI);
-
- spin_unlock(&trans_pcie->reg_lock);
- return false;
- }
-
-out:
- /*
- * Fool sparse by pretending we release the lock - sparse will
- * track nic_access anyway.
- */
- __release(&trans_pcie->reg_lock);
- return true;
-}
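The long comment above boils down to a poll-with-timeout on CSR_GP_CNTRL. A toy model of the iwl_poll_bit() contract as used here (standalone C, illustrative only; the real helper re-reads the hardware register with a microsecond budget and returns a negative value on timeout):

#include <stdio.h>
#include <stdint.h>

static int poll_bit(const volatile uint32_t *reg, uint32_t bits,
		    uint32_t mask, int budget_us)
{
	for (int t = 0; t < budget_us; t++) {
		if ((*reg & mask) == (bits & mask))
			return t; /* device woke up in time */
		/* the real code delays ~1us per iteration here */
	}
	return -1; /* corresponds to the ret < 0 timeout path above */
}

int main(void)
{
	uint32_t gp_cntrl = 0x1; /* pretend the MAC is already awake */

	printf("%d\n", poll_bit(&gp_cntrl, 0x1, 0x1, 15000));
	return 0;
}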
-
-bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
-{
- bool ret;
-
- local_bh_disable();
- ret = __iwl_trans_pcie_grab_nic_access(trans, false);
- if (ret) {
- /* keep BHs disabled until iwl_trans_pcie_release_nic_access */
- return ret;
- }
- local_bh_enable();
- return false;
-}
-
-void __releases(nic_access_nobh)
-iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- lockdep_assert_held(&trans_pcie->reg_lock);
-
- /*
- * Fool sparse by pretending we acquire the lock - sparse will
- * track nic_access anyway.
- */
- __acquire(&trans_pcie->reg_lock);
-
- if (trans_pcie->cmd_hold_nic_awake)
- goto out;
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
- else
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- /*
- * Above we read the CSR_GP_CNTRL register, which will flush
- * any previous writes, but we need the write that clears the
- * MAC_ACCESS_REQ bit to be performed before any other writes
- * scheduled on different CPUs (after we drop reg_lock).
- */
-out:
- __release(nic_access_nobh);
- spin_unlock_bh(&trans_pcie->reg_lock);
-}
-
-int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
-{
-#define IWL_MAX_HW_ERRS 5
- unsigned int num_consec_hw_errors = 0;
- int offs = 0;
- u32 *vals = buf;
-
- while (offs < dwords) {
- /* limit the time we spin here under lock to 1/2s */
- unsigned long end = jiffies + HZ / 2;
- bool resched = false;
-
- if (iwl_trans_grab_nic_access(trans)) {
- iwl_write32(trans, HBUS_TARG_MEM_RADDR,
- addr + 4 * offs);
-
- while (offs < dwords) {
- vals[offs] = iwl_read32(trans,
- HBUS_TARG_MEM_RDAT);
-
- if (iwl_trans_is_hw_error_value(vals[offs]))
- num_consec_hw_errors++;
- else
- num_consec_hw_errors = 0;
-
- if (num_consec_hw_errors >= IWL_MAX_HW_ERRS) {
- iwl_trans_release_nic_access(trans);
- return -EIO;
- }
-
- offs++;
-
- if (time_after(jiffies, end)) {
- resched = true;
- break;
- }
- }
- iwl_trans_release_nic_access(trans);
-
- if (resched)
- cond_resched();
- } else {
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
-int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords)
-{
- int offs, ret = 0;
- const u32 *vals = buf;
-
- if (iwl_trans_grab_nic_access(trans)) {
- iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
- for (offs = 0; offs < dwords; offs++)
- iwl_write32(trans, HBUS_TARG_MEM_WDAT,
- vals ? vals[offs] : 0);
- iwl_trans_release_nic_access(trans);
- } else {
- ret = -EBUSY;
- }
- return ret;
-}
-
-int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
- u32 *val)
-{
- return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
- ofs, val);
-}
-
-#define IWL_FLUSH_WAIT_MS 2000
-
-int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
- struct iwl_trans_rxq_dma_data *data)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (queue >= trans->info.num_rxqs || !trans_pcie->rxq)
- return -EINVAL;
-
- data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
- data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
- data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
- data->fr_bd_wid = 0;
-
- return 0;
-}
-
-int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq;
- unsigned long now = jiffies;
- bool overflow_tx;
- u8 wr_ptr;
-
- /* Make sure the NIC is still alive in the bus */
- if (test_bit(STATUS_TRANS_DEAD, &trans->status))
- return -ENODEV;
-
- if (!test_bit(txq_idx, trans_pcie->txqs.queue_used))
- return -EINVAL;
-
- IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
- txq = trans_pcie->txqs.txq[txq_idx];
-
- spin_lock_bh(&txq->lock);
- overflow_tx = txq->overflow_tx ||
- !skb_queue_empty(&txq->overflow_q);
- spin_unlock_bh(&txq->lock);
-
- wr_ptr = READ_ONCE(txq->write_ptr);
-
- while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
- overflow_tx) &&
- !time_after(jiffies,
- now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
- u8 write_ptr = READ_ONCE(txq->write_ptr);
-
- /*
- * If the write pointer moved during the wait, warn only
- * if the TX came from the op mode. If the TX came from
- * the trans layer (overflow TX), don't warn.
- */
- if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
- "WR pointer moved while flushing %d -> %d\n",
- wr_ptr, write_ptr))
- return -ETIMEDOUT;
- wr_ptr = write_ptr;
-
- usleep_range(1000, 2000);
-
- spin_lock_bh(&txq->lock);
- overflow_tx = txq->overflow_tx ||
- !skb_queue_empty(&txq->overflow_q);
- spin_unlock_bh(&txq->lock);
- }
-
- if (txq->read_ptr != txq->write_ptr) {
- IWL_ERR(trans,
- "fail to flush all tx fifo queues Q %d\n", txq_idx);
- iwl_txq_log_scd_error(trans, txq);
- return -ETIMEDOUT;
- }
-
- IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
-
- return 0;
-}
-
-int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int cnt;
- int ret = 0;
-
- /* waiting for all the tx frames to complete might take a while */
- for (cnt = 0;
- cnt < trans->mac_cfg->base->num_of_queues;
- cnt++) {
-
- if (cnt == trans->conf.cmd_queue)
- continue;
- if (!test_bit(cnt, trans_pcie->txqs.queue_used))
- continue;
- if (!(BIT(cnt) & txq_bm))
- continue;
-
- ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
- u32 mask, u32 value)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_bh(&trans_pcie->reg_lock);
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
- spin_unlock_bh(&trans_pcie->reg_lock);
-}
-
-static const char *get_csr_string(int cmd)
-{
-#define IWL_CMD(x) case x: return #x
- switch (cmd) {
- IWL_CMD(CSR_HW_IF_CONFIG_REG);
- IWL_CMD(CSR_INT_COALESCING);
- IWL_CMD(CSR_INT);
- IWL_CMD(CSR_INT_MASK);
- IWL_CMD(CSR_FH_INT_STATUS);
- IWL_CMD(CSR_GPIO_IN);
- IWL_CMD(CSR_RESET);
- IWL_CMD(CSR_GP_CNTRL);
- IWL_CMD(CSR_HW_REV);
- IWL_CMD(CSR_EEPROM_REG);
- IWL_CMD(CSR_EEPROM_GP);
- IWL_CMD(CSR_OTP_GP_REG);
- IWL_CMD(CSR_GIO_REG);
- IWL_CMD(CSR_GP_UCODE_REG);
- IWL_CMD(CSR_GP_DRIVER_REG);
- IWL_CMD(CSR_UCODE_DRV_GP1);
- IWL_CMD(CSR_UCODE_DRV_GP2);
- IWL_CMD(CSR_LED_REG);
- IWL_CMD(CSR_DRAM_INT_TBL_REG);
- IWL_CMD(CSR_GIO_CHICKEN_BITS);
- IWL_CMD(CSR_ANA_PLL_CFG);
- IWL_CMD(CSR_HW_REV_WA_REG);
- IWL_CMD(CSR_MONITOR_STATUS_REG);
- IWL_CMD(CSR_DBG_HPET_MEM_REG);
- default:
- return "UNKNOWN";
- }
-#undef IWL_CMD
-}
-
-void iwl_pcie_dump_csr(struct iwl_trans *trans)
-{
- int i;
- static const u32 csr_tbl[] = {
- CSR_HW_IF_CONFIG_REG,
- CSR_INT_COALESCING,
- CSR_INT,
- CSR_INT_MASK,
- CSR_FH_INT_STATUS,
- CSR_GPIO_IN,
- CSR_RESET,
- CSR_GP_CNTRL,
- CSR_HW_REV,
- CSR_EEPROM_REG,
- CSR_EEPROM_GP,
- CSR_OTP_GP_REG,
- CSR_GIO_REG,
- CSR_GP_UCODE_REG,
- CSR_GP_DRIVER_REG,
- CSR_UCODE_DRV_GP1,
- CSR_UCODE_DRV_GP2,
- CSR_LED_REG,
- CSR_DRAM_INT_TBL_REG,
- CSR_GIO_CHICKEN_BITS,
- CSR_ANA_PLL_CFG,
- CSR_MONITOR_STATUS_REG,
- CSR_HW_REV_WA_REG,
- CSR_DBG_HPET_MEM_REG
- };
- IWL_ERR(trans, "CSR values:\n");
- IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
- "CSR_INT_PERIODIC_REG)\n");
- for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
- IWL_ERR(trans, " %25s: 0X%08x\n",
- get_csr_string(csr_tbl[i]),
- iwl_read32(trans, csr_tbl[i]));
- }
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-/* create and remove of files */
-#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- debugfs_create_file(#name, mode, parent, trans, \
- &iwl_dbgfs_##name##_ops); \
-} while (0)
-
-/* file operation */
-#define DEBUGFS_READ_FILE_OPS(name) \
-static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .read = iwl_dbgfs_##name##_read, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_WRITE_FILE_OPS(name) \
-static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .write = iwl_dbgfs_##name##_write, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
-static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .write = iwl_dbgfs_##name##_write, \
- .read = iwl_dbgfs_##name##_read, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-};
-
-struct iwl_dbgfs_tx_queue_priv {
- struct iwl_trans *trans;
-};
-
-struct iwl_dbgfs_tx_queue_state {
- loff_t pos;
-};
-
-static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
-{
- struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
- struct iwl_dbgfs_tx_queue_state *state;
-
- if (*pos >= priv->trans->mac_cfg->base->num_of_queues)
- return NULL;
-
- state = kmalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
- return NULL;
- state->pos = *pos;
- return state;
-}
-
-static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
- void *v, loff_t *pos)
-{
- struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
- struct iwl_dbgfs_tx_queue_state *state = v;
-
- *pos = ++state->pos;
-
- if (*pos >= priv->trans->mac_cfg->base->num_of_queues)
- return NULL;
-
- return state;
-}
-
-static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
-{
- kfree(v);
-}
-
-static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
-{
- struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
- struct iwl_dbgfs_tx_queue_state *state = v;
- struct iwl_trans *trans = priv->trans;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos];
-
- seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
- (unsigned int)state->pos,
- !!test_bit(state->pos, trans_pcie->txqs.queue_used),
- !!test_bit(state->pos, trans_pcie->txqs.queue_stopped));
- if (txq)
- seq_printf(seq,
- "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
- txq->read_ptr, txq->write_ptr,
- txq->need_update, txq->frozen,
- txq->n_window, txq->ampdu);
- else
- seq_puts(seq, "(unallocated)");
-
- if (state->pos == trans->conf.cmd_queue)
- seq_puts(seq, " (HCMD)");
- seq_puts(seq, "\n");
-
- return 0;
-}
-
-static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
- .start = iwl_dbgfs_tx_queue_seq_start,
- .next = iwl_dbgfs_tx_queue_seq_next,
- .stop = iwl_dbgfs_tx_queue_seq_stop,
- .show = iwl_dbgfs_tx_queue_seq_show,
-};
-
-static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
-{
- struct iwl_dbgfs_tx_queue_priv *priv;
-
- priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
- sizeof(*priv));
-
- if (!priv)
- return -ENOMEM;
-
- priv->trans = inode->i_private;
- return 0;
-}
-
-static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- char *buf;
- int pos = 0, i, ret;
- size_t bufsz;
-
- bufsz = sizeof(char) * 121 * trans->info.num_rxqs;
-
- if (!trans_pcie->rxq)
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (i = 0; i < trans->info.num_rxqs && pos < bufsz; i++) {
- struct iwl_rxq *rxq = &trans_pcie->rxq[i];
-
- spin_lock_bh(&rxq->lock);
-
- pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
- i);
- pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
- rxq->read);
- pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
- rxq->write);
- pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
- rxq->write_actual);
- pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
- rxq->need_update);
- pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
- rxq->free_count);
- if (rxq->rb_stts) {
- u32 r = iwl_get_closed_rb_stts(trans, rxq);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tclosed_rb_num: %u\n", r);
- } else {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tclosed_rb_num: Not Allocated\n");
- }
- spin_unlock_bh(&rxq->lock);
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
-
- return ret;
-}
-
-static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
-
- int pos = 0;
- char *buf;
- int bufsz = 24 * 64; /* 24 items * 64 char per item */
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Interrupt Statistics Report:\n");
-
- pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
- isr_stats->hw);
- pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
- isr_stats->sw);
- if (isr_stats->sw || isr_stats->hw) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tLast Restarting Code: 0x%X\n",
- isr_stats->err_code);
- }
-#ifdef CONFIG_IWLWIFI_DEBUG
- pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
- isr_stats->sch);
- pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
- isr_stats->alive);
-#endif
- pos += scnprintf(buf + pos, bufsz - pos,
- "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
- isr_stats->ctkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
- isr_stats->wakeup);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Rx command responses:\t\t %u\n", isr_stats->rx);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
- isr_stats->tx);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
- isr_stats->unhandled);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
- u32 reset_flag;
- int ret;
-
- ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
- if (ret)
- return ret;
- if (reset_flag == 0)
- memset(isr_stats, 0, sizeof(*isr_stats));
-
- return count;
-}
-
-static ssize_t iwl_dbgfs_csr_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
-
- iwl_pcie_dump_csr(trans);
-
- return count;
-}
-
-static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
- char *buf = NULL;
- ssize_t ret;
-
- ret = iwl_dump_fh(trans, &buf);
- if (ret < 0)
- return ret;
- if (!buf)
- return -EINVAL;
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- char buf[100];
- int pos;
-
- pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
- trans_pcie->debug_rfkill,
- !(iwl_read32(trans, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool new_value;
- int ret;
-
- ret = kstrtobool_from_user(user_buf, count, &new_value);
- if (ret)
- return ret;
- if (new_value == trans_pcie->debug_rfkill)
- return count;
- IWL_WARN(trans, "changing debug rfkill %d->%d\n",
- trans_pcie->debug_rfkill, new_value);
- trans_pcie->debug_rfkill = new_value;
- iwl_pcie_handle_rfkill_irq(trans, false);
-
- return count;
-}
-
-static int iwl_dbgfs_monitor_data_open(struct inode *inode,
- struct file *file)
-{
- struct iwl_trans *trans = inode->i_private;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (!trans->dbg.dest_tlv ||
- trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
- IWL_ERR(trans, "Debug destination is not set to DRAM\n");
- return -ENOENT;
- }
-
- if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
- return -EBUSY;
-
- trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
- return simple_open(inode, file);
-}
-
-static int iwl_dbgfs_monitor_data_release(struct inode *inode,
- struct file *file)
-{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
-
- if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
- trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
- return 0;
-}
-
-static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
- void *buf, ssize_t *size,
- ssize_t *bytes_copied)
-{
- ssize_t buf_size_left = count - *bytes_copied;
-
- buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
- if (*size > buf_size_left)
- *size = buf_size_left;
-
- *size -= copy_to_user(user_buf, buf, *size);
- *bytes_copied += *size;
-
- if (buf_size_left == *size)
- return true;
- return false;
-}
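
A worked pass through the rounding above, with illustrative values: the copy size is clamped to the space left in the user buffer, rounded down to a whole number of 32-bit words, and the helper reports whether the user buffer is now full.

/*
 * Illustrative values (not taken from the driver):
 *   count = 100, *bytes_copied = 0, incoming *size = 120
 *     buf_size_left = 100 (already a multiple of 4), *size clamped
 *     to 100, *bytes_copied becomes 100, returns true (buffer full)
 *   count = 10, *bytes_copied = 0, incoming *size = 4
 *     buf_size_left = 10 rounded down to 8, *size stays 4,
 *     returns false (room remains for another chunk)
 */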
-
-static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
- struct cont_rec *data = &trans_pcie->fw_mon_data;
- u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
- ssize_t size, bytes_copied = 0;
- bool b_full;
-
- if (trans->dbg.dest_tlv) {
- write_ptr_addr =
- le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
- wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
- } else {
- write_ptr_addr = MON_BUFF_WRPTR;
- wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
- }
-
- if (unlikely(!trans->dbg.rec_on))
- return 0;
-
- mutex_lock(&data->mutex);
- if (data->state ==
- IWL_FW_MON_DBGFS_STATE_DISABLED) {
- mutex_unlock(&data->mutex);
- return 0;
- }
-
-	/* write_ptr position in bytes rather than DW */
- write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
- wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
-
- if (data->prev_wrap_cnt == wrap_cnt) {
- size = write_ptr - data->prev_wr_ptr;
- curr_buf = cpu_addr + data->prev_wr_ptr;
- b_full = iwl_write_to_user_buf(user_buf, count,
- curr_buf, &size,
- &bytes_copied);
- data->prev_wr_ptr += size;
-
- } else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
- write_ptr < data->prev_wr_ptr) {
- size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
- curr_buf = cpu_addr + data->prev_wr_ptr;
- b_full = iwl_write_to_user_buf(user_buf, count,
- curr_buf, &size,
- &bytes_copied);
- data->prev_wr_ptr += size;
-
- if (!b_full) {
- size = write_ptr;
- b_full = iwl_write_to_user_buf(user_buf, count,
- cpu_addr, &size,
- &bytes_copied);
- data->prev_wr_ptr = size;
- data->prev_wrap_cnt++;
- }
- } else {
- if (data->prev_wrap_cnt == wrap_cnt - 1 &&
- write_ptr > data->prev_wr_ptr)
- IWL_WARN(trans,
- "write pointer passed previous write pointer, start copying from the beginning\n");
- else if (!unlikely(data->prev_wrap_cnt == 0 &&
- data->prev_wr_ptr == 0))
- IWL_WARN(trans,
- "monitor data is out of sync, start copying from the beginning\n");
-
- size = write_ptr;
- b_full = iwl_write_to_user_buf(user_buf, count,
- cpu_addr, &size,
- &bytes_copied);
- data->prev_wr_ptr = size;
- data->prev_wrap_cnt = wrap_cnt;
- }
-
- mutex_unlock(&data->mutex);
-
- return bytes_copied;
-}
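
The read handler above treats the DMA monitor buffer as a ring tracked by a write pointer and a wrap counter; a compact summary of its three cases, derived from the code:

/*
 * 1. wrap count unchanged: copy [prev_wr_ptr, write_ptr)
 * 2. wrapped exactly once and write_ptr < prev_wr_ptr: copy the tail
 *    [prev_wr_ptr, buffer end), then the head [0, write_ptr)
 * 3. anything else (the writer overtook the reader, the state is out
 *    of sync, or this is the very first read): copy [0, write_ptr)
 *    from the beginning, warning unless it is the first read
 */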
-
-static ssize_t iwl_dbgfs_rf_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (!trans_pcie->rf_name[0])
- return -ENODEV;
-
- return simple_read_from_buffer(user_buf, count, ppos,
- trans_pcie->rf_name,
- strlen(trans_pcie->rf_name));
-}
-
-static ssize_t iwl_dbgfs_reset_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
- static const char * const modes[] = {
- [IWL_RESET_MODE_SW_RESET] = "sw",
- [IWL_RESET_MODE_REPROBE] = "reprobe",
- [IWL_RESET_MODE_TOP_RESET] = "top",
- [IWL_RESET_MODE_REMOVE_ONLY] = "remove",
- [IWL_RESET_MODE_RESCAN] = "rescan",
- [IWL_RESET_MODE_FUNC_RESET] = "function",
- [IWL_RESET_MODE_PROD_RESET] = "product",
- };
- char buf[10] = {};
- int mode;
-
- if (count > sizeof(buf) - 1)
- return -EINVAL;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- mode = sysfs_match_string(modes, buf);
- if (mode < 0)
- return mode;
-
- if (mode < IWL_RESET_MODE_REMOVE_ONLY) {
- if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
- return -EINVAL;
- if (mode == IWL_RESET_MODE_TOP_RESET) {
- if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)
- return -EINVAL;
- trans->request_top_reset = 1;
- }
- iwl_op_mode_nic_error(trans->op_mode, IWL_ERR_TYPE_DEBUGFS);
- iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_DEBUGFS);
- return count;
- }
-
- iwl_trans_pcie_reset(trans, mode);
-
- return count;
-}
-
-DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
-DEBUGFS_READ_FILE_OPS(fh_reg);
-DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_WRITE_FILE_OPS(csr);
-DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
-DEBUGFS_READ_FILE_OPS(rf);
-DEBUGFS_WRITE_FILE_OPS(reset);
-
-static const struct file_operations iwl_dbgfs_tx_queue_ops = {
- .owner = THIS_MODULE,
- .open = iwl_dbgfs_tx_queue_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_private,
-};
-
-static const struct file_operations iwl_dbgfs_monitor_data_ops = {
- .read = iwl_dbgfs_monitor_data_read,
- .open = iwl_dbgfs_monitor_data_open,
- .release = iwl_dbgfs_monitor_data_release,
-};
-
-/* Create the debugfs files and directories */
-void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
-{
- struct dentry *dir = trans->dbgfs_dir;
-
- DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
- DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
- DEBUGFS_ADD_FILE(interrupt, dir, 0600);
- DEBUGFS_ADD_FILE(csr, dir, 0200);
- DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
- DEBUGFS_ADD_FILE(rfkill, dir, 0600);
- DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
- DEBUGFS_ADD_FILE(rf, dir, 0400);
- DEBUGFS_ADD_FILE(reset, dir, 0200);
-}
-
-void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct cont_rec *data = &trans_pcie->fw_mon_data;
-
- mutex_lock(&data->mutex);
- data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
- mutex_unlock(&data->mutex);
-}
-#endif /*CONFIG_IWLWIFI_DEBUGFS */
-
-static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 cmdlen = 0;
- int i;
-
- for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++)
- cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
-
- return cmdlen;
-}
-
-static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
- struct iwl_fw_error_dump_data **data,
- int allocated_rb_nums)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int max_len = trans_pcie->rx_buf_bytes;
-	/* Dumping RBs is supported only for pre-9000 devices (1 queue) */
- struct iwl_rxq *rxq = &trans_pcie->rxq[0];
- u32 i, r, j, rb_len = 0;
-
- spin_lock_bh(&rxq->lock);
-
- r = iwl_get_closed_rb_stts(trans, rxq);
-
- for (i = rxq->read, j = 0;
- i != r && j < allocated_rb_nums;
- i = (i + 1) & RX_QUEUE_MASK, j++) {
- struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
- struct iwl_fw_error_dump_rb *rb;
-
- dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
- max_len, DMA_FROM_DEVICE);
-
- rb_len += sizeof(**data) + sizeof(*rb) + max_len;
-
- (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
- (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
- rb = (void *)(*data)->data;
- rb->index = cpu_to_le32(i);
- memcpy(rb->data, page_address(rxb->page), max_len);
-
- *data = iwl_fw_error_next_data(*data);
- }
-
- spin_unlock_bh(&rxq->lock);
-
- return rb_len;
-}
-#define IWL_CSR_TO_DUMP (0x250)
-
-static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
- struct iwl_fw_error_dump_data **data)
-{
- u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
- __le32 *val;
- int i;
-
- (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
- (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
- val = (void *)(*data)->data;
-
- for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
- *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
-
- *data = iwl_fw_error_next_data(*data);
-
- return csr_len;
-}
-
-static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
- struct iwl_fw_error_dump_data **data)
-{
- u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
- __le32 *val;
- int i;
-
- if (!iwl_trans_grab_nic_access(trans))
- return 0;
-
- (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
- (*data)->len = cpu_to_le32(fh_regs_len);
- val = (void *)(*data)->data;
-
- if (!trans->mac_cfg->gen2)
- for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
- i += sizeof(u32))
- *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
- else
- for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
- i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
- i += sizeof(u32))
- *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
- i));
-
- iwl_trans_release_nic_access(trans);
-
- *data = iwl_fw_error_next_data(*data);
-
- return sizeof(**data) + fh_regs_len;
-}
-
-static u32
-iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
- struct iwl_fw_error_dump_fw_mon *fw_mon_data,
- u32 monitor_len)
-{
- u32 buf_size_in_dwords = (monitor_len >> 2);
- u32 *buffer = (u32 *)fw_mon_data->data;
- u32 i;
-
- if (!iwl_trans_grab_nic_access(trans))
- return 0;
-
- iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
- for (i = 0; i < buf_size_in_dwords; i++)
- buffer[i] = iwl_read_umac_prph_no_grab(trans,
- MON_DMARB_RD_DATA_ADDR);
- iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
-
- iwl_trans_release_nic_access(trans);
-
- return monitor_len;
-}
-
-static void
-iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
- struct iwl_fw_error_dump_fw_mon *fw_mon_data)
-{
- u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
- base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
- write_ptr = DBGC_CUR_DBGBUF_STATUS;
- wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
- } else if (trans->dbg.dest_tlv) {
- write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
- wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
- base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
- } else {
- base = MON_BUFF_BASE_ADDR;
- write_ptr = MON_BUFF_WRPTR;
- wrap_cnt = MON_BUFF_CYCLE_CNT;
- }
-
- write_ptr_val = iwl_read_prph(trans, write_ptr);
- fw_mon_data->fw_mon_cycle_cnt =
- cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
- fw_mon_data->fw_mon_base_ptr =
- cpu_to_le32(iwl_read_prph(trans, base));
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- fw_mon_data->fw_mon_base_high_ptr =
- cpu_to_le32(iwl_read_prph(trans, base_high));
- write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
- /* convert wrtPtr to DWs, to align with all HWs */
- write_ptr_val >>= 2;
- }
- fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
-}
-
-static u32
-iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
- struct iwl_fw_error_dump_data **data,
- u32 monitor_len)
-{
- struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
- u32 len = 0;
-
- if (trans->dbg.dest_tlv ||
- (fw_mon->size &&
- (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
- trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
- struct iwl_fw_error_dump_fw_mon *fw_mon_data;
-
- (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
- fw_mon_data = (void *)(*data)->data;
-
- iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
-
- len += sizeof(**data) + sizeof(*fw_mon_data);
- if (fw_mon->size) {
- memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
- monitor_len = fw_mon->size;
- } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
- u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
- /*
- * Update pointers to reflect actual values after
- * shifting
- */
- if (trans->dbg.dest_tlv->version) {
- base = (iwl_read_prph(trans, base) &
- IWL_LDBG_M2S_BUF_BA_MSK) <<
- trans->dbg.dest_tlv->base_shift;
- base *= IWL_M2S_UNIT_SIZE;
- base += trans->mac_cfg->base->smem_offset;
- } else {
- base = iwl_read_prph(trans, base) <<
- trans->dbg.dest_tlv->base_shift;
- }
-
- iwl_trans_pcie_read_mem(trans, base, fw_mon_data->data,
- monitor_len / sizeof(u32));
- } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
- monitor_len =
- iwl_trans_pci_dump_marbh_monitor(trans,
- fw_mon_data,
- monitor_len);
- } else {
- /* Didn't match anything - output no monitor data */
- monitor_len = 0;
- }
-
- len += monitor_len;
- (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
- }
-
- return len;
-}
-
-static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
-{
- if (trans->dbg.fw_mon.size) {
- *len += sizeof(struct iwl_fw_error_dump_data) +
- sizeof(struct iwl_fw_error_dump_fw_mon) +
- trans->dbg.fw_mon.size;
- return trans->dbg.fw_mon.size;
- } else if (trans->dbg.dest_tlv) {
- u32 base, end, cfg_reg, monitor_len;
-
- if (trans->dbg.dest_tlv->version == 1) {
- cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
- cfg_reg = iwl_read_prph(trans, cfg_reg);
- base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
- trans->dbg.dest_tlv->base_shift;
- base *= IWL_M2S_UNIT_SIZE;
- base += trans->mac_cfg->base->smem_offset;
-
- monitor_len =
- (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
- trans->dbg.dest_tlv->end_shift;
- monitor_len *= IWL_M2S_UNIT_SIZE;
- } else {
- base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
- end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
-
- base = iwl_read_prph(trans, base) <<
- trans->dbg.dest_tlv->base_shift;
- end = iwl_read_prph(trans, end) <<
- trans->dbg.dest_tlv->end_shift;
-
- /* Make "end" point to the actual end */
- if (trans->mac_cfg->device_family >=
- IWL_DEVICE_FAMILY_8000 ||
- trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
- end += (1 << trans->dbg.dest_tlv->end_shift);
- monitor_len = end - base;
- }
- *len += sizeof(struct iwl_fw_error_dump_data) +
- sizeof(struct iwl_fw_error_dump_fw_mon) +
- monitor_len;
- return monitor_len;
- }
- return 0;
-}
-
-struct iwl_trans_dump_data *
-iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
- const struct iwl_dump_sanitize_ops *sanitize_ops,
- void *sanitize_ctx)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
- struct iwl_fw_error_dump_txcmd *txcmd;
- struct iwl_trans_dump_data *dump_data;
- u32 len, num_rbs = 0, monitor_len = 0;
- int i, ptr;
- bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
- !trans->mac_cfg->mq_rx_supported &&
- dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
-
- if (!dump_mask)
- return NULL;
-
- /* transport dump header */
- len = sizeof(*dump_data);
-
- /* host commands */
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
- len += sizeof(*data) +
- cmdq->n_window * (sizeof(*txcmd) +
- TFD_MAX_PAYLOAD_SIZE);
-
- /* FW monitor */
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
- monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
-
- /* CSR registers */
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
- len += sizeof(*data) + IWL_CSR_TO_DUMP;
-
- /* FH registers */
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
- if (trans->mac_cfg->gen2)
- len += sizeof(*data) +
- (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
- iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
- else
- len += sizeof(*data) +
- (FH_MEM_UPPER_BOUND -
- FH_MEM_LOWER_BOUND);
- }
-
- if (dump_rbs) {
-		/* Dumping RBs is supported only for pre-9000 devices (1 queue) */
- struct iwl_rxq *rxq = &trans_pcie->rxq[0];
- /* RBs */
- spin_lock_bh(&rxq->lock);
- num_rbs = iwl_get_closed_rb_stts(trans, rxq);
- num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
- spin_unlock_bh(&rxq->lock);
-
- len += num_rbs * (sizeof(*data) +
- sizeof(struct iwl_fw_error_dump_rb) +
- (PAGE_SIZE << trans_pcie->rx_page_order));
- }
-
- /* Paged memory for gen2 HW */
- if (trans->mac_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
- for (i = 0; i < trans->init_dram.paging_cnt; i++)
- len += sizeof(*data) +
- sizeof(struct iwl_fw_error_dump_paging) +
- trans->init_dram.paging[i].size;
-
- dump_data = vzalloc(len);
- if (!dump_data)
- return NULL;
-
- len = 0;
- data = (void *)dump_data->data;
-
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
- u16 tfd_size = trans_pcie->txqs.tfd.size;
-
- data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
- txcmd = (void *)data->data;
- spin_lock_bh(&cmdq->lock);
- ptr = cmdq->write_ptr;
- for (i = 0; i < cmdq->n_window; i++) {
- u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
- u8 tfdidx;
- u32 caplen, cmdlen;
-
- if (trans->mac_cfg->gen2)
- tfdidx = idx;
- else
- tfdidx = ptr;
-
- cmdlen = iwl_trans_pcie_get_cmdlen(trans,
- (u8 *)cmdq->tfds +
- tfd_size * tfdidx);
- caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
-
- if (cmdlen) {
- len += sizeof(*txcmd) + caplen;
- txcmd->cmdlen = cpu_to_le32(cmdlen);
- txcmd->caplen = cpu_to_le32(caplen);
- memcpy(txcmd->data, cmdq->entries[idx].cmd,
- caplen);
- if (sanitize_ops && sanitize_ops->frob_hcmd)
- sanitize_ops->frob_hcmd(sanitize_ctx,
- txcmd->data,
- caplen);
- txcmd = (void *)((u8 *)txcmd->data + caplen);
- }
-
- ptr = iwl_txq_dec_wrap(trans, ptr);
- }
- spin_unlock_bh(&cmdq->lock);
-
- data->len = cpu_to_le32(len);
- len += sizeof(*data);
- data = iwl_fw_error_next_data(data);
- }
-
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
- len += iwl_trans_pcie_dump_csr(trans, &data);
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
- len += iwl_trans_pcie_fh_regs_dump(trans, &data);
- if (dump_rbs)
- len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
-
- /* Paged memory for gen2 HW */
- if (trans->mac_cfg->gen2 &&
- dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
- for (i = 0; i < trans->init_dram.paging_cnt; i++) {
- struct iwl_fw_error_dump_paging *paging;
- u32 page_len = trans->init_dram.paging[i].size;
-
- data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
- data->len = cpu_to_le32(sizeof(*paging) + page_len);
- paging = (void *)data->data;
- paging->index = cpu_to_le32(i);
- memcpy(paging->data,
- trans->init_dram.paging[i].block, page_len);
- data = iwl_fw_error_next_data(data);
-
- len += sizeof(*data) + sizeof(*paging) + page_len;
- }
- }
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
- len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
-
- dump_data->len = len;
-
- return dump_data;
-}
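
The buffer built above is a flat sequence of (type, len, payload) records. A hedged consumer sketch, assuming only the iwl_fw_error_dump_data layout used by this function; walk_dump() is a hypothetical helper, not part of the driver:

static void walk_dump(struct iwl_trans_dump_data *dump_data)
{
	struct iwl_fw_error_dump_data *d = (void *)dump_data->data;
	u8 *end = (u8 *)dump_data->data + dump_data->len;

	while ((u8 *)d < end) {
		/* d->type identifies the payload in d->data,
		 * d->len is its length in bytes */
		pr_debug("dump record type %u, len %u\n",
			 le32_to_cpu(d->type), le32_to_cpu(d->len));
		d = iwl_fw_error_next_data(d);
	}
}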
-
-void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
-{
- if (enable)
- iwl_enable_interrupts(trans);
- else
- iwl_disable_interrupts(trans);
-}
-
-void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
-{
- u32 inta_addr, sw_err_bit;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (trans_pcie->msix_enabled) {
- inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
- else
- sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
- } else {
- inta_addr = CSR_INT;
- sw_err_bit = CSR_INT_BIT_SW_ERR;
- }
-
- iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
-}
-
-struct iwl_trans *
-iwl_trans_pcie_alloc(struct pci_dev *pdev,
- const struct iwl_mac_cfg *mac_cfg,
- struct iwl_trans_info *info)
-{
- struct iwl_trans_pcie *trans_pcie, **priv;
- struct iwl_trans *trans;
- unsigned int bc_tbl_n_entries;
- int ret, addr_size;
- u32 bar0;
-
- /* reassign our BAR 0 if invalid due to possible runtime PM races */
- pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0);
- if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) {
- ret = pci_assign_resource(pdev, 0);
- if (ret)
- return ERR_PTR(ret);
- }
-
- ret = pcim_enable_device(pdev);
- if (ret)
- return ERR_PTR(ret);
-
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
- mac_cfg);
- if (!trans)
- return ERR_PTR(-ENOMEM);
-
- trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- /* Initialize the wait queue for commands */
- init_waitqueue_head(&trans_pcie->wait_command_queue);
-
- if (trans->mac_cfg->gen2) {
- trans_pcie->txqs.tfd.addr_size = 64;
- trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
- trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
- } else {
- trans_pcie->txqs.tfd.addr_size = 36;
- trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
- trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
- }
-
- trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(12);
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(11);
-
- info->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
-
- trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
- if (!trans_pcie->txqs.tso_hdr_page) {
- ret = -ENOMEM;
- goto out_free_trans;
- }
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_BZ;
- else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_AX210;
- else
- bc_tbl_n_entries = TFD_QUEUE_BC_SIZE;
-
- trans_pcie->txqs.bc_tbl_size =
- sizeof(struct iwl_bc_tbl_entry) * bc_tbl_n_entries;
- /*
- * For gen2 devices, we use a single allocation for each byte-count
- * table, but they're pretty small (1k) so use a DMA pool that we
- * allocate here.
- */
- if (trans->mac_cfg->gen2) {
- trans_pcie->txqs.bc_pool =
- dmam_pool_create("iwlwifi:bc", trans->dev,
- trans_pcie->txqs.bc_tbl_size,
- 256, 0);
- if (!trans_pcie->txqs.bc_pool) {
- ret = -ENOMEM;
- goto out_free_tso;
- }
- }
-
- /* Some things must not change even if the config does */
- WARN_ON(trans_pcie->txqs.tfd.addr_size !=
- (trans->mac_cfg->gen2 ? 64 : 36));
-
- /* Initialize NAPI here - it should be before registering to mac80211
- * in the opmode but after the HW struct is allocated.
- */
- trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
- if (!trans_pcie->napi_dev) {
- ret = -ENOMEM;
- goto out_free_tso;
- }
- /* The private struct in netdev is a pointer to struct iwl_trans_pcie */
- priv = netdev_priv(trans_pcie->napi_dev);
- *priv = trans_pcie;
-
- trans_pcie->trans = trans;
- trans_pcie->opmode_down = true;
- spin_lock_init(&trans_pcie->irq_lock);
- spin_lock_init(&trans_pcie->reg_lock);
- spin_lock_init(&trans_pcie->alloc_page_lock);
- mutex_init(&trans_pcie->mutex);
- init_waitqueue_head(&trans_pcie->ucode_write_waitq);
- init_waitqueue_head(&trans_pcie->fw_reset_waitq);
- init_waitqueue_head(&trans_pcie->imr_waitq);
-
- trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
- WQ_HIGHPRI | WQ_UNBOUND, 0);
- if (!trans_pcie->rba.alloc_wq) {
- ret = -ENOMEM;
- goto out_free_ndev;
- }
- INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
-
- trans_pcie->debug_rfkill = -1;
-
- if (!mac_cfg->base->pcie_l1_allowed) {
-		/*
-		 * W/A - seems to solve weird behavior. We need to remove
-		 * this if we no longer need to keep the link out of L1 all
-		 * the time, since blocking L1 wastes a lot of power.
-		 */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
- PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
- }
-
- pci_set_master(pdev);
-
- addr_size = trans_pcie->txqs.tfd.addr_size;
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
- if (ret) {
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- /* both attempts failed: */
- if (ret) {
- dev_err(&pdev->dev, "No suitable DMA available\n");
- goto out_no_pci;
- }
- }
-
- ret = pcim_request_all_regions(pdev, DRV_NAME);
- if (ret) {
- dev_err(&pdev->dev, "Requesting all PCI BARs failed.\n");
- goto out_no_pci;
- }
-
- trans_pcie->hw_base = pcim_iomap(pdev, 0, 0);
- if (!trans_pcie->hw_base) {
- dev_err(&pdev->dev, "Could not ioremap PCI BAR 0.\n");
- ret = -ENODEV;
- goto out_no_pci;
- }
-
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
- trans_pcie->pci_dev = pdev;
- iwl_disable_interrupts(trans);
-
- info->hw_rev = iwl_read32(trans, CSR_HW_REV);
- if (info->hw_rev == 0xffffffff) {
- dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
- ret = -EIO;
- goto out_no_pci;
- }
-
-	/*
-	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
-	 * changed, and the revision step now also includes bits 0-1 (no more
-	 * "dash" value). To keep hw_rev backwards compatible, we store it
-	 * in the old format.
-	 */
- if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
- info->hw_rev_step = info->hw_rev & 0xF;
- else
- info->hw_rev_step = (info->hw_rev & 0xC) >> 2;
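	/*
	 * Illustrative example (values made up): if the low nibble of
	 * hw_rev is 0xE, a pre-8000 device reports step
	 * (0xE & 0xC) >> 2 = 3, while an 8000+ device reports
	 * step 0xE & 0xF = 0xE directly.
	 */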
-
- IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", info->hw_rev);
-
- iwl_pcie_set_interrupt_capa(pdev, trans, mac_cfg, info);
-
- init_waitqueue_head(&trans_pcie->sx_waitq);
-
- ret = iwl_pcie_alloc_invalid_tx_cmd(trans);
- if (ret)
- goto out_no_pci;
-
- if (trans_pcie->msix_enabled) {
- ret = iwl_pcie_init_msix_handler(pdev, trans_pcie, info);
- if (ret)
- goto out_no_pci;
- } else {
- ret = iwl_pcie_alloc_ict(trans);
- if (ret)
- goto out_no_pci;
-
- ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
- iwl_pcie_isr,
- iwl_pcie_irq_handler,
- IRQF_SHARED, DRV_NAME, trans);
- if (ret) {
- IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
- goto out_free_ict;
- }
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
- mutex_init(&trans_pcie->fw_mon_data.mutex);
-#endif
-
- iwl_dbg_tlv_init(trans);
-
- return trans;
-
-out_free_ict:
- iwl_pcie_free_ict(trans);
-out_no_pci:
- destroy_workqueue(trans_pcie->rba.alloc_wq);
-out_free_ndev:
- free_netdev(trans_pcie->napi_dev);
-out_free_tso:
- free_percpu(trans_pcie->txqs.tso_hdr_page);
-out_free_trans:
- iwl_trans_free(trans);
- return ERR_PTR(ret);
-}
-
-void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
- u32 dst_addr, u64 src_addr, u32 byte_cnt)
-{
- iwl_write_prph(trans, IMR_UREG_CHICK,
- iwl_read_prph(trans, IMR_UREG_CHICK) |
- IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
- iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
- iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
- (u32)(src_addr & 0xFFFFFFFF));
- iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
- iwl_get_dma_hi_addr(src_addr));
- iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
- iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
- IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
- IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
- IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
-}
-
-int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
- u32 dst_addr, u64 src_addr, u32 byte_cnt)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret = -1;
-
- trans_pcie->imr_status = IMR_D2S_REQUESTED;
- iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
- ret = wait_event_timeout(trans_pcie->imr_waitq,
- trans_pcie->imr_status !=
- IMR_D2S_REQUESTED, 5 * HZ);
- if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
- IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
- iwl_trans_pcie_dump_regs(trans);
- return -ETIMEDOUT;
- }
- trans_pcie->imr_status = IMR_D2S_IDLE;
- return 0;
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2023-2025 Intel Corporation
- */
-#include <net/tso.h>
-#include <linux/tcp.h>
-
-#include "iwl-debug.h"
-#include "iwl-csr.h"
-#include "iwl-io.h"
-#include "internal.h"
-#include "fw/api/tx.h"
-#include "fw/api/commands.h"
-#include "fw/api/datapath.h"
-#include "iwl-scd.h"
-
-static struct page *get_workaround_page(struct iwl_trans *trans,
- struct sk_buff *skb)
-{
- struct iwl_tso_page_info *info;
- struct page **page_ptr;
- struct page *ret;
- dma_addr_t phys;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
-
- ret = alloc_page(GFP_ATOMIC);
- if (!ret)
- return NULL;
-
- info = IWL_TSO_PAGE_INFO(page_address(ret));
-
- /* Create a DMA mapping for the page */
- phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE,
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(trans->dev, phys))) {
- __free_page(ret);
- return NULL;
- }
-
- /* Store physical address and set use count */
- info->dma_addr = phys;
- refcount_set(&info->use_count, 1);
-
- /* set the chaining pointer to the previous page if there */
- info->next = *page_ptr;
- *page_ptr = ret;
-
- return ret;
-}
-
-/*
- * Add a TB and if needed apply the FH HW bug workaround;
- * meta != NULL indicates that it's a page mapping and we
- * need to dma_unmap_page() and set the meta->tbs bit in
- * this case.
- */
-static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- dma_addr_t phys, void *virt,
- u16 len, struct iwl_cmd_meta *meta,
- bool unmap)
-{
- dma_addr_t oldphys = phys;
- struct page *page;
- int ret;
-
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
-
- if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
- ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
-
- if (ret < 0)
- goto unmap;
-
- if (meta)
- meta->tbs |= BIT(ret);
-
- ret = 0;
- goto trace;
- }
-
-	/*
-	 * Work around a hardware bug. If (as expressed in the
-	 * condition above) the TB crosses or ends on a 4 GiB (2^32)
-	 * address boundary, then the next TB may be accessed with
-	 * the wrong address.
-	 * To work around it, copy the data elsewhere and make
-	 * a new mapping for it so the device will not fail.
-	 */
-
- if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) {
- ret = -ENOBUFS;
- goto unmap;
- }
-
- page = get_workaround_page(trans, skb);
- if (!page) {
- ret = -ENOMEM;
- goto unmap;
- }
-
- memcpy(page_address(page), virt, len);
-
- /*
- * This is a bit odd, but performance does not matter here, what
- * matters are the expectations of the calling code and TB cleanup
- * function.
- *
- * As such, if unmap is set, then create another mapping for the TB
- * entry as it will be unmapped later. On the other hand, if it is not
- * set, then the TB entry will not be unmapped and instead we simply
- * reference and sync the mapping that get_workaround_page() created.
- */
- if (unmap) {
- phys = dma_map_single(trans->dev, page_address(page), len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
- } else {
- phys = iwl_pcie_get_tso_page_phys(page_address(page));
- dma_sync_single_for_device(trans->dev, phys, len,
- DMA_TO_DEVICE);
- }
-
- ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
- if (ret < 0) {
- /* unmap the new allocation as single */
- oldphys = phys;
- meta = NULL;
- goto unmap;
- }
-
- IWL_DEBUG_TX(trans,
- "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
- len, (unsigned long long)oldphys,
- (unsigned long long)phys);
-
- ret = 0;
-unmap:
- if (!unmap)
- goto trace;
-
- if (meta)
- dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
-trace:
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
-
- return ret;
-}
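
The boundary test driving this workaround can be stated compactly. A sketch consistent with the comment in the function above, assuming the canonical form of the driver's iwl_txq_crosses_4g_boundary() helper:

static inline bool crosses_4g_boundary_sketch(dma_addr_t addr, u16 len)
{
	/* true when the bits above 32 differ between the start address
	 * and the first byte after the buffer, i.e. the TB crosses or
	 * ends exactly on a 4 GiB boundary */
	return upper_32_bits(addr) != upper_32_bits(addr + len);
}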
-
-static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- struct iwl_cmd_meta *out_meta,
- int start_len,
- u8 hdr_len,
- struct iwl_device_tx_cmd *dev_cmd)
-{
-#ifdef CONFIG_INET
- struct iwl_tx_cmd_v9 *tx_cmd = (void *)dev_cmd->payload;
- struct ieee80211_hdr *hdr = (void *)skb->data;
- unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- unsigned int data_offset = 0;
- dma_addr_t start_hdr_phys;
- u16 length, amsdu_pad;
- u8 *start_hdr;
- struct sg_table *sgt;
- struct tso_t tso;
-
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
- &dev_cmd->hdr, start_len, 0);
-
- ip_hdrlen = skb_network_header_len(skb);
- snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
- total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
- amsdu_pad = 0;
-
- /* total amount of header we may need for this A-MSDU */
- hdr_room = DIV_ROUND_UP(total_len, mss) *
- (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
-
-	/* Our device supports 9 segments at most; the headers will fit in 1 page */
- sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
- snap_ip_tcp_hdrlen + hdr_len);
- if (!sgt)
- return -ENOMEM;
-
- start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
-
- /*
- * Pull the ieee80211 header to be able to use TSO core,
- * we will restore it for the tx_status flow.
- */
- skb_pull(skb, hdr_len);
-
- /*
- * Remove the length of all the headers that we don't actually
- * have in the MPDU by themselves, but that we duplicate into
- * all the different MSDUs inside the A-MSDU.
- */
- le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
-
- tso_start(skb, &tso);
-
- while (total_len) {
- /* this is the data left for this subframe */
- unsigned int data_left = min_t(unsigned int, mss, total_len);
- unsigned int tb_len;
- dma_addr_t tb_phys;
- u8 *pos_hdr = start_hdr;
-
- total_len -= data_left;
-
- memset(pos_hdr, 0, amsdu_pad);
- pos_hdr += amsdu_pad;
- amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
- data_left)) & 0x3;
- ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
- pos_hdr += ETH_ALEN;
- ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
- pos_hdr += ETH_ALEN;
-
- length = snap_ip_tcp_hdrlen + data_left;
- *((__be16 *)pos_hdr) = cpu_to_be16(length);
- pos_hdr += sizeof(length);
-
- /*
- * This will copy the SNAP as well which will be considered
- * as MAC header.
- */
- tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
-
- pos_hdr += snap_ip_tcp_hdrlen;
-
- tb_len = pos_hdr - start_hdr;
- tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
-
- /*
- * No need for _with_wa, this is from the TSO page and
- * we leave some space at the end of it so can't hit
- * the buggy scenario.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
- tb_phys, tb_len);
- /* add this subframe's headers' length to the tx_cmd */
- le16_add_cpu(&tx_cmd->len, tb_len);
-
- /* prepare the start_hdr for the next subframe */
- start_hdr = pos_hdr;
-
- /* put the payload */
- while (data_left) {
- int ret;
-
- tb_len = min_t(unsigned int, tso.size, data_left);
- tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset,
- tb_len);
- /* Not a real mapping error, use direct comparison */
- if (unlikely(tb_phys == DMA_MAPPING_ERROR))
- goto out_err;
-
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
- tb_phys, tso.data,
- tb_len, NULL, false);
- if (ret)
- goto out_err;
-
- data_left -= tb_len;
- data_offset += tb_len;
- tso_build_data(skb, &tso, tb_len);
- }
- }
-
- dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
- DMA_TO_DEVICE);
-
-	/* re-add the WiFi header */
- skb_push(skb, hdr_len);
-
- return 0;
-
-out_err:
-#endif
- return -EINVAL;
-}
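
A worked example of the header-room computation above, with illustrative numbers:

/*
 * IPv4/TCP without options, mss = 1460 (values are illustrative):
 *   snap_ip_tcp_hdrlen = 8 + 20 + 20 = 48
 *   total_len = 4000 -> DIV_ROUND_UP(4000, 1460) = 3 subframes
 *   hdr_room = 3 * (3 + 48 + sizeof(struct ethhdr))
 *            = 3 * (3 + 48 + 14) = 195 bytes
 * The 3 extra bytes per subframe cover the worst-case amsdu_pad
 * needed to keep each subframe 4-byte aligned.
 */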
-
-static struct
-iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len;
- void *tb1_addr;
-
- tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- /* do not align A-MSDU to dword as the subframe header aligns it */
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
-
- if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, out_meta,
- len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd))
- goto out_err;
-
- /* building the A-MSDU might have changed this data, memcpy it now */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
- return tfd;
-
-out_err:
- iwl_pcie_free_tso_pages(trans, skb, out_meta);
- iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
-static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- struct iwl_cmd_meta *out_meta)
-{
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- dma_addr_t tb_phys;
- unsigned int fragsz = skb_frag_size(frag);
- int ret;
-
- if (!fragsz)
- continue;
-
- tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- fragsz, DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb_frag_address(frag),
- fragsz, out_meta, true);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct
-iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len,
- bool pad)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len, tb1_len, tb2_len;
- void *tb1_addr;
- struct sk_buff *frag;
-
- tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
-
- /* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- if (pad)
- tb1_len = ALIGN(len, 4);
- else
- tb1_len = len;
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
-
- /* set up TFD's third entry to point to remainder of skb's head */
- tb2_len = skb_headlen(skb) - hdr_len;
-
- if (tb2_len > 0) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
- tb2_len, DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb->data + hdr_len, tb2_len,
- NULL, true);
- if (ret)
- goto out_err;
- }
-
- if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
- goto out_err;
-
- skb_walk_frags(skb, frag) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, frag->data,
- skb_headlen(frag), DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- frag->data,
- skb_headlen(frag), NULL,
- true);
- if (ret)
- goto out_err;
- if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
- goto out_err;
- }
-
- return tfd;
-
-out_err:
- iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
-static
-struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- int len, hdr_len;
- bool amsdu;
-
- /* There must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v9) < IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_v9, dram_info) >
- IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd, dram_info) >
- IWL_FIRST_TB_SIZE);
-
- memset(tfd, 0, sizeof(*tfd));
-
- if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- len = sizeof(struct iwl_tx_cmd_v9);
- else
- len = sizeof(struct iwl_tx_cmd);
-
- amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
- (*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_A_MSDU_PRESENT);
-
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- /*
- * Only build A-MSDUs here if doing so by GSO, otherwise it may be
- * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
- * built in the higher layers already.
- */
- if (amsdu && skb_shinfo(skb)->gso_size)
- return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
- out_meta, hdr_len, len);
- return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
- hdr_len, len, !amsdu);
-}
-
-int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
-{
- unsigned int max;
- unsigned int used;
-
- /*
- * To avoid ambiguity between empty and completely full queues, there
- * should always be less than max_tfd_queue_size elements in the queue.
- * If q->n_window is smaller than max_tfd_queue_size, there is no need
- * to reserve any queue entries for this purpose.
- */
- if (q->n_window < trans->mac_cfg->base->max_tfd_queue_size)
- max = q->n_window;
- else
- max = trans->mac_cfg->base->max_tfd_queue_size - 1;
-
- /*
- * max_tfd_queue_size is a power of 2, so the following is equivalent to
- * modulo by max_tfd_queue_size and is well defined.
- */
- used = (q->write_ptr - q->read_ptr) &
- (trans->mac_cfg->base->max_tfd_queue_size - 1);
-
- if (WARN_ON(used > max))
- return 0;
-
- return max - used;
-}
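
A worked example of the wrap-around arithmetic above, with illustrative values:

/*
 * max_tfd_queue_size = 256, n_window = 64,
 * write_ptr = 10, read_ptr = 250:
 *   max   = 64                      (n_window < 256)
 *   used  = (10 - 250) & 255 = 16   (the subtraction wraps)
 *   space = 64 - 16 = 48 free entries
 */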
-
-/*
- * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
- */
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
- u8 filled_tfd_size, num_fetch_chunks;
- u16 len = byte_cnt;
- __le16 bc_ent;
-
- if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
- return;
-
- filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
- num_tbs * sizeof(struct iwl_tfh_tb);
-	/*
-	 * filled_tfd_size contains the number of filled bytes in the TFD.
-	 * Dividing it by 64 will give the number of chunks to fetch
-	 * to SRAM - 0 for one chunk, 1 for two, and so on.
-	 * If, for example, the TFD contains only 3 TBs then 32 bytes
-	 * of the TFD are used, and only one chunk of 64 bytes should
-	 * be fetched.
-	 */
- num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- WARN_ON(len > 0x3FFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- } else {
- len = DIV_ROUND_UP(len, 4);
- WARN_ON(len > 0xFFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- }
-
- scd_bc_tbl[idx].tfd_offset = bc_ent;
-}
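
The chunk computation above in numbers (illustrative values):

/*
 *   filled_tfd_size = 32  -> DIV_ROUND_UP(32, 64) - 1 = 0 chunks
 *   filled_tfd_size = 130 -> DIV_ROUND_UP(130, 64) - 1 = 2 chunks
 * AX210+ stores the byte count as-is (14 bits) with the chunk count
 * in bits 14-15; older devices store it in dwords (12 bits) with the
 * chunk count starting at bit 12.
 */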
-
-static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd)
-{
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
-}
-
-int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
- dma_addr_t addr, u16 len)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int idx = iwl_txq_gen2_get_num_tbs(tfd);
- struct iwl_tfh_tb *tb;
-
- /* Only WARN here so we know about the issue, but we mess up our
- * unmap path because not every place currently checks for errors
- * returned from this function - it can only return an error if
- * there's no more space, and so when we know there is enough we
- * don't always check ...
- */
- WARN(iwl_txq_crosses_4g_boundary(addr, len),
- "possible DMA problem with iova:0x%llx, len:%d\n",
- (unsigned long long)addr, len);
-
- if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
- return -EINVAL;
- tb = &tfd->tbs[idx];
-
- /* Each TFD can point to a maximum max_tbs Tx buffers */
- if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans_pcie->txqs.tfd.max_tbs);
- return -EINVAL;
- }
-
- put_unaligned_le64(addr, &tb->addr);
- tb->tb_len = cpu_to_le16(len);
-
- tfd->num_tbs = cpu_to_le16(idx + 1);
-
- return idx;
-}
-
-void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_tfh_tfd *tfd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i, num_tbs;
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_txq_gen2_get_num_tbs(tfd);
-
- if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
- return;
- }
-
- /* TB1 is mapped directly, the rest is the TSO page and SG list. */
- if (meta->sg_offset)
- num_tbs = 2;
-
- /* first TB is never freed - it's the bidirectional DMA data */
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- }
-
- iwl_txq_set_tfd_invalid_gen2(trans, tfd);
-}
-
-static void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
- * idx is bounded by n_window
- */
- int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb;
-
- lockdep_assert_held(&txq->lock);
-
- if (!txq->entries)
- return;
-
- iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_txq_get_tfd(trans, txq, idx));
-
- skb = txq->entries[idx].skb;
-
-	/* Can be called from an irqs-disabled context.
-	 * If skb is not NULL, it means that the whole queue is being
-	 * freed while still not empty - free the skb.
-	 */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
-}
-
-/*
- * iwl_txq_inc_wr_ptr - Send new write index to hardware
- */
-static void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- lockdep_assert_held(&txq->lock);
-
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
-
- /*
- * if not in power-save mode, uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
-}
-
-int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_cmd_meta *out_meta;
- struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
- u16 cmd_len;
- int idx;
- void *tfd;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return -EINVAL;
-
- if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
- "TX on unused queue %d\n", txq_id))
- return -EINVAL;
-
- if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
- __skb_linearize(skb))
- return -ENOMEM;
-
- spin_lock(&txq->lock);
-
- if (iwl_txq_space(trans, txq) < txq->high_mark) {
- iwl_txq_stop(trans, txq);
-
-		/* don't put the packet on the ring if there is no room */
- if (unlikely(iwl_txq_space(trans, txq) < 3)) {
- struct iwl_device_tx_cmd **dev_cmd_ptr;
-
- dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans->conf.cb_data_offs +
- sizeof(void *));
-
- *dev_cmd_ptr = dev_cmd;
- __skb_queue_tail(&txq->overflow_q, skb);
- spin_unlock(&txq->lock);
- return 0;
- }
- }
-
- idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
-
- /* Set up driver data for this TFD */
- txq->entries[idx].skb = skb;
- txq->entries[idx].cmd = dev_cmd;
-
- dev_cmd->hdr.sequence =
- cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(idx)));
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[idx].meta;
- memset(out_meta, 0, sizeof(*out_meta));
-
- tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
- if (!tfd) {
- spin_unlock(&txq->lock);
- return -1;
- }
-
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_tx_cmd *tx_cmd =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd->len);
- } else {
- struct iwl_tx_cmd_v9 *tx_cmd_v9 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_v9->len);
- }
-
- /* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
- iwl_txq_gen2_get_num_tbs(tfd));
-
- /* start timer if queue currently empty */
- if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-
- /* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
- iwl_txq_inc_wr_ptr(trans, txq);
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually.
- */
- spin_unlock(&txq->lock);
- return 0;
-}
-
-/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-
-/*
- * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
- */
-static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
-
- spin_lock_bh(&txq->reclaim_lock);
- spin_lock(&txq->lock);
- while (txq->write_ptr != txq->read_ptr) {
- IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, txq->read_ptr);
-
- if (txq_id != trans->conf.cmd_queue) {
- int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
- struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;
- struct sk_buff *skb = txq->entries[idx].skb;
-
- if (!WARN_ON_ONCE(!skb))
- iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
- }
- iwl_txq_gen2_free_tfd(trans, txq);
- txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
- }
-
- while (!skb_queue_empty(&txq->overflow_q)) {
- struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
-
- iwl_op_mode_free_skb(trans->op_mode, skb);
- }
-
- spin_unlock(&txq->lock);
- spin_unlock_bh(&txq->reclaim_lock);
-
- /* just in case - this queue may have been stopped */
- iwl_trans_pcie_wake_queue(trans, txq);
-}
-
-static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct device *dev = trans->dev;
-
- /* De-alloc circular buffer of TFDs */
- if (txq->tfds) {
- dma_free_coherent(dev,
- trans_pcie->txqs.tfd.size * txq->n_window,
- txq->tfds, txq->dma_addr);
- dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->n_window,
- txq->first_tb_bufs, txq->first_tb_dma);
- }
-
- kfree(txq->entries);
- if (txq->bc_tbl.addr)
- dma_pool_free(trans_pcie->txqs.bc_pool,
- txq->bc_tbl.addr, txq->bc_tbl.dma);
- kfree(txq);
-}
-
-/*
- * iwl_txq_gen2_free - Deallocate DMA queue
- * @txq_id: index of the transmit queue to deallocate
- *
- * Empty the queue by removing and destroying all BDs.
- * Free all buffers.
- * 0-fill, but do not free the "txq" descriptor structure.
- */
-static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq;
- int i;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return;
-
- txq = trans_pcie->txqs.txq[txq_id];
-
- if (WARN_ON(!txq))
- return;
-
- iwl_txq_gen2_unmap(trans, txq_id);
-
- /* De-alloc array of command/tx buffers */
- if (txq_id == trans->conf.cmd_queue)
- for (i = 0; i < txq->n_window; i++) {
- kfree_sensitive(txq->entries[i].cmd);
- kfree_sensitive(txq->entries[i].free_buf);
- }
- timer_delete_sync(&txq->stuck_timer);
-
- iwl_txq_gen2_free_memory(trans, txq);
-
- trans_pcie->txqs.txq[txq_id] = NULL;
-
- clear_bit(txq_id, trans_pcie->txqs.queue_used);
-}
-
-static struct iwl_txq *
-iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t bc_tbl_size, bc_tbl_entries;
- struct iwl_txq *txq;
- int ret;
-
- WARN_ON(!trans_pcie->txqs.bc_tbl_size);
-
- bc_tbl_size = trans_pcie->txqs.bc_tbl_size;
- bc_tbl_entries = bc_tbl_size / sizeof(u16);
-
- if (WARN_ON(size > bc_tbl_entries))
- return ERR_PTR(-EINVAL);
-
- txq = kzalloc(sizeof(*txq), GFP_KERNEL);
- if (!txq)
- return ERR_PTR(-ENOMEM);
-
- txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->txqs.bc_pool, GFP_KERNEL,
- &txq->bc_tbl.dma);
- if (!txq->bc_tbl.addr) {
- IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
- kfree(txq);
- return ERR_PTR(-ENOMEM);
- }
-
- ret = iwl_pcie_txq_alloc(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue alloc failed\n");
- goto error;
- }
- ret = iwl_txq_init(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue init failed\n");
- goto error;
- }
-
- txq->wd_timeout = msecs_to_jiffies(timeout);
-
- return txq;
-
-error:
- iwl_txq_gen2_free_memory(trans, txq);
- return ERR_PTR(ret);
-}
-
-static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_host_cmd *hcmd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue_cfg_rsp *rsp;
- int ret, qid;
- u32 wr_ptr;
-
- if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
- sizeof(*rsp))) {
- ret = -EINVAL;
- goto error_free_resp;
- }
-
- rsp = (void *)hcmd->resp_pkt->data;
- qid = le16_to_cpu(rsp->queue_number);
- wr_ptr = le16_to_cpu(rsp->write_pointer);
-
- if (qid >= ARRAY_SIZE(trans_pcie->txqs.txq)) {
- WARN_ONCE(1, "queue index %d unsupported", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- if (test_and_set_bit(qid, trans_pcie->txqs.queue_used)) {
- WARN_ONCE(1, "queue %d already used", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- if (WARN_ONCE(trans_pcie->txqs.txq[qid],
- "queue %d already allocated\n", qid)) {
- ret = -EIO;
- goto error_free_resp;
- }
-
- txq->id = qid;
- trans_pcie->txqs.txq[qid] = txq;
- wr_ptr &= (trans->mac_cfg->base->max_tfd_queue_size - 1);
-
- /* Place first TFD at index corresponding to start sequence number */
- txq->read_ptr = wr_ptr;
- txq->write_ptr = wr_ptr;
-
- IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
-
- iwl_free_resp(hcmd);
- return qid;
-
-error_free_resp:
- iwl_free_resp(hcmd);
- iwl_txq_gen2_free_memory(trans, txq);
- return ret;
-}
-
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
- u8 tid, int size, unsigned int timeout)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq;
- union {
- struct iwl_tx_queue_cfg_cmd old;
- struct iwl_scd_queue_cfg_cmd new;
- } cmd;
- struct iwl_host_cmd hcmd = {
- .flags = CMD_WANT_SKB,
- };
- int ret;
-
- /* take the min with bytecount table entries allowed */
- size = min_t(u32, size, trans_pcie->txqs.bc_tbl_size / sizeof(u16));
-	/* but must be a power-of-2 value for calculating read/write pointers */
- size = rounddown_pow_of_two(size);
-
- if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
- trans->info.hw_rev_step == SILICON_A_STEP) {
- size = 4096;
- txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
- } else {
- do {
- txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
- if (!IS_ERR(txq))
- break;
-
- IWL_DEBUG_TX_QUEUES(trans,
- "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %ld\n",
- size, sta_mask, tid,
- PTR_ERR(txq));
- size /= 2;
- } while (size >= 16);
- }
-
- if (IS_ERR(txq))
- return PTR_ERR(txq);
-
- if (trans->conf.queue_alloc_cmd_ver == 0) {
- memset(&cmd.old, 0, sizeof(cmd.old));
- cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
- cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
- cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
- cmd.old.tid = tid;
-
- if (hweight32(sta_mask) != 1) {
- ret = -EINVAL;
- goto error;
- }
- cmd.old.sta_id = ffs(sta_mask) - 1;
-
- hcmd.id = SCD_QUEUE_CFG;
- hcmd.len[0] = sizeof(cmd.old);
- hcmd.data[0] = &cmd.old;
- } else if (trans->conf.queue_alloc_cmd_ver == 3) {
- memset(&cmd.new, 0, sizeof(cmd.new));
- cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
- cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
- cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
- cmd.new.u.add.flags = cpu_to_le32(flags);
- cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
- cmd.new.u.add.tid = tid;
-
- hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
- hcmd.len[0] = sizeof(cmd.new);
- hcmd.data[0] = &cmd.new;
- } else {
- ret = -EOPNOTSUPP;
- goto error;
- }
-
- ret = iwl_trans_send_cmd(trans, &hcmd);
- if (ret)
- goto error;
-
- return iwl_pcie_txq_alloc_response(trans, txq, &hcmd);
-
-error:
- iwl_txq_gen2_free_memory(trans, txq);
- return ret;
-}
-
-void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", queue))
- return;
-
- /*
- * Upon HW Rfkill - we stop the device, and then stop the queues
- * in the op_mode. Just for the sake of the simplicity of the op_mode,
- * allow the op_mode to call txq_disable after it already called
- * stop_device.
- */
- if (!test_and_clear_bit(queue, trans_pcie->txqs.queue_used)) {
- WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
- "queue %d not used", queue);
- return;
- }
-
- iwl_txq_gen2_free(trans, queue);
-
- IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
-}
-
-void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i;
-
- memset(trans_pcie->txqs.queue_used, 0,
- sizeof(trans_pcie->txqs.queue_used));
-
- /* Free all TX queues */
- for (i = 0; i < ARRAY_SIZE(trans_pcie->txqs.txq); i++) {
- if (!trans_pcie->txqs.txq[i])
- continue;
-
- iwl_txq_gen2_free(trans, i);
- }
-}
-
-int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *queue;
- int ret;
-
- /* alloc and init the tx queue */
- if (!trans_pcie->txqs.txq[txq_id]) {
- queue = kzalloc(sizeof(*queue), GFP_KERNEL);
- if (!queue) {
- IWL_ERR(trans, "Not enough memory for tx queue\n");
- return -ENOMEM;
- }
- trans_pcie->txqs.txq[txq_id] = queue;
- ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
- if (ret) {
-			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
- goto error;
- }
- } else {
- queue = trans_pcie->txqs.txq[txq_id];
- }
-
- ret = iwl_txq_init(trans, queue, queue_size,
- (txq_id == trans->conf.cmd_queue));
- if (ret) {
-		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- trans_pcie->txqs.txq[txq_id]->id = txq_id;
- set_bit(txq_id, trans_pcie->txqs.queue_used);
-
- return 0;
-
-error:
- iwl_txq_gen2_tx_free(trans);
- return ret;
-}
-
-/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-
-/*
- * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
- * @trans: the transport layer data
- * @cmd: a pointer to the ucode command structure
- *
- * The function returns a negative value on failure. On success, it
- * returns the index (>= 0) of the command in the command queue.
- */
-int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- void *dup_buf = NULL;
- dma_addr_t phys_addr;
- int i, cmd_pos, idx;
- u16 copy_size, cmd_size, tb0_size;
- bool had_nocopy = false;
- u8 group_id = iwl_cmd_groupid(cmd->id);
- const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
- u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- struct iwl_tfh_tfd *tfd;
- unsigned long flags;
-
- if (WARN_ON(cmd->flags & CMD_BLOCK_TXQS))
- return -EINVAL;
-
- copy_size = sizeof(struct iwl_cmd_header_wide);
- cmd_size = sizeof(struct iwl_cmd_header_wide);
-
- for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- cmddata[i] = cmd->data[i];
- cmdlen[i] = cmd->len[i];
-
- if (!cmd->len[i])
- continue;
-
- /* need at least IWL_FIRST_TB_SIZE copied */
- if (copy_size < IWL_FIRST_TB_SIZE) {
- int copy = IWL_FIRST_TB_SIZE - copy_size;
-
- if (copy > cmdlen[i])
- copy = cmdlen[i];
- cmdlen[i] -= copy;
- cmddata[i] += copy;
- copy_size += copy;
- }
-
- if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
- had_nocopy = true;
- if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
- idx = -EINVAL;
- goto free_dup_buf;
- }
- } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
- /*
- * This is also a chunk that isn't copied
- * to the static buffer so set had_nocopy.
- */
- had_nocopy = true;
-
- /* only allowed once */
- if (WARN_ON(dup_buf)) {
- idx = -EINVAL;
- goto free_dup_buf;
- }
-
- dup_buf = kmemdup(cmddata[i], cmdlen[i],
- GFP_ATOMIC);
- if (!dup_buf)
- return -ENOMEM;
- } else {
- /* NOCOPY must not be followed by normal! */
- if (WARN_ON(had_nocopy)) {
- idx = -EINVAL;
- goto free_dup_buf;
- }
- copy_size += cmdlen[i];
- }
- cmd_size += cmd->len[i];
- }
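	/*
	 * Editorial annotation (not in the original source): a worked
	 * example of the rules the loop above enforces, for a
	 * hypothetical three-fragment command. A copied fragment may
	 * never follow a NOCOPY one, and DUP is allowed only once:
	 *
	 *	cmd->dataflags[0] = 0;			 // copied into out_cmd
	 *	cmd->dataflags[1] = IWL_HCMD_DFL_NOCOPY; // mapped in place
	 *	cmd->dataflags[2] = IWL_HCMD_DFL_DUP;	 // kmemdup'ed to dup_buf
	 *
	 * Setting dataflags[2] = 0 instead would trip WARN_ON(had_nocopy),
	 * and setting both NOCOPY and DUP on one fragment is rejected too.
	 */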
-
- /*
- * If any of the command structures end up being larger than the
- * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
- * separate TFDs, then we will need to increase the size of the buffers
- */
- if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
- "Command %s (%#x) is too large (%d bytes)\n",
- iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
- idx = -EINVAL;
- goto free_dup_buf;
- }
-
- spin_lock_irqsave(&txq->lock, flags);
-
- idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
- memset(tfd, 0, sizeof(*tfd));
-
- if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_irqrestore(&txq->lock, flags);
-
- IWL_ERR(trans, "No space in command queue\n");
- iwl_op_mode_nic_error(trans->op_mode,
- IWL_ERR_TYPE_CMD_QUEUE_FULL);
- iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);
- idx = -ENOSPC;
- goto free_dup_buf;
- }
-
- out_cmd = txq->entries[idx].cmd;
- out_meta = &txq->entries[idx].meta;
-
- /* re-initialize, this also marks the SG list as unused */
- memset(out_meta, 0, sizeof(*out_meta));
- if (cmd->flags & CMD_WANT_SKB)
- out_meta->source = cmd;
-
- /* set up the header */
- out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
- out_cmd->hdr_wide.group_id = group_id;
- out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
- out_cmd->hdr_wide.length =
- cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
- out_cmd->hdr_wide.reserved = 0;
- out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
- INDEX_TO_SEQ(txq->write_ptr));
-
- cmd_pos = sizeof(struct iwl_cmd_header_wide);
- copy_size = sizeof(struct iwl_cmd_header_wide);
-
- /* and copy the data that needs to be copied */
- for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- int copy;
-
- if (!cmd->len[i])
- continue;
-
- /* copy everything if not nocopy/dup */
- if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
- IWL_HCMD_DFL_DUP))) {
- copy = cmd->len[i];
-
- memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
- cmd_pos += copy;
- copy_size += copy;
- continue;
- }
-
- /*
- * Otherwise we need at least IWL_FIRST_TB_SIZE copied
- * in total (for bi-directional DMA), but copy up to what
- * we can fit into the payload for debug dump purposes.
- */
- copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
-
- memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
- cmd_pos += copy;
-
-		/* However, treat copy_size the proper way; we need it below */
- if (copy_size < IWL_FIRST_TB_SIZE) {
- copy = IWL_FIRST_TB_SIZE - copy_size;
-
- if (copy > cmd->len[i])
- copy = cmd->len[i];
- copy_size += copy;
- }
- }
-
- IWL_DEBUG_HC(trans,
- "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
- iwl_get_cmd_string(trans, cmd->id), group_id,
- out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
-
- /* start the TFD with the minimum copy bytes */
- tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
- memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
- iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
- tb0_size);
-
- /* map first command fragment, if any remains */
- if (copy_size > tb0_size) {
- phys_addr = dma_map_single(trans->dev,
- (u8 *)out_cmd + tb0_size,
- copy_size - tb0_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(trans->dev, phys_addr)) {
- idx = -ENOMEM;
- iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
- goto out;
- }
- iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
- copy_size - tb0_size);
- }
-
- /* map the remaining (adjusted) nocopy/dup fragments */
- for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- void *data = (void *)(uintptr_t)cmddata[i];
-
- if (!cmdlen[i])
- continue;
- if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
- IWL_HCMD_DFL_DUP)))
- continue;
- if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
- data = dup_buf;
- phys_addr = dma_map_single(trans->dev, data,
- cmdlen[i], DMA_TO_DEVICE);
- if (dma_mapping_error(trans->dev, phys_addr)) {
- idx = -ENOMEM;
- iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
- goto out;
- }
- iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
- }
-
- BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
- out_meta->flags = cmd->flags;
- if (WARN_ON_ONCE(txq->entries[idx].free_buf))
- kfree_sensitive(txq->entries[idx].free_buf);
- txq->entries[idx].free_buf = dup_buf;
-
- trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
-
- /* start timer if queue currently empty */
- if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-
- spin_lock(&trans_pcie->reg_lock);
- /* Increment and update queue's write index */
- txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
- iwl_txq_inc_wr_ptr(trans, txq);
- spin_unlock(&trans_pcie->reg_lock);
-
-out:
- spin_unlock_irqrestore(&txq->lock, flags);
-free_dup_buf:
- if (idx < 0)
- kfree(dup_buf);
- return idx;
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation
- * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2016-2017 Intel Deutschland GmbH
- */
-#include <linux/etherdevice.h>
-#include <linux/ieee80211.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/tcp.h>
-#include <net/ip6_checksum.h>
-#include <net/tso.h>
-
-#include "fw/api/commands.h"
-#include "fw/api/datapath.h"
-#include "fw/api/debug.h"
-#include "iwl-fh.h"
-#include "iwl-debug.h"
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-io.h"
-#include "iwl-scd.h"
-#include "iwl-op-mode.h"
-#include "internal.h"
-#include "fw/api/tx.h"
-
-/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
- * DMA services
- *
- * Theory of operation
- *
- * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
- * of buffer descriptors, each of which points to one or more data buffers for
- * the device to read from or fill. Driver and device exchange status of each
- * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
- * entries in each circular buffer, to protect against confusing empty and full
- * queue states.
- *
- * The device reads or writes the data in the queues via the device's several
- * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
- *
- * For the Tx queue there are low-mark and high-mark limits. If, after queuing
- * a packet for Tx, the free space drops below the low mark, the Tx queue is
- * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
- * space rises above the high mark, the Tx queue is resumed.
- *
- ***************************************************/
-
-
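As a minimal sketch of the space accounting described above (illustrative
only, not driver code; assumes a power-of-two queue size), the two reserved
entries fall out like this:

static inline u32 txq_space_sketch(u32 max_size, u32 write_ptr, u32 read_ptr)
{
	/* entries currently in use, with wrap-around handled by masking */
	u32 used = (write_ptr - read_ptr) & (max_size - 1);

	/* keep 2 entries empty so "full" and "empty" stay distinguishable */
	return used >= max_size - 2 ? 0 : max_size - 2 - used;
}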
-int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- if (WARN_ON(ptr->addr))
- return -EINVAL;
-
- ptr->addr = dma_alloc_coherent(trans->dev, size,
- &ptr->dma, GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
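The pair above is a small wrapper around dma_alloc_coherent() and
dma_free_coherent(). A hedged usage sketch (size and error handling are
illustrative):

	struct iwl_dma_ptr kw = {};	/* must start zeroed: alloc WARNs otherwise */

	if (iwl_pcie_alloc_dma_ptr(trans, &kw, 4096))
		return -ENOMEM;

	/* ... program kw.dma into the device, access kw.addr from the CPU ... */

	iwl_pcie_free_dma_ptr(trans, &kw);	/* re-zeroes, so a double free is harmless */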
-
-/*
- * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
- */
-static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- u32 reg = 0;
- int txq_id = txq->id;
-
- lockdep_assert_held(&txq->lock);
-
- /*
- * explicitly wake up the NIC if:
- * 1. shadow registers aren't enabled
-	 * 2. the NIC is woken up for commands outside this function,
-	 *    regardless of shadow registers
- * 3. there is a chance that the NIC is asleep
- */
- if (!trans->mac_cfg->base->shadow_reg_enable &&
- txq_id != trans->conf.cmd_queue &&
- test_bit(STATUS_TPOWER_PMI, &trans->status)) {
- /*
- * wake up nic if it's powered down ...
- * uCode will wake up, and interrupt us again, so next
- * time we'll skip this part.
- */
- reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
- txq_id, reg);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- txq->need_update = true;
- return;
- }
- }
-
- /*
- * if not in power-save mode, uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
- if (!txq->block)
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->write_ptr | (txq_id << 8));
-}
-
-void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i;
-
- for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
- struct iwl_txq *txq = trans_pcie->txqs.txq[i];
-
- if (!test_bit(i, trans_pcie->txqs.queue_used))
- continue;
-
- spin_lock_bh(&txq->lock);
- if (txq->need_update) {
- iwl_pcie_txq_inc_wr_ptr(trans, txq);
- txq->need_update = false;
- }
- spin_unlock_bh(&txq->lock);
- }
-}
-
-static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd,
- u8 idx, dma_addr_t addr, u16 len)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- hi_n_len |= iwl_get_dma_hi_addr(addr);
-
- tb->hi_n_len = cpu_to_le16(hi_n_len);
-
- tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
- return tfd->num_tbs & 0x1f;
-}
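Together these two helpers define the gen1 TB encoding: the low 32 address
bits live in tb->lo, while hi_n_len packs the 12-bit length into bits 4..15
and address bits 32..35 into the low nibble. A worked example with
illustrative values:

	dma_addr_t addr = 0x123456780ULL;	/* a 36-bit bus address */
	u16 len = 100;				/* 0x64 bytes */

	u32 lo = lower_32_bits(addr);		/* 0x23456780 */
	u16 hi_n_len = (len << 4) |		/* length -> bits 4..15 */
		       ((addr >> 32) & 0xf);	/* addr bits 32..35 -> low nibble */
	/* hi_n_len == 0x641; the gen1 getters later in this file reverse this exactly */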
-
-static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
- dma_addr_t addr, u16 len, bool reset)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- void *tfd;
- u32 num_tbs;
-
- tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr;
-
- if (reset)
- memset(tfd, 0, trans_pcie->txqs.tfd.size);
-
- num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
-
- /* Each TFD can point to a maximum max_tbs Tx buffers */
- if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans_pcie->txqs.tfd.max_tbs);
- return -EINVAL;
- }
-
- if (WARN(addr & ~IWL_TX_DMA_MASK,
- "Unaligned address = %llx\n", (unsigned long long)addr))
- return -EINVAL;
-
- iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len);
-
- return num_tbs;
-}
-
-static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (!trans->mac_cfg->base->apmg_wake_up_wa)
- return;
-
- spin_lock(&trans_pcie->reg_lock);
-
- if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
- spin_unlock(&trans_pcie->reg_lock);
- return;
- }
-
- trans_pcie->cmd_hold_nic_awake = false;
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- spin_unlock(&trans_pcie->reg_lock);
-}
-
-static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
- struct page *page)
-{
- struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));
-
- /* Decrease internal use count and unmap/free page if needed */
- if (refcount_dec_and_test(&info->use_count)) {
- dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
- DMA_TO_DEVICE);
-
- __free_page(page);
- }
-}
-
-void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_cmd_meta *cmd_meta)
-{
- struct page **page_ptr;
- struct page *next;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
- next = *page_ptr;
- *page_ptr = NULL;
-
- while (next) {
- struct iwl_tso_page_info *info;
- struct page *tmp = next;
-
- info = IWL_TSO_PAGE_INFO(page_address(next));
- next = info->next;
-
- /* Unmap the scatter gather list that is on the last page */
- if (!next && cmd_meta->sg_offset) {
- struct sg_table *sgt;
-
- sgt = (void *)((u8 *)page_address(tmp) +
- cmd_meta->sg_offset);
-
- dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
- }
-
- iwl_pcie_free_and_unmap_tso_page(trans, tmp);
- }
-}
-
-static inline dma_addr_t
-iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- dma_addr_t addr;
- dma_addr_t hi_len;
-
- addr = get_unaligned_le32(&tb->lo);
-
- if (sizeof(dma_addr_t) <= sizeof(u32))
- return addr;
-
- hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
-
- /*
- * shift by 16 twice to avoid warnings on 32-bit
- * (where this code never runs anyway due to the
- * if statement above)
- */
- return addr | ((hi_len << 16) << 16);
-}
-
-static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
- struct iwl_tfd *tfd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- tfd->num_tbs = 0;
-
- iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans_pcie->invalid_tx_cmd.dma,
- trans_pcie->invalid_tx_cmd.size);
-}
-
-static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_txq *txq, int index)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i, num_tbs;
- struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
-
- if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
-		/* @todo issue a fatal error, this is quite a serious situation */
- return;
- }
-
- /* TB1 is mapped directly, the rest is the TSO page and SG list. */
- if (meta->sg_offset)
- num_tbs = 2;
-
- /* first TB is never freed - it's the bidirectional DMA data */
-
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
- iwl_txq_gen1_tfd_tb_get_len(trans,
- tfd, i),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
- iwl_txq_gen1_tfd_tb_get_len(trans,
- tfd, i),
- DMA_TO_DEVICE);
- }
-
- meta->tbs = 0;
-
- iwl_txq_set_tfd_invalid_gen1(trans, tfd);
-}
-
-/**
- * iwl_txq_free_tfd - Free all chunks referenced by the TFD at @read_ptr
- * @trans: transport private data
- * @txq: tx queue
- * @read_ptr: the TXQ read_ptr to free
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
- int read_ptr)
-{
- /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
- * idx is bounded by n_window
- */
- int idx = iwl_txq_get_cmd_index(txq, read_ptr);
- struct sk_buff *skb;
-
- lockdep_assert_held(&txq->reclaim_lock);
-
- if (!txq->entries)
- return;
-
- /* We have only q->n_window txq->entries, but we use
- * TFD_QUEUE_SIZE_MAX tfds
- */
- if (trans->mac_cfg->gen2)
- iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_txq_get_tfd(trans, txq, read_ptr));
- else
- iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
- txq, read_ptr);
-
- /* free SKB */
- skb = txq->entries[idx].skb;
-
-	/* Can be called from an irqs-disabled context.
-	 * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
-}
-
-/*
- * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
- */
-static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
-
- if (!txq) {
- IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
- return;
- }
-
- spin_lock_bh(&txq->reclaim_lock);
- spin_lock(&txq->lock);
- while (txq->write_ptr != txq->read_ptr) {
- IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, txq->read_ptr);
-
- if (txq_id != trans->conf.cmd_queue) {
- struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
- struct iwl_cmd_meta *cmd_meta =
- &txq->entries[txq->read_ptr].meta;
-
- if (WARN_ON_ONCE(!skb))
- continue;
-
- iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
- }
- iwl_txq_free_tfd(trans, txq, txq->read_ptr);
- txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
-
- if (txq->read_ptr == txq->write_ptr &&
- txq_id == trans->conf.cmd_queue)
- iwl_pcie_clear_cmd_in_flight(trans);
- }
-
- while (!skb_queue_empty(&txq->overflow_q)) {
- struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
-
- iwl_op_mode_free_skb(trans->op_mode, skb);
- }
-
- spin_unlock(&txq->lock);
- spin_unlock_bh(&txq->reclaim_lock);
-
- /* just in case - this queue may have been stopped */
- iwl_trans_pcie_wake_queue(trans, txq);
-}
-
-/*
- * iwl_pcie_txq_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty the queue by removing and destroying all BDs.
- * Free all buffers.
- * Zero-fill, but do not free, the "txq" descriptor structure.
- */
-static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
- struct device *dev = trans->dev;
- int i;
-
- if (WARN_ON(!txq))
- return;
-
- iwl_pcie_txq_unmap(trans, txq_id);
-
- /* De-alloc array of command/tx buffers */
- if (txq_id == trans->conf.cmd_queue)
- for (i = 0; i < txq->n_window; i++) {
- kfree_sensitive(txq->entries[i].cmd);
- kfree_sensitive(txq->entries[i].free_buf);
- }
-
- /* De-alloc circular buffer of TFDs */
- if (txq->tfds) {
- dma_free_coherent(dev,
- trans_pcie->txqs.tfd.size *
- trans->mac_cfg->base->max_tfd_queue_size,
- txq->tfds, txq->dma_addr);
- txq->dma_addr = 0;
- txq->tfds = NULL;
-
- dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->n_window,
- txq->first_tb_bufs, txq->first_tb_dma);
- }
-
- kfree(txq->entries);
- txq->entries = NULL;
-
- timer_delete_sync(&txq->stuck_timer);
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-
-void iwl_pcie_tx_start(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int nq = trans->mac_cfg->base->num_of_queues;
- int chan;
- u32 reg_val;
- int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
- SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
-
-	/* make sure all queues are not stopped/used */
- memset(trans_pcie->txqs.queue_stopped, 0,
- sizeof(trans_pcie->txqs.queue_stopped));
- memset(trans_pcie->txqs.queue_used, 0,
- sizeof(trans_pcie->txqs.queue_used));
-
- trans_pcie->scd_base_addr =
- iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
-
- /* reset context data, TX status and translation data */
- iwl_trans_pcie_write_mem(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_MEM_LOWER_BOUND,
- NULL, clear_dwords);
-
- iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
- trans_pcie->txqs.scd_bc_tbls.dma >> 10);
-
- /* The chain extension of the SCD doesn't work well. This feature is
- * enabled by default by the HW, so we need to disable it manually.
- */
- if (trans->mac_cfg->base->scd_chain_ext_wa)
- iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
-
- iwl_trans_ac_txq_enable(trans, trans->conf.cmd_queue,
- trans->conf.cmd_fifo,
- IWL_DEF_WD_TIMEOUT);
-
- /* Activate all Tx DMA/FIFO channels */
- iwl_scd_activate_fifos(trans);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
- iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- /* Enable L1-Active */
- if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
- iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-}
-
-void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int txq_id;
-
- /*
-	 * We should never get here in gen2 trans mode; return early to
-	 * avoid invalid accesses.
- */
- if (WARN_ON_ONCE(trans->mac_cfg->gen2))
- return;
-
- for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
- txq_id++) {
-		struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
-
-		/* gen2 was rejected above, so only the gen1 write remains */
-		iwl_write_direct32(trans,
-				   FH_MEM_CBBC_QUEUE(trans, txq_id),
-				   txq->dma_addr >> 8);
- iwl_pcie_txq_unmap(trans, txq_id);
- txq->read_ptr = 0;
- txq->write_ptr = 0;
- }
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
- trans_pcie->kw.dma >> 4);
-
- /*
-	 * Send 0 as the scd_base_addr since the device may have been reset
-	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
- * contain garbage.
- */
- iwl_pcie_tx_start(trans);
-}
-
-static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ch, ret;
- u32 mask = 0;
-
- spin_lock_bh(&trans_pcie->irq_lock);
-
- if (!iwl_trans_grab_nic_access(trans))
- goto out;
-
- /* Stop each Tx DMA channel */
- for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
- iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
- }
-
- /* Wait for DMA channels to be idle */
- ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
- if (ret < 0)
- IWL_ERR(trans,
- "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
- ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
-
- iwl_trans_release_nic_access(trans);
-
-out:
- spin_unlock_bh(&trans_pcie->irq_lock);
-}
-
-/*
- * iwl_pcie_tx_stop - Stop all Tx DMA channels
- */
-int iwl_pcie_tx_stop(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int txq_id;
-
- /* Turn off all Tx DMA fifos */
- iwl_scd_deactivate_fifos(trans);
-
- /* Turn off all Tx DMA channels */
- iwl_pcie_tx_stop_fh(trans);
-
- /*
-	 * This function can be called before the op_mode has disabled the
- * queues. This happens when we have an rfkill interrupt.
- * Since we stop Tx altogether - mark the queues as stopped.
- */
- memset(trans_pcie->txqs.queue_stopped, 0,
- sizeof(trans_pcie->txqs.queue_stopped));
- memset(trans_pcie->txqs.queue_used, 0,
- sizeof(trans_pcie->txqs.queue_used));
-
- /* This can happen: start_hw, stop_device */
- if (!trans_pcie->txq_memory)
- return 0;
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
- txq_id++)
- iwl_pcie_txq_unmap(trans, txq_id);
-
- return 0;
-}
-
-/*
- * iwl_pcie_tx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl_pcie_tx_free(struct iwl_trans *trans)
-{
- int txq_id;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- memset(trans_pcie->txqs.queue_used, 0,
- sizeof(trans_pcie->txqs.queue_used));
-
- /* Tx queues */
- if (trans_pcie->txq_memory) {
- for (txq_id = 0;
- txq_id < trans->mac_cfg->base->num_of_queues;
- txq_id++) {
- iwl_pcie_txq_free(trans, txq_id);
- trans_pcie->txqs.txq[txq_id] = NULL;
- }
- }
-
- kfree(trans_pcie->txq_memory);
- trans_pcie->txq_memory = NULL;
-
- iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
-
- iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls);
-}
-
-void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- u32 txq_id = txq->id;
- u32 status;
- bool active;
- u8 fifo;
-
- if (trans->mac_cfg->gen2) {
- IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
- txq->read_ptr, txq->write_ptr);
- /* TODO: access new SCD registers and dump them */
- return;
- }
-
- status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
- fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
-
- IWL_ERR(trans,
- "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
- txq_id, active ? "" : "in", fifo,
- jiffies_to_msecs(txq->wd_timeout),
- txq->read_ptr, txq->write_ptr,
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (trans->mac_cfg->base->max_tfd_queue_size - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (trans->mac_cfg->base->max_tfd_queue_size - 1),
- iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
-}
-
-static void iwl_txq_stuck_timer(struct timer_list *t)
-{
- struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
- struct iwl_trans *trans = txq->trans;
-
- spin_lock(&txq->lock);
- /* check if triggered erroneously */
- if (txq->read_ptr == txq->write_ptr) {
- spin_unlock(&txq->lock);
- return;
- }
- spin_unlock(&txq->lock);
-
- iwl_txq_log_scd_error(trans, txq);
-
- iwl_force_nmi(trans);
-}
-
-int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, bool cmd_queue)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t num_entries = trans->mac_cfg->gen2 ?
- slots_num : trans->mac_cfg->base->max_tfd_queue_size;
- size_t tfd_sz;
- size_t tb0_buf_sz;
- int i;
-
- if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
- return -EINVAL;
-
- if (WARN_ON(txq->entries || txq->tfds))
- return -EINVAL;
-
- tfd_sz = trans_pcie->txqs.tfd.size * num_entries;
-
- timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
- txq->trans = trans;
-
- txq->n_window = slots_num;
-
- txq->entries = kcalloc(slots_num,
- sizeof(struct iwl_pcie_txq_entry),
- GFP_KERNEL);
-
- if (!txq->entries)
- goto error;
-
- if (cmd_queue)
- for (i = 0; i < slots_num; i++) {
- txq->entries[i].cmd =
- kmalloc(sizeof(struct iwl_device_cmd),
- GFP_KERNEL);
- if (!txq->entries[i].cmd)
- goto error;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device
- */
- txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->dma_addr, GFP_KERNEL);
- if (!txq->tfds)
- goto error;
-
- BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
-
- tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
-
- txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
- &txq->first_tb_dma,
- GFP_KERNEL);
- if (!txq->first_tb_bufs)
- goto err_free_tfds;
-
- for (i = 0; i < num_entries; i++) {
- void *tfd = iwl_txq_get_tfd(trans, txq, i);
-
- if (trans->mac_cfg->gen2)
- iwl_txq_set_tfd_invalid_gen2(trans, tfd);
- else
- iwl_txq_set_tfd_invalid_gen1(trans, tfd);
- }
-
- return 0;
-err_free_tfds:
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
- txq->tfds = NULL;
-error:
- if (txq->entries && cmd_queue)
- for (i = 0; i < slots_num; i++)
- kfree(txq->entries[i].cmd);
- kfree(txq->entries);
- txq->entries = NULL;
-
- return -ENOMEM;
-}
-
-#define BC_TABLE_SIZE (sizeof(struct iwl_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
-
-/*
- * iwl_pcie_tx_alloc - allocate TX context
- * Allocate all Tx DMA structures and initialize them
- */
-static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
-{
- int ret;
- int txq_id, slots_num;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 bc_tbls_size = trans->mac_cfg->base->num_of_queues;
-
- if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
- return -EINVAL;
-
- bc_tbls_size *= BC_TABLE_SIZE;
-
-	/*
-	 * It is not allowed to allocate twice, so warn when this happens.
-	 * We cannot rely on the previous allocation, so free and fail.
-	 */
- if (WARN_ON(trans_pcie->txq_memory)) {
- ret = -EINVAL;
- goto error;
- }
-
- ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls,
- bc_tbls_size);
- if (ret) {
- IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
- goto error;
- }
-
- /* Alloc keep-warm buffer */
- ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(trans, "Keep Warm allocation failed\n");
- goto error;
- }
-
- trans_pcie->txq_memory =
- kcalloc(trans->mac_cfg->base->num_of_queues,
- sizeof(struct iwl_txq), GFP_KERNEL);
- if (!trans_pcie->txq_memory) {
- IWL_ERR(trans, "Not enough memory for txq\n");
- ret = -ENOMEM;
- goto error;
- }
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
- txq_id++) {
- bool cmd_queue = (txq_id == trans->conf.cmd_queue);
-
- if (cmd_queue)
- slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->mac_cfg->base->min_txq_size);
- else
- slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->mac_cfg->base->min_ba_txq_size);
- trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
- ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
- slots_num, cmd_queue);
- if (ret) {
- IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
- goto error;
- }
- trans_pcie->txqs.txq[txq_id]->id = txq_id;
- }
-
- return 0;
-
-error:
- iwl_pcie_tx_free(trans);
-
- return ret;
-}
-
-/*
- * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_queue_init(struct iwl_txq *q, int slots_num)
-{
- q->n_window = slots_num;
-
- /* slots_num must be power-of-two size, otherwise
- * iwl_txq_get_cmd_index is broken.
- */
- if (WARN_ON(!is_power_of_2(slots_num)))
- return -EINVAL;
-
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
-
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
-
- q->write_ptr = 0;
- q->read_ptr = 0;
-
- return 0;
-}
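Plugging numbers into iwl_queue_init() makes the mark arithmetic concrete
(illustrative sketch, not driver code):

	struct iwl_txq q = {};

	iwl_queue_init(&q, 256);	/* low_mark = 256/4 = 64, high_mark = 256/8 = 32 */
	iwl_queue_init(&q, 8);		/* both clamped to the floors: low_mark = 4, high_mark = 2 */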
-
-int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, bool cmd_queue)
-{
- u32 tfd_queue_max_size =
- trans->mac_cfg->base->max_tfd_queue_size;
- int ret;
-
- txq->need_update = false;
-
- /* max_tfd_queue_size must be power-of-two size, otherwise
- * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
- */
- if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
- "Max tfd queue size must be a power of two, but is %d",
- tfd_queue_max_size))
- return -EINVAL;
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(txq, slots_num);
- if (ret)
- return ret;
-
- spin_lock_init(&txq->lock);
- spin_lock_init(&txq->reclaim_lock);
-
- if (cmd_queue) {
- static struct lock_class_key iwl_txq_cmd_queue_lock_class;
-
- lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
- }
-
- __skb_queue_head_init(&txq->overflow_q);
-
- return 0;
-}
-
-int iwl_pcie_tx_init(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
- int txq_id, slots_num;
- bool alloc = false;
-
- if (!trans_pcie->txq_memory) {
- ret = iwl_pcie_tx_alloc(trans);
- if (ret)
- goto error;
- alloc = true;
- }
-
- spin_lock_bh(&trans_pcie->irq_lock);
-
- /* Turn off all Tx DMA fifos */
- iwl_scd_deactivate_fifos(trans);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
- trans_pcie->kw.dma >> 4);
-
- spin_unlock_bh(&trans_pcie->irq_lock);
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
- txq_id++) {
- bool cmd_queue = (txq_id == trans->conf.cmd_queue);
-
- if (cmd_queue)
- slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->mac_cfg->base->min_txq_size);
- else
- slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->mac_cfg->base->min_ba_txq_size);
- ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
- cmd_queue);
- if (ret) {
- IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
-
- /*
- * Tell nic where to find circular buffer of TFDs for a
- * given Tx queue, and enable the DMA channel used for that
- * queue.
- * Circular buffer (TFD queue in DRAM) physical base address
- */
- iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- trans_pcie->txqs.txq[txq_id]->dma_addr >> 8);
- }
-
- iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
- if (trans->mac_cfg->base->num_of_queues > 20)
- iwl_set_bits_prph(trans, SCD_GP_CTRL,
- SCD_GP_CTRL_ENABLE_31_QUEUES);
-
- return 0;
-error:
-	/* Upon error, free only if we allocated something */
- if (alloc)
- iwl_pcie_tx_free(trans);
- return ret;
-}
-
-static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
- const struct iwl_host_cmd *cmd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- /* Make sure the NIC is still alive in the bus */
- if (test_bit(STATUS_TRANS_DEAD, &trans->status))
- return -ENODEV;
-
- if (!trans->mac_cfg->base->apmg_wake_up_wa)
- return 0;
-
- /*
- * wake up the NIC to make sure that the firmware will see the host
-	 * command - we will let the NIC sleep once all the host commands
-	 * have returned. This needs to be done only on NICs that have
-	 * apmg_wake_up_wa set (see above).
- */
- if (!_iwl_trans_pcie_grab_nic_access(trans, false))
- return -EIO;
-
- /*
- * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
- * There, we also returned immediately if cmd_hold_nic_awake is
- * already true, so it's OK to unconditionally set it to true.
- */
- trans_pcie->cmd_hold_nic_awake = true;
- spin_unlock(&trans_pcie->reg_lock);
-
- return 0;
-}
-
-static void iwl_txq_progress(struct iwl_txq *txq)
-{
- lockdep_assert_held(&txq->lock);
-
- if (!txq->wd_timeout)
- return;
-
- /*
- * station is asleep and we send data - that must
- * be uAPSD or PS-Poll. Don't rearm the timer.
- */
- if (txq->frozen)
- return;
-
- /*
- * if empty delete timer, otherwise move timer forward
- * since we're making progress on this queue
- */
- if (txq->read_ptr == txq->write_ptr)
- timer_delete(&txq->stuck_timer);
- else
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-}
-
-static inline bool iwl_txq_used(const struct iwl_txq *q, int i,
- int read_ptr, int write_ptr)
-{
- int index = iwl_txq_get_cmd_index(q, i);
- int r = iwl_txq_get_cmd_index(q, read_ptr);
- int w = iwl_txq_get_cmd_index(q, write_ptr);
-
- return w >= r ?
- (index >= r && index < w) :
- !(index < r && index >= w);
-}
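The ternary above handles write-pointer wrap-around: with no wrap (w >= r)
an index is in use iff it lies in [r, w); once the write pointer wraps
(w < r), the in-use region is everything outside [w, r). A hedged self-check,
assuming a queue q whose n_window makes the cmd-index mapping the identity
(e.g. 256):

	/* no wrap: r = 10, w = 20 -> only [10, 20) is used */
	WARN_ON(!iwl_txq_used(q, 15, 10, 20));
	WARN_ON(iwl_txq_used(q, 25, 10, 20));

	/* wrapped: r = 250, w = 4 -> [250, 256) and [0, 4) are used */
	WARN_ON(!iwl_txq_used(q, 252, 250, 4));
	WARN_ON(!iwl_txq_used(q, 2, 250, 4));
	WARN_ON(iwl_txq_used(q, 100, 250, 4));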
-
-/*
- * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As a result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
- int nfreed = 0;
- u16 r;
-
- lockdep_assert_held(&txq->lock);
-
- idx = iwl_txq_get_cmd_index(txq, idx);
- r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
-
- if (idx >= trans->mac_cfg->base->max_tfd_queue_size ||
- (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
- WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
- "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx,
- trans->mac_cfg->base->max_tfd_queue_size,
- txq->write_ptr, txq->read_ptr);
- return;
- }
-
- for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
- r = iwl_txq_inc_wrap(trans, r)) {
- txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
-
- if (nfreed++ > 0) {
- IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, txq->write_ptr, r);
- iwl_force_nmi(trans);
- }
- }
-
- if (txq->read_ptr == txq->write_ptr)
- iwl_pcie_clear_cmd_in_flight(trans);
-
- iwl_txq_progress(txq);
-}
-
-static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
- u16 txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
-
-/* Receiver address (actually, Rx station's index into station table),
- * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
-#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
-
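A worked example of the RA/TID plumbing (values illustrative): BUILD_RAxTID()
nibble-packs the station index above the TID, and iwl_pcie_txq_set_ratid_map()
then stores the result in the upper or lower half of the translation-table
dword depending on queue parity:

	u16 ra_tid = BUILD_RAxTID(5, 2);	/* (5 << 4) + 2 == 0x52 */

	iwl_pcie_txq_set_ratid_map(trans, ra_tid, 11);	/* odd qid: bits 16..31 */
	iwl_pcie_txq_set_ratid_map(trans, ra_tid, 10);	/* even qid: bits 0..15 */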
-bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int wdg_timeout)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
- int fifo = -1;
- bool scd_bug = false;
-
- if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used))
- WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
-
- txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
-
- if (cfg) {
- fifo = cfg->fifo;
-
- /* Disable the scheduler prior configuring the cmd queue */
- if (txq_id == trans->conf.cmd_queue &&
- trans->conf.scd_set_active)
- iwl_scd_enable_set_active(trans, 0);
-
- /* Stop this Tx queue before configuring it */
- iwl_scd_txq_set_inactive(trans, txq_id);
-
- /* Set this queue as a chain-building queue unless it is CMD */
- if (txq_id != trans->conf.cmd_queue)
- iwl_scd_txq_set_chain(trans, txq_id);
-
- if (cfg->aggregate) {
- u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
-
- /* enable aggregations for the queue */
- iwl_scd_txq_enable_agg(trans, txq_id);
- txq->ampdu = true;
- } else {
- /*
- * disable aggregations for the queue, this will also
- * make the ra_tid mapping configuration irrelevant
- * since it is now a non-AGG queue.
- */
- iwl_scd_txq_disable_agg(trans, txq_id);
-
- ssn = txq->read_ptr;
- }
- } else {
- /*
- * If we need to move the SCD write pointer by steps of
-		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
-		 * the op_mode know by returning true later.
-		 * Do this only when cfg is NULL, since this trick can only
-		 * be used with DQA enabled, which is true for mvm only -
-		 * and mvm never sets a cfg pointer.
-		 * This is really ugly, but it is the easiest way out of
- * this sad hardware issue.
- * This bug has been fixed on devices 9000 and up.
- */
- scd_bug = !trans->mac_cfg->mq_rx_supported &&
- !((ssn - txq->write_ptr) & 0x3f) &&
- (ssn != txq->write_ptr);
- if (scd_bug)
- ssn++;
- }
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- txq->read_ptr = (ssn & 0xff);
- txq->write_ptr = (ssn & 0xff);
- iwl_write_direct32(trans, HBUS_TARG_WRPTR,
- (ssn & 0xff) | (txq_id << 8));
-
- if (cfg) {
- u8 frame_limit = cfg->frame_limit;
-
- iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
- iwl_trans_write_mem32(trans,
- trans_pcie->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
- SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
- SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));
-
- /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
- iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
- (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
- (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
- SCD_QUEUE_STTS_REG_MSK);
-
- /* enable the scheduler for this queue (only) */
- if (txq_id == trans->conf.cmd_queue &&
- trans->conf.scd_set_active)
- iwl_scd_enable_set_active(trans, BIT(txq_id));
-
- IWL_DEBUG_TX_QUEUES(trans,
- "Activate queue %d on FIFO %d WrPtr: %d\n",
- txq_id, fifo, ssn & 0xff);
- } else {
- IWL_DEBUG_TX_QUEUES(trans,
- "Activate queue %d WrPtr: %d\n",
- txq_id, ssn & 0xff);
- }
-
- return scd_bug;
-}
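To make the workaround above concrete (illustrative values): with write_ptr
at 0x00 and a requested ssn of 0x40, the SCD write pointer would have to move
by exactly 0x40, which is the stuck case, so ssn is bumped to 0x41 and true
is returned for the op_mode to compensate:

	u16 ssn = 0x40, write_ptr = 0x00;
	bool stuck = !((ssn - write_ptr) & 0x3f) && (ssn != write_ptr);

	if (stuck)	/* true here: 0x40 is a non-zero multiple of 0x40 */
		ssn++;	/* now 0x41, and the caller learns of it via the return */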
-
-void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
- bool shared_mode)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
-
- txq->ampdu = !shared_mode;
-}
-
-void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
- bool configure_scd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 stts_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_QUEUE_OFFSET(txq_id);
- static const u32 zero_val[4] = {};
-
- trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
- trans_pcie->txqs.txq[txq_id]->frozen = false;
-
- /*
- * Upon HW Rfkill - we stop the device, and then stop the queues
- * in the op_mode. Just for the sake of the simplicity of the op_mode,
- * allow the op_mode to call txq_disable after it already called
- * stop_device.
- */
- if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) {
- WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
- "queue %d not used", txq_id);
- return;
- }
-
- if (configure_scd) {
- iwl_scd_txq_set_inactive(trans, txq_id);
-
- iwl_trans_pcie_write_mem(trans, stts_addr,
- (const void *)zero_val,
- ARRAY_SIZE(zero_val));
- }
-
- iwl_pcie_txq_unmap(trans, txq_id);
- trans_pcie->txqs.txq[txq_id]->ampdu = false;
-
- IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
-}
-
-/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-
-static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i;
-
- for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
- struct iwl_txq *txq = trans_pcie->txqs.txq[i];
-
- if (i == trans->conf.cmd_queue)
- continue;
-
- /* we skip the command queue (obviously) so it's OK to nest */
- spin_lock_nested(&txq->lock, 1);
-
- if (!block && !(WARN_ON_ONCE(!txq->block))) {
- txq->block--;
- if (!txq->block) {
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->write_ptr | (i << 8));
- }
- } else if (block) {
- txq->block++;
- }
-
- spin_unlock(&txq->lock);
- }
-}
-
-/*
- * iwl_pcie_enqueue_hcmd - enqueue a uCode command
- * @trans: the transport layer data
- * @cmd: a pointer to the ucode command structure
- *
- * The function returns a negative value on failure. On success, it
- * returns the index (>= 0) of the command in the command queue.
- */
-int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- void *dup_buf = NULL;
- dma_addr_t phys_addr;
- int idx;
- u16 copy_size, cmd_size, tb0_size;
- bool had_nocopy = false;
- u8 group_id = iwl_cmd_groupid(cmd->id);
- int i, ret;
- u32 cmd_pos;
- const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
- u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- unsigned long flags;
-
- if (WARN(!trans->conf.wide_cmd_header &&
- group_id > IWL_ALWAYS_LONG_GROUP,
- "unsupported wide command %#x\n", cmd->id))
- return -EINVAL;
-
- if (group_id != 0) {
- copy_size = sizeof(struct iwl_cmd_header_wide);
- cmd_size = sizeof(struct iwl_cmd_header_wide);
- } else {
- copy_size = sizeof(struct iwl_cmd_header);
- cmd_size = sizeof(struct iwl_cmd_header);
- }
-
- /* need one for the header if the first is NOCOPY */
- BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
-
- for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- cmddata[i] = cmd->data[i];
- cmdlen[i] = cmd->len[i];
-
- if (!cmd->len[i])
- continue;
-
- /* need at least IWL_FIRST_TB_SIZE copied */
- if (copy_size < IWL_FIRST_TB_SIZE) {
- int copy = IWL_FIRST_TB_SIZE - copy_size;
-
- if (copy > cmdlen[i])
- copy = cmdlen[i];
- cmdlen[i] -= copy;
- cmddata[i] += copy;
- copy_size += copy;
- }
-
- if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
- had_nocopy = true;
- if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
- idx = -EINVAL;
- goto free_dup_buf;
- }
- } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
- /*
- * This is also a chunk that isn't copied
- * to the static buffer so set had_nocopy.
- */
- had_nocopy = true;
-
- /* only allowed once */
- if (WARN_ON(dup_buf)) {
- idx = -EINVAL;
- goto free_dup_buf;
- }
-
- dup_buf = kmemdup(cmddata[i], cmdlen[i],
- GFP_ATOMIC);
- if (!dup_buf)
- return -ENOMEM;
- } else {
- /* NOCOPY must not be followed by normal! */
- if (WARN_ON(had_nocopy)) {
- idx = -EINVAL;
- goto free_dup_buf;
- }
- copy_size += cmdlen[i];
- }
- cmd_size += cmd->len[i];
- }
-
- /*
- * If any of the command structures end up being larger than
- * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
- * allocated into separate TFDs, then we will need to
- * increase the size of the buffers.
- */
- if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
- "Command %s (%#x) is too large (%d bytes)\n",
- iwl_get_cmd_string(trans, cmd->id),
- cmd->id, copy_size)) {
- idx = -EINVAL;
- goto free_dup_buf;
- }
-
- spin_lock_irqsave(&txq->lock, flags);
-
- if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_irqrestore(&txq->lock, flags);
-
- IWL_ERR(trans, "No space in command queue\n");
- iwl_op_mode_nic_error(trans->op_mode,
- IWL_ERR_TYPE_CMD_QUEUE_FULL);
- iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);
- idx = -ENOSPC;
- goto free_dup_buf;
- }
-
- idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- out_cmd = txq->entries[idx].cmd;
- out_meta = &txq->entries[idx].meta;
-
- /* re-initialize, this also marks the SG list as unused */
- memset(out_meta, 0, sizeof(*out_meta));
- if (cmd->flags & CMD_WANT_SKB)
- out_meta->source = cmd;
-
- /* set up the header */
- if (group_id != 0) {
- out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
- out_cmd->hdr_wide.group_id = group_id;
- out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
- out_cmd->hdr_wide.length =
- cpu_to_le16(cmd_size -
- sizeof(struct iwl_cmd_header_wide));
- out_cmd->hdr_wide.reserved = 0;
- out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
- INDEX_TO_SEQ(txq->write_ptr));
-
- cmd_pos = sizeof(struct iwl_cmd_header_wide);
- copy_size = sizeof(struct iwl_cmd_header_wide);
- } else {
- out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
- out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
- INDEX_TO_SEQ(txq->write_ptr));
- out_cmd->hdr.group_id = 0;
-
- cmd_pos = sizeof(struct iwl_cmd_header);
- copy_size = sizeof(struct iwl_cmd_header);
- }
-
- /* and copy the data that needs to be copied */
- for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- int copy;
-
- if (!cmd->len[i])
- continue;
-
- /* copy everything if not nocopy/dup */
- if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
- IWL_HCMD_DFL_DUP))) {
- copy = cmd->len[i];
-
- memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
- cmd_pos += copy;
- copy_size += copy;
- continue;
- }
-
- /*
- * Otherwise we need at least IWL_FIRST_TB_SIZE copied
- * in total (for bi-directional DMA), but copy up to what
- * we can fit into the payload for debug dump purposes.
- */
- copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
-
- memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
- cmd_pos += copy;
-
-		/* However, treat copy_size the proper way; we need it below */
- if (copy_size < IWL_FIRST_TB_SIZE) {
- copy = IWL_FIRST_TB_SIZE - copy_size;
-
- if (copy > cmd->len[i])
- copy = cmd->len[i];
- copy_size += copy;
- }
- }
-
- IWL_DEBUG_HC(trans,
- "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
- iwl_get_cmd_string(trans, cmd->id),
- group_id, out_cmd->hdr.cmd,
- le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
-
- /* start the TFD with the minimum copy bytes */
- tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
- memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
- iwl_pcie_txq_build_tfd(trans, txq,
- iwl_txq_get_first_tb_dma(txq, idx),
- tb0_size, true);
-
- /* map first command fragment, if any remains */
- if (copy_size > tb0_size) {
- phys_addr = dma_map_single(trans->dev,
- ((u8 *)&out_cmd->hdr) + tb0_size,
- copy_size - tb0_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
- txq->write_ptr);
- idx = -ENOMEM;
- goto out;
- }
-
- iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
- copy_size - tb0_size, false);
- }
-
- /* map the remaining (adjusted) nocopy/dup fragments */
- for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- void *data = (void *)(uintptr_t)cmddata[i];
-
- if (!cmdlen[i])
- continue;
- if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
- IWL_HCMD_DFL_DUP)))
- continue;
- if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
- data = dup_buf;
- phys_addr = dma_map_single(trans->dev, data,
- cmdlen[i], DMA_TO_DEVICE);
- if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
- txq->write_ptr);
- idx = -ENOMEM;
- goto out;
- }
-
- iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
- }
-
- BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
- out_meta->flags = cmd->flags;
- if (WARN_ON_ONCE(txq->entries[idx].free_buf))
- kfree_sensitive(txq->entries[idx].free_buf);
- txq->entries[idx].free_buf = dup_buf;
-
- trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
-
- /* start timer if queue currently empty */
- if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-
- ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
- if (ret < 0) {
- idx = ret;
- goto out;
- }
-
- if (cmd->flags & CMD_BLOCK_TXQS)
- iwl_trans_pcie_block_txq_ptrs(trans, true);
-
- /* Increment and update queue's write index */
- txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
- iwl_pcie_txq_inc_wr_ptr(trans, txq);
-
- out:
- spin_unlock_irqrestore(&txq->lock, flags);
- free_dup_buf:
- if (idx < 0)
- kfree(dup_buf);
- return idx;
-}
-
-/*
- * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
- * @rxb: Rx buffer to reclaim
- */
-void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- u8 group_id;
- u32 cmd_id;
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- int cmd_index;
- struct iwl_device_cmd *cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
-
- /* If a Tx command is being handled and it isn't in the actual
- * command queue then a command routing bug has been introduced
- * in the queue management code. */
- if (WARN(txq_id != trans->conf.cmd_queue,
- "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans->conf.cmd_queue, sequence, txq->read_ptr,
- txq->write_ptr)) {
- iwl_print_hex_error(trans, pkt, 32);
- return;
- }
-
- spin_lock_bh(&txq->lock);
-
- cmd_index = iwl_txq_get_cmd_index(txq, index);
- cmd = txq->entries[cmd_index].cmd;
- meta = &txq->entries[cmd_index].meta;
- group_id = cmd->hdr.group_id;
- cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
-
- if (trans->mac_cfg->gen2)
- iwl_txq_gen2_tfd_unmap(trans, meta,
- iwl_txq_get_tfd(trans, txq, index));
- else
- iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
-
- /* Input error checking is done when commands are added to queue. */
- if (meta->flags & CMD_WANT_SKB) {
- struct page *p = rxb_steal_page(rxb);
-
- meta->source->resp_pkt = pkt;
- meta->source->_rx_page_addr = (unsigned long)page_address(p);
- meta->source->_rx_page_order = trans_pcie->rx_page_order;
- }
-
- if (meta->flags & CMD_BLOCK_TXQS)
- iwl_trans_pcie_block_txq_ptrs(trans, false);
-
- iwl_pcie_cmdq_reclaim(trans, txq_id, index);
-
- if (!(meta->flags & CMD_ASYNC)) {
- if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
- IWL_WARN(trans,
- "HCMD_ACTIVE already clear for command %s\n",
- iwl_get_cmd_string(trans, cmd_id));
- }
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
- iwl_get_cmd_string(trans, cmd_id));
- wake_up(&trans_pcie->wait_command_queue);
- }
-
- meta->flags = 0;
-
- spin_unlock_bh(&txq->lock);
-}
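
/*
 * [Annotation, not part of the patch] iwl_pcie_hcmd_complete() recovers the
 * queue and ring index from the response's sequence field. A compilable
 * model of that packing, assuming the layout used by iwl-trans.h (index in
 * bits 0-7, queue in bits 8-12):
 */
#include <assert.h>
#include <stdint.h>

#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define INDEX_TO_SEQ(i)	((i) & 0xff)
#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define SEQ_TO_INDEX(s)	((s) & 0xff)

int main(void)
{
	uint16_t seq = QUEUE_TO_SEQ(9) | INDEX_TO_SEQ(42);

	assert(SEQ_TO_QUEUE(seq) == 9);		/* the command queue id */
	assert(SEQ_TO_INDEX(seq) == 42);	/* write_ptr at enqueue time */
	return 0;
}
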
-
-static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_txq *txq, u8 hdr_len,
- struct iwl_cmd_meta *out_meta)
-{
- u16 head_tb_len;
- int i;
-
- /*
- * Set up TFD's third entry to point directly to remainder
- * of skb's head, if any
- */
- head_tb_len = skb_headlen(skb) - hdr_len;
-
- if (head_tb_len > 0) {
- dma_addr_t tb_phys = dma_map_single(trans->dev,
- skb->data + hdr_len,
- head_tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- return -EINVAL;
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
- tb_phys, head_tb_len);
- iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
- }
-
- /* set up the remaining entries to point to the data */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- dma_addr_t tb_phys;
- int tb_idx;
-
- if (!skb_frag_size(frag))
- continue;
-
- tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- return -EINVAL;
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
- tb_phys, skb_frag_size(frag));
- tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
- skb_frag_size(frag), false);
- if (tb_idx < 0)
- return tb_idx;
-
- out_meta->tbs |= BIT(tb_idx);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_INET
-static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
- size_t len, struct sk_buff *skb)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);
- struct iwl_tso_page_info *info;
- struct page **page_ptr;
- dma_addr_t phys;
- void *ret;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
-
- if (WARN_ON(*page_ptr))
- return NULL;
-
- if (!p->page)
- goto alloc;
-
- /*
- * Check if there's enough room on this page
- *
- * Note that we put a page chaining pointer *last* in the
- * page - we need it somewhere, and if it's there then we
- * avoid DMA mapping the last bits of the page which may
- * trigger the 32-bit boundary hardware bug.
- *
- * (see also get_workaround_page() in tx-gen2.c)
- */
- if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {
- info = IWL_TSO_PAGE_INFO(page_address(p->page));
- goto out;
- }
-
- /* We don't have enough room on this page, get a new one. */
- iwl_pcie_free_and_unmap_tso_page(trans, p->page);
-
-alloc:
- p->page = alloc_page(GFP_ATOMIC);
- if (!p->page)
- return NULL;
- p->pos = page_address(p->page);
-
- info = IWL_TSO_PAGE_INFO(page_address(p->page));
-
- /* set the chaining pointer to NULL */
- info->next = NULL;
-
- /* Create a DMA mapping for the page */
- phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(trans->dev, phys))) {
- __free_page(p->page);
- p->page = NULL;
-
- return NULL;
- }
-
- /* Store physical address and set use count */
- info->dma_addr = phys;
- refcount_set(&info->use_count, 1);
-out:
- *page_ptr = p->page;
- /* Return an internal reference for the caller */
- refcount_inc(&info->use_count);
- ret = p->pos;
- p->pos += len;
-
- return ret;
-}
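
/*
 * [Annotation, not part of the patch] The per-CPU TSO page is a simple bump
 * allocator whose tail holds a small info block (DMA address, refcount,
 * chaining pointer), so only the leading part of the page is handed out.
 * A rough userspace model; sizes and names are illustrative:
 */
#include <stddef.h>
#include <stdint.h>

struct page_info {		/* stand-in for iwl_tso_page_info */
	uint64_t dma_addr;
	int use_count;
	void *next;
};

#define PAGE_SZ		4096
#define DATA_SZ		(PAGE_SZ - sizeof(struct page_info))

struct hdr_page {		/* stand-in for iwl_tso_hdr_page */
	uint8_t *page;
	uint8_t *pos;
};

static void *get_hdr(struct hdr_page *p, size_t len)
{
	void *ret;

	/* not enough room left: the real code maps a fresh page here */
	if ((size_t)(p->pos - p->page) + len >= DATA_SZ)
		return NULL;

	ret = p->pos;
	p->pos += len;
	return ret;
}
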
-
-/**
- * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
- * @sgt: scatter gather table
- * @offset: Offset into the mapped memory (i.e. SKB payload data)
- * @len: Length of the area
- *
- * Find the DMA address that corresponds to the SKB payload data at the
- * position given by @offset.
- *
- * Returns: Address for TB entry
- */
-dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
- unsigned int len)
-{
- struct scatterlist *sg;
- unsigned int sg_offset = 0;
- int i;
-
- /*
- * Search the mapped DMA areas in the SG for the area that contains the
- * data at offset with the given length.
- */
- for_each_sgtable_dma_sg(sgt, sg, i) {
- if (offset >= sg_offset &&
- offset + len <= sg_offset + sg_dma_len(sg))
- return sg_dma_address(sg) + offset - sg_offset;
-
- sg_offset += sg_dma_len(sg);
- }
-
- WARN_ON_ONCE(1);
-
- return DMA_MAPPING_ERROR;
-}
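
/*
 * [Annotation, not part of the patch] The lookup above is a linear walk over
 * the DMA-mapped scatterlist, succeeding only when the requested span fits
 * entirely inside one segment. The same walk over a plain array, purely
 * illustrative:
 */
#include <stdint.h>
#include <stdio.h>

struct seg {
	uint64_t dma;		/* bus address of the segment */
	unsigned int len;	/* mapped length */
};

static uint64_t tb_phys(const struct seg *sg, int n, unsigned int offset,
			unsigned int len)
{
	unsigned int sg_offset = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (offset >= sg_offset &&
		    offset + len <= sg_offset + sg[i].len)
			return sg[i].dma + offset - sg_offset;
		sg_offset += sg[i].len;
	}
	return ~0ULL;	/* the real code returns DMA_MAPPING_ERROR */
}

int main(void)
{
	struct seg map[] = { { 0x1000, 256 }, { 0x8000, 512 } };

	/* offset 300 lands 44 bytes into the second segment: 0x802c */
	printf("0x%llx\n", (unsigned long long)tb_phys(map, 2, 300, 64));
	return 0;
}
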
-
-/**
- * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
- * @trans: transport private data
- * @skb: the SKB to map
- * @cmd_meta: command meta to store the scatter list information for unmapping
- * @hdr: output argument for TSO headers
- * @hdr_room: requested length for TSO headers
- * @offset: offset into the data from which mapping should start
- *
- * Allocate space for a scatter gather list and TSO headers and map the SKB
- * using the scatter gather list. The SKB is unmapped when the page is
- * freed at the end of the operation.
- *
- * Returns: newly allocated and mapped scatter gather table with list
- */
-struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_cmd_meta *cmd_meta,
- u8 **hdr, unsigned int hdr_room,
- unsigned int offset)
-{
- struct sg_table *sgt;
- unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1;
- int orig_nents;
-
- if (WARN_ON_ONCE(skb_has_frag_list(skb)))
- return NULL;
-
- *hdr = iwl_pcie_get_page_hdr(trans,
- hdr_room + __alignof__(struct sg_table) +
- sizeof(struct sg_table) +
- n_segments * sizeof(struct scatterlist),
- skb);
- if (!*hdr)
- return NULL;
-
- sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
- sgt->sgl = (void *)(sgt + 1);
-
- sg_init_table(sgt->sgl, n_segments);
-
- /* Only map the data, not the header (it is copied to the TSO page) */
- orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset);
- if (WARN_ON_ONCE(orig_nents <= 0))
- return NULL;
-
- sgt->orig_nents = orig_nents;
-
- /* And map the entire SKB */
- if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
- return NULL;
-
- /* Store non-zero (i.e. valid) offset for unmapping */
- cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK;
-
- return sgt;
-}
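
/*
 * [Annotation, not part of the patch] iwl_pcie_prep_tso() carves a single
 * allocation into the TSO headers followed by an aligned sg_table and its
 * scatterlist array:
 *
 *   | hdr_room bytes | align pad | struct sg_table | n_segments entries |
 *
 * A sketch of the carving with stand-in types; the real code does the same
 * with PTR_ALIGN() on the kernel structures:
 */
#include <stdint.h>

struct table { void *sgl; };			/* stand-in for sg_table */
struct entry { uint64_t dma; unsigned int len; };  /* stand-in scatterlist */

static struct table *carve(uint8_t *hdr, unsigned long hdr_room)
{
	uintptr_t a = __alignof__(struct table);
	uintptr_t p = ((uintptr_t)(hdr + hdr_room) + a - 1) & ~(a - 1);
	struct table *t = (struct table *)p;

	t->sgl = t + 1;		/* the entries follow the table directly */
	return t;
}
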
-
-static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_txq *txq, u8 hdr_len,
- struct iwl_cmd_meta *out_meta,
- struct iwl_device_tx_cmd *dev_cmd,
- u16 tb1_len)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
- struct ieee80211_hdr *hdr = (void *)skb->data;
- unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- unsigned int data_offset = 0;
- u16 length, iv_len, amsdu_pad;
- dma_addr_t start_hdr_phys;
- u8 *start_hdr, *pos_hdr;
- struct sg_table *sgt;
- struct tso_t tso;
-
- /* if the packet is protected, then it must be CCMP or GCMP */
- BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
- iv_len = ieee80211_has_protected(hdr->frame_control) ?
- IEEE80211_CCMP_HDR_LEN : 0;
-
- trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_txq_get_tfd(trans, txq, txq->write_ptr),
- trans_pcie->txqs.tfd.size,
- &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
-
- ip_hdrlen = skb_network_header_len(skb);
- snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
- total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
- amsdu_pad = 0;
-
- /* total amount of header we may need for this A-MSDU */
- hdr_room = DIV_ROUND_UP(total_len, mss) *
- (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
-
- /* Our device supports 9 segments at most; they will fit in one page */
- sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
- snap_ip_tcp_hdrlen + hdr_len + iv_len);
- if (!sgt)
- return -ENOMEM;
-
- start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
- pos_hdr = start_hdr;
- memcpy(pos_hdr, skb->data + hdr_len, iv_len);
- pos_hdr += iv_len;
-
- /*
- * Pull the ieee80211 header + IV to be able to use TSO core,
- * we will restore it for the tx_status flow.
- */
- skb_pull(skb, hdr_len + iv_len);
-
- /*
- * Remove the length of all the headers that we don't actually
- * have in the MPDU by themselves, but that we duplicate into
- * all the different MSDUs inside the A-MSDU.
- */
- le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
-
- tso_start(skb, &tso);
-
- while (total_len) {
- /* this is the data left for this subframe */
- unsigned int data_left =
- min_t(unsigned int, mss, total_len);
- unsigned int hdr_tb_len;
- dma_addr_t hdr_tb_phys;
- u8 *subf_hdrs_start = pos_hdr;
-
- total_len -= data_left;
-
- memset(pos_hdr, 0, amsdu_pad);
- pos_hdr += amsdu_pad;
- amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
- data_left)) & 0x3;
- ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
- pos_hdr += ETH_ALEN;
- ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
- pos_hdr += ETH_ALEN;
-
- length = snap_ip_tcp_hdrlen + data_left;
- *((__be16 *)pos_hdr) = cpu_to_be16(length);
- pos_hdr += sizeof(length);
-
- /*
- * This also copies the SNAP header, which is considered
- * part of the MAC header.
- */
- tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
-
- pos_hdr += snap_ip_tcp_hdrlen;
-
- hdr_tb_len = pos_hdr - start_hdr;
- hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
-
- iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
- hdr_tb_len, false);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
- hdr_tb_phys, hdr_tb_len);
- /* add this subframe's headers' length to the tx_cmd */
- le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start);
-
- /* prepare the start_hdr for the next subframe */
- start_hdr = pos_hdr;
-
- /* put the payload */
- while (data_left) {
- unsigned int size = min_t(unsigned int, tso.size,
- data_left);
- dma_addr_t tb_phys;
-
- tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);
- /* Not a real mapping error, use direct comparison */
- if (unlikely(tb_phys == DMA_MAPPING_ERROR))
- return -EINVAL;
-
- iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
- size, false);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- tb_phys, size);
-
- data_left -= size;
- data_offset += size;
- tso_build_data(skb, &tso, size);
- }
- }
-
- dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
- DMA_TO_DEVICE);
-
- /* re-add the WiFi header and IV */
- skb_push(skb, hdr_len + iv_len);
-
- return 0;
-}
-#else /* CONFIG_INET */
-static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_txq *txq, u8 hdr_len,
- struct iwl_cmd_meta *out_meta,
- struct iwl_device_tx_cmd *dev_cmd,
- u16 tb1_len)
-{
- /* No A-MSDU without CONFIG_INET */
- WARN_ON(1);
-
- return -1;
-}
-#endif /* CONFIG_INET */
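
/*
 * [Annotation, not part of the patch] Each A-MSDU subframe is a 14-byte
 * 802.3 header plus SNAP/IP/TCP headers plus payload, and the *next*
 * subframe must start 4-byte aligned. A compilable model of the padding
 * computed inside iwl_fill_data_tbs_amsdu(); the header sizes are example
 * values:
 */
#include <stdio.h>

#define ETH_HDR_LEN 14	/* sizeof(struct ethhdr) */

int main(void)
{
	unsigned int snap_ip_tcp = 8 + 20 + 20;	/* SNAP + IPv4 + TCP */
	unsigned int data_left = 1349;		/* this subframe's payload */
	unsigned int pad =
		(4 - (ETH_HDR_LEN + snap_ip_tcp + data_left)) & 0x3;

	/* 1411-byte subframe -> 1 pad byte, next subframe starts at 1412 */
	printf("subframe len %u -> %u pad byte(s)\n",
	       ETH_HDR_LEN + snap_ip_tcp + data_left, pad);
	return 0;
}
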
-
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
-/*
- * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_bc_tbl_entry *scd_bc_tbl;
- int write_ptr = txq->write_ptr;
- int txq_id = txq->id;
- u8 sec_ctl = 0;
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
- struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
- u8 sta_id = tx_cmd->sta_id;
-
- scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
-
- sec_ctl = tx_cmd->sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += IEEE80211_CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += IEEE80211_TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
- break;
- }
-
- if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- len = DIV_ROUND_UP(len, 4);
-
- if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
- return;
-
- bc_ent = cpu_to_le16(len | (sta_id << 12));
-
- scd_bc_tbl[txq_id * BC_TABLE_SIZE + write_ptr].tfd_offset = bc_ent;
-
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
- bc_ent;
-}
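
/*
 * [Annotation, not part of the patch] The byte-count table entry packs a
 * 12-bit length with the station id in the top bits; pre-AX210 devices
 * count in dwords. A standalone model of the packing, with field widths
 * taken from the checks above:
 */
#include <assert.h>
#include <stdint.h>

static uint16_t bc_entry(unsigned int len_bytes, unsigned int sta_id)
{
	unsigned int len = (len_bytes + 3) / 4;	/* dword units, pre-AX210 */

	assert(len <= 0xfff);	/* mirrors the WARN_ON(len > 0xFFF) */
	return (uint16_t)(len | (sta_id << 12));
}

int main(void)
{
	assert(bc_entry(100, 3) == ((100 + 3) / 4 | 3 << 12));
	return 0;
}
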
-
-int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct ieee80211_hdr *hdr;
- struct iwl_tx_cmd_v6 *tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;
- struct iwl_cmd_meta *out_meta;
- struct iwl_txq *txq;
- dma_addr_t tb0_phys, tb1_phys, scratch_phys;
- void *tb1_addr;
- void *tfd;
- u16 len, tb1_len;
- bool wait_write_ptr;
- __le16 fc;
- u8 hdr_len;
- u16 wifi_seq;
- bool amsdu;
-
- txq = trans_pcie->txqs.txq[txq_id];
-
- if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
- "TX on unused queue %d\n", txq_id))
- return -EINVAL;
-
- if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
- __skb_linearize(skb))
- return -ENOMEM;
-
- /* mac80211 always puts the full header into the SKB's head,
- * so there's no need to check if it's readable there
- */
- hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
- hdr_len = ieee80211_hdrlen(fc);
-
- spin_lock(&txq->lock);
-
- if (iwl_txq_space(trans, txq) < txq->high_mark) {
- iwl_txq_stop(trans, txq);
-
- /* don't put the packet on the ring if there is no room */
- if (unlikely(iwl_txq_space(trans, txq) < 3)) {
- struct iwl_device_tx_cmd **dev_cmd_ptr;
-
- dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans->conf.cb_data_offs +
- sizeof(void *));
-
- *dev_cmd_ptr = dev_cmd;
- __skb_queue_tail(&txq->overflow_q, skb);
-
- spin_unlock(&txq->lock);
- return 0;
- }
- }
-
- /* In AGG mode, the index in the ring must correspond to the WiFi
- * sequence number. This is a HW requirement that helps the SCD parse
- * the BA.
- * Check here that the packets are in the right place on the ring.
- */
- wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- WARN_ONCE(txq->ampdu &&
- (wifi_seq & 0xff) != txq->write_ptr,
- "Q: %d WiFi Seq %d tfdNum %d",
- txq_id, wifi_seq, txq->write_ptr);
-
- /* Set up driver data for this TFD */
- txq->entries[txq->write_ptr].skb = skb;
- txq->entries[txq->write_ptr].cmd = dev_cmd;
-
- dev_cmd->hdr.sequence =
- cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(txq->write_ptr)));
-
- tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
- scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd_v6, scratch);
-
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[txq->write_ptr].meta;
- memset(out_meta, 0, sizeof(*out_meta));
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = sizeof(struct iwl_tx_cmd_v6) + sizeof(struct iwl_cmd_header) +
- hdr_len - IWL_FIRST_TB_SIZE;
- /* do not align A-MSDU to dword as the subframe header aligns it */
- amsdu = ieee80211_is_data_qos(fc) &&
- (*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_A_MSDU_PRESENT);
- if (!amsdu) {
- tb1_len = ALIGN(len, 4);
- /* Tell NIC about any 2-byte padding after MAC header */
- if (tb1_len != len)
- tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
- } else {
- tb1_len = len;
- }
-
- /*
- * The first TB points to bi-directional DMA data, we'll
- * memcpy the data into it later.
- */
- iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
- IWL_FIRST_TB_SIZE, true);
-
- /* there must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v6) < IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_v6, scratch) >
- IWL_FIRST_TB_SIZE);
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
- goto out_err;
- iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
-
- trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_txq_get_tfd(trans, txq, txq->write_ptr),
- trans_pcie->txqs.tfd.size,
- &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
- hdr_len);
-
- /*
- * If gso_size wasn't set, don't give the frame "amsdu treatment"
- * (adding subframes, etc.).
- * This can happen in some testing flows when the amsdu was already
- * pre-built, and we just need to send the resulting skb.
- */
- if (amsdu && skb_shinfo(skb)->gso_size) {
- if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
- out_meta, dev_cmd,
- tb1_len)))
- goto out_err;
- } else {
- struct sk_buff *frag;
-
- if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
- out_meta)))
- goto out_err;
-
- skb_walk_frags(skb, frag) {
- if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
- out_meta)))
- goto out_err;
- }
- }
-
- /* building the A-MSDU might have changed this data, so memcpy it now */
- memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
-
- tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
- /* Set up entry for this TFD in Tx byte-count array */
- iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
- iwl_txq_gen1_tfd_get_num_tbs(tfd));
-
- wait_write_ptr = ieee80211_has_morefrags(fc);
-
- /* start timer if queue currently empty */
- if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
- /*
- * If the TXQ is active, then set the timer; if not,
- * store the remainder so that the timer will be armed
- * with the right value when the station wakes up.
- */
- if (!txq->frozen)
- mod_timer(&txq->stuck_timer,
- jiffies + txq->wd_timeout);
- else
- txq->frozen_expiry_remainder = txq->wd_timeout;
- }
-
- /* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
- if (!wait_write_ptr)
- iwl_pcie_txq_inc_wr_ptr(trans, txq);
-
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually.
- */
- spin_unlock(&txq->lock);
- return 0;
-out_err:
- iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
- spin_unlock(&txq->lock);
- return -1;
-}
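
/*
 * [Annotation, not part of the patch] TB1 covers the remainder of the TX
 * command plus the 802.11 header, rounded up to a dword for non-A-MSDU
 * frames; when rounding adds padding, the device is told via
 * TX_CMD_FLG_MH_PAD. Illustrative arithmetic:
 */
#include <stdio.h>

#define ALIGN4(x) (((x) + 3U) & ~3U)

int main(void)
{
	unsigned int len = 36 + 26;	/* e.g. cmd remainder + QoS header */
	unsigned int tb1_len = ALIGN4(len);

	printf("len=%u tb1_len=%u mh_pad=%s\n", len, tb1_len,
	       tb1_len != len ? "yes" : "no");
	return 0;
}
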
-
-static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq,
- int read_ptr)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
- int txq_id = txq->id;
- u8 sta_id = 0;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
- struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
-
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != trans->conf.cmd_queue)
- sta_id = tx_cmd->sta_id;
-
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
-
- scd_bc_tbl[txq_id * BC_TABLE_SIZE + read_ptr].tfd_offset = bc_ent;
-
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
- bc_ent;
-}
-
-/* Frees buffers until index _not_ inclusive */
-void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- struct sk_buff_head *skbs, bool is_flush)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
- int tfd_num, read_ptr, last_to_free;
- int txq_read_ptr, txq_write_ptr;
-
- /* This function is not meant to release the cmd queue */
- if (WARN_ON(txq_id == trans->conf.cmd_queue))
- return;
-
- if (WARN_ON(!txq))
- return;
-
- tfd_num = iwl_txq_get_cmd_index(txq, ssn);
-
- spin_lock_bh(&txq->reclaim_lock);
-
- spin_lock(&txq->lock);
- txq_read_ptr = txq->read_ptr;
- txq_write_ptr = txq->write_ptr;
- spin_unlock(&txq->lock);
-
- /* There is nothing to do if we are flushing an empty queue */
- if (is_flush && txq_write_ptr == txq_read_ptr)
- goto out;
-
- read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr);
-
- if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) {
- IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
- txq_id, ssn);
- goto out;
- }
-
- if (read_ptr == tfd_num)
- goto out;
-
- IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
- txq_id, read_ptr, txq_read_ptr, tfd_num, ssn);
-
- /* Since we free until index _not_ inclusive, the entry before index is
- * the last one we will free. That entry must be in use.
- */
- last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
-
- if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) {
- IWL_ERR(trans,
- "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free,
- trans->mac_cfg->base->max_tfd_queue_size,
- txq_write_ptr, txq_read_ptr);
-
- iwl_op_mode_time_point(trans->op_mode,
- IWL_FW_INI_TIME_POINT_FAKE_TX,
- NULL);
- goto out;
- }
-
- if (WARN_ON(!skb_queue_empty(skbs)))
- goto out;
-
- for (;
- read_ptr != tfd_num;
- txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
- read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) {
- struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
- struct sk_buff *skb = txq->entries[read_ptr].skb;
-
- if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
- read_ptr, txq_read_ptr, txq_id))
- continue;
-
- iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
-
- __skb_queue_tail(skbs, skb);
-
- txq->entries[read_ptr].skb = NULL;
-
- if (!trans->mac_cfg->gen2)
- iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
- txq_read_ptr);
-
- iwl_txq_free_tfd(trans, txq, txq_read_ptr);
- }
-
- spin_lock(&txq->lock);
- txq->read_ptr = txq_read_ptr;
-
- iwl_txq_progress(txq);
-
- if (iwl_txq_space(trans, txq) > txq->low_mark &&
- test_bit(txq_id, trans_pcie->txqs.queue_stopped)) {
- struct sk_buff_head overflow_skbs;
- struct sk_buff *skb;
-
- __skb_queue_head_init(&overflow_skbs);
- skb_queue_splice_init(&txq->overflow_q,
- is_flush ? skbs : &overflow_skbs);
-
- /*
- * We are going to transmit from the overflow queue.
- * Remember this state so that wait_for_txq_empty will know we
- * are adding more packets to the TFD queue. It cannot rely on
- * the state of &txq->overflow_q, as we just emptied it, but
- * haven't TXed the content yet.
- */
- txq->overflow_tx = true;
-
- /*
- * This is tricky: we are in the reclaim path and are holding
- * reclaim_lock, so no one will try to access the txq data
- * from that path. Tx is stopped, so no new frames can race
- * with us either. Bottom line: it is safe to unlock and re-lock later.
- */
- spin_unlock(&txq->lock);
-
- while ((skb = __skb_dequeue(&overflow_skbs))) {
- struct iwl_device_tx_cmd *dev_cmd_ptr;
-
- dev_cmd_ptr = *(void **)((u8 *)skb->cb +
- trans->conf.cb_data_offs +
- sizeof(void *));
-
- /*
- * Note that we can very well be overflowing again.
- * In that case, iwl_txq_space will be small again
- * and we won't wake mac80211's queue.
- */
- iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
- }
-
- if (iwl_txq_space(trans, txq) > txq->low_mark)
- iwl_trans_pcie_wake_queue(trans, txq);
-
- spin_lock(&txq->lock);
- txq->overflow_tx = false;
- }
-
- spin_unlock(&txq->lock);
-out:
- spin_unlock_bh(&txq->reclaim_lock);
-}
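
/*
 * [Annotation, not part of the patch] Reclaim refuses indices outside the
 * half-open window [read_ptr, write_ptr) on the ring. For a power-of-two
 * ring, the check iwl_txq_used() performs reduces to comparing modular
 * distances, modeled standalone:
 */
#include <assert.h>
#include <stdbool.h>

static bool index_used(int i, int r, int w, int qsize)
{
	return ((i - r) & (qsize - 1)) < ((w - r) & (qsize - 1));
}

int main(void)
{
	/* window [250, 4) on a 256-entry ring wraps around the end */
	assert(index_used(253, 250, 4, 256));
	assert(index_used(2, 250, 4, 256));
	assert(!index_used(100, 250, 4, 256));
	return 0;
}
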
-
- /* Set the read and write pointers of a specific txq */
-void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
-
- spin_lock_bh(&txq->lock);
-
- txq->write_ptr = ptr;
- txq->read_ptr = txq->write_ptr;
-
- spin_unlock_bh(&txq->lock);
-}
-
-void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
- unsigned long txqs, bool freeze)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int queue;
-
- for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
- struct iwl_txq *txq = trans_pcie->txqs.txq[queue];
- unsigned long now;
-
- spin_lock_bh(&txq->lock);
-
- now = jiffies;
-
- if (txq->frozen == freeze)
- goto next_queue;
-
- IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
- freeze ? "Freezing" : "Waking", queue);
-
- txq->frozen = freeze;
-
- if (txq->read_ptr == txq->write_ptr)
- goto next_queue;
-
- if (freeze) {
- if (unlikely(time_after(now,
- txq->stuck_timer.expires))) {
- /*
- * The timer should have fired, maybe it is
- * spinning right now on the lock.
- */
- goto next_queue;
- }
- /* remember how long until the timer fires */
- txq->frozen_expiry_remainder =
- txq->stuck_timer.expires - now;
- timer_delete(&txq->stuck_timer);
- goto next_queue;
- }
-
- /*
- * Wake a non-empty queue -> arm timer with the
- * remainder before it froze
- */
- mod_timer(&txq->stuck_timer,
- now + txq->frozen_expiry_remainder);
-
-next_queue:
- spin_unlock_bh(&txq->lock);
- }
-}
-
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-
-static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd,
- const char *cmd_str)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
- int cmd_idx;
- int ret;
-
- IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
-
- if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- "Command %s: a command is already active!\n", cmd_str))
- return -EIO;
-
- IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
-
- if (trans->mac_cfg->gen2)
- cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
- else
- cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
-
- if (cmd_idx < 0) {
- ret = cmd_idx;
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
- cmd_str, ret);
- return ret;
- }
-
- ret = wait_event_timeout(trans_pcie->wait_command_queue,
- !test_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
- cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
- txq->read_ptr, txq->write_ptr);
-
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
- cmd_str);
- ret = -ETIMEDOUT;
-
- iwl_trans_pcie_sync_nmi(trans);
- goto cancel;
- }
-
- if (test_bit(STATUS_FW_ERROR, &trans->status)) {
- if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
- &trans->status)) {
- IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
- dump_stack();
- }
- ret = -EIO;
- goto cancel;
- }
-
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
- ret = -ERFKILL;
- goto cancel;
- }
-
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
- IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
- ret = -EIO;
- goto cancel;
- }
-
- return 0;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
- * TX cmd queue. Otherwise, if the response comes
- * in later, it could write to an invalid
- * address (cmd->meta.source).
- */
- txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
- }
-
- if (cmd->resp_pkt) {
- iwl_free_resp(cmd);
- cmd->resp_pkt = NULL;
- }
-
- return ret;
-}
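
/*
 * [Annotation, not part of the patch] The sync path is a flag-plus-waitqueue
 * handshake; reduced to its skeleton (kernel-style pseudocode, not meant to
 * compile on its own):
 *
 *	set_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 *	enqueue_hcmd(trans, cmd);
 *	if (!wait_event_timeout(trans_pcie->wait_command_queue,
 *				!test_bit(STATUS_SYNC_HCMD_ACTIVE,
 *					  &trans->status),
 *				HOST_COMPLETE_TIMEOUT)) {
 *		iwl_trans_pcie_sync_nmi(trans);
 *		return -ETIMEDOUT;
 *	}
 *
 * The matching wake-up lives in iwl_pcie_hcmd_complete(), which clears the
 * bit and calls wake_up() once the firmware response arrives.
 */
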
-
-int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
-
- /* Make sure the NIC is still alive in the bus */
- if (test_bit(STATUS_TRANS_DEAD, &trans->status))
- return -ENODEV;
-
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
- cmd->id);
- return -ERFKILL;
- }
-
- if (cmd->flags & CMD_ASYNC) {
- int ret;
-
- IWL_DEBUG_INFO(trans, "Sending async command %s\n", cmd_str);
-
- /* An asynchronous command cannot expect an SKB to be set. */
- if (WARN_ON(cmd->flags & CMD_WANT_SKB))
- return -EINVAL;
-
- if (trans->mac_cfg->gen2)
- ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
- else
- ret = iwl_pcie_enqueue_hcmd(trans, cmd);
-
- if (ret < 0) {
- IWL_ERR(trans,
- "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_get_cmd_string(trans, cmd->id), ret);
- return ret;
- }
- return 0;
- }
-
- return iwl_trans_pcie_send_hcmd_sync(trans, cmd, cmd_str);
-}
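
/*
 * [Annotation, not part of the patch] A typical caller-side pattern for the
 * entry point above, sketched with an illustrative command id and with
 * error handling abbreviated; op modes normally go through
 * iwl_trans_send_cmd() rather than calling the transport directly:
 */
static int example_echo(struct iwl_trans *trans)
{
	struct iwl_host_cmd hcmd = {
		.id = ECHO_CMD,			/* illustrative command */
		.flags = CMD_WANT_SKB,		/* synchronous, keep response */
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		return ret;

	/* ... inspect hcmd.resp_pkt ... */
	iwl_free_resp(&hcmd);
	return 0;
}
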