/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
#include "iwl-fw-dbg.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF
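/*
 * Firmware sections whose destination address falls inside this window live
 * in the extended SRAM range; iwl_pcie_load_section() below sets
 * LMPM_CHICK_EXTENDED_ADDR_SPACE around the DMA transfer for such sections.
 */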
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}
107 static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
109 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
110 struct page *page = NULL;
116 /* default max_power is maximum */
122 if (WARN(max_power > 26,
123 "External buffer size for monitor is too big %d, check the FW TLV\n",
127 if (trans_pcie->fw_mon_page) {
128 dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
129 trans_pcie->fw_mon_size,
135 for (power = max_power; power >= 11; power--) {
139 order = get_order(size);
140 page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
145 phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
147 if (dma_mapping_error(trans->dev, phys)) {
148 __free_pages(page, order);
153 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
158 if (WARN_ON_ONCE(!page))
161 if (power != max_power)
163 "Sorry - debug buffer is only %luK while you requested %luK\n",
164 (unsigned long)BIT(power - 10),
165 (unsigned long)BIT(max_power - 10));
167 trans_pcie->fw_mon_page = page;
168 trans_pcie->fw_mon_phys = phys;
169 trans_pcie->fw_mon_size = size;
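/*
 * Indirect access to the "shared" (SHR) register space: the target address
 * is written to HEEP_CTRL_WRD_PCIEX_CTRL_REG together with an opcode in the
 * top nibble (2 = read, 3 = write), and the data travels through
 * HEEP_CTRL_WRD_PCIEX_DATA_REG.
 */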
172 static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
174 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
175 ((reg & 0x0000ffff) | (2 << 28)));
176 return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
179 static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
181 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
182 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
183 ((reg & 0x0000ffff) | (3 << 28)));
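/*
 * Select the NIC power source: auxiliary power (Vaux) if requested and the
 * device can assert PME# from D3cold, main power (Vmain) otherwise. Devices
 * without an APMG block are left untouched.
 */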
186 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
188 if (trans->cfg->apmg_not_supported)
191 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
192 iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
193 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
194 ~APMG_PS_CTRL_MSK_PWR_SRC);
196 iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
197 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
198 ~APMG_PS_CTRL_MSK_PWR_SRC);
202 #define PCI_CFG_RETRY_TIMEOUT 0x041
204 static void iwl_pcie_apm_config(struct iwl_trans *trans)
206 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
211 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
212 * Check if BIOS (or OS) enabled L1-ASPM on this device.
213 * If so (likely), disable L0S, so device moves directly L0->L1;
214 * costs negligible amount of power savings.
215 * If not (unlikely), enable L0S, so there is at least some
216 * power savings, even without L1.
218 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
219 if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
220 iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
222 iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
223 trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
225 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
226 trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
227 dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
228 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
229 trans->ltr_enabled ? "En" : "Dis");
233 * Start up NIC's basic functionality after it has been reset
234 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
235 * NOTE: This does not load uCode nor start the embedded processor
237 static int iwl_pcie_apm_init(struct iwl_trans *trans)
240 IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
243 * Use "set_bit" below rather than "write", to preserve any hardware
244 * bits already set by default after reset.
247 /* Disable L0S exit timer (platform NMI Work/Around) */
248 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
249 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
250 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
253 * Disable L0s without affecting L1;
254 * don't wait for ICH L0s (ICH bug W/A)
256 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
257 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
259 /* Set FH wait threshold to maximum (HW error during stress W/A) */
260 iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
263 * Enable HAP INTA (interrupt from management bus) to
264 * wake device's PCI Express link L1a -> L0s
266 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
267 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
269 iwl_pcie_apm_config(trans);
271 /* Configure analog phase-lock-loop before activating to D0A */
272 if (trans->cfg->base_params->pll_cfg_val)
273 iwl_set_bit(trans, CSR_ANA_PLL_CFG,
274 trans->cfg->base_params->pll_cfg_val);
277 * Set "initialization complete" bit to move adapter from
278 * D0U* --> D0A* (powered-up active) state.
280 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
283 * Wait for clock stabilization; once stabilized, access to
284 * device-internal resources is supported, e.g. iwl_write_prph()
285 * and accesses to uCode SRAM.
287 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
288 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
289 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
291 IWL_DEBUG_INFO(trans, "Failed to init the card\n");
295 if (trans->cfg->host_interrupt_operation_mode) {
 * This is a bit of an abuse - this is needed for 7260 / 3160
 * only, yet we check host_interrupt_operation_mode even though
 * the work-around below is not strictly related to it.
 *
 * Enable the oscillator to count wake up time for L1 exit. This
 * consumes slightly more power (100uA) - but allows us to be sure
 * that we wake up from L1 on time.
 *
 * This looks weird: read twice the same register, discard the
 * value, set a bit, and yet again, read that same register
 * just to discard the value. But that's the way the hardware
 * seems to like it.
310 iwl_read_prph(trans, OSC_CLK);
311 iwl_read_prph(trans, OSC_CLK);
312 iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
313 iwl_read_prph(trans, OSC_CLK);
314 iwl_read_prph(trans, OSC_CLK);
318 * Enable DMA clock and wait for it to stabilize.
320 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
321 * bits do not disable clocks. This preserves any hardware
322 * bits already set by default in "CLK_CTRL_REG" after reset.
324 if (!trans->cfg->apmg_not_supported) {
325 iwl_write_prph(trans, APMG_CLK_EN_REG,
326 APMG_CLK_VAL_DMA_CLK_RQT);
329 /* Disable L1-Active */
330 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
331 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
333 /* Clear the interrupt in APMG if the NIC is in RFKILL */
334 iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
335 APMG_RTC_INT_STT_RFKILL);
338 set_bit(STATUS_DEVICE_ENABLED, &trans->status);
345 * Enable LP XTAL to avoid HW bug where device may consume much power if
346 * FW is not loaded after device reset. LP XTAL is disabled by default
347 * after device HW reset. Do it only if XTAL is fed by internal source.
348 * Configure device's "persistence" mode to avoid resetting XTAL again when
349 * SHRD_HW_RST occurs in S3.
351 static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
355 u32 apmg_xtal_cfg_reg;
359 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
360 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
362 /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
363 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
368 * Set "initialization complete" bit to move adapter from
369 * D0U* --> D0A* (powered-up active) state.
371 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
374 * Wait for clock stabilization; once stabilized, access to
375 * device-internal resources is possible.
377 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
378 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
379 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
381 if (WARN_ON(ret < 0)) {
382 IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
383 /* Release XTAL ON request */
384 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
385 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
390 * Clear "disable persistence" to avoid LP XTAL resetting when
391 * SHRD_HW_RST is applied in S3.
393 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
394 APMG_PCIDEV_STT_VAL_PERSIST_DIS);
397 * Force APMG XTAL to be active to prevent its disabling by HW
398 * caused by APMG idle state.
400 apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
401 SHR_APMG_XTAL_CFG_REG);
402 iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
404 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
407 * Reset entire device again - do controller reset (results in
408 * SHRD_HW_RST). Turn MAC off before proceeding.
410 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
414 /* Enable LP XTAL by indirect access through CSR */
415 apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
416 iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
417 SHR_APMG_GP1_WF_XTAL_LP_EN |
418 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
420 /* Clear delay line clock power up */
421 dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
422 iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
423 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
426 * Enable persistence mode to avoid LP XTAL resetting when
427 * SHRD_HW_RST is applied in S3.
429 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
430 CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
433 * Clear "initialization complete" bit to move adapter from
434 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
436 iwl_clear_bit(trans, CSR_GP_CNTRL,
437 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
439 /* Activates XTAL resources monitor */
440 __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
441 CSR_MONITOR_XTAL_RESOURCES);
443 /* Release XTAL ON request */
444 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
445 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
448 /* Release APMG XTAL */
449 iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
451 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
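/*
 * Stop the device's bus-master DMA activity and poll (up to 100 usec) for
 * the "master disabled" bit to latch in CSR_RESET.
 */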
454 static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
458 /* stop device's busmaster DMA activity */
459 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
461 ret = iwl_poll_bit(trans, CSR_RESET,
462 CSR_RESET_REG_FLAG_MASTER_DISABLED,
463 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
465 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
467 IWL_DEBUG_INFO(trans, "stop master\n");
472 static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
474 IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
477 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
478 iwl_pcie_apm_init(trans);
480 /* inform ME that we are leaving */
481 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
482 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
483 APMG_PCIDEV_STT_VAL_WAKE_ME);
484 else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
485 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
486 CSR_RESET_LINK_PWR_MGMT_DISABLED);
487 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
488 CSR_HW_IF_CONFIG_REG_PREPARE |
489 CSR_HW_IF_CONFIG_REG_ENABLE_PME);
491 iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
492 CSR_RESET_LINK_PWR_MGMT_DISABLED);
497 clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
499 /* Stop device's DMA activity */
500 iwl_pcie_apm_stop_master(trans);
502 if (trans->cfg->lp_xtal_workaround) {
503 iwl_pcie_apm_lp_xtal_enable(trans);
507 /* Reset the entire device */
508 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
513 * Clear "initialization complete" bit to move adapter from
514 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
516 iwl_clear_bit(trans, CSR_GP_CNTRL,
517 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
520 static int iwl_pcie_nic_init(struct iwl_trans *trans)
522 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
525 spin_lock(&trans_pcie->irq_lock);
526 iwl_pcie_apm_init(trans);
528 spin_unlock(&trans_pcie->irq_lock);
530 iwl_pcie_set_pwr(trans, false);
532 iwl_op_mode_nic_config(trans->op_mode);
534 /* Allocate the RX queue, or reset if it is already allocated */
535 iwl_pcie_rx_init(trans);
537 /* Allocate or reset and init all Tx and Command queues */
538 if (iwl_pcie_tx_init(trans))
541 if (trans->cfg->base_params->shadow_reg_enable) {
542 /* enable shadow regs in HW */
543 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
544 IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
550 #define HW_READY_TIMEOUT (50)
552 /* Note: returns poll_bit return value, which is >= 0 if success */
553 static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
557 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
558 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
560 /* See if we got it */
561 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
562 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
563 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
567 iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
569 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
573 /* Note: returns standard 0/-ERROR code */
574 static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
580 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
582 ret = iwl_pcie_set_hw_ready(trans);
583 /* If the card is ready, exit 0 */
587 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
588 CSR_RESET_LINK_PWR_MGMT_DISABLED);
591 for (iter = 0; iter < 10; iter++) {
592 /* If HW is not ready, prepare the conditions to check again */
593 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
594 CSR_HW_IF_CONFIG_REG_PREPARE);
597 ret = iwl_pcie_set_hw_ready(trans);
601 usleep_range(200, 1000);
603 } while (t < 150000);
607 IWL_ERR(trans, "Couldn't prepare the card\n");
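/*
 * Copy one firmware chunk from host DRAM to device SRAM at dst_addr by
 * programming the FH service channel DMA, then wait (up to 5 seconds) for
 * the ucode_write_complete interrupt from the device.
 */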
615 static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
616 dma_addr_t phy_addr, u32 byte_cnt)
618 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
622 trans_pcie->ucode_write_complete = false;
624 if (!iwl_trans_grab_nic_access(trans, &flags))
627 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
628 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
630 iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
633 iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
634 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
636 iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
637 (iwl_get_dma_hi_addr(phy_addr)
638 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
640 iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
641 BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
642 BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
643 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
645 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
646 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
647 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
648 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
650 iwl_trans_release_nic_access(trans, &flags);
652 ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
653 trans_pcie->ucode_write_complete, 5 * HZ);
655 IWL_ERR(trans, "Failed to load firmware chunk!\n");
662 static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
663 const struct fw_desc *section)
667 u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
670 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
673 v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
674 GFP_KERNEL | __GFP_NOWARN);
676 IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
677 chunk_sz = PAGE_SIZE;
678 v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
679 &p_addr, GFP_KERNEL);
684 for (offset = 0; offset < section->len; offset += chunk_sz) {
685 u32 copy_size, dst_addr;
686 bool extended_addr = false;
688 copy_size = min_t(u32, chunk_sz, section->len - offset);
689 dst_addr = section->offset + offset;
691 if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
692 dst_addr <= IWL_FW_MEM_EXTENDED_END)
693 extended_addr = true;
696 iwl_set_bits_prph(trans, LMPM_CHICK,
697 LMPM_CHICK_EXTENDED_ADDR_SPACE);
699 memcpy(v_addr, (u8 *)section->data + offset, copy_size);
700 ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
704 iwl_clear_bits_prph(trans, LMPM_CHICK,
705 LMPM_CHICK_EXTENDED_ADDR_SPACE);
709 "Could not load the [%d] uCode section\n",
715 dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
 * Driver takes ownership of the secure machine before FW load,
 * to prevent a race with the BT load.
 * W/A for a ROM bug (should be removed in the next Si step).
724 static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
726 u32 val, loop = 1000;
 * Check the RSA semaphore is accessible.
 * If the HW isn't locked and the RSA semaphore isn't accessible,
 * the semaphore is write protected and we can't take ownership of it here.
733 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
734 if (val & (BIT(1) | BIT(17))) {
736 "can't access the RSA semaphore it is write protected\n");
740 /* take ownership on the AUX IF */
741 iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
742 iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
745 iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
746 val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
748 iwl_write_prph(trans, RSA_ENABLE, 0);
756 IWL_ERR(trans, "Failed to take ownership on secure machine\n");
760 static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
761 const struct fw_img *image,
763 int *first_ucode_section)
766 int i, ret = 0, sec_num = 0x1;
767 u32 val, last_read_idx = 0;
771 *first_ucode_section = 0;
774 (*first_ucode_section)++;
777 for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
 * sections from the CPU2 sections.
 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
 * non-paged sections from the CPU2 paging sections.
786 if (!image->sec[i].data ||
787 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
788 image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
790 "Break since Data not valid or Empty section, sec = %d\n",
795 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
799 /* Notify the ucode of the loaded section number and status */
800 val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
801 val = val | (sec_num << shift_param);
802 iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
803 sec_num = (sec_num << 1) | 0x1;
806 *first_ucode_section = last_read_idx;
809 iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
811 iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
816 static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
817 const struct fw_img *image,
819 int *first_ucode_section)
823 u32 last_read_idx = 0;
827 *first_ucode_section = 0;
830 (*first_ucode_section)++;
833 for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
 * sections from the CPU2 sections.
 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
 * non-paged sections from the CPU2 paging sections.
842 if (!image->sec[i].data ||
843 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
844 image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
846 "Break since Data not valid or Empty section, sec = %d\n",
851 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
856 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
857 iwl_set_bits_prph(trans,
858 CSR_UCODE_LOAD_STATUS_ADDR,
859 (LMPM_CPU_UCODE_LOADING_COMPLETED |
860 LMPM_CPU_HDRS_LOADING_COMPLETED |
861 LMPM_CPU_UCODE_LOADING_STARTED) <<
864 *first_ucode_section = last_read_idx;
869 static void iwl_pcie_apply_destination(struct iwl_trans *trans)
871 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
872 const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
877 "DBG DEST version is %d - expect issues\n",
880 IWL_INFO(trans, "Applying debug destination %s\n",
881 get_fw_dbg_mode_string(dest->monitor_mode));
883 if (dest->monitor_mode == EXTERNAL_MODE)
884 iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
886 IWL_WARN(trans, "PCI should have external buffer debug\n");
888 for (i = 0; i < trans->dbg_dest_reg_num; i++) {
889 u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
890 u32 val = le32_to_cpu(dest->reg_ops[i].val);
892 switch (dest->reg_ops[i].op) {
894 iwl_write32(trans, addr, val);
897 iwl_set_bit(trans, addr, BIT(val));
900 iwl_clear_bit(trans, addr, BIT(val));
903 iwl_write_prph(trans, addr, val);
906 iwl_set_bits_prph(trans, addr, BIT(val));
909 iwl_clear_bits_prph(trans, addr, BIT(val));
912 if (iwl_read_prph(trans, addr) & BIT(val)) {
914 "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
920 IWL_ERR(trans, "FW debug - unknown OP %d\n",
921 dest->reg_ops[i].op);
927 if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
928 iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
929 trans_pcie->fw_mon_phys >> dest->base_shift);
930 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
931 iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
932 (trans_pcie->fw_mon_phys +
933 trans_pcie->fw_mon_size - 256) >>
936 iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
937 (trans_pcie->fw_mon_phys +
938 trans_pcie->fw_mon_size) >>
943 static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
944 const struct fw_img *image)
946 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
948 int first_ucode_section;
950 IWL_DEBUG_FW(trans, "working with %s CPU\n",
951 image->is_dual_cpus ? "Dual" : "Single");
953 /* load to FW the binary non secured sections of CPU1 */
954 ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
958 if (image->is_dual_cpus) {
959 /* set CPU2 header address */
960 iwl_write_prph(trans,
961 LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
962 LMPM_SECURE_CPU2_HDR_MEM_SPACE);
964 /* load to FW the binary sections of CPU2 */
965 ret = iwl_pcie_load_cpu_sections(trans, image, 2,
966 &first_ucode_section);
971 /* supported for 7000 only for the moment */
972 if (iwlwifi_mod_params.fw_monitor &&
973 trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
974 iwl_pcie_alloc_fw_monitor(trans, 0);
976 if (trans_pcie->fw_mon_size) {
977 iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
978 trans_pcie->fw_mon_phys >> 4);
979 iwl_write_prph(trans, MON_BUFF_END_ADDR,
980 (trans_pcie->fw_mon_phys +
981 trans_pcie->fw_mon_size) >> 4);
983 } else if (trans->dbg_dest_tlv) {
984 iwl_pcie_apply_destination(trans);
987 /* release CPU reset */
988 iwl_write32(trans, CSR_RESET, 0);
993 static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
994 const struct fw_img *image)
997 int first_ucode_section;
999 IWL_DEBUG_FW(trans, "working with %s CPU\n",
1000 image->is_dual_cpus ? "Dual" : "Single");
1002 if (trans->dbg_dest_tlv)
1003 iwl_pcie_apply_destination(trans);
1005 /* TODO: remove in the next Si step */
1006 ret = iwl_pcie_rsa_race_bug_wa(trans);
1010 /* configure the ucode to be ready to get the secured image */
1011 /* release CPU reset */
1012 iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
1014 /* load to FW the binary Secured sections of CPU1 */
1015 ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
1016 &first_ucode_section);
1020 /* load to FW the binary sections of CPU2 */
1021 return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
1022 &first_ucode_section);
1025 static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1027 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1028 bool hw_rfkill, was_hw_rfkill;
1030 lockdep_assert_held(&trans_pcie->mutex);
1032 if (trans_pcie->is_down)
1035 trans_pcie->is_down = true;
1037 was_hw_rfkill = iwl_is_rfkill_set(trans);
1039 /* tell the device to stop sending interrupts */
1040 spin_lock(&trans_pcie->irq_lock);
1041 iwl_disable_interrupts(trans);
1042 spin_unlock(&trans_pcie->irq_lock);
1044 /* device going down, Stop using ICT table */
1045 iwl_pcie_disable_ict(trans);
 * If a HW restart happens during firmware loading,
 * then the firmware loading might call this function
 * and later it might be called again due to the
 * restart. So don't process again if the device is
 * already stopped.
1054 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
1055 IWL_DEBUG_INFO(trans,
1056 "DEVICE_ENABLED bit was set and is now cleared\n");
1057 iwl_pcie_tx_stop(trans);
1058 iwl_pcie_rx_stop(trans);
1060 /* Power-down device's busmaster DMA clocks */
1061 if (!trans->cfg->apmg_not_supported) {
1062 iwl_write_prph(trans, APMG_CLK_DIS_REG,
1063 APMG_CLK_VAL_DMA_CLK_RQT);
1068 /* Make sure (redundant) we've released our request to stay awake */
1069 iwl_clear_bit(trans, CSR_GP_CNTRL,
1070 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1072 /* Stop the device, and put it in low power state */
1073 iwl_pcie_apm_stop(trans, false);
1075 /* stop and reset the on-board processor */
1076 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1080 * Upon stop, the APM issues an interrupt if HW RF kill is set.
 * This is a bug in certain versions of the hardware.
1082 * Certain devices also keep sending HW RF kill interrupt all
1083 * the time, unless the interrupt is ACKed even if the interrupt
1084 * should be masked. Re-ACK all the interrupts here.
1086 spin_lock(&trans_pcie->irq_lock);
1087 iwl_disable_interrupts(trans);
1088 spin_unlock(&trans_pcie->irq_lock);
1090 /* clear all status bits */
1091 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1092 clear_bit(STATUS_INT_ENABLED, &trans->status);
1093 clear_bit(STATUS_TPOWER_PMI, &trans->status);
1094 clear_bit(STATUS_RFKILL, &trans->status);
 * Even if we stop the HW, we still want the RF kill
 * interrupt.
1100 iwl_enable_rfkill_int(trans);
1103 * Check again since the RF kill state may have changed while
1104 * all the interrupts were disabled, in this case we couldn't
 * receive the RF kill interrupt and update the state in the
 * op_mode.
 * Don't call the op_mode if the rfkill state hasn't changed.
1108 * This allows the op_mode to call stop_device from the rfkill
1109 * notification without endless recursion. Under very rare
1110 * circumstances, we might have a small recursion if the rfkill
1111 * state changed exactly now while we were called from stop_device.
1112 * This is very unlikely but can happen and is supported.
1114 hw_rfkill = iwl_is_rfkill_set(trans);
1116 set_bit(STATUS_RFKILL, &trans->status);
1118 clear_bit(STATUS_RFKILL, &trans->status);
1119 if (hw_rfkill != was_hw_rfkill)
1120 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1122 /* re-take ownership to prevent other users from stealing the device */
1123 iwl_pcie_prepare_card_hw(trans);
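/*
 * Wait for any interrupt handlers that may still be running: every allocated
 * MSI-X vector when MSI-X is enabled, or the single MSI/INTx interrupt
 * otherwise.
 */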
1126 static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
1128 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1130 if (trans_pcie->msix_enabled) {
1133 for (i = 0; i < trans_pcie->allocated_vector; i++)
1134 synchronize_irq(trans_pcie->msix_entries[i].vector);
1136 synchronize_irq(trans_pcie->pci_dev->irq);
1140 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1141 const struct fw_img *fw, bool run_in_rfkill)
1143 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1147 /* This may fail if AMT took ownership of the device */
1148 if (iwl_pcie_prepare_card_hw(trans)) {
1149 IWL_WARN(trans, "Exit HW not ready\n");
1154 iwl_enable_rfkill_int(trans);
1156 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1159 * We enabled the RF-Kill interrupt and the handler may very
1160 * well be running. Disable the interrupts to make sure no other
1161 * interrupt can be fired.
1163 iwl_disable_interrupts(trans);
1165 /* Make sure it finished running */
1166 iwl_pcie_synchronize_irqs(trans);
1168 mutex_lock(&trans_pcie->mutex);
1170 /* If platform's RF_KILL switch is NOT set to KILL */
1171 hw_rfkill = iwl_is_rfkill_set(trans);
1173 set_bit(STATUS_RFKILL, &trans->status);
1175 clear_bit(STATUS_RFKILL, &trans->status);
1176 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1177 if (hw_rfkill && !run_in_rfkill) {
1182 /* Someone called stop_device, don't try to start_fw */
1183 if (trans_pcie->is_down) {
1185 "Can't start_fw since the HW hasn't been started\n");
1190 /* make sure rfkill handshake bits are cleared */
1191 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1192 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1193 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1195 /* clear (again), then enable host interrupts */
1196 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1198 ret = iwl_pcie_nic_init(trans);
1200 IWL_ERR(trans, "Unable to init nic\n");
1205 * Now, we load the firmware and don't want to be interrupted, even
 * by the RF-Kill interrupt (hence mask all the interrupts besides the
1207 * FH_TX interrupt which is needed to load the firmware). If the
1208 * RF-Kill switch is toggled, we will find out after having loaded
1209 * the firmware and return the proper value to the caller.
1211 iwl_enable_fw_load_int(trans);
1213 /* really make sure rfkill handshake bits are cleared */
1214 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1215 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1217 /* Load the given image to the HW */
1218 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1219 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1221 ret = iwl_pcie_load_given_ucode(trans, fw);
1222 iwl_enable_interrupts(trans);
1224 /* re-check RF-Kill state since we may have missed the interrupt */
1225 hw_rfkill = iwl_is_rfkill_set(trans);
1227 set_bit(STATUS_RFKILL, &trans->status);
1229 clear_bit(STATUS_RFKILL, &trans->status);
1231 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1232 if (hw_rfkill && !run_in_rfkill)
1236 mutex_unlock(&trans_pcie->mutex);
1240 static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1242 iwl_pcie_reset_ict(trans);
1243 iwl_pcie_tx_start(trans, scd_addr);
1246 static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1248 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1250 mutex_lock(&trans_pcie->mutex);
1251 _iwl_trans_pcie_stop_device(trans, low_power);
1252 mutex_unlock(&trans_pcie->mutex);
1255 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
1257 struct iwl_trans_pcie __maybe_unused *trans_pcie =
1258 IWL_TRANS_GET_PCIE_TRANS(trans);
1260 lockdep_assert_held(&trans_pcie->mutex);
1262 if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
1263 _iwl_trans_pcie_stop_device(trans, true);
1266 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
1270 /* Enable persistence mode to avoid reset */
1271 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
1272 CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
1275 iwl_disable_interrupts(trans);
1278 * in testing mode, the host stays awake and the
1279 * hardware won't be reset (not even partially)
1284 iwl_pcie_disable_ict(trans);
1286 iwl_pcie_synchronize_irqs(trans);
1288 iwl_clear_bit(trans, CSR_GP_CNTRL,
1289 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1290 iwl_clear_bit(trans, CSR_GP_CNTRL,
1291 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1295 * reset TX queues -- some of their registers reset during S3
1296 * so if we don't reset everything here the D3 image would try
1297 * to execute some invalid memory upon resume
1299 iwl_trans_pcie_tx_reset(trans);
1302 iwl_pcie_set_pwr(trans, true);
1305 static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1306 enum iwl_d3_status *status,
1307 bool test, bool reset)
1313 iwl_enable_interrupts(trans);
1314 *status = IWL_D3_STATUS_ALIVE;
 * Also enables interrupts - none will happen as the device doesn't
 * know we're waking it up, only when the opmode actually tells it
 * after this call.
1323 iwl_pcie_reset_ict(trans);
1325 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1326 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1328 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1331 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
1332 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1333 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1336 IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
1340 iwl_pcie_set_pwr(trans, false);
1343 iwl_clear_bit(trans, CSR_GP_CNTRL,
1344 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1346 iwl_trans_pcie_tx_reset(trans);
1348 ret = iwl_pcie_rx_init(trans);
1351 "Failed to resume the device (RX reset)\n");
1356 val = iwl_read32(trans, CSR_RESET);
1357 if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
1358 *status = IWL_D3_STATUS_RESET;
1360 *status = IWL_D3_STATUS_ALIVE;
1365 struct iwl_causes_list {
1371 static struct iwl_causes_list causes_list[] = {
1372 {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
1373 {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
1374 {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
1375 {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
1376 {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
1377 {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
1378 {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
1379 {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
1380 {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
1381 {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
1382 {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
1383 {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
1384 {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
1385 {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
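/*
 * Each entry maps an interrupt cause (cause_num) to the register that masks
 * it (mask_reg) and to its byte offset in the IVAR table (addr), as
 * programmed by iwl_pcie_init_msix() below.
 */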
1388 static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
1390 u32 val, max_rx_vector, i;
1391 struct iwl_trans *trans = trans_pcie->trans;
1393 max_rx_vector = trans_pcie->allocated_vector - 1;
1395 if (!trans_pcie->msix_enabled)
1398 iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
1401 * Each cause from the list above and the RX causes is represented as
1402 * a byte in the IVAR table. We access the first (N - 1) bytes and map
1403 * them to the (N - 1) vectors so these vectors will be used as rx
1404 * vectors. Then access all non rx causes and map them to the
1405 * default queue (N'th queue).
1407 for (i = 0; i < max_rx_vector; i++) {
1408 iwl_write8(trans, CSR_MSIX_RX_IVAR(i), MSIX_FH_INT_CAUSES_Q(i));
1409 iwl_clear_bit(trans, CSR_MSIX_FH_INT_MASK_AD,
1410 BIT(MSIX_FH_INT_CAUSES_Q(i)));
1413 for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
1414 val = trans_pcie->default_irq_num |
1415 MSIX_NON_AUTO_CLEAR_CAUSE;
1416 iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
1417 iwl_clear_bit(trans, causes_list[i].mask_reg,
1418 causes_list[i].cause_num);
1420 trans_pcie->fh_init_mask =
1421 ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
1422 trans_pcie->fh_mask = trans_pcie->fh_init_mask;
1423 trans_pcie->hw_init_mask =
1424 ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
1425 trans_pcie->hw_mask = trans_pcie->hw_init_mask;
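/*
 * Interrupt setup: devices with multi-queue RX try MSI-X first, with one
 * vector per RX queue plus a default vector for all other causes; otherwise,
 * or if MSI-X allocation fails, fall back to MSI, keeping legacy INTx usable
 * as a last resort (see the PCI_COMMAND_INTX_DISABLE work-around below).
 */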
1428 static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1429 struct iwl_trans *trans)
1431 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1436 if (trans->cfg->mq_rx_supported) {
1437 max_vector = min_t(u32, (num_possible_cpus() + 1),
1438 IWL_MAX_RX_HW_QUEUES);
1439 for (i = 0; i < max_vector; i++)
1440 trans_pcie->msix_entries[i].entry = i;
1442 ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
1443 MSIX_MIN_INTERRUPT_VECTORS,
1446 IWL_DEBUG_INFO(trans,
1447 "Enable MSI-X allocate %d interrupt vector\n",
1449 trans_pcie->allocated_vector = ret;
1450 trans_pcie->default_irq_num =
1451 trans_pcie->allocated_vector - 1;
1452 trans_pcie->trans->num_rx_queues =
1453 trans_pcie->allocated_vector - 1;
1454 trans_pcie->msix_enabled = true;
1458 IWL_DEBUG_INFO(trans,
1459 "ret = %d %s move to msi mode\n", ret,
1461 "can't allocate more than 1 interrupt vector" :
1462 "failed to enable msi-x mode");
1463 pci_disable_msix(pdev);
1466 ret = pci_enable_msi(pdev);
1468 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
1469 /* enable rfkill interrupt: hw bug w/a */
1470 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1471 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1472 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1473 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1478 static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
1479 struct iwl_trans_pcie *trans_pcie)
1483 last_vector = trans_pcie->trans->num_rx_queues;
1485 for (i = 0; i < trans_pcie->allocated_vector; i++) {
1488 ret = request_threaded_irq(trans_pcie->msix_entries[i].vector,
1490 (i == last_vector) ?
1491 iwl_pcie_irq_msix_handler :
1492 iwl_pcie_irq_rx_msix_handler,
1495 &trans_pcie->msix_entries[i]);
1499 IWL_ERR(trans_pcie->trans,
1500 "Error allocating IRQ %d\n", i);
1501 for (j = 0; j < i; j++)
1502 free_irq(trans_pcie->msix_entries[i].vector,
1503 &trans_pcie->msix_entries[i]);
1504 pci_disable_msix(pdev);
1512 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
1514 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1518 lockdep_assert_held(&trans_pcie->mutex);
1520 err = iwl_pcie_prepare_card_hw(trans);
1522 IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1526 /* Reset the entire device */
1527 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1529 usleep_range(10, 15);
1531 iwl_pcie_apm_init(trans);
1533 iwl_pcie_init_msix(trans_pcie);
1534 /* From now on, the op_mode will be kept updated about RF kill state */
1535 iwl_enable_rfkill_int(trans);
1537 /* Set is_down to false here so that...*/
1538 trans_pcie->is_down = false;
1540 hw_rfkill = iwl_is_rfkill_set(trans);
1542 set_bit(STATUS_RFKILL, &trans->status);
1544 clear_bit(STATUS_RFKILL, &trans->status);
1545 /* ... rfkill can call stop_device and set it false if needed */
1546 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1548 /* Make sure we sync here, because we'll need full access later */
1550 pm_runtime_resume(trans->dev);
1555 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
1557 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1560 mutex_lock(&trans_pcie->mutex);
1561 ret = _iwl_trans_pcie_start_hw(trans, low_power);
1562 mutex_unlock(&trans_pcie->mutex);
1567 static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1569 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1571 mutex_lock(&trans_pcie->mutex);
1573 /* disable interrupts - don't enable HW RF kill interrupt */
1574 spin_lock(&trans_pcie->irq_lock);
1575 iwl_disable_interrupts(trans);
1576 spin_unlock(&trans_pcie->irq_lock);
1578 iwl_pcie_apm_stop(trans, true);
1580 spin_lock(&trans_pcie->irq_lock);
1581 iwl_disable_interrupts(trans);
1582 spin_unlock(&trans_pcie->irq_lock);
1584 iwl_pcie_disable_ict(trans);
1586 mutex_unlock(&trans_pcie->mutex);
1588 iwl_pcie_synchronize_irqs(trans);
1591 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1593 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1596 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1598 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1601 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1603 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1606 static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
1608 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
1609 ((reg & 0x000FFFFF) | (3 << 24)));
1610 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
1613 static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1616 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
1617 ((addr & 0x000FFFFF) | (3 << 24)));
1618 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1621 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1622 const struct iwl_trans_config *trans_cfg)
1624 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1626 trans_pcie->cmd_queue = trans_cfg->cmd_queue;
1627 trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
1628 trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
1629 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1630 trans_pcie->n_no_reclaim_cmds = 0;
1632 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
1633 if (trans_pcie->n_no_reclaim_cmds)
1634 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1635 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1637 trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
1638 trans_pcie->rx_page_order =
1639 iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
1641 trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
1642 trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
1643 trans_pcie->scd_set_active = trans_cfg->scd_set_active;
1644 trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
1646 trans->command_groups = trans_cfg->command_groups;
1647 trans->command_groups_size = trans_cfg->command_groups_size;
1649 /* init ref_count to 1 (should be cleared when ucode is loaded) */
1650 trans_pcie->ref_count = 1;
1652 /* Initialize NAPI here - it should be before registering to mac80211
1653 * in the opmode but after the HW struct is allocated.
1654 * As this function may be called again in some corner cases don't
1655 * do anything if NAPI was already initialized.
1657 if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
1658 init_dummy_netdev(&trans_pcie->napi_dev);
1661 void iwl_trans_pcie_free(struct iwl_trans *trans)
1663 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1666 /* TODO: check if this is really needed */
1667 pm_runtime_disable(trans->dev);
1669 iwl_pcie_synchronize_irqs(trans);
1671 iwl_pcie_tx_free(trans);
1672 iwl_pcie_rx_free(trans);
1674 if (trans_pcie->msix_enabled) {
1675 for (i = 0; i < trans_pcie->allocated_vector; i++)
1676 free_irq(trans_pcie->msix_entries[i].vector,
1677 &trans_pcie->msix_entries[i]);
1679 pci_disable_msix(trans_pcie->pci_dev);
1680 trans_pcie->msix_enabled = false;
1682 free_irq(trans_pcie->pci_dev->irq, trans);
1684 iwl_pcie_free_ict(trans);
1686 pci_disable_msi(trans_pcie->pci_dev);
1688 iounmap(trans_pcie->hw_base);
1689 pci_release_regions(trans_pcie->pci_dev);
1690 pci_disable_device(trans_pcie->pci_dev);
1692 iwl_pcie_free_fw_monitor(trans);
1694 for_each_possible_cpu(i) {
1695 struct iwl_tso_hdr_page *p =
1696 per_cpu_ptr(trans_pcie->tso_hdr_page, i);
1699 __free_page(p->page);
1702 free_percpu(trans_pcie->tso_hdr_page);
1703 iwl_trans_free(trans);
1706 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1709 set_bit(STATUS_TPOWER_PMI, &trans->status);
1711 clear_bit(STATUS_TPOWER_PMI, &trans->status);
1714 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
1715 unsigned long *flags)
1718 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1720 spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
1722 if (trans_pcie->cmd_hold_nic_awake)
1725 /* this bit wakes up the NIC */
1726 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1727 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1728 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1732 * These bits say the device is running, and should keep running for
1733 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
1734 * but they do not indicate that embedded SRAM is restored yet;
1735 * 3945 and 4965 have volatile SRAM, and must save/restore contents
1736 * to/from host DRAM when sleeping/waking for power-saving.
1737 * Each direction takes approximately 1/4 millisecond; with this
1738 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
1739 * series of register accesses are expected (e.g. reading Event Log),
1740 * to keep device from sleeping.
1742 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
1743 * SRAM is okay/restored. We don't check that here because this call
1744 * is just for hardware register access; but GP1 MAC_SLEEP check is a
1745 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
1747 * 5000 series and later (including 1000 series) have non-volatile SRAM,
1748 * and do not save/restore SRAM when power cycling.
1750 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
1751 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1752 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1753 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
1754 if (unlikely(ret < 0)) {
1755 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
1757 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
1758 iwl_read32(trans, CSR_GP_CNTRL));
1759 spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
1765 * Fool sparse by faking we release the lock - sparse will
1766 * track nic_access anyway.
1768 __release(&trans_pcie->reg_lock);
1772 static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
1773 unsigned long *flags)
1775 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1777 lockdep_assert_held(&trans_pcie->reg_lock);
 * Fool sparse by faking we're acquiring the lock - sparse will
1781 * track nic_access anyway.
1783 __acquire(&trans_pcie->reg_lock);
1785 if (trans_pcie->cmd_hold_nic_awake)
1788 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
1789 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1791 * Above we read the CSR_GP_CNTRL register, which will flush
1792 * any previous writes, but we need the write that clears the
1793 * MAC_ACCESS_REQ bit to be performed before any other writes
1794 * scheduled on different CPUs (after we drop reg_lock).
1798 spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
1801 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
1802 void *buf, int dwords)
1804 unsigned long flags;
1808 if (iwl_trans_grab_nic_access(trans, &flags)) {
1809 iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
1810 for (offs = 0; offs < dwords; offs++)
1811 vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
1812 iwl_trans_release_nic_access(trans, &flags);
1819 static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
1820 const void *buf, int dwords)
1822 unsigned long flags;
1824 const u32 *vals = buf;
1826 if (iwl_trans_grab_nic_access(trans, &flags)) {
1827 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
1828 for (offs = 0; offs < dwords; offs++)
1829 iwl_write32(trans, HBUS_TARG_MEM_WDAT,
1830 vals ? vals[offs] : 0);
1831 iwl_trans_release_nic_access(trans, &flags);
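/*
 * Freezing a TX queue stops its stuck-queue watchdog and remembers how much
 * time remained until it would have fired; waking the queue re-arms the
 * timer with that remainder, so the watchdog period is preserved across the
 * frozen interval.
 */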
1838 static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
1842 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1845 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
1846 struct iwl_txq *txq = &trans_pcie->txq[queue];
1849 spin_lock_bh(&txq->lock);
1853 if (txq->frozen == freeze)
1856 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
1857 freeze ? "Freezing" : "Waking", queue);
1859 txq->frozen = freeze;
1861 if (txq->q.read_ptr == txq->q.write_ptr)
1865 if (unlikely(time_after(now,
1866 txq->stuck_timer.expires))) {
1868 * The timer should have fired, maybe it is
1869 * spinning right now on the lock.
1873 /* remember how long until the timer fires */
1874 txq->frozen_expiry_remainder =
1875 txq->stuck_timer.expires - now;
1876 del_timer(&txq->stuck_timer);
1881 * Wake a non-empty queue -> arm timer with the
1882 * remainder before it froze
1884 mod_timer(&txq->stuck_timer,
1885 now + txq->frozen_expiry_remainder);
1888 spin_unlock_bh(&txq->lock);
1892 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
1894 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1897 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
1898 struct iwl_txq *txq = &trans_pcie->txq[i];
1900 if (i == trans_pcie->cmd_queue)
1903 spin_lock_bh(&txq->lock);
1905 if (!block && !(WARN_ON_ONCE(!txq->block))) {
1908 iwl_write32(trans, HBUS_TARG_WRPTR,
1909 txq->q.write_ptr | (i << 8));
1915 spin_unlock_bh(&txq->lock);
1919 #define IWL_FLUSH_WAIT_MS 2000
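/*
 * Poll the SW read/write pointers of each requested TX queue until the queue
 * drains or IWL_FLUSH_WAIT_MS elapses; on failure, dump the scheduler and FH
 * state to help debugging.
 */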
1921 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
1923 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1924 struct iwl_txq *txq;
1925 struct iwl_queue *q;
1927 unsigned long now = jiffies;
/* waiting for all the tx frames to complete might take a while */
1933 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1936 if (cnt == trans_pcie->cmd_queue)
1938 if (!test_bit(cnt, trans_pcie->queue_used))
1940 if (!(BIT(cnt) & txq_bm))
1943 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
1944 txq = &trans_pcie->txq[cnt];
1946 wr_ptr = ACCESS_ONCE(q->write_ptr);
1948 while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
1949 !time_after(jiffies,
1950 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
1951 u8 write_ptr = ACCESS_ONCE(q->write_ptr);
1953 if (WARN_ONCE(wr_ptr != write_ptr,
1954 "WR pointer moved while flushing %d -> %d\n",
1960 if (q->read_ptr != q->write_ptr) {
1962 "fail to flush all tx fifo queues Q %d\n", cnt);
1966 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
1972 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
1973 txq->q.read_ptr, txq->q.write_ptr);
1975 scd_sram_addr = trans_pcie->scd_base_addr +
1976 SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
1977 iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
1979 iwl_print_hex_error(trans, buf, sizeof(buf));
1981 for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
1982 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
1983 iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
1985 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1986 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
1987 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
1988 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
1990 iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
1991 SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
1994 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
1996 tbl_dw = tbl_dw & 0x0000FFFF;
1999 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
2000 cnt, active ? "" : "in", fifo, tbl_dw,
2001 iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
2002 (TFD_QUEUE_SIZE_MAX - 1),
2003 iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
2009 static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
2010 u32 mask, u32 value)
2012 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2013 unsigned long flags;
2015 spin_lock_irqsave(&trans_pcie->reg_lock, flags);
2016 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
2017 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
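/*
 * Runtime-PM reference counting: iwl_trans_pcie_ref() takes a runtime PM
 * reference and bumps ref_count, iwl_trans_pcie_unref() drops it and lets
 * the device autosuspend; both are no-ops when d0i3 is disabled via the
 * module parameter.
 */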
2020 void iwl_trans_pcie_ref(struct iwl_trans *trans)
2022 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2023 unsigned long flags;
2025 if (iwlwifi_mod_params.d0i3_disable)
2028 spin_lock_irqsave(&trans_pcie->ref_lock, flags);
2029 IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
2030 trans_pcie->ref_count++;
2031 pm_runtime_get(&trans_pcie->pci_dev->dev);
2032 spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
2035 void iwl_trans_pcie_unref(struct iwl_trans *trans)
2037 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2038 unsigned long flags;
2040 if (iwlwifi_mod_params.d0i3_disable)
2043 spin_lock_irqsave(&trans_pcie->ref_lock, flags);
2044 IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
2045 if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
2046 spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
2049 trans_pcie->ref_count--;
2051 pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
2052 pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
2054 spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
2057 static const char *get_csr_string(int cmd)
2059 #define IWL_CMD(x) case x: return #x
2061 IWL_CMD(CSR_HW_IF_CONFIG_REG);
2062 IWL_CMD(CSR_INT_COALESCING);
2064 IWL_CMD(CSR_INT_MASK);
2065 IWL_CMD(CSR_FH_INT_STATUS);
2066 IWL_CMD(CSR_GPIO_IN);
2068 IWL_CMD(CSR_GP_CNTRL);
2069 IWL_CMD(CSR_HW_REV);
2070 IWL_CMD(CSR_EEPROM_REG);
2071 IWL_CMD(CSR_EEPROM_GP);
2072 IWL_CMD(CSR_OTP_GP_REG);
2073 IWL_CMD(CSR_GIO_REG);
2074 IWL_CMD(CSR_GP_UCODE_REG);
2075 IWL_CMD(CSR_GP_DRIVER_REG);
2076 IWL_CMD(CSR_UCODE_DRV_GP1);
2077 IWL_CMD(CSR_UCODE_DRV_GP2);
2078 IWL_CMD(CSR_LED_REG);
2079 IWL_CMD(CSR_DRAM_INT_TBL_REG);
2080 IWL_CMD(CSR_GIO_CHICKEN_BITS);
2081 IWL_CMD(CSR_ANA_PLL_CFG);
2082 IWL_CMD(CSR_HW_REV_WA_REG);
2083 IWL_CMD(CSR_MONITOR_STATUS_REG);
2084 IWL_CMD(CSR_DBG_HPET_MEM_REG);
2091 void iwl_pcie_dump_csr(struct iwl_trans *trans)
2094 static const u32 csr_tbl[] = {
2095 CSR_HW_IF_CONFIG_REG,
2113 CSR_DRAM_INT_TBL_REG,
2114 CSR_GIO_CHICKEN_BITS,
2116 CSR_MONITOR_STATUS_REG,
2118 CSR_DBG_HPET_MEM_REG
2120 IWL_ERR(trans, "CSR values:\n");
2121 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
2122 "CSR_INT_PERIODIC_REG)\n");
2123 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2124 IWL_ERR(trans, " %25s: 0X%08x\n",
2125 get_csr_string(csr_tbl[i]),
2126 iwl_read32(trans, csr_tbl[i]));
2130 #ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
2132 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \
2133 if (!debugfs_create_file(#name, mode, parent, trans, \
2134 &iwl_dbgfs_##name##_ops)) \
/* file operations */
2139 #define DEBUGFS_READ_FILE_OPS(name) \
2140 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2141 .read = iwl_dbgfs_##name##_read, \
2142 .open = simple_open, \
2143 .llseek = generic_file_llseek, \
2146 #define DEBUGFS_WRITE_FILE_OPS(name) \
2147 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2148 .write = iwl_dbgfs_##name##_write, \
2149 .open = simple_open, \
2150 .llseek = generic_file_llseek, \
2153 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \
2154 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2155 .write = iwl_dbgfs_##name##_write, \
2156 .read = iwl_dbgfs_##name##_read, \
2157 .open = simple_open, \
2158 .llseek = generic_file_llseek, \
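/*
 * debugfs read handler: one line per HW queue showing its read/write
 * pointers, used/stopped bits, timer freeze state and whether it is the
 * command queue.
 */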
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped),
				 txq->need_update, txq->frozen,
				 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char *buf;
	int pos = 0, i, ret;
	size_t bufsz;

	bufsz = sizeof(char) * 121 * trans->num_rx_queues;

	if (!trans_pcie->rxq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", i);
		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
				 rxq->read);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
				 rxq->write);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
				 rxq->write_actual);
		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
				 rxq->need_update);
		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
				 rxq->free_count);
		if (rxq->rb_stts) {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: %u\n",
					 le16_to_cpu(rxq->rb_stts->closed_rb_num) &
					 0x0FFF);
		} else {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: Not Allocated\n");
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);

	return ret;
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	char *buf;
	int pos = 0;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

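/*
 * debugfs write handler: writing "0" clears the accumulated interrupt
 * statistics; any other value leaves them untouched.
 */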
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;
	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/* Create the debugfs files and directories */
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	struct dentry *dir = trans->dbgfs_dir;

	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < IWL_NUM_OF_TBS; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);

	return cmdlen;
}

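/*
 * Copy the receive buffers between the driver's read pointer and the
 * firmware's closed_rb_num into the error dump.  Each page is unmapped for
 * CPU access, copied, and then remapped so it can be handed back to the
 * device.
 */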
static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
			       DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);
		/* remap the page for the free benefit */
		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
					     max_len, DMA_FROM_DEVICE);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock(&rxq->lock);

	return rb_len;
}

#define IWL_CSR_TO_DUMP (0x250)

static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}

static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	unsigned long flags;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	iwl_trans_release_nic_access(trans, &flags);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}

static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
				 u32 monitor_len)
{
	u32 buf_size_in_dwords = (monitor_len >> 2);
	u32 *buffer = (u32 *)fw_mon_data->data;
	unsigned long flags;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return 0;

	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
	for (i = 0; i < buf_size_in_dwords; i++)
		buffer[i] = iwl_read_prph_no_grab(trans,
						  MON_DMARB_RD_DATA_ADDR);
	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

	iwl_trans_release_nic_access(trans, &flags);

	return monitor_len;
}

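/*
 * Dump the firmware monitor.  Depending on configuration the monitor data
 * lives in a driver-allocated DRAM page, in SMEM, or behind the MARBH
 * registers; the function below picks the right source and records the
 * write pointer, wrap count and base address read back from the device.
 */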
static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
			    struct iwl_fw_error_dump_data **data,
			    u32 monitor_len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 len = 0;

	if ((trans_pcie->fw_mon_page &&
	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
	    trans->dbg_dest_tlv) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
		u32 base, write_ptr, wrap_cnt;

		/* If there was a dest TLV - use the values from there */
		if (trans->dbg_dest_tlv) {
			write_ptr =
				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		} else {
			base = MON_BUFF_BASE_ADDR;
			write_ptr = MON_BUFF_WRPTR;
			wrap_cnt = MON_BUFF_CYCLE_CNT;
		}

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)(*data)->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, write_ptr));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, base));

		len += sizeof(**data) + sizeof(*fw_mon_data);
		if (trans_pcie->fw_mon_page) {
			/*
			 * The firmware is now asserted, it won't write anything
			 * to the buffer. CPU can take ownership to fetch the
			 * data. The buffer will be handed back to the device
			 * before the firmware will be restarted.
			 */
			dma_sync_single_for_cpu(trans->dev,
						trans_pcie->fw_mon_phys,
						trans_pcie->fw_mon_size,
						DMA_FROM_DEVICE);
			memcpy(fw_mon_data->data,
			       page_address(trans_pcie->fw_mon_page),
			       trans_pcie->fw_mon_size);

			monitor_len = trans_pcie->fw_mon_size;
		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			base = iwl_read_prph(trans, base) <<
			       trans->dbg_dest_tlv->base_shift;
			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	return len;
}

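/*
 * Build the transport part of a firmware error dump: host command queue
 * contents, CSR and FH registers, optionally the RX buffers, and the
 * firmware monitor.  The total length is computed first, then the buffer
 * is allocated and filled section by section.
 */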
static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
			  const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs = 0;
	u32 monitor_len;
	int i, ptr;
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
			!trans->cfg->mq_rx_supported;

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	len += sizeof(*data) +
		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (trans_pcie->fw_mon_page) {
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       trans_pcie->fw_mon_size;
		monitor_len = trans_pcie->fw_mon_size;
	} else if (trans->dbg_dest_tlv) {
		u32 base, end;

		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);

		base = iwl_read_prph(trans, base) <<
		       trans->dbg_dest_tlv->base_shift;
		end = iwl_read_prph(trans, end) <<
		      trans->dbg_dest_tlv->end_shift;

		/* Make "end" point to the actual end */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
			end += (1 << trans->dbg_dest_tlv->end_shift);
		monitor_len = end - base;
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       monitor_len;
	} else {
		monitor_len = 0;
	}

	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
		dump_data = vzalloc(len);
		if (!dump_data)
			return NULL;

		data = (void *)dump_data->data;
		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
		dump_data->len = len;

		return dump_data;
	}

	/* CSR registers */
	len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* FH registers */
	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);

	if (dump_rbs) {
		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
		struct iwl_rxq *rxq = &trans_pcie->rxq[0];

		num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
				      & 0x0FFF;
		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	len = 0;
	data = (void *)dump_data->data;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->q.write_ptr;
	for (i = 0; i < cmdq->q.n_window; i++) {
		u8 idx = get_cmd_index(&cmdq->q, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);
	data = iwl_fw_error_next_data(data);

	len += iwl_trans_pcie_dump_csr(trans, &data);
	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;

	return dump_data;
}

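/*
 * System suspend/resume: when the platform power mode is D0I3, suspend asks
 * the firmware to enter D0I3 and resume brings it back out; otherwise there
 * is nothing to do here.
 */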
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
		return iwl_pci_fw_enter_d0i3(trans);

	return 0;
}

static void iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
		iwl_pci_fw_exit_d0i3(trans);
}
#endif /* CONFIG_PM_SLEEP */

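/*
 * The PCIe implementation of the transport ops vtable; op modes call these
 * handlers through the generic iwl_trans API.
 */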
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif /* CONFIG_PM_SLEEP */

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,

	.ref = iwl_trans_pcie_ref,
	.unref = iwl_trans_pcie_unref,

	.dump_data = iwl_trans_pcie_dump_data,
};

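/*
 * Probe-time allocation of the PCIe transport: enable and map the PCI
 * device, set up the DMA masks, detect the HW revision (including the 8000
 * family C-step quirk), and hook up either MSI-X or the INTA/ICT interrupt
 * path before handing the transport back to the caller.
 */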
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	int ret, addr_size;

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
				&pdev->dev, cfg, &trans_ops_pcie, 0);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->ref_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
	trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
	if (!trans_pcie->tso_hdr_page) {
		ret = -ENOMEM;
		goto out_no_pci;
	}

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_no_pci;

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	if (cfg->mq_rx_supported)
		addr_size = 64;
	else
		addr_size = 36;

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev,
						  DMA_BIT_MASK(addr_size));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!ret)
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		ret = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		unsigned long flags;

		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

		ret = iwl_pcie_prepare_card_hw(trans);
		if (ret) {
			IWL_WARN(trans, "Exit HW not ready\n");
			goto out_pci_disable_msi;
		}

		/*
		 * in-order to recognize C step driver should read chip version
		 * id located at the AUX bus MISC address space.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (ret < 0) {
			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
			goto out_pci_disable_msi;
		}

		if (iwl_trans_grab_nic_access(trans, &flags)) {
			u32 hw_step;

			hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
			hw_step |= ENABLE_WFPM;
			iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step);
			hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG);
			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
						(SILICON_C_STEP << 2);
			iwl_trans_release_nic_access(trans, &flags);
		}
	}

	iwl_pcie_set_interrupt_capa(pdev, trans);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	init_waitqueue_head(&trans_pcie->d0i3_waitq);

	if (trans_pcie->msix_enabled) {
		if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
			goto out_pci_release_regions;
	} else {
		ret = iwl_pcie_alloc_ict(trans);
		if (ret)
			goto out_pci_disable_msi;

		ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
					   iwl_pcie_irq_handler,
					   IRQF_SHARED, DRV_NAME, trans);
		if (ret) {
			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
			goto out_free_ict;
		}
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
	}

#ifdef CONFIG_IWLWIFI_PCIE_RTPM
	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
#else
	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	free_percpu(trans_pcie->tso_hdr_page);
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}