iwlwifi: continue clean up - pcie/rx.c
[linux-2.6-block.git] / drivers / net / wireless / iwlwifi / pcie / trans.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

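/*
 * Mask of all TX queues except the command queue: bits 0..num_of_queues-1
 * are set, then the command queue's bit is cleared again.
 */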
#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
	(~(1<<(trans_pcie)->cmd_queue)))

static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

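/*
 * Per-queue watchdog: fires when a TX queue has not progressed for
 * wd_timeout (armed in the TX path below).  Dumps scheduler and FH
 * state for all queues, then reports a NIC error to the op_mode.
 */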
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
		SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_read_targ_mem(trans,
					  trans_pcie->scd_base_addr +
					  SCD_TRANS_TBL_OFFSET_QUEUE(i));

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans,
				      SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i, q->n_bd)) {
		struct iwl_tx_cmd *tx_cmd =
			(struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			get_unaligned_le32(&tx_cmd->scratch));
	}

	iwl_op_mode_nic_error(trans->op_mode);
}

static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_txq *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

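/*
 * Hardware-facing half of queue setup: initialize the software read/write
 * pointers and hand the TFD circular buffer's DMA address to the device
 * (FH_MEM_CBBC_QUEUE takes the address shifted right by 8 bits).
 */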
static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}

/*
 * iwl_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kfree(txq->entries[i].cmd);
			kfree(txq->entries[i].copy_cmd);
			kfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
		sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}

static int iwl_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

	if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT		0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02

static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 pci_lnk_ctl;

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL,
				  &pci_lnk_ctl);
	return pci_lnk_ctl;
}

static void iwl_apm_config(struct iwl_trans *trans)
{
	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	u16 lctl = iwl_pciexp_link_ctrl(trans);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}

static int iwl_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		ret = iwl_set_hw_ready(trans);
		if (ret >= 0)
			return 0;

		usleep_range(200, 1000);
		t += 200;
	} while (t < 150000);

	return ret;
}

/*
 * ucode
 */
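/*
 * Push one contiguous DMA chunk to device SRAM over the FH "service"
 * channel: pause the channel, program the destination SRAM address and
 * the source buffer/byte count, re-enable the channel, then sleep until
 * the ISR signals ucode_write_complete (or time out after 5 seconds).
 */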
static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
				   dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

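/*
 * Sections can be arbitrarily large, so they are streamed through a single
 * PAGE_SIZE coherent bounce buffer, one iwl_load_firmware_chunk() per page.
 */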
static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset;
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
	if (!v_addr)
		return -ENOMEM;

	for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
		u32 copy_size;

		copy_size = min_t(u32, PAGE_SIZE, section->len - offset);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_load_firmware_chunk(trans, section->offset + offset,
					      p_addr, copy_size);
		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
	return ret;
}

static int iwl_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	int i, ret = 0;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].data)
			break;

		ret = iwl_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	clear_bit(STATUS_FW_ERROR, &trans_pcie->status);

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_load_given_ucode(trans, fw);
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}

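/*
 * Bring up the TX scheduler after firmware is alive: clear the SCD's
 * context and status SRAM, point it at the byte-count tables, enable the
 * command queue and all FH DMA channels, and re-enable L1-Active.
 */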
static void iwl_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	int chan;
	u32 reg_val;

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				trans->cfg->base_params->num_of_queues);
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_tx_start(trans, scd_addr);
}

/*
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans,
			 "Stopping tx queues that aren't allocated...\n");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

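/*
 * Full device stop.  Ordering matters here: interrupts and the ICT table
 * go first so nothing new comes in, then TX/RX and the busmaster DMA
 * clocks (only if the device was actually enabled), then the APM itself.
 * The RF kill interrupt is re-enabled at the end so the op_mode keeps
 * seeing rfkill transitions while the device is down.
 */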
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_trans_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_enable_rfkill_int(trans);

	/* wait to make sure we flush pending tasklet */
	synchronize_irq(trans_pcie->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	cancel_work_sync(&trans_pcie->rx_replenish);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	clear_bit(STATUS_RFKILL, &trans_pcie->status);
}

static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

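/*
 * Data path.  Each frame becomes one TFD with up to two buffers: the
 * first holds the TX command and the MAC header concatenated (padded to
 * a dword boundary and mapped bidirectionally so the scratch field can
 * be updated in place), the second points straight at the skb payload.
 */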
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
#ifdef CONFIG_IWLWIFI_DEBUG
	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
		  ((wifi_seq & 0xff) != q->write_ptr),
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);
#endif

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwl_pcie_tx_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwl_pcie_tx_build_tfd(trans, txq, phys_addr, secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
 out_err:
	spin_unlock(&txq->lock);
	return -1;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;
	bool hw_rfkill;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			     iwl_pcie_tasklet, (unsigned long)trans);

		iwl_pcie_alloc_ict(trans);

		err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
				  IRQF_SHARED, DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans_pcie->irq);
			goto error;
		}

		trans_pcie->irq_requested = true;
	}

	err = iwl_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		goto err_free_irq;
	}

	iwl_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return err;

err_free_irq:
	trans_pcie->irq_requested = false;
	free_irq(trans_pcie->irq, trans);
error:
	iwl_pcie_free_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}

static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_apm_stop(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}

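/*
 * Reclaim TFDs up to (but not including) ssn, which is masked into the
 * ring below since n_bd is a power of two, and wake the queue once enough
 * space is available again.
 */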
static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);

	spin_lock(&txq->lock);

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
				   txq_id, txq->q.read_ptr, tfd_num, ssn);
		iwl_pcie_txq_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	spin_unlock(&txq->lock);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_trans_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->irq_requested == true) {
		free_irq(trans_pcie->irq, trans);
		iwl_pcie_free_ict(trans);
	}

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kmem_cache_destroy(trans->dev_cmd_pool);

	kfree(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	if (!hw_rfkill)
		iwl_enable_interrupts(trans);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#define IWL_FLUSH_WAIT_MS 2000

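/*
 * Block until every data queue has drained (read_ptr catches up with
 * write_ptr), polling each queue for at most IWL_FLUSH_WAIT_MS.  The
 * command queue is skipped on purpose.
 */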
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

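/*
 * The IWL_CMD macro below relies on the preprocessor's stringification
 * operator (#x) to turn each register constant into its own name, so the
 * dump routines can print symbolic register names without a lookup table.
 */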
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
{
	int i;
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (buf) {
		int pos = 0;
		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;

		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");

		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));

		return pos;
	}
#endif

	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));

	return 0;
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

990aa6d7 1572void iwl_pcie_dump_csr(struct iwl_trans *trans)
ff620849
EG
1573{
1574 int i;
1575 static const u32 csr_tbl[] = {
1576 CSR_HW_IF_CONFIG_REG,
1577 CSR_INT_COALESCING,
1578 CSR_INT,
1579 CSR_INT_MASK,
1580 CSR_FH_INT_STATUS,
1581 CSR_GPIO_IN,
1582 CSR_RESET,
1583 CSR_GP_CNTRL,
1584 CSR_HW_REV,
1585 CSR_EEPROM_REG,
1586 CSR_EEPROM_GP,
1587 CSR_OTP_GP_REG,
1588 CSR_GIO_REG,
1589 CSR_GP_UCODE_REG,
1590 CSR_GP_DRIVER_REG,
1591 CSR_UCODE_DRV_GP1,
1592 CSR_UCODE_DRV_GP2,
1593 CSR_LED_REG,
1594 CSR_DRAM_INT_TBL_REG,
1595 CSR_GIO_CHICKEN_BITS,
1596 CSR_ANA_PLL_CFG,
1597 CSR_HW_REV_WA_REG,
1598 CSR_DBG_HPET_MEM_REG
1599 };
1600 IWL_ERR(trans, "CSR values:\n");
1601 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
1602 "CSR_INT_PERIODIC_REG)\n");
1603 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
1604 IWL_ERR(trans, " %25s: 0X%08x\n",
1605 get_csr_string(csr_tbl[i]),
1042db2a 1606 iwl_read32(trans, csr_tbl[i]));
ff620849
EG
1607 }
1608}
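
/*
 * Note the asymmetry with the FH dump above: CSR registers live in the
 * always-accessible window at the start of BAR0 and can be read with a
 * plain iwl_read32(), while the FH registers go through
 * iwl_read_direct32(), which first wakes the NIC so the target range
 * is accessible.
 */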

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operations */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
				       char __user *user_buf,		\
				       size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

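/*
 * For reference, DEBUGFS_READ_WRITE_FILE_OPS(interrupt) below expands to
 * forward declarations of iwl_dbgfs_interrupt_read()/_write() plus:
 *
 *	static const struct file_operations iwl_dbgfs_interrupt_ops = {
 *		.write = iwl_dbgfs_interrupt_write,
 *		.read = iwl_dbgfs_interrupt_read,
 *		.open = simple_open,
 *		.llseek = generic_file_llseek,
 *	};
 */
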
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
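
/*
 * One line per hardware queue, at most 64 bytes each, which is what the
 * bufsz computation above allows for. "use"/"stop" reflect the driver's
 * queue_used/queue_stopped bitmaps rather than hardware state.
 */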

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
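
/*
 * closed_rb_num comes from the DMA'd receive buffer status (rb_stts)
 * written back by the device; only the low 12 bits carry the index,
 * hence the 0x0FFF mask.
 */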

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
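
/*
 * The written value is parsed as hex; writing 0 (e.g.
 * "echo 0 > interrupt") clears the counters, while any other value is
 * accepted but currently ignored.
 */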

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}
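
/*
 * The parsed csr value is currently unused: any successfully parsed
 * write triggers a full CSR dump to the error log via
 * iwl_pcie_dump_csr().
 */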

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_pcie_dump_fh(trans, &buf);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	if (!trans->op_mode)
		return -EAGAIN;

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();

	return count;
}
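
/*
 * iwl_op_mode_nic_error() normally runs from the interrupt tasklet, so
 * BHs are disabled around the call here, presumably to reproduce that
 * calling context when a restart is injected from debugfs.
 */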

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_WRITE_FILE_OPS(fw_restart);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
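
/*
 * PCIe implementation of the transport API: the opaque iwl_trans core
 * never calls into this file directly, it dispatches through the ops
 * vtable below.
 */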

static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_pcie_txq_disable,
	.txq_enable = iwl_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
};

struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (!trans)
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

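	/*
	 * DMA setup: try 36-bit streaming and coherent masks first, then
	 * fall back to 32-bit masks if the platform cannot satisfy them.
	 */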
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->dev = &pdev->dev;
	trans_pcie->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);
	spin_lock_init(&trans->reg_lock);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!trans->dev_cmd_pool)
		goto out_pci_disable_msi;

	return trans;

out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}
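
/*
 * A minimal sketch of how the allocator above is typically driven from
 * the driver's PCI probe path (the real caller lives in pcie/drv.c; the
 * body below is illustrative only, not the actual code):
 *
 *	static int iwl_pci_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *ent)
 *	{
 *		const struct iwl_cfg *cfg =
 *			(const struct iwl_cfg *)(ent->driver_data);
 *		struct iwl_trans *trans;
 *
 *		trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
 *		if (!trans)
 *			return -ENOMEM;
 *		...
 *	}
 */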