/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which point to Receive Buffers to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
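 *
 * As an illustrative example (the numbers here are assumed for the sake of
 * the walkthrough, not taken from the code): with a 256-entry queue, after
 * init READ = 0 and WRITE = 255, so the firmware owns every usable slot.
 * Once the firmware fills two buffers it advances READ to 2; the driver may
 * then process slots 0 and 1 and, after re-arming them with fresh pages,
 * move WRITE forward past them.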
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   later.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool in the size of num_queues*(8-2) - the
 *   maximum missing RBDs per allocation request (a request is posted with 2
 *   empty RBDs, and there is no guarantee when the other 6 RBDs are supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 *
 */
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure RX_QUEUE_SIZE is a power of 2 */
	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
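
/*
 * Worked example for the mask trick above (illustrative values, assuming
 * RX_QUEUE_SIZE == 256): read = 10, write = 200 gives 10 - 200 - 1 = -191,
 * and -191 & 255 == 65, i.e. 65 free slots -- the same result as a
 * mathematical modulo by 256 with a non-negative remainder.
 */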
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
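
/*
 * The shift above works because receive buffers are 256-byte aligned (the
 * BUG_ONs in iwl_pcie_rxq_alloc_rbs() enforce this), so the low 8 bits are
 * always zero and a 36-bit DMA address still fits in the 32-bit RBD word.
 * For example (illustrative address): 0x123456700 >> 8 == 0x01234567.
 */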
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
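
/*
 * Example of the round_down() above (illustrative value): if rxq->write
 * is 13, write_actual becomes 8 -- the device is only told about new
 * buffers in batches of 8, which matches the "multiples of 8" rule in
 * iwl_pcie_rxq_restock() below.
 */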
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;

	spin_lock(&rxq->lock);

	if (!rxq->need_update)
		goto exit_unlock;

	iwl_pcie_rxq_inc_wr_ptr(trans);
	rxq->need_update = false;

 exit_unlock:
	spin_unlock(&rxq->lock);
}
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans);
		spin_unlock(&rxq->lock);
	}
}
/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 *
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct page *page;
	gfp_t gfp_mask = priority;

	if (rxq->free_count > RX_LOW_WATERMARK)
		gfp_mask |= __GFP_NOWARN;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/* Issue an error if the hardware has consumed more than half
		 * of its free buffer list and we don't have enough
		 * pre-allocated buffers.
		 */
		if (rxq->free_count <= RX_LOW_WATERMARK &&
		    iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
		    net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
				 priority == GFP_ATOMIC ?
				 "GFP_ATOMIC" : "GFP_KERNEL",
				 rxq->free_count);
		return NULL;
	}
	return page;
}
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}
static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}
/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called only during initialization.
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	iwl_pcie_rxq_restock(trans);
}
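
/*
 * Note on the GFP flags: GFP_KERNEL is safe here because replenish runs
 * only at init time, from process context. The interrupt path instead
 * calls iwl_pcie_rxq_alloc_rbs() with GFP_ATOMIC (see the emergency
 * handling in iwl_pcie_rx_handle() below), since it must not sleep.
 */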
/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		struct list_head local_allocated;

		INIT_LIST_HEAD(&local_allocated);

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}
			/* dma address must be no more than 36 bits */
			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
			/* and also 256 byte aligned! */
			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 */
static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				     struct iwl_rx_mem_buffer
				     *out[RX_CLAIM_REQ_ALLOC])
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return -ENOMEM, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return -ENOMEM;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		out[i] = list_first_entry(&rba->rbd_allocated,
					  struct iwl_rx_mem_buffer, list);
		list_del(&out[i]->list);
	}
	spin_unlock(&rba->lock);

	return 0;
}
static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);
	spin_lock_init(&rba->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}
static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
{
	int i;

	lockdep_assert_held(&rba->lock);

	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);

	for (i = 0; i < RX_POOL_SIZE; i++)
		list_add(&rba->pool[i].list, &rba->rbd_empty);
}
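
/*
 * The allocator pool is sized so that a posted request can always be
 * served even before the queue returns the remaining RBDs: per the
 * theory-of-operation comment above, a request is posted with only 2 of
 * its 8 RBDs, so the pool covers the up-to-6 missing ones per request.
 */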
static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rba->lock);

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!rba->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
		rba->pool[i].page = NULL;
	}
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	if (!rba->alloc_wq)
		rba->alloc_wq = alloc_workqueue("rb_allocator",
						WQ_HIGHPRI | WQ_UNBOUND, 1);
	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rx_free_rba(trans);
	iwl_pcie_rx_init_rba(rba);
	spin_unlock(&rba->lock);

	spin_lock(&rxq->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock(&rxq->lock);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans);
	spin_unlock(&rxq->lock);

	return 0;
}
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);
	if (rba->alloc_wq) {
		destroy_workqueue(rba->alloc_wq);
		rba->alloc_wq = NULL;
	}

	spin_lock(&rba->lock);
	iwl_pcie_rx_free_rba(trans);
	spin_unlock(&rba->lock);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock(&rxq->lock);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}
/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted.
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list, will be moved to allocator in batches
	 * before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
	 * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC buffers
	 * earlier but still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * Allocator has another 6 from pool for the request completion
		 */
		spin_lock(&rba->lock);
		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
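
/*
 * Batching arithmetic, illustrated (RX_POST_REQ_ALLOC == 2 and
 * RX_CLAIM_REQ_ALLOC == 8 assumed here, per the comments above): a request
 * is posted when used_count % 8 == 2, i.e. after 2 stolen pages; the claim
 * in iwl_pcie_rx_handle() then waits until used_count reaches a multiple
 * of 8, when the remaining 6 RBDs join the allocator as well.
 */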
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans,
			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
			     rxcb._offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i, j, count = 0;
	bool emergency = false;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
			emergency = true;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
		iwl_pcie_rx_handle_rb(trans, rxb, emergency);

		i = (i + 1) & RX_QUEUE_MASK;

		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
			struct iwl_rb_allocator *rba = &trans_pcie->rba;
			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];

			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
			    !emergency) {
				/* Add the remaining 6 empty RBDs
				 * for allocator use
				 */
				spin_lock(&rba->lock);
				list_splice_tail_init(&rxq->rx_used,
						      &rba->rbd_empty);
				spin_unlock(&rba->lock);
			}

			/* If not ready - continue, will try to reclaim later.
			 * No need to reschedule work - allocator exits only on
			 * success */
			if (!iwl_pcie_rx_allocator_get(trans, out)) {
				/* If success - then RX_CLAIM_REQ_ALLOC
				 * buffers were retrieved and should be added
				 * to rx_free */
				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
					list_add_tail(&out[j]->list,
						      &rxq->rx_free);
					rxq->free_count++;
				}
			}
		}
		if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rxq->used_count < RX_QUEUE_SIZE / 3)
					emergency = false;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
				spin_lock(&rxq->lock);
			}
		}
		/* handle restock for three cases, can be all of them at once:
		 * - we just pulled buffers from the allocator
		 * - we have 8+ unstolen pages accumulated
		 * - we are in emergency and allocated buffers
		 */
		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
			rxq->read = i;
			spin_unlock(&rxq->lock);
			iwl_pcie_rxq_restock(trans);
			goto restart;
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	/*
	 * handle a case where in emergency there are some unallocated RBDs.
	 * those RBDs are in the used list, but are not tracked by the queue's
	 * used_count which counts allocator owned RBDs.
	 * unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * by allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	if (trans_pcie->napi.poll)
		napi_gro_flush(&trans_pcie->napi, false);
}
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	local_bh_disable();
	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);
	local_bh_enable();

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
		del_timer(&trans_pcie->txq[i].stuck_timer);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}
/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
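
/*
 * Plugging the numbers in: ICT_SIZE == 1 << 12 == 4096 bytes (one device
 * page) and ICT_COUNT == 4096 / sizeof(u32) == 1024 table entries, which
 * is why ict_index below wraps with "& (ICT_COUNT - 1)".
 */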
/* Interrupt handler using the ICT table. With this mechanism the driver
 * stops reading the INTA register to discover the device's interrupts,
 * since reading that register is expensive. Instead, the device writes
 * its interrupts into an ICT table in DRAM, increments its index and
 * fires the interrupt. The driver ORs all ICT table entries from the
 * current index up to the first zero-valued entry; the result is the
 * interrupt to service. The driver then sets those entries back to 0
 * and updates the index.
 */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}
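
/*
 * How the reassembly above maps ICT bits back onto CSR_INT bits,
 * illustrated with an assumed table value: val == 0x8001 keeps bit 0 in
 * place and moves bit 15 to bit 31, giving inta == 0x80000001 -- the low
 * byte stays at bits 0-7 and the high byte lands at bits 24-31, matching
 * the "bit 15 before shifting it to 31" note in the comment above.
 */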
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/* If the DRAM interrupt table is not set yet,
	 * fall back to the legacy interrupt path.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}
	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		mutex_lock(&trans_pcie->mutex);
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
		mutex_unlock(&trans_pcie->mutex);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans->status);
			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
					       &trans->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}
	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}
	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}

		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt before the shared data reflects the changes;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		local_bh_disable();
		iwl_pcie_rx_handle(trans);
		local_bh_enable();
	}
	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* we are loading the firmware, enable FH_TX interrupt only */
	if (handled & CSR_INT_BIT_FH_TX)
		iwl_enable_fw_load_int(trans);
	/* only Re-enable all interrupt if disabled by irq */
	else if (test_bit(STATUS_INT_ENABLED, &trans->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}
/*
 * Allocate the DRAM shared table; it is an aligned memory
 * block of ICT_SIZE.
 * Also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_zalloc_coherent(trans->dev, ICT_SIZE,
				    &trans_pcie->ict_tbl_dma,
				    GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}
/* The device is going up: inform it that we're using the ICT interrupt
 * table, and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
/* The device is going down - disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}
irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}
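
/*
 * For context: iwl_pcie_isr() and iwl_pcie_irq_handler() form a
 * hard-irq/threaded-irq pair -- the hard handler masks interrupts and
 * returns IRQ_WAKE_THREAD, and the thread does the real work and
 * re-enables them. A minimal sketch of the registration (done elsewhere
 * in the transport setup; the exact flags and name used here are
 * assumptions, not copied from that code):
 *
 *	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 *				   iwl_pcie_irq_handler,
 *				   IRQF_SHARED, DRV_NAME, trans);
 */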