/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator's
 *   empty list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the
 *   allocator's empty list and the driver tries to claim the pre-allocated
 *   buffers and add them to iwl->rxq->rx_free. If it fails - it continues to
 *   claim them until ready.
 *   When there are 8+ buffers in the free list - either from allocation or
 *   from 8 reused unstolen pages - restock is called to update the FW and
 *   indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool of size num_queues * (8 - 2) - the
 *   maximum number of RBDs that can be missing per allocation request (a
 *   request is posted with 2 empty RBDs; there is no guarantee when the other
 *   6 RBDs are supplied). The queues supply the recycling of the rest of the
 *   RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */

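/*
 * Illustrative sketch added by the editor (not part of the upstream file):
 * a standalone model of the free-slot arithmetic described above, assuming
 * a hypothetical power-of-2 queue size. One slot is always kept unused so
 * that an empty queue (WRITE == READ - 1) can be told apart from a full
 * one (WRITE == READ); the unsigned subtraction plus mask is equivalent to
 * a modulo that stays well defined even when write has wrapped past read.
 */
static inline u32 iwl_rxq_space_model(u32 read, u32 write, u32 queue_size)
{
        /* queue_size is assumed to be a power of 2, e.g. 256 */
        return (read - write - 1) & (queue_size - 1);
}
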
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
        /* Make sure rx queue size is a power of 2 */
        WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

        /*
         * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
         * between empty and completely full queues.
         * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
         * defined for negative dividends.
         */
        return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
        return cpu_to_le32((u32)(dma_addr >> 8));
}

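/*
 * Illustrative sketch added by the editor (not part of the upstream file):
 * the inverse of the conversion above. Because the RBD pointer keeps only
 * address bits 8 and up, receive buffers must be 256-byte aligned, and the
 * original DMA address can be recovered by shifting back.
 */
static inline dma_addr_t iwl_pcie_rbd_ptr2dma_addr_model(__le32 rbd_ptr)
{
        return (dma_addr_t)le32_to_cpu(rbd_ptr) << 8;
}
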
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                /* TODO: remove this once fw does it */
                iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
                return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
                                              RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
        } else if (trans->trans_cfg->mq_rx_supported) {
                iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
                return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
                                         RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
        } else {
                iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
                return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
                                           FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
                                           1000);
        }
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
                                    struct iwl_rxq *rxq)
{
        u32 reg;

        lockdep_assert_held(&rxq->lock);

        /*
         * explicitly wake up the NIC if:
         * 1. shadow registers aren't enabled
         * 2. there is a chance that the NIC is asleep
         */
        if (!trans->trans_cfg->base_params->shadow_reg_enable &&
            test_bit(STATUS_TPOWER_PMI, &trans->status)) {
                reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                        IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
                                       reg);
                        iwl_set_bit(trans, CSR_GP_CNTRL,
                                    BIT(trans->trans_cfg->csr->flag_mac_access_req));
                        rxq->need_update = true;
                        return;
                }
        }

        rxq->write_actual = round_down(rxq->write, 8);
        if (trans->trans_cfg->mq_rx_supported)
                iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
                            rxq->write_actual);
        else
                iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

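/*
 * Illustrative sketch added by the editor (not part of the upstream file):
 * the device write pointer is only ever published in multiples of 8, so
 * round_down(write, 8) above and the (write & ~0x7) comparison used by the
 * restock paths below compute the same aligned value.
 */
static inline u32 iwl_pcie_aligned_wr_ptr_model(u32 write)
{
        return write & ~0x7;    /* == round_down(write, 8) */
}
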
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                if (!rxq->need_update)
                        continue;
                spin_lock(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                rxq->need_update = false;
                spin_unlock(&rxq->lock);
        }
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
                                struct iwl_rxq *rxq,
                                struct iwl_rx_mem_buffer *rxb)
{
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                struct iwl_rx_transfer_desc *bd = rxq->bd;

                BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));

                bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
                bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
        } else {
                __le64 *bd = rxq->bd;

                bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
        }

        IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
                     (u32)rxb->vid, rxq->id, rxq->write);
}

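/*
 * Illustrative sketch added by the editor (not part of the upstream file):
 * on pre-AX210 multi-queue HW the free BD written above is a single 64-bit
 * word packing the page-aligned DMA address with the virtual buffer id.
 * The caller warns if the low 12 bits of the address are not zero, which
 * is what keeps the two fields from colliding; the explicit 12-bit mask
 * here is only to make that layout visible.
 */
static inline __le64 iwl_pcie_pack_bd_model(dma_addr_t page_dma, u16 vid)
{
        return cpu_to_le64(page_dma | (vid & 0xFFF));
}
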
/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
{
        struct iwl_rx_mem_buffer *rxb;

        /*
         * If the device isn't enabled - no need to try to add buffers...
         * This can happen when we stop the device and still have an interrupt
         * pending. We stop the APM before we sync the interrupts because we
         * have to (see comment there). On the other hand, since the APM is
         * stopped, we cannot access the HW (in particular not prph).
         * So don't try to restock if the APM has been already stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                return;

        spin_lock(&rxq->lock);
        while (rxq->free_count) {
                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                rxb->invalid = false;
                /* the first 12 bits are expected to be empty */
                WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
                /* Point to Rx buffer via next RBD in circular buffer */
                iwl_pcie_restock_bd(trans, rxq, rxb);
                rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
                rxq->free_count--;
        }
        spin_unlock(&rxq->lock);

        /*
         * If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8.
         */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                spin_unlock(&rxq->lock);
        }
}

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
{
        struct iwl_rx_mem_buffer *rxb;

        /*
         * If the device isn't enabled - no need to try to add buffers...
         * This can happen when we stop the device and still have an interrupt
         * pending. We stop the APM before we sync the interrupts because we
         * have to (see comment there). On the other hand, since the APM is
         * stopped, we cannot access the HW (in particular not prph).
         * So don't try to restock if the APM has been already stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                return;

        spin_lock(&rxq->lock);
        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
                __le32 *bd = (__le32 *)rxq->bd;
                /* The overwritten rxb must be a used one */
                rxb = rxq->queue[rxq->write];
                BUG_ON(rxb && rxb->page);

                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                rxb->invalid = false;

                /* Point to Rx buffer via next RBD in circular buffer */
                bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
        }
        spin_unlock(&rxq->lock);

        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                spin_unlock(&rxq->lock);
        }
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
        if (trans->trans_cfg->mq_rx_supported)
                iwl_pcie_rxmq_restock(trans, rxq);
        else
                iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
                                           gfp_t priority)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct page *page;
        gfp_t gfp_mask = priority;

        if (trans_pcie->rx_page_order > 0)
                gfp_mask |= __GFP_COMP;

        /* Alloc a new receive buffer */
        page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
        if (!page) {
                if (net_ratelimit())
                        IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
                                       trans_pcie->rx_page_order);
                /*
                 * Issue an error if we don't have enough pre-allocated
                 * buffers.
                 */
                if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
                        IWL_CRIT(trans,
                                 "Failed to alloc_pages\n");
                return NULL;
        }
        return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
                            struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;

        while (1) {
                spin_lock(&rxq->lock);
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock(&rxq->lock);
                        return;
                }
                spin_unlock(&rxq->lock);

                /* Alloc a new receive buffer */
                page = iwl_pcie_rx_alloc_page(trans, priority);
                if (!page)
                        return;

                spin_lock(&rxq->lock);

                if (list_empty(&rxq->rx_used)) {
                        spin_unlock(&rxq->lock);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                spin_unlock(&rxq->lock);

                BUG_ON(rxb->page);
                rxb->page = page;
                /* Get physical address of the RB */
                rxb->page_dma =
                        dma_map_page(trans->dev, page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        rxb->page = NULL;
                        spin_lock(&rxq->lock);
                        list_add(&rxb->list, &rxq->rx_used);
                        spin_unlock(&rxq->lock);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }

                spin_lock(&rxq->lock);

                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;

                spin_unlock(&rxq->lock);
        }
}

void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;

        for (i = 0; i < RX_POOL_SIZE; i++) {
                if (!trans_pcie->rx_pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
                               PAGE_SIZE << trans_pcie->rx_page_order,
                               DMA_FROM_DEVICE);
                __free_pages(trans_pcie->rx_pool[i].page,
                             trans_pcie->rx_page_order);
                trans_pcie->rx_pool[i].page = NULL;
        }
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct list_head local_empty;
        int pending = atomic_read(&rba->req_pending);

        IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

        /* If we were scheduled - there is at least one request */
        spin_lock(&rba->lock);
        /* swap out the rba->rbd_empty to a local list */
        list_replace_init(&rba->rbd_empty, &local_empty);
        spin_unlock(&rba->lock);

        while (pending) {
                int i;
                LIST_HEAD(local_allocated);
                gfp_t gfp_mask = GFP_KERNEL;

                /* Do not post a warning if there are only a few requests */
                if (pending < RX_PENDING_WATERMARK)
                        gfp_mask |= __GFP_NOWARN;

                for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
                        struct iwl_rx_mem_buffer *rxb;
                        struct page *page;

                        /* List should never be empty - each reused RBD is
                         * returned to the list, and initial pool covers any
                         * possible gap between the time the page is allocated
                         * to the time the RBD is added.
                         */
                        BUG_ON(list_empty(&local_empty));
                        /* Get the first rxb from the rbd list */
                        rxb = list_first_entry(&local_empty,
                                               struct iwl_rx_mem_buffer, list);
                        BUG_ON(rxb->page);

                        /* Alloc a new receive buffer */
                        page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
                        if (!page)
                                continue;
                        rxb->page = page;

                        /* Get physical address of the RB */
                        rxb->page_dma = dma_map_page(trans->dev, page, 0,
                                        PAGE_SIZE << trans_pcie->rx_page_order,
                                        DMA_FROM_DEVICE);
                        if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                                rxb->page = NULL;
                                __free_pages(page, trans_pcie->rx_page_order);
                                continue;
                        }

                        /* move the allocated entry to the out list */
                        list_move(&rxb->list, &local_allocated);
                        i++;
                }

                atomic_dec(&rba->req_pending);
                pending--;

                if (!pending) {
                        pending = atomic_read(&rba->req_pending);
                        if (pending)
                                IWL_DEBUG_TPT(trans,
                                              "Got more pending allocation requests = %d\n",
                                              pending);
                }

                spin_lock(&rba->lock);
                /* add the allocated rbds to the allocator allocated list */
                list_splice_tail(&local_allocated, &rba->rbd_allocated);
                /* get more empty RBDs for current pending requests */
                list_splice_tail_init(&rba->rbd_empty, &local_empty);
                spin_unlock(&rba->lock);

                atomic_inc(&rba->req_ready);
        }

        spin_lock(&rba->lock);
        /* return unused rbds to the allocator empty list */
        list_splice_tail(&local_empty, &rba->rbd_empty);
        spin_unlock(&rba->lock);

        IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by queue when the queue posted allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
                                      struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i;

        lockdep_assert_held(&rxq->lock);

        /*
         * atomic_dec_if_positive returns req_ready - 1 for any scenario.
         * If req_ready is 0 atomic_dec_if_positive will return -1 and this
         * function will return early, as there are no ready requests.
         * atomic_dec_if_positive will perform the *actual* decrement only if
         * req_ready > 0, i.e. - there are ready requests and the function
         * hands one request to the caller.
         */
        if (atomic_dec_if_positive(&rba->req_ready) < 0)
                return;

        spin_lock(&rba->lock);
        for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
                /* Get next free Rx buffer, remove it from free list */
                struct iwl_rx_mem_buffer *rxb =
                        list_first_entry(&rba->rbd_allocated,
                                         struct iwl_rx_mem_buffer, list);

                list_move(&rxb->list, &rxq->rx_free);
        }
        spin_unlock(&rba->lock);

        rxq->used_count -= RX_CLAIM_REQ_ALLOC;
        rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

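/*
 * Illustrative sketch added by the editor (not part of the upstream file):
 * the allocator handshake as seen from the claiming side above. A queue
 * posts a request by incrementing req_pending and scheduling the worker;
 * the worker turns each completed request into req_ready; a claim succeeds
 * only while req_ready is positive, which is exactly the
 * atomic_dec_if_positive() test used by iwl_pcie_rx_allocator_get.
 */
static inline bool iwl_pcie_claim_allocation_model(atomic_t *req_ready)
{
        return atomic_dec_if_positive(req_ready) >= 0;
}
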
void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
        struct iwl_rb_allocator *rba_p =
                container_of(data, struct iwl_rb_allocator, rx_alloc);
        struct iwl_trans_pcie *trans_pcie =
                container_of(rba_p, struct iwl_trans_pcie, rba);

        iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
        struct iwl_rx_transfer_desc *rx_td;

        if (use_rx_td)
                return sizeof(*rx_td);
        else
                return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
                        sizeof(__le32);
}

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
{
        struct device *dev = trans->dev;
        bool use_rx_td = (trans->trans_cfg->device_family >=
                          IWL_DEVICE_FAMILY_AX210);
        int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

        if (rxq->bd)
                dma_free_coherent(trans->dev,
                                  free_size * rxq->queue_size,
                                  rxq->bd, rxq->bd_dma);
        rxq->bd_dma = 0;
        rxq->bd = NULL;

        rxq->rb_stts_dma = 0;
        rxq->rb_stts = NULL;

        if (rxq->used_bd)
                dma_free_coherent(trans->dev,
                                  (use_rx_td ? sizeof(*rxq->cd) :
                                   sizeof(__le32)) * rxq->queue_size,
                                  rxq->used_bd, rxq->used_bd_dma);
        rxq->used_bd_dma = 0;
        rxq->used_bd = NULL;

        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
                return;

        if (rxq->tr_tail)
                dma_free_coherent(dev, sizeof(__le16),
                                  rxq->tr_tail, rxq->tr_tail_dma);
        rxq->tr_tail_dma = 0;
        rxq->tr_tail = NULL;

        if (rxq->cr_tail)
                dma_free_coherent(dev, sizeof(__le16),
                                  rxq->cr_tail, rxq->cr_tail_dma);
        rxq->cr_tail_dma = 0;
        rxq->cr_tail = NULL;
}

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct device *dev = trans->dev;
        int i;
        int free_size;
        bool use_rx_td = (trans->trans_cfg->device_family >=
                          IWL_DEVICE_FAMILY_AX210);
        size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
                              sizeof(struct iwl_rb_status);

        spin_lock_init(&rxq->lock);
        if (trans->trans_cfg->mq_rx_supported)
                rxq->queue_size = MQ_RX_TABLE_SIZE;
        else
                rxq->queue_size = RX_QUEUE_SIZE;

        free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

        /*
         * Allocate the circular buffer of Read Buffer Descriptors
         * (RBDs)
         */
        rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
                                     &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err;

        if (trans->trans_cfg->mq_rx_supported) {
                rxq->used_bd = dma_alloc_coherent(dev,
                                                  (use_rx_td ? sizeof(*rxq->cd) :
                                                   sizeof(__le32)) * rxq->queue_size,
                                                  &rxq->used_bd_dma,
                                                  GFP_KERNEL);
                if (!rxq->used_bd)
                        goto err;
        }

        rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
        rxq->rb_stts_dma =
                trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

        if (!use_rx_td)
                return 0;

        /* Allocate the driver's pointer to TR tail */
        rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
                                          &rxq->tr_tail_dma, GFP_KERNEL);
        if (!rxq->tr_tail)
                goto err;

        /* Allocate the driver's pointer to CR tail */
        rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
                                          &rxq->cr_tail_dma, GFP_KERNEL);
        if (!rxq->cr_tail)
                goto err;

        return 0;

err:
        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                iwl_pcie_free_rxq_dma(trans, rxq);
        }

        return -ENOMEM;
}

int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, ret;
        size_t rb_stts_size = trans->trans_cfg->device_family >=
                                IWL_DEVICE_FAMILY_AX210 ?
                              sizeof(__le16) : sizeof(struct iwl_rb_status);

        if (WARN_ON(trans_pcie->rxq))
                return -EINVAL;

        trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
                                  GFP_KERNEL);
        if (!trans_pcie->rxq)
                return -ENOMEM;

        spin_lock_init(&rba->lock);

        /*
         * Allocate the driver's pointer to receive buffer status.
         * Allocate for all queues contiguously (HW requirement).
         */
        trans_pcie->base_rb_stts =
                        dma_alloc_coherent(trans->dev,
                                           rb_stts_size * trans->num_rx_queues,
                                           &trans_pcie->base_rb_stts_dma,
                                           GFP_KERNEL);
        if (!trans_pcie->base_rb_stts) {
                ret = -ENOMEM;
                goto err;
        }

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                rxq->id = i;
                ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
                if (ret)
                        goto err;
        }
        return 0;

err:
        if (trans_pcie->base_rb_stts) {
                dma_free_coherent(trans->dev,
                                  rb_stts_size * trans->num_rx_queues,
                                  trans_pcie->base_rb_stts,
                                  trans_pcie->base_rb_stts_dma);
                trans_pcie->base_rb_stts = NULL;
                trans_pcie->base_rb_stts_dma = 0;
        }
        kfree(trans_pcie->rxq);

        return ret;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 rb_size;
        unsigned long flags;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

        switch (trans_pcie->rx_buf_size) {
        case IWL_AMSDU_4K:
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
                break;
        case IWL_AMSDU_8K:
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
                break;
        case IWL_AMSDU_12K:
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
                break;
        default:
                WARN_ON(1);
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
        }

        if (!iwl_trans_grab_nic_access(trans, &flags))
                return;

        /* Stop Rx DMA */
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        /* reset and flush pointers */
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

        /* Reset driver's Rx queue write index */
        iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                    (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                    rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k or 12k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                    rb_size |
                    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        iwl_trans_release_nic_access(trans, &flags);

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (trans->cfg->host_interrupt_operation_mode)
                iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 rb_size, enabled = 0;
        unsigned long flags;
        int i;

        switch (trans_pcie->rx_buf_size) {
        case IWL_AMSDU_2K:
                rb_size = RFH_RXF_DMA_RB_SIZE_2K;
                break;
        case IWL_AMSDU_4K:
                rb_size = RFH_RXF_DMA_RB_SIZE_4K;
                break;
        case IWL_AMSDU_8K:
                rb_size = RFH_RXF_DMA_RB_SIZE_8K;
                break;
        case IWL_AMSDU_12K:
                rb_size = RFH_RXF_DMA_RB_SIZE_12K;
                break;
        default:
                WARN_ON(1);
                rb_size = RFH_RXF_DMA_RB_SIZE_4K;
        }

        if (!iwl_trans_grab_nic_access(trans, &flags))
                return;

        /* Stop Rx DMA */
        iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
        /* disable free and used rx queue operation */
        iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

        for (i = 0; i < trans->num_rx_queues; i++) {
                /* Tell device where to find RBD free table in DRAM */
                iwl_write_prph64_no_grab(trans,
                                         RFH_Q_FRBDCB_BA_LSB(i),
                                         trans_pcie->rxq[i].bd_dma);
                /* Tell device where to find RBD used table in DRAM */
                iwl_write_prph64_no_grab(trans,
                                         RFH_Q_URBDCB_BA_LSB(i),
                                         trans_pcie->rxq[i].used_bd_dma);
                /* Tell device where in DRAM to update its Rx status */
                iwl_write_prph64_no_grab(trans,
                                         RFH_Q_URBD_STTS_WPTR_LSB(i),
                                         trans_pcie->rxq[i].rb_stts_dma);
                /* Reset device index tables */
                iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
                iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
                iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

                enabled |= BIT(i) | BIT(i + 16);
        }

        /*
         * Enable Rx DMA
         * Rx buffer size 4 or 8k or 12k
         * Min RB size 4 or 8
         * Drop frames that exceed RB size
         * 512 RBDs
         */
        iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
                               RFH_DMA_EN_ENABLE_VAL | rb_size |
                               RFH_RXF_DMA_MIN_RB_4_8 |
                               RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
                               RFH_RXF_DMA_RBDCB_SIZE_512);

        /*
         * Activate DMA snooping.
         * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
         * Default queue is 0
         */
        iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
                               RFH_GEN_CFG_RFH_DMA_SNOOP |
                               RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
                               RFH_GEN_CFG_SERVICE_DMA_SNOOP |
                               RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
                                               trans->cfg->integrated ?
                                               RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
                                               RFH_GEN_CFG_RB_CHUNK_SIZE_128));
        /* Enable the relevant rx queues */
        iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

        iwl_trans_release_nic_access(trans, &flags);

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
        lockdep_assert_held(&rxq->lock);

        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;
        rxq->used_count = 0;
}

int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
        WARN_ON(1);
        return 0;
}

int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *def_rxq;
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, err, queue_size, allocator_pool_size, num_alloc;

        if (!trans_pcie->rxq) {
                err = iwl_pcie_rx_alloc(trans);
                if (err)
                        return err;
        }
        def_rxq = trans_pcie->rxq;

        cancel_work_sync(&rba->rx_alloc);

        spin_lock(&rba->lock);
        atomic_set(&rba->req_pending, 0);
        atomic_set(&rba->req_ready, 0);
        INIT_LIST_HEAD(&rba->rbd_allocated);
        INIT_LIST_HEAD(&rba->rbd_empty);
        spin_unlock(&rba->lock);

        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_free_rbs_pool(trans);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                def_rxq->queue[i] = NULL;

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                spin_lock(&rxq->lock);
                /*
                 * Set read write pointer to reflect that we have processed
                 * and used all buffers, but have not restocked the Rx queue
                 * with fresh buffers
                 */
                rxq->read = 0;
                rxq->write = 0;
                rxq->write_actual = 0;
                memset(rxq->rb_stts, 0,
                       (trans->trans_cfg->device_family >=
                        IWL_DEVICE_FAMILY_AX210) ?
                       sizeof(__le16) : sizeof(struct iwl_rb_status));

                iwl_pcie_rx_init_rxb_lists(rxq);

                if (!rxq->napi.poll)
                        netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
                                       iwl_pcie_dummy_napi_poll, 64);

                spin_unlock(&rxq->lock);
        }

        /* move the pool to the default queue and allocator ownerships */
        queue_size = trans->trans_cfg->mq_rx_supported ?
                        MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
        allocator_pool_size = trans->num_rx_queues *
                (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
        num_alloc = queue_size + allocator_pool_size;
        BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
                     ARRAY_SIZE(trans_pcie->rx_pool));
        for (i = 0; i < num_alloc; i++) {
                struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

                if (i < allocator_pool_size)
                        list_add(&rxb->list, &rba->rbd_empty);
                else
                        list_add(&rxb->list, &def_rxq->rx_used);
                trans_pcie->global_table[i] = rxb;
                rxb->vid = (u16)(i + 1);
                rxb->invalid = true;
        }

        iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

        return 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret = _iwl_pcie_rx_init(trans);

        if (ret)
                return ret;

        if (trans->trans_cfg->mq_rx_supported)
                iwl_pcie_rx_mq_hw_init(trans);
        else
                iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

        iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

        spin_lock(&trans_pcie->rxq->lock);
        iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
        spin_unlock(&trans_pcie->rxq->lock);

        return 0;
}

int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

        /*
         * We don't configure the RFH.
         * Restock will be done at alive, after firmware configured the RFH.
         */
        return _iwl_pcie_rx_init(trans);
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i;
        size_t rb_stts_size = trans->trans_cfg->device_family >=
                                IWL_DEVICE_FAMILY_AX210 ?
                              sizeof(__le16) : sizeof(struct iwl_rb_status);

        /*
         * if rxq is NULL, it means that nothing has been allocated,
         * exit now
         */
        if (!trans_pcie->rxq) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        cancel_work_sync(&rba->rx_alloc);

        iwl_pcie_free_rbs_pool(trans);

        if (trans_pcie->base_rb_stts) {
                dma_free_coherent(trans->dev,
                                  rb_stts_size * trans->num_rx_queues,
                                  trans_pcie->base_rb_stts,
                                  trans_pcie->base_rb_stts_dma);
                trans_pcie->base_rb_stts = NULL;
                trans_pcie->base_rb_stts_dma = 0;
        }

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                iwl_pcie_free_rxq_dma(trans, rxq);

                if (rxq->napi.poll)
                        netif_napi_del(&rxq->napi);
        }
        kfree(trans_pcie->rxq);
}

static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
                                          struct iwl_rb_allocator *rba)
{
        spin_lock(&rba->lock);
        list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
        spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
                                  struct iwl_rx_mem_buffer *rxb,
                                  struct iwl_rxq *rxq, bool emergency)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;

        /* Move the RBD to the used list, will be moved to allocator in batches
         * before claiming or posting a request */
        list_add_tail(&rxb->list, &rxq->rx_used);

        if (unlikely(emergency))
                return;

        /* Count the allocator owned RBDs */
        rxq->used_count++;

        /* If we have RX_POST_REQ_ALLOC new released rx buffers -
         * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
         * used for the case we failed to claim RX_CLAIM_REQ_ALLOC before,
         * but we still need to post another request.
         */
        if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
                /* Move the 2 RBDs to the allocator ownership.
                 * Allocator has another 6 from pool for the request completion
                 */
                iwl_pcie_rx_move_to_allocator(rxq, rba);

                atomic_inc(&rba->req_pending);
                queue_work(rba->alloc_wq, &rba->rx_alloc);
        }
}

1228 | ||
9805c446 | 1229 | static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, |
78485054 | 1230 | struct iwl_rxq *rxq, |
26d535ae | 1231 | struct iwl_rx_mem_buffer *rxb, |
7891965d SS |
1232 | bool emergency, |
1233 | int i) | |
df2f3216 JB |
1234 | { |
1235 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
b2a3b1c1 | 1236 | struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; |
0c19744c | 1237 | bool page_stolen = false; |
b2cf410c | 1238 | int max_len = PAGE_SIZE << trans_pcie->rx_page_order; |
0c19744c | 1239 | u32 offset = 0; |
df2f3216 JB |
1240 | |
1241 | if (WARN_ON(!rxb)) | |
1242 | return; | |
1243 | ||
0c19744c JB |
1244 | dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); |
1245 | ||
1246 | while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { | |
1247 | struct iwl_rx_packet *pkt; | |
0c19744c JB |
1248 | u16 sequence; |
1249 | bool reclaim; | |
f7e6469f | 1250 | int index, cmd_index, len; |
0c19744c JB |
1251 | struct iwl_rx_cmd_buffer rxcb = { |
1252 | ._offset = offset, | |
d13f1862 | 1253 | ._rx_page_order = trans_pcie->rx_page_order, |
0c19744c JB |
1254 | ._page = rxb->page, |
1255 | ._page_stolen = false, | |
0d6c4a2e | 1256 | .truesize = max_len, |
0c19744c JB |
1257 | }; |
1258 | ||
1259 | pkt = rxb_addr(&rxcb); | |
1260 | ||
3bfdee76 JB |
1261 | if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { |
1262 | IWL_DEBUG_RX(trans, | |
1263 | "Q %d: RB end marker at offset %d\n", | |
1264 | rxq->id, offset); | |
0c19744c | 1265 | break; |
3bfdee76 | 1266 | } |
0c19744c | 1267 | |
a395058e JB |
1268 | WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> |
1269 | FH_RSCSR_RXQ_POS != rxq->id, | |
1270 | "frame on invalid queue - is on %d and indicates %d\n", | |
1271 | rxq->id, | |
1272 | (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> | |
1273 | FH_RSCSR_RXQ_POS); | |
ab2e696b | 1274 | |
9243efcc | 1275 | IWL_DEBUG_RX(trans, |
3bfdee76 JB |
1276 | "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", |
1277 | rxq->id, offset, | |
39bdb17e SD |
1278 | iwl_get_cmd_string(trans, |
1279 | iwl_cmd_id(pkt->hdr.cmd, | |
1280 | pkt->hdr.group_id, | |
1281 | 0)), | |
35177c99 SS |
1282 | pkt->hdr.group_id, pkt->hdr.cmd, |
1283 | le16_to_cpu(pkt->hdr.sequence)); | |
0c19744c | 1284 | |
65b30348 | 1285 | len = iwl_rx_packet_len(pkt); |
0c19744c | 1286 | len += sizeof(u32); /* account for status word */ |
f042c2eb JB |
1287 | trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); |
1288 | trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); | |
0c19744c JB |
1289 | |
1290 | /* Reclaim a command buffer only if this packet is a response | |
1291 | * to a (driver-originated) command. | |
1292 | * If the packet (e.g. Rx frame) originated from uCode, | |
1293 | * there is no command buffer to reclaim. | |
1294 | * Ucode should set SEQ_RX_FRAME bit if ucode-originated, | |
1295 | * but apparently a few don't get set; catch them here. */ | |
1296 | reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); | |
d8a130b0 | 1297 | if (reclaim && !pkt->hdr.group_id) { |
0c19744c JB |
1298 | int i; |
1299 | ||
1300 | for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { | |
1301 | if (trans_pcie->no_reclaim_cmds[i] == | |
1302 | pkt->hdr.cmd) { | |
1303 | reclaim = false; | |
1304 | break; | |
1305 | } | |
d663ee73 JB |
1306 | } |
1307 | } | |
df2f3216 | 1308 | |
0c19744c JB |
1309 | sequence = le16_to_cpu(pkt->hdr.sequence); |
1310 | index = SEQ_TO_INDEX(sequence); | |
4ecab561 | 1311 | cmd_index = iwl_pcie_get_cmd_index(txq, index); |
0c19744c | 1312 | |
9416560e | 1313 | if (rxq->id == trans_pcie->def_rx_queue) |
bce97731 SS |
1314 | iwl_op_mode_rx(trans->op_mode, &rxq->napi, |
1315 | &rxcb); | |
1316 | else | |
1317 | iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, | |
1318 | &rxcb, rxq->id); | |
0c19744c | 1319 | |
96791422 | 1320 | if (reclaim) { |
5d4185ae | 1321 | kzfree(txq->entries[cmd_index].free_buf); |
f4feb8ac | 1322 | txq->entries[cmd_index].free_buf = NULL; |
96791422 EG |
1323 | } |
1324 | ||
0c19744c JB |
1325 | /* |
1326 | * After here, we should always check rxcb._page_stolen, | |
1327 | * if it is true then one of the handlers took the page. | |
1328 | */ | |
1329 | ||
1330 | if (reclaim) { | |
1331 | /* Invoke any callbacks, transfer the buffer to caller, | |
1332 | * and fire off the (possibly) blocking | |
1333 | * iwl_trans_send_cmd() | |
1334 | * as we reclaim the driver command queue */ | |
1335 | if (!rxcb._page_stolen) | |
f7e6469f | 1336 | iwl_pcie_hcmd_complete(trans, &rxcb); |
0c19744c JB |
1337 | else |
1338 | IWL_WARN(trans, "Claim null rxb?\n"); | |
1339 | } | |
1340 | ||
1341 | page_stolen |= rxcb._page_stolen; | |
3681021f | 1342 | if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) |
0307c839 | 1343 | break; |
0c19744c | 1344 | offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); |
df2f3216 JB |
1345 | } |
1346 | ||
0c19744c JB |
1347 | /* page was stolen from us -- free our reference */ |
1348 | if (page_stolen) { | |
b2cf410c | 1349 | __free_pages(rxb->page, trans_pcie->rx_page_order); |
df2f3216 | 1350 | rxb->page = NULL; |
0c19744c | 1351 | } |
df2f3216 JB |
1352 | |
1353 | /* Reuse the page if possible. For notification packets and | |
1354 | * SKBs that fail to Rx correctly, add them back into the | |
1355 | * rx_free list for reuse later. */ | |
df2f3216 JB |
1356 | if (rxb->page != NULL) { |
1357 | rxb->page_dma = | |
1358 | dma_map_page(trans->dev, rxb->page, 0, | |
20d3b647 JB |
1359 | PAGE_SIZE << trans_pcie->rx_page_order, |
1360 | DMA_FROM_DEVICE); | |
7c341582 JB |
1361 | if (dma_mapping_error(trans->dev, rxb->page_dma)) { |
1362 | /* | |
1363 | * free the page(s) as well to not break | |
1364 | * the invariant that the items on the used | |
1365 | * list have no page(s) | |
1366 | */ | |
1367 | __free_pages(rxb->page, trans_pcie->rx_page_order); | |
1368 | rxb->page = NULL; | |
26d535ae | 1369 | iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); |
7c341582 JB |
1370 | } else { |
1371 | list_add_tail(&rxb->list, &rxq->rx_free); | |
1372 | rxq->free_count++; | |
1373 | } | |
df2f3216 | 1374 | } else |
26d535ae | 1375 | iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); |
df2f3216 JB |
1376 | } |
1377 | ||
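Editor's note — the tail of iwl_pcie_rx_handle_rb() above enforces an invariant: a buffer that goes back through iwl_pcie_rx_reuse_rbd() must not own a page, so a failed DMA mapping frees the page before the descriptor is recycled. A stub-based sketch of that control flow; the _demo helpers merely stand in for dma_map_page()/dma_mapping_error() and are not real APIs:

#include <stdbool.h>
#include <stddef.h>

struct rxb_demo { void *page; unsigned long long dma; };

/* Stubs standing in for the DMA API, for illustration only. */
static unsigned long long map_page_demo(void *page) { return page ? 1 : 0; }
static bool mapping_error_demo(unsigned long long dma) { return dma == 0; }
static void free_page_demo(void **page) { *page = NULL; }

/* Returns true if the buffer can go back on the free list. */
static bool recycle_demo(struct rxb_demo *rxb)
{
	rxb->dma = map_page_demo(rxb->page);
	if (mapping_error_demo(rxb->dma)) {
		/* Invariant: entries headed for the used list own no page,
		 * so free it before recycling the descriptor. */
		free_page_demo(&rxb->page);
		return false;	/* caller sends it down the reuse path */
	}
	return true;		/* caller does list_add_tail(..., rx_free) */
}

int main(void)
{
	struct rxb_demo rxb = { .page = (void *)&rxb };	/* fake page */
	return recycle_demo(&rxb) ? 0 : 1;
}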
1b4bbe8b SS |
1378 | static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, |
1379 | struct iwl_rxq *rxq, int i) | |
1380 | { | |
1381 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
1382 | struct iwl_rx_mem_buffer *rxb; | |
1383 | u16 vid; | |
1384 | ||
f826faaa JB |
1385 | BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); |
1386 | ||
286ca8eb | 1387 | if (!trans->trans_cfg->mq_rx_supported) { |
1b4bbe8b SS |
1388 | rxb = rxq->queue[i]; |
1389 | rxq->queue[i] = NULL; | |
1390 | return rxb; | |
1391 | } | |
1392 | ||
1393 | /* used_bd entries are 32 or 16 bits wide, but only 12 bits carry the vid */
3681021f | 1394 | if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) |
1b4bbe8b SS |
1395 | vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF; |
1396 | else | |
1397 | vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; | |
1398 | ||
1399 | if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table)) | |
1400 | goto out_err; | |
1401 | ||
1402 | rxb = trans_pcie->global_table[vid - 1]; | |
1403 | if (rxb->invalid) | |
1404 | goto out_err; | |
1405 | ||
85d78bb1 SS |
1406 | IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); |
1407 | ||
1b4bbe8b SS |
1408 | rxb->invalid = true; |
1409 | ||
1410 | return rxb; | |
1411 | ||
1412 | out_err: | |
1413 | WARN(1, "Invalid rxb from HW %u\n", (u32)vid); | |
1414 | iwl_force_nmi(trans); | |
1415 | return NULL; | |
1416 | } | |
1417 | ||
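Editor's note — iwl_pcie_get_rxb() trusts nothing from the hardware: it masks the ID to 12 bits, range-checks it against the table (IDs are 1-based, so 0 is invalid and the upper bound is inclusive), and rejects entries already marked invalid so a replayed ID cannot hand out the same buffer twice. A standalone sketch of those checks; every _demo name is hypothetical:

#include <stdbool.h>
#include <stdint.h>

#define TABLE_SIZE_DEMO 512

struct rxb_demo {
	bool invalid;		/* set while the buffer is owned by us */
};

static struct rxb_demo table_demo[TABLE_SIZE_DEMO];

static struct rxb_demo *lookup_demo(uint32_t raw)
{
	uint16_t vid = raw & 0x0FFF;	/* only 12 bits carry the ID */

	if (!vid || vid > TABLE_SIZE_DEMO)	/* 1-based, inclusive bound */
		return NULL;

	struct rxb_demo *rxb = &table_demo[vid - 1];
	if (rxb->invalid)		/* stale/replayed ID from the device */
		return NULL;

	rxb->invalid = true;		/* consume; restocking clears it */
	return rxb;
}

int main(void)
{
	return lookup_demo(0x1001) == &table_demo[0] ? 0 : 1;	/* vid 1 */
}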
990aa6d7 EG |
1418 | /* |
1419 | * iwl_pcie_rx_handle - Main entry function for receiving responses from fw | |
ab697a9f | 1420 | */ |
2e5d4a8f | 1421 | static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) |
ab697a9f | 1422 | { |
df2f3216 | 1423 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
30f24eab | 1424 | struct iwl_rxq *rxq; |
d56daea4 | 1425 | u32 r, i, count = 0; |
26d535ae | 1426 | bool emergency = false; |
ab697a9f | 1427 | |
30f24eab JB |
1428 | if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd)) |
1429 | return; | |
1430 | ||
1431 | rxq = &trans_pcie->rxq[queue]; | |
1432 | ||
f14d6b39 JB |
1433 | restart: |
1434 | spin_lock(&rxq->lock); | |
ab697a9f EG |
1435 | /* uCode's read index (stored in shared DRAM) indicates the last Rx |
1436 | * buffer that the driver may process (last buffer filled by ucode). */ | |
0307c839 | 1437 | r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; |
ab697a9f EG |
1438 | i = rxq->read; |
1439 | ||
5eae443e SS |
1440 | /* W/A 9000 device step A0 wrap-around bug */ |
1441 | r &= (rxq->queue_size - 1); | |
1442 | ||
ab697a9f EG |
1443 | /* Rx interrupt, but nothing sent from uCode */ |
1444 | if (i == r) | |
5eae443e | 1445 | IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); |
ab697a9f | 1446 | |
ab697a9f | 1447 | while (i != r) { |
868a1e86 | 1448 | struct iwl_rb_allocator *rba = &trans_pcie->rba; |
48a2d66f | 1449 | struct iwl_rx_mem_buffer *rxb; |
868a1e86 ST |
1450 | /* number of RBDs still waiting for page allocation */ |
1451 | u32 rb_pending_alloc = | |
1452 | atomic_read(&trans_pcie->rba.req_pending) * | |
1453 | RX_CLAIM_REQ_ALLOC; | |
1454 | ||
1455 | if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && | |
1456 | !emergency)) { | |
1457 | iwl_pcie_rx_move_to_allocator(rxq, rba); | |
26d535ae | 1458 | emergency = true; |
6dcdd165 SS |
1459 | IWL_DEBUG_TPT(trans, |
1460 | "RX path is in emergency. Pending allocations %d\n", | |
1461 | rb_pending_alloc); | |
868a1e86 | 1462 | } |
26d535ae | 1463 | |
85d78bb1 SS |
1464 | IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); |
1465 | ||
1b4bbe8b SS |
1466 | rxb = iwl_pcie_get_rxb(trans, rxq, i); |
1467 | if (!rxb) | |
1468 | goto out; | |
ab697a9f | 1469 | |
7891965d | 1470 | iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); |
ab697a9f | 1471 | |
96a6497b | 1472 | i = (i + 1) & (rxq->queue_size - 1); |
26d535ae | 1473 | |
d56daea4 SS |
1474 | /* |
1475 | * If we have RX_CLAIM_REQ_ALLOC released rx buffers - | |
1476 | * try to claim the pre-allocated buffers from the allocator. | |
1477 | * If they are not ready, we will try to reclaim them next time.
1478 | * There is no need to reschedule work - the allocator exits only
1479 | * on success.
1480 | */ | |
1481 | if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) | |
1482 | iwl_pcie_rx_allocator_get(trans, rxq); | |
1483 | ||
1484 | if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { | |
d56daea4 | 1485 | /* Add the remaining empty RBDs for allocator use */ |
868a1e86 | 1486 | iwl_pcie_rx_move_to_allocator(rxq, rba); |
d56daea4 | 1487 | } else if (emergency) { |
255ba065 | 1488 | count++; |
26d535ae | 1489 | if (count == 8) { |
255ba065 | 1490 | count = 0; |
6dcdd165 SS |
1491 | if (rb_pending_alloc < rxq->queue_size / 3) { |
1492 | IWL_DEBUG_TPT(trans, | |
1493 | "RX path exited emergency. Pending allocations %d\n", | |
1494 | rb_pending_alloc); | |
26d535ae | 1495 | emergency = false; |
6dcdd165 | 1496 | } |
e0e168dc GG |
1497 | |
1498 | rxq->read = i; | |
26d535ae | 1499 | spin_unlock(&rxq->lock); |
78485054 | 1500 | iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); |
96a6497b | 1501 | iwl_pcie_rxq_restock(trans, rxq); |
e0e168dc GG |
1502 | goto restart; |
1503 | } | |
26d535ae | 1504 | } |
ab697a9f | 1505 | } |
5eae443e | 1506 | out: |
ab697a9f EG |
1507 | /* Backtrack one entry */ |
1508 | rxq->read = i; | |
0307c839 | 1509 | /* update cr tail with the rxq read pointer */ |
3681021f | 1510 | if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) |
0307c839 | 1511 | *rxq->cr_tail = cpu_to_le16(r); |
f14d6b39 JB |
1512 | spin_unlock(&rxq->lock); |
1513 | ||
26d535ae SS |
1514 | /* |
1515 | * Handle a case where in emergency there are some unallocated RBDs.
1516 | * Those RBDs are in the used list, but are not tracked by the queue's
1517 | * used_count, which counts only allocator-owned RBDs.
1518 | * Unallocated emergency RBDs must be allocated on exit; otherwise,
1519 | * when this function is called again it may not be in emergency mode and
1520 | * they will be handed to the allocator with no tracking in the RBD
1521 | * allocator counters, which will lead to them never being claimed back
1522 | * by the queue.
1523 | * By allocating them here, they are now in the queue free list, and
1524 | * will be restocked by the next call of iwl_pcie_rxq_restock.
1525 | */ | |
1526 | if (unlikely(emergency && count)) | |
78485054 | 1527 | iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); |
255ba065 | 1528 | |
bce97731 SS |
1529 | if (rxq->napi.poll) |
1530 | napi_gro_flush(&rxq->napi, false); | |
e0e168dc GG |
1531 | |
1532 | iwl_pcie_rxq_restock(trans, rxq); | |
ab697a9f EG |
1533 | } |
1534 | ||
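Editor's note — the loop above advances the read index with a mask, i = (i + 1) & (queue_size - 1), so wrap-around costs no branch; this only works because the queue size is a power of two. A minimal sketch of the same ring walk with demo constants:

#include <stdint.h>
#include <stdio.h>

#define QSIZE_DEMO 256	/* must be a power of two for the mask trick */

static void drain_demo(uint32_t read, uint32_t write)
{
	uint32_t i = read & (QSIZE_DEMO - 1);	/* SW read pointer */
	uint32_t r = write & (QSIZE_DEMO - 1);	/* HW write pointer */

	if (i == r)
		printf("HW = SW = %u, nothing to do\n", r);

	while (i != r) {
		/* ... handle the buffer in slot i ... */
		i = (i + 1) & (QSIZE_DEMO - 1);	/* wrap without a branch */
	}
}

int main(void)
{
	drain_demo(250, 4);	/* walks 250..255, wraps, then 0..3 */
	return 0;
}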
2e5d4a8f HD |
1535 | static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) |
1536 | { | |
1537 | u8 queue = entry->entry; | |
1538 | struct msix_entry *entries = entry - queue; | |
1539 | ||
1540 | return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); | |
1541 | } | |
1542 | ||
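Editor's note — iwl_pcie_get_trans_pcie() climbs from a pointer to one MSI-X entry back to the transport that embeds the whole array: pointer arithmetic steps back to element 0, then container_of() subtracts the member offset. A minimal, self-contained sketch of the pattern; all _demo names are hypothetical:

#include <stddef.h>
#include <stdio.h>

struct msix_entry_demo { int vector; int entry; };
struct parent_demo {
	int some_state;
	struct msix_entry_demo msix_entries[4];
};

/* Same idea as the kernel's container_of(). */
#define container_of_demo(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct parent_demo p = { .some_state = 42 };
	struct msix_entry_demo *entry = &p.msix_entries[2];
	entry->entry = 2;	/* each entry knows its own index */

	/* Step back to element 0, then to the enclosing struct. */
	struct msix_entry_demo *first = entry - entry->entry;
	struct parent_demo *parent =
		container_of_demo(first, struct parent_demo, msix_entries[0]);

	printf("%d\n", parent->some_state);	/* prints 42 */
	return 0;
}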
2e5d4a8f HD |
1543 | /* |
1544 | * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1545 | * This interrupt handler should be used with RSS queue only. | |
1546 | */ | |
1547 | irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) | |
1548 | { | |
1549 | struct msix_entry *entry = dev_id; | |
1550 | struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); | |
1551 | struct iwl_trans *trans = trans_pcie->trans; | |
1552 | ||
c42ff65d JB |
1553 | trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); |
1554 | ||
5eae443e SS |
1555 | if (WARN_ON(entry->entry >= trans->num_rx_queues)) |
1556 | return IRQ_NONE; | |
1557 | ||
2e5d4a8f HD |
1558 | lock_map_acquire(&trans->sync_cmd_lockdep_map); |
1559 | ||
1560 | local_bh_disable(); | |
1561 | iwl_pcie_rx_handle(trans, entry->entry); | |
1562 | local_bh_enable(); | |
1563 | ||
1564 | iwl_pcie_clear_irq(trans, entry); | |
1565 | ||
1566 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
1567 | ||
1568 | return IRQ_HANDLED; | |
1569 | } | |
1570 | ||
990aa6d7 EG |
1571 | /* |
1572 | * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card | |
7ff94706 | 1573 | */ |
990aa6d7 | 1574 | static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) |
7ff94706 | 1575 | { |
f946b529 | 1576 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1103323c | 1577 | int i; |
f946b529 | 1578 | |
7ff94706 | 1579 | /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ |
035f7ff2 | 1580 | if (trans->cfg->internal_wimax_coex && |
95411d04 | 1581 | !trans->cfg->apmg_not_supported && |
1042db2a | 1582 | (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & |
20d3b647 | 1583 | APMS_CLK_VAL_MRB_FUNC_MODE) || |
1042db2a | 1584 | (iwl_read_prph(trans, APMG_PS_CTRL_REG) & |
20d3b647 | 1585 | APMG_PS_CTRL_VAL_RESET_REQ))) { |
eb7ff77e | 1586 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); |
8a8bbdb4 | 1587 | iwl_op_mode_wimax_active(trans->op_mode); |
f946b529 | 1588 | wake_up(&trans_pcie->wait_command_queue); |
7ff94706 EG |
1589 | return; |
1590 | } | |
1591 | ||
286ca8eb | 1592 | for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { |
13a3a390 SS |
1593 | if (!trans_pcie->txq[i]) |
1594 | continue; | |
b2a3b1c1 | 1595 | del_timer(&trans_pcie->txq[i]->stuck_timer); |
13a3a390 | 1596 | } |
1103323c | 1597 | |
7d75f32e EG |
1598 | /* The STATUS_FW_ERROR bit is set in this function. This must happen |
1599 | * before we wake up the command caller, to ensure a proper cleanup. */ | |
1600 | iwl_trans_fw_error(trans); | |
1601 | ||
2a988e98 AN |
1602 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); |
1603 | wake_up(&trans_pcie->wait_command_queue); | |
7ff94706 EG |
1604 | } |
1605 | ||
7117c000 | 1606 | static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) |
fc84472b | 1607 | { |
fc84472b EG |
1608 | u32 inta; |
1609 | ||
46e81af9 | 1610 | lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); |
fc84472b EG |
1611 | |
1612 | trace_iwlwifi_dev_irq(trans->dev); | |
1613 | ||
1614 | /* Discover which interrupts are active/pending */ | |
1615 | inta = iwl_read32(trans, CSR_INT); | |
1616 | ||
fc84472b | 1617 | /* the thread will service interrupts and re-enable them */ |
fe523dc9 | 1618 | return inta; |
fc84472b EG |
1619 | } |
1620 | ||
1621 | /* a device (PCI-E) page is 4096 bytes long */ | |
1622 | #define ICT_SHIFT 12 | |
1623 | #define ICT_SIZE (1 << ICT_SHIFT) | |
1624 | #define ICT_COUNT (ICT_SIZE / sizeof(u32)) | |
1625 | ||
1626 | /* Interrupt handler using the ICT table. With this scheme the driver no
1627 | * longer reads the INTA register to discover interrupts; reading that
1628 | * register is expensive. Instead, the device writes interrupt causes into
1629 | * the ICT table in DRAM, increments its index and fires an interrupt at
1630 | * the driver. The driver ORs all ICT table entries from the current index
1631 | * up to the first entry with a 0 value; the result is the interrupt mask
1632 | * to service. The driver then zeroes the consumed entries and updates the index.
1633 | */ | |
7117c000 | 1634 | static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) |
fc84472b EG |
1635 | { |
1636 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
fc84472b EG |
1637 | u32 inta; |
1638 | u32 val = 0; | |
1639 | u32 read; | |
1640 | ||
fc84472b EG |
1641 | trace_iwlwifi_dev_irq(trans->dev); |
1642 | ||
1643 | /* Ignore interrupt if there's nothing in NIC to service. | |
1644 | * This may be due to IRQ shared with another device, | |
1645 | * or due to sporadic interrupts thrown from our NIC. */ | |
1646 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); | |
1647 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); | |
7ba1faa4 EG |
1648 | if (!read) |
1649 | return 0; | |
fc84472b EG |
1650 | |
1651 | /* | |
1652 | * Collect all entries up to the first 0, starting from ict_index; | |
1653 | * note we already read at ict_index. | |
1654 | */ | |
1655 | do { | |
1656 | val |= read; | |
1657 | IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", | |
1658 | trans_pcie->ict_index, read); | |
1659 | trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; | |
1660 | trans_pcie->ict_index = | |
83f32a4b | 1661 | ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); |
fc84472b EG |
1662 | |
1663 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); | |
1664 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, | |
1665 | read); | |
1666 | } while (read); | |
1667 | ||
1668 | /* We should not get this value, just ignore it. */ | |
1669 | if (val == 0xffffffff) | |
1670 | val = 0; | |
1671 | ||
1672 | /* | |
1673 | * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit | |
1674 | * (bit 15 before shifting it to 31) to clear when using interrupt | |
1675 | * coalescing. fortunately, bits 18 and 19 stay set when this happens | |
1676 | * so we use them to decide on the real state of the Rx bit. | |
1677 | * In other words, bit 15 is set if bit 18 or bit 19 is set.
1678 | */ | |
1679 | if (val & 0xC0000) | |
1680 | val |= 0x8000; | |
1681 | ||
1682 | inta = (0xff & val) | ((0xff00 & val) << 16); | |
fe523dc9 | 1683 | return inta; |
fc84472b EG |
1684 | } |
1685 | ||
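Editor's note — the last statement above unpacks the OR-ed ICT value into CSR_INT layout: byte 0 carries bits 0..7 and byte 1 carries what belongs at bits 24..31, and the 0xC0000/0x8000 fix-up runs before the unpack. A small self-checking sketch (demo code, not driver code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ict_to_inta_demo(uint32_t val)
{
	/* byte 0 -> CSR_INT bits 0..7, byte 1 -> bits 24..31 */
	return (val & 0xff) | ((val & 0xff00) << 16);
}

int main(void)
{
	/* 0x0183: byte0 = 0x83 (bits 0,1,7), byte1 = 0x01 (bit 24) */
	assert(ict_to_inta_demo(0x0183) == 0x01000083);

	/* The Rx w/a: bit 18 or 19 set implies bit 15 must be set too, and
	 * bit 15 lands on bit 31 (the Rx bit) after unpacking. */
	uint32_t val = 0x40000;
	if (val & 0xC0000)
		val |= 0x8000;
	printf("0x%08x\n", ict_to_inta_demo(val));	/* 0x80000000 */
	return 0;
}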
fa4de7f7 | 1686 | void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) |
3a6e168b JB |
1687 | { |
1688 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
1689 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
326477e4 | 1690 | bool hw_rfkill, prev, report; |
3a6e168b JB |
1691 | |
1692 | mutex_lock(&trans_pcie->mutex); | |
326477e4 | 1693 | prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); |
3a6e168b | 1694 | hw_rfkill = iwl_is_rfkill_set(trans); |
326477e4 JB |
1695 | if (hw_rfkill) { |
1696 | set_bit(STATUS_RFKILL_OPMODE, &trans->status); | |
1697 | set_bit(STATUS_RFKILL_HW, &trans->status); | |
1698 | } | |
1699 | if (trans_pcie->opmode_down) | |
1700 | report = hw_rfkill; | |
1701 | else | |
1702 | report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); | |
3a6e168b JB |
1703 | |
1704 | IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", | |
1705 | hw_rfkill ? "disable radio" : "enable radio"); | |
1706 | ||
1707 | isr_stats->rfkill++; | |
1708 | ||
326477e4 JB |
1709 | if (prev != report) |
1710 | iwl_trans_pcie_rf_kill(trans, report); | |
3a6e168b JB |
1711 | mutex_unlock(&trans_pcie->mutex); |
1712 | ||
1713 | if (hw_rfkill) { | |
1714 | if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, | |
1715 | &trans->status)) | |
1716 | IWL_DEBUG_RF_KILL(trans, | |
1717 | "Rfkill while SYNC HCMD in flight\n"); | |
1718 | wake_up(&trans_pcie->wait_command_queue); | |
1719 | } else { | |
326477e4 JB |
1720 | clear_bit(STATUS_RFKILL_HW, &trans->status); |
1721 | if (trans_pcie->opmode_down) | |
1722 | clear_bit(STATUS_RFKILL_OPMODE, &trans->status); | |
3a6e168b JB |
1723 | } |
1724 | } | |
1725 | ||
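Editor's note — the function above latches the previously reported rfkill state and calls iwl_trans_pcie_rf_kill() only when the reported state changes, so the op mode sees edges rather than every interrupt. A tiny sketch of that pattern, simplified to the opmode-down case; the _demo names are ours:

#include <stdbool.h>
#include <stdio.h>

static bool reported_demo;	/* last state handed to the op mode */

static void rfkill_event_demo(bool hw_rfkill)
{
	bool report = hw_rfkill;	/* opmode down: report raw HW state */

	if (reported_demo != report) {
		printf("radio %s\n", report ? "disabled" : "enabled");
		reported_demo = report;	/* notify exactly once per edge */
	}
}

int main(void)
{
	rfkill_event_demo(true);	/* prints: radio disabled */
	rfkill_event_demo(true);	/* no output: no state change */
	rfkill_event_demo(false);	/* prints: radio enabled */
	return 0;
}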
2bfb5092 | 1726 | irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) |
ab697a9f | 1727 | { |
2bfb5092 | 1728 | struct iwl_trans *trans = dev_id; |
20d3b647 JB |
1729 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1730 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
ab697a9f EG |
1731 | u32 inta = 0; |
1732 | u32 handled = 0; | |
ab697a9f | 1733 | |
2bfb5092 JB |
1734 | lock_map_acquire(&trans->sync_cmd_lockdep_map); |
1735 | ||
7b70bd63 | 1736 | spin_lock(&trans_pcie->irq_lock); |
ab697a9f | 1737 | |
0fec9542 EG |
1738 | /* dram interrupt table not set yet, |
1739 | * use legacy interrupt. | |
1740 | */ | |
1741 | if (likely(trans_pcie->use_ict)) | |
7117c000 | 1742 | inta = iwl_pcie_int_cause_ict(trans); |
0fec9542 | 1743 | else |
7117c000 | 1744 | inta = iwl_pcie_int_cause_non_ict(trans); |
0fec9542 | 1745 | |
7ba1faa4 EG |
1746 | if (iwl_have_debug_level(IWL_DL_ISR)) { |
1747 | IWL_DEBUG_ISR(trans, | |
1748 | "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", | |
1749 | inta, trans_pcie->inta_mask, | |
1750 | iwl_read32(trans, CSR_INT_MASK), | |
1751 | iwl_read32(trans, CSR_FH_INT_STATUS)); | |
1752 | if (inta & (~trans_pcie->inta_mask)) | |
1753 | IWL_DEBUG_ISR(trans, | |
1754 | "We got a masked interrupt (0x%08x)\n", | |
1755 | inta & (~trans_pcie->inta_mask)); | |
1756 | } | |
1757 | ||
1758 | inta &= trans_pcie->inta_mask; | |
1759 | ||
1760 | /* | |
1761 | * Ignore interrupt if there's nothing in NIC to service. | |
1762 | * This may be due to IRQ shared with another device, | |
1763 | * or due to sporadic interrupts thrown from our NIC. | |
1764 | */ | |
7117c000 | 1765 | if (unlikely(!inta)) { |
7ba1faa4 EG |
1766 | IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); |
1767 | /* | |
1768 | * Re-enable interrupts here since we don't | |
1769 | * have anything to service | |
1770 | */ | |
1771 | if (test_bit(STATUS_INT_ENABLED, &trans->status)) | |
f16c3ebf | 1772 | _iwl_enable_interrupts(trans); |
7b70bd63 | 1773 | spin_unlock(&trans_pcie->irq_lock); |
7117c000 EG |
1774 | lock_map_release(&trans->sync_cmd_lockdep_map); |
1775 | return IRQ_NONE; | |
1776 | } | |
1777 | ||
7ba1faa4 EG |
1778 | if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { |
1779 | /* | |
1780 | * Hardware disappeared. It might have | |
1781 | * already raised an interrupt. | |
1782 | */ | |
1783 | IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); | |
7b70bd63 | 1784 | spin_unlock(&trans_pcie->irq_lock); |
7117c000 | 1785 | goto out; |
a0f337cc EG |
1786 | } |
1787 | ||
ab697a9f EG |
1788 | /* Ack/clear/reset pending uCode interrupts. |
1789 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, | |
1790 | */ | |
1791 | /* There is a hardware bug in the interrupt mask function that some | |
1792 | * interrupts (e.g. CSR_INT_BIT_SCD) can still be generated even if
1793 | * they are disabled in the CSR_INT_MASK register. Furthermore the | |
1794 | * ICT interrupt handling mechanism has another bug that might cause | |
1795 | * these unmasked interrupts to fail to be detected. We work around the
1796 | * hardware bugs here by ACKing all the possible interrupts so that | |
1797 | * interrupt coalescing can still be achieved. | |
1798 | */ | |
7117c000 | 1799 | iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); |
ab697a9f | 1800 | |
51cd53ad | 1801 | if (iwl_have_debug_level(IWL_DL_ISR)) |
0ca24daf | 1802 | IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", |
51cd53ad | 1803 | inta, iwl_read32(trans, CSR_INT_MASK)); |
ab697a9f | 1804 | |
7b70bd63 | 1805 | spin_unlock(&trans_pcie->irq_lock); |
b49ba04a | 1806 | |
ab697a9f EG |
1807 | /* Now service all interrupt bits discovered above. */ |
1808 | if (inta & CSR_INT_BIT_HW_ERR) { | |
0c325769 | 1809 | IWL_ERR(trans, "Hardware error detected. Restarting.\n"); |
ab697a9f EG |
1810 | |
1811 | /* Tell the device to stop sending interrupts */ | |
0c325769 | 1812 | iwl_disable_interrupts(trans); |
ab697a9f | 1813 | |
1f7b6172 | 1814 | isr_stats->hw++; |
990aa6d7 | 1815 | iwl_pcie_irq_handle_error(trans); |
ab697a9f EG |
1816 | |
1817 | handled |= CSR_INT_BIT_HW_ERR; | |
1818 | ||
2bfb5092 | 1819 | goto out; |
ab697a9f EG |
1820 | } |
1821 | ||
ec46ae30 EG |
1822 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ |
1823 | if (inta & CSR_INT_BIT_SCD) { | |
1824 | IWL_DEBUG_ISR(trans, | |
1825 | "Scheduler finished to transmit the frame/frames.\n"); | |
1826 | isr_stats->sch++; | |
1827 | } | |
ab697a9f | 1828 | |
ec46ae30 EG |
1829 | /* Alive notification via Rx interrupt will do the real work */ |
1830 | if (inta & CSR_INT_BIT_ALIVE) { | |
1831 | IWL_DEBUG_ISR(trans, "Alive interrupt\n"); | |
1832 | isr_stats->alive++; | |
286ca8eb | 1833 | if (trans->trans_cfg->gen2) { |
ec46ae30 EG |
1834 | /* |
1835 | * We can restock, since firmware configured | |
1836 | * the RFH | |
1837 | */ | |
1838 | iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); | |
ab697a9f | 1839 | } |
ed3e4c6d EG |
1840 | |
1841 | handled |= CSR_INT_BIT_ALIVE; | |
ab697a9f | 1842 | } |
51cd53ad | 1843 | |
ab697a9f EG |
1844 | /* Safely ignore these bits for debug checks below */ |
1845 | inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); | |
1846 | ||
1847 | /* HW RF KILL switch toggled */ | |
1848 | if (inta & CSR_INT_BIT_RF_KILL) { | |
3a6e168b | 1849 | iwl_pcie_handle_rfkill_irq(trans); |
ab697a9f EG |
1850 | handled |= CSR_INT_BIT_RF_KILL; |
1851 | } | |
1852 | ||
1853 | /* Chip got too hot and stopped itself */ | |
1854 | if (inta & CSR_INT_BIT_CT_KILL) { | |
0c325769 | 1855 | IWL_ERR(trans, "Microcode CT kill error detected.\n"); |
1f7b6172 | 1856 | isr_stats->ctkill++; |
ab697a9f EG |
1857 | handled |= CSR_INT_BIT_CT_KILL; |
1858 | } | |
1859 | ||
1860 | /* Error detected by uCode */ | |
1861 | if (inta & CSR_INT_BIT_SW_ERR) { | |
0c325769 | 1862 | IWL_ERR(trans, "Microcode SW error detected. " |
ab697a9f | 1863 | "Restarting 0x%X.\n", inta);
1f7b6172 | 1864 | isr_stats->sw++; |
990aa6d7 | 1865 | iwl_pcie_irq_handle_error(trans); |
ab697a9f EG |
1866 | handled |= CSR_INT_BIT_SW_ERR; |
1867 | } | |
1868 | ||
1869 | /* uCode wakes up after power-down sleep */ | |
1870 | if (inta & CSR_INT_BIT_WAKEUP) { | |
0c325769 | 1871 | IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); |
5d63f926 | 1872 | iwl_pcie_rxq_check_wrptr(trans); |
ea68f460 | 1873 | iwl_pcie_txq_check_wrptrs(trans); |
ab697a9f | 1874 | |
1f7b6172 | 1875 | isr_stats->wakeup++; |
ab697a9f EG |
1876 | |
1877 | handled |= CSR_INT_BIT_WAKEUP; | |
1878 | } | |
1879 | ||
1880 | /* All uCode command responses, including Tx command responses, | |
1881 | * Rx "responses" (frame-received notification), and other | |
1882 | * notifications from uCode come through here. */
1883 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | | |
20d3b647 | 1884 | CSR_INT_BIT_RX_PERIODIC)) { |
0c325769 | 1885 | IWL_DEBUG_ISR(trans, "Rx interrupt\n"); |
ab697a9f EG |
1886 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { |
1887 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | |
1042db2a | 1888 | iwl_write32(trans, CSR_FH_INT_STATUS, |
ab697a9f EG |
1889 | CSR_FH_INT_RX_MASK); |
1890 | } | |
1891 | if (inta & CSR_INT_BIT_RX_PERIODIC) { | |
1892 | handled |= CSR_INT_BIT_RX_PERIODIC; | |
1042db2a | 1893 | iwl_write32(trans, |
0c325769 | 1894 | CSR_INT, CSR_INT_BIT_RX_PERIODIC); |
ab697a9f EG |
1895 | } |
1896 | /* Sending an RX interrupt requires many steps to be done in
1897 | * the device:
1898 | * 1- write interrupt to current index in ICT table. | |
1899 | * 2- dma RX frame. | |
1900 | * 3- update RX shared data to indicate last write index. | |
1901 | * 4- send interrupt. | |
1902 | * This could lead to an RX race: the driver could receive an RX interrupt
1903 | * before the shared data reflects the changes;
1904 | * periodic interrupt will detect any dangling Rx activity. | |
1905 | */ | |
1906 | ||
1907 | /* Disable periodic interrupt; we use it as just a one-shot. */ | |
1042db2a | 1908 | iwl_write8(trans, CSR_INT_PERIODIC_REG, |
ab697a9f | 1909 | CSR_INT_PERIODIC_DIS); |
6379103e | 1910 | |
ab697a9f EG |
1911 | /* |
1912 | * Enable periodic interrupt in 8 msec only if we received | |
1913 | * real RX interrupt (instead of just periodic int), to catch | |
1914 | * any dangling Rx interrupt. If it was just the periodic | |
1915 | * interrupt, there was no dangling Rx activity, and no need | |
1916 | * to extend the periodic interrupt; one-shot is enough. | |
1917 | */ | |
1918 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) | |
1042db2a | 1919 | iwl_write8(trans, CSR_INT_PERIODIC_REG, |
20d3b647 | 1920 | CSR_INT_PERIODIC_ENA); |
ab697a9f | 1921 | |
1f7b6172 | 1922 | isr_stats->rx++; |
f14d6b39 JB |
1923 | |
1924 | local_bh_disable(); | |
2e5d4a8f | 1925 | iwl_pcie_rx_handle(trans, 0); |
f14d6b39 | 1926 | local_bh_enable(); |
ab697a9f EG |
1927 | } |
1928 | ||
1929 | /* This "Tx" DMA channel is used only for loading uCode */ | |
1930 | if (inta & CSR_INT_BIT_FH_TX) { | |
1042db2a | 1931 | iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); |
0c325769 | 1932 | IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); |
1f7b6172 | 1933 | isr_stats->tx++; |
ab697a9f EG |
1934 | handled |= CSR_INT_BIT_FH_TX; |
1935 | /* Wake up uCode load routine, now that load is complete */ | |
13df1aab JB |
1936 | trans_pcie->ucode_write_complete = true; |
1937 | wake_up(&trans_pcie->ucode_write_waitq); | |
ab697a9f EG |
1938 | } |
1939 | ||
1940 | if (inta & ~handled) { | |
0c325769 | 1941 | IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); |
1f7b6172 | 1942 | isr_stats->unhandled++; |
ab697a9f EG |
1943 | } |
1944 | ||
0c325769 EG |
1945 | if (inta & ~(trans_pcie->inta_mask)) { |
1946 | IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", | |
1947 | inta & ~trans_pcie->inta_mask); | |
ab697a9f EG |
1948 | } |
1949 | ||
f16c3ebf EG |
1950 | spin_lock(&trans_pcie->irq_lock); |
1951 | /* only Re-enable all interrupt if disabled by irq */ | |
1952 | if (test_bit(STATUS_INT_ENABLED, &trans->status)) | |
1953 | _iwl_enable_interrupts(trans); | |
a6bd005f | 1954 | /* we are loading the firmware, enable FH_TX interrupt only */ |
f16c3ebf | 1955 | else if (handled & CSR_INT_BIT_FH_TX) |
a6bd005f | 1956 | iwl_enable_fw_load_int(trans); |
ab697a9f | 1957 | /* Re-enable RF_KILL if it occurred */ |
8722c899 SG |
1958 | else if (handled & CSR_INT_BIT_RF_KILL) |
1959 | iwl_enable_rfkill_int(trans); | |
ed3e4c6d EG |
1960 | /* Re-enable the ALIVE / Rx interrupt if it occurred */ |
1961 | else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) | |
1962 | iwl_enable_fw_load_int_ctx_info(trans); | |
f16c3ebf | 1963 | spin_unlock(&trans_pcie->irq_lock); |
2bfb5092 JB |
1964 | |
1965 | out: | |
1966 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
1967 | return IRQ_HANDLED; | |
ab697a9f EG |
1968 | } |
1969 | ||
1a361cd8 EG |
1970 | /****************************************************************************** |
1971 | * | |
1972 | * ICT functions | |
1973 | * | |
1974 | ******************************************************************************/ | |
10667136 | 1975 | |
1a361cd8 | 1976 | /* Free dram table */ |
990aa6d7 | 1977 | void iwl_pcie_free_ict(struct iwl_trans *trans) |
1a361cd8 | 1978 | { |
20d3b647 | 1979 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
0c325769 | 1980 | |
10667136 | 1981 | if (trans_pcie->ict_tbl) { |
1042db2a | 1982 | dma_free_coherent(trans->dev, ICT_SIZE, |
10667136 | 1983 | trans_pcie->ict_tbl, |
0c325769 | 1984 | trans_pcie->ict_tbl_dma); |
10667136 JB |
1985 | trans_pcie->ict_tbl = NULL; |
1986 | trans_pcie->ict_tbl_dma = 0; | |
1a361cd8 EG |
1987 | } |
1988 | } | |
1989 | ||
10667136 JB |
1990 | /* |
1991 | * Allocate the shared DRAM table; it is an aligned memory
1992 | * block of ICT_SIZE.
1a361cd8 EG |
1993 | * Also reset all data related to the ICT table interrupt.
1994 | */ | |
990aa6d7 | 1995 | int iwl_pcie_alloc_ict(struct iwl_trans *trans) |
1a361cd8 | 1996 | { |
20d3b647 | 1997 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 1998 | |
10667136 | 1999 | trans_pcie->ict_tbl = |
750afb08 LC |
2000 | dma_alloc_coherent(trans->dev, ICT_SIZE, |
2001 | &trans_pcie->ict_tbl_dma, GFP_KERNEL); | |
10667136 | 2002 | if (!trans_pcie->ict_tbl) |
1a361cd8 EG |
2003 | return -ENOMEM; |
2004 | ||
10667136 JB |
2005 | /* just an API sanity check ... it is guaranteed to be aligned */ |
2006 | if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { | |
990aa6d7 | 2007 | iwl_pcie_free_ict(trans); |
10667136 JB |
2008 | return -EINVAL; |
2009 | } | |
1a361cd8 | 2010 | |
1a361cd8 EG |
2011 | return 0; |
2012 | } | |
2013 | ||
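Editor's note — the arithmetic behind the WARN_ON above: the ICT table is one 4 KiB device page, so it holds ICT_SIZE / sizeof(u32) = 1024 entries; a page-aligned DMA address has its low ICT_SHIFT bits clear, and the device is programmed with the page number rather than the raw address. A self-checking sketch with demo macros:

#include <assert.h>
#include <stdint.h>

#define ICT_SHIFT_DEMO 12
#define ICT_SIZE_DEMO  (1 << ICT_SHIFT_DEMO)			/* 4096 bytes */
#define ICT_COUNT_DEMO (ICT_SIZE_DEMO / sizeof(uint32_t))	/* 1024 entries */

int main(void)
{
	uint64_t dma = 0x1f000;	/* hypothetical, page-aligned table address */

	assert(ICT_COUNT_DEMO == 1024);
	assert((dma & (ICT_SIZE_DEMO - 1)) == 0);  /* low 12 bits clear */
	assert((dma >> ICT_SHIFT_DEMO) == 0x1f);   /* what the device gets */
	return 0;
}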
2014 | /* Device is going up; inform it that it should use the ICT interrupt table.
2015 | * The driver also needs to start using ICT interrupts.
2016 | */ | |
990aa6d7 | 2017 | void iwl_pcie_reset_ict(struct iwl_trans *trans) |
1a361cd8 | 2018 | { |
20d3b647 | 2019 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 2020 | u32 val; |
1a361cd8 | 2021 | |
10667136 | 2022 | if (!trans_pcie->ict_tbl) |
ed6a3803 | 2023 | return; |
1a361cd8 | 2024 | |
7b70bd63 | 2025 | spin_lock(&trans_pcie->irq_lock); |
f16c3ebf | 2026 | _iwl_disable_interrupts(trans); |
1a361cd8 | 2027 | |
10667136 | 2028 | memset(trans_pcie->ict_tbl, 0, ICT_SIZE); |
1a361cd8 | 2029 | |
10667136 | 2030 | val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; |
1a361cd8 | 2031 | |
18f5a374 EP |
2032 | val |= CSR_DRAM_INT_TBL_ENABLE | |
2033 | CSR_DRAM_INIT_TBL_WRAP_CHECK | | |
2034 | CSR_DRAM_INIT_TBL_WRITE_POINTER; | |
1a361cd8 | 2035 | |
10667136 | 2036 | IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); |
1a361cd8 | 2037 | |
1042db2a | 2038 | iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); |
0c325769 EG |
2039 | trans_pcie->use_ict = true; |
2040 | trans_pcie->ict_index = 0; | |
1042db2a | 2041 | iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); |
f16c3ebf | 2042 | _iwl_enable_interrupts(trans); |
7b70bd63 | 2043 | spin_unlock(&trans_pcie->irq_lock); |
1a361cd8 EG |
2044 | } |
2045 | ||
2046 | /* Device is going down; disable ICT interrupt usage */
990aa6d7 | 2047 | void iwl_pcie_disable_ict(struct iwl_trans *trans) |
1a361cd8 | 2048 | { |
20d3b647 | 2049 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 2050 | |
7b70bd63 | 2051 | spin_lock(&trans_pcie->irq_lock); |
0c325769 | 2052 | trans_pcie->use_ict = false; |
7b70bd63 | 2053 | spin_unlock(&trans_pcie->irq_lock); |
1a361cd8 EG |
2054 | } |
2055 | ||
85bf9da1 EG |
2056 | irqreturn_t iwl_pcie_isr(int irq, void *data) |
2057 | { | |
2058 | struct iwl_trans *trans = data; | |
2059 | ||
2060 | if (!trans) | |
2061 | return IRQ_NONE; | |
2062 | ||
2063 | /* Disable (but don't clear!) interrupts here to avoid | |
2064 | * back-to-back ISRs and sporadic interrupts from our NIC. | |
2065 | * If we have something to service, the threaded handler will re-enable ints.
2066 | * If we *don't* have something, we'll re-enable before leaving here. | |
2067 | */ | |
2068 | iwl_write32(trans, CSR_INT_MASK, 0x00000000); | |
2069 | ||
a0f337cc | 2070 | return IRQ_WAKE_THREAD; |
85bf9da1 | 2071 | } |
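Editor's note — iwl_pcie_isr() only masks the NIC and returns IRQ_WAKE_THREAD; the servicing and re-enabling happen in the threaded handler iwl_pcie_irq_handler() above. The driver's actual registration lives elsewhere in the transport code; the sketch below merely illustrates how such a hard/threaded handler pair is typically wired up with request_threaded_irq() (demo_setup_irq and the name string are hypothetical):

#include <linux/interrupt.h>
#include <linux/pci.h>

static int demo_setup_irq(struct pci_dev *pdev, void *trans)
{
	/* The hard handler runs in IRQ context and just masks the device;
	 * the thread_fn then services (and re-enables) the causes. */
	return request_threaded_irq(pdev->irq, iwl_pcie_isr,
				    iwl_pcie_irq_handler,
				    IRQF_SHARED, "demo-iwlwifi", trans);
}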
2e5d4a8f HD |
2072 | |
2073 | irqreturn_t iwl_pcie_msix_isr(int irq, void *data) | |
2074 | { | |
2075 | return IRQ_WAKE_THREAD; | |
2076 | } | |
2077 | ||
2078 | irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) | |
2079 | { | |
2080 | struct msix_entry *entry = dev_id; | |
2081 | struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); | |
2082 | struct iwl_trans *trans = trans_pcie->trans; | |
46167a8f | 2083 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; |
2e5d4a8f HD |
2084 | u32 inta_fh, inta_hw; |
2085 | ||
2086 | lock_map_acquire(&trans->sync_cmd_lockdep_map); | |
2087 | ||
2088 | spin_lock(&trans_pcie->irq_lock); | |
7ef3dd26 HD |
2089 | inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD); |
2090 | inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); | |
2e5d4a8f HD |
2091 | /* |
2092 | * Clear the cause registers to avoid handling the same cause twice.
2093 | */ | |
7ef3dd26 HD |
2094 | iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); |
2095 | iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); | |
2e5d4a8f HD |
2096 | spin_unlock(&trans_pcie->irq_lock); |
2097 | ||
c42ff65d JB |
2098 | trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw); |
2099 | ||
2e5d4a8f HD |
2100 | if (unlikely(!(inta_fh | inta_hw))) { |
2101 | IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); | |
2102 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
2103 | return IRQ_NONE; | |
2104 | } | |
2105 | ||
3b57a10c EG |
2106 | if (iwl_have_debug_level(IWL_DL_ISR)) { |
2107 | IWL_DEBUG_ISR(trans, | |
2108 | "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", | |
2109 | inta_fh, trans_pcie->fh_mask, | |
2e5d4a8f | 2110 | iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); |
3b57a10c EG |
2111 | if (inta_fh & ~trans_pcie->fh_mask) |
2112 | IWL_DEBUG_ISR(trans, | |
2113 | "We got a masked interrupt (0x%08x)\n", | |
2114 | inta_fh & ~trans_pcie->fh_mask); | |
2115 | } | |
2116 | ||
2117 | inta_fh &= trans_pcie->fh_mask; | |
2e5d4a8f | 2118 | |
496d83ca HD |
2119 | if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && |
2120 | inta_fh & MSIX_FH_INT_CAUSES_Q0) { | |
2121 | local_bh_disable(); | |
2122 | iwl_pcie_rx_handle(trans, 0); | |
2123 | local_bh_enable(); | |
2124 | } | |
2125 | ||
2126 | if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && | |
2127 | inta_fh & MSIX_FH_INT_CAUSES_Q1) { | |
2128 | local_bh_disable(); | |
2129 | iwl_pcie_rx_handle(trans, 1); | |
2130 | local_bh_enable(); | |
2131 | } | |
2132 | ||
2e5d4a8f HD |
2133 | /* This "Tx" DMA channel is used only for loading uCode */ |
2134 | if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { | |
2135 | IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); | |
2136 | isr_stats->tx++; | |
2137 | /* | |
2138 | * Wake up uCode load routine, | |
2139 | * now that load is complete | |
2140 | */ | |
2141 | trans_pcie->ucode_write_complete = true; | |
2142 | wake_up(&trans_pcie->ucode_write_waitq); | |
2143 | } | |
2144 | ||
2145 | /* Error detected by uCode */ | |
2146 | if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || | |
3681021f | 2147 | (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) { |
2e5d4a8f HD |
2148 | IWL_ERR(trans, |
2149 | "Microcode SW error detected. Restarting 0x%X.\n", | |
2150 | inta_fh); | |
2151 | isr_stats->sw++; | |
2152 | iwl_pcie_irq_handle_error(trans); | |
2153 | } | |
2154 | ||
2155 | /* After checking FH register check HW register */ | |
3b57a10c | 2156 | if (iwl_have_debug_level(IWL_DL_ISR)) { |
2e5d4a8f | 2157 | IWL_DEBUG_ISR(trans, |
3b57a10c EG |
2158 | "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", |
2159 | inta_hw, trans_pcie->hw_mask, | |
2e5d4a8f | 2160 | iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); |
3b57a10c EG |
2161 | if (inta_hw & ~trans_pcie->hw_mask) |
2162 | IWL_DEBUG_ISR(trans, | |
2163 | "We got a masked interrupt 0x%08x\n", | |
2164 | inta_hw & ~trans_pcie->hw_mask); | |
2165 | } | |
2166 | ||
2167 | inta_hw &= trans_pcie->hw_mask; | |
2e5d4a8f HD |
2168 | |
2169 | /* Alive notification via Rx interrupt will do the real work */ | |
2170 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { | |
2171 | IWL_DEBUG_ISR(trans, "Alive interrupt\n"); | |
2172 | isr_stats->alive++; | |
286ca8eb | 2173 | if (trans->trans_cfg->gen2) { |
eda50cde SS |
2174 | /* We can restock, since firmware configured the RFH */ |
2175 | iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); | |
2176 | } | |
2e5d4a8f HD |
2177 | } |
2178 | ||
3681021f | 2179 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { |
e5f3f215 HD |
2180 | u32 sleep_notif = |
2181 | le32_to_cpu(trans_pcie->prph_info->sleep_notif); | |
2182 | if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND || | |
2183 | sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) { | |
2184 | IWL_DEBUG_ISR(trans, | |
2185 | "Sx interrupt: sleep notification = 0x%x\n", | |
2186 | sleep_notif); | |
2187 | trans_pcie->sx_complete = true; | |
2188 | wake_up(&trans_pcie->sx_waitq); | |
2189 | } else { | |
2190 | /* uCode wakes up after power-down sleep */ | |
2191 | IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); | |
2192 | iwl_pcie_rxq_check_wrptr(trans); | |
2193 | iwl_pcie_txq_check_wrptrs(trans); | |
2e5d4a8f | 2194 | |
e5f3f215 HD |
2195 | isr_stats->wakeup++; |
2196 | } | |
2e5d4a8f HD |
2197 | } |
2198 | ||
ff911dca ST |
2199 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) { |
2200 | /* Reflect IML transfer status */ | |
2201 | int res = iwl_read32(trans, CSR_IML_RESP_ADDR); | |
2202 | ||
2203 | IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res); | |
2204 | if (res == IWL_IMAGE_RESP_FAIL) { | |
2205 | isr_stats->sw++; | |
2206 | iwl_pcie_irq_handle_error(trans); | |
2207 | } | |
2208 | } | |
2209 | ||
2e5d4a8f HD |
2210 | /* Chip got too hot and stopped itself */ |
2211 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { | |
2212 | IWL_ERR(trans, "Microcode CT kill error detected.\n"); | |
2213 | isr_stats->ctkill++; | |
2214 | } | |
2215 | ||
2216 | /* HW RF KILL switch toggled */ | |
3a6e168b JB |
2217 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) |
2218 | iwl_pcie_handle_rfkill_irq(trans); | |
2e5d4a8f HD |
2219 | |
2220 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { | |
2221 | IWL_ERR(trans, | |
2222 | "Hardware error detected. Restarting.\n"); | |
2223 | ||
2224 | isr_stats->hw++; | |
91c28b83 | 2225 | trans->dbg.hw_error = true; |
2e5d4a8f HD |
2226 | iwl_pcie_irq_handle_error(trans); |
2227 | } | |
2228 | ||
2229 | iwl_pcie_clear_irq(trans, entry); | |
2230 | ||
2231 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
2232 | ||
2233 | return IRQ_HANDLED; | |
2234 | } |
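Editor's note — the first thing the MSI-X handler above does is read both cause registers and write the captured values straight back; since writing a bit back evidently clears that cause, this acks exactly the bits it saw, and a cause raised between the read and the ack stays pending for the next interrupt. A sketch of that read-then-ack discipline against fake registers (all _demo names are ours):

#include <stdint.h>

static uint32_t fh_causes_demo, hw_causes_demo;	/* fake cause registers */

static void read_and_ack_demo(uint32_t *fh, uint32_t *hw)
{
	/* In the driver this runs under trans_pcie->irq_lock. */
	*fh = fh_causes_demo;
	*hw = hw_causes_demo;

	/* Clear only the captured bits (write-back-to-clear semantics). */
	fh_causes_demo &= ~*fh;
	hw_causes_demo &= ~*hw;
}

int main(void)
{
	uint32_t fh, hw;

	fh_causes_demo = 0x3;		/* two causes pending */
	read_and_ack_demo(&fh, &hw);
	fh_causes_demo |= 0x4;		/* a new cause arrives afterwards */

	return (fh == 0x3 && fh_causes_demo == 0x4) ? 0 : 1;
}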