Commit | Line | Data |
---|---|---|
ab697a9f EG |
1 | /****************************************************************************** |
2 | * | |
51368bf7 | 3 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. |
26d535ae | 4 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
bce97731 | 5 | * Copyright(c) 2016 Intel Deutschland GmbH |
ab697a9f EG |
6 | * |
7 | * Portions of this file are derived from the ipw3945 project, as well | |
8 | * as portions of the ieee80211 subsystem header files. | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or modify it | |
11 | * under the terms of version 2 of the GNU General Public License as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
17 | * more details. | |
18 | * | |
19 | * You should have received a copy of the GNU General Public License along with | |
20 | * this program; if not, write to the Free Software Foundation, Inc., | |
21 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | |
22 | * | |
23 | * The full GNU General Public License is included in this distribution in the | |
24 | * file called LICENSE. | |
25 | * | |
26 | * Contact Information: | |
d01c5366 | 27 | * Intel Linux Wireless <linuxwifi@intel.com> |
ab697a9f EG |
28 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
29 | * | |
30 | *****************************************************************************/ | |
31 | #include <linux/sched.h> | |
32 | #include <linux/wait.h> | |
1a361cd8 | 33 | #include <linux/gfp.h> |
ab697a9f | 34 | |
1b29dc94 | 35 | #include "iwl-prph.h" |
ab697a9f | 36 | #include "iwl-io.h" |
6468a01a | 37 | #include "internal.h" |
db70f290 | 38 | #include "iwl-op-mode.h" |
ab697a9f EG |
39 | |
40 | /****************************************************************************** | |
41 | * | |
42 | * RX path functions | |
43 | * | |
44 | ******************************************************************************/ | |
45 | ||
46 | /* | |
47 | * Rx theory of operation | |
48 | * | |
49 | * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), | |
50 | * each of which points to a Receive Buffer to be filled by the NIC. These get |
51 | * used not only for Rx frames, but for any command response or notification | |
52 | * from the NIC. The driver and NIC manage the Rx buffers by means | |
53 | * of indexes into the circular buffer. | |
54 | * | |
55 | * Rx Queue Indexes | |
56 | * The host/firmware share two index registers for managing the Rx buffers. | |
57 | * | |
58 | * The READ index maps to the first position that the firmware may be writing | |
59 | * to -- the driver can read up to (but not including) this position and get | |
60 | * good data. | |
61 | * The READ index is managed by the firmware once the card is enabled. | |
62 | * | |
63 | * The WRITE index maps to the last position the driver has read from -- the | |
64 | * position preceding WRITE is the last slot in which the firmware can place a packet. |
65 | * | |
66 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if | |
67 | * WRITE = READ. | |
68 | * | |
69 | * During initialization, the host sets up the READ queue position to the first | |
70 | * INDEX position, and WRITE to the last (READ - 1 wrapped) | |
71 | * | |
72 | * When the firmware places a packet in a buffer, it will advance the READ index | |
73 | * and fire the RX interrupt. The driver can then query the READ index and | |
74 | * process as many packets as possible, moving the WRITE index forward as it | |
75 | * resets the Rx queue buffers with new memory. | |
76 | * | |
77 | * The management in the driver is as follows: | |
26d535ae SS |
78 | * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. |
79 | * When the interrupt handler is called, the request is processed. | |
80 | * The page is either stolen - transferred to the upper layer | |
81 | * or reused - added immediately to the iwl->rxq->rx_free list. | |
82 | * + When the page is stolen - the driver updates the matching queue's used | |
83 | * count, detaches the RBD and transfers it to the queue used list. | |
84 | * When there are two used RBDs - they are transferred to the allocator empty | |
85 | * list. Work is then scheduled for the allocator to start allocating | |
86 | * eight buffers. | |
87 | * When there are another 6 used RBDs - they are transferred to the allocator | |
88 | * empty list and the driver tries to claim the pre-allocated buffers and | |
89 | * add them to iwl->rxq->rx_free. If claiming fails, the driver keeps |
90 | * trying until the buffers are ready. |
91 | * When there are 8+ buffers in the free list - either from allocation or from | |
92 | * 8 reused unstolen pages - restock is called to update the FW and indexes. | |
93 | * + In order to make sure the allocator always has RBDs to use for allocation | |
94 | * the allocator has an initial pool of size num_queues*(8-2) - the |
95 | * maximum missing RBDs per allocation request (request posted with 2 | |
96 | * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). | |
97 | * The queues supply the recycling of the rest of the RBDs. |
ab697a9f EG |
98 | * + A received packet is processed and handed to the kernel network stack, |
99 | * detached from the iwl->rxq. The driver 'processed' index is updated. | |
26d535ae | 100 | * + If there are no allocated buffers in iwl->rxq->rx_free, |
2bfb5092 JB |
101 | * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. |
102 | * If there are enough free buffers and RX_STALLED is set, it is cleared. |
ab697a9f EG |
103 | * |
104 | * | |
105 | * Driver sequence: | |
106 | * | |
990aa6d7 EG |
107 | * iwl_rxq_alloc() Allocates rx_free |
108 | * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls | |
26d535ae SS |
109 | * iwl_pcie_rxq_restock. |
110 | * Used only during initialization. | |
990aa6d7 | 111 | * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx |
ab697a9f | 112 | * queue, updates firmware pointers, and updates |
26d535ae SS |
113 | * the WRITE index. |
114 | * iwl_pcie_rx_allocator() Background work for allocating pages. | |
ab697a9f EG |
115 | * |
116 | * -- enable interrupts -- | |
990aa6d7 | 117 | * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the |
ab697a9f EG |
118 | * READ INDEX, detaching the SKB from the pool. |
119 | * Moves the packet buffer from queue to rx_used. | |
26d535ae | 120 | * Posts and claims requests to the allocator. |
990aa6d7 | 121 | * Calls iwl_pcie_rxq_restock to refill any empty |
ab697a9f | 122 | * slots. |
26d535ae SS |
123 | * |
124 | * RBD life-cycle: | |
125 | * | |
126 | * Init: | |
127 | * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue | |
128 | * | |
129 | * Regular Receive interrupt: | |
130 | * Page Stolen: | |
131 | * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> | |
132 | * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue | |
133 | * Page not Stolen: | |
134 | * rxq.queue -> rxq.rx_free -> rxq.queue | |
ab697a9f EG |
135 | * ... |
136 | * | |
137 | */ | |
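/*
 * Illustrative sketch (not part of the driver): because the queue size is
 * a power of two, the READ/WRITE rules above reduce to mask arithmetic.
 * The 256-entry size is an assumption for the example (RX_QUEUE_SIZE in
 * the non-MQ case).
 */
#if 0	/* example only, never compiled */
static u32 example_restock_space(u32 read, u32 write)
{
	/* read == 0, write == 255 (WRITE = READ - 1 wrapped):
	 * (0 - 255 - 1) & 255 == 0 -> fully stocked, nothing to restock.
	 * read == 0, write == 0 (WRITE = READ):
	 * (0 - 0 - 1) & 255 == 255 -> every usable slot needs restocking.
	 * The unsigned subtraction wraps, so the mask stays well defined.
	 */
	return (read - write - 1) & (256 - 1);
}
#endif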
138 | ||
990aa6d7 EG |
139 | /* |
140 | * iwl_rxq_space - Return number of free slots available in queue. | |
ab697a9f | 141 | */ |
fecba09e | 142 | static int iwl_rxq_space(const struct iwl_rxq *rxq) |
ab697a9f | 143 | { |
96a6497b SS |
144 | /* Make sure rx queue size is a power of 2 */ |
145 | WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); | |
fecba09e | 146 | |
351746c9 IY |
147 | /* |
148 | * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity | |
149 | * between empty and completely full queues. | |
150 | * The following is equivalent to modulo by RX_QUEUE_SIZE and is well | |
151 | * defined for negative dividends. | |
152 | */ | |
96a6497b | 153 | return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); |
ab697a9f EG |
154 | } |
155 | ||
9805c446 EG |
156 | /* |
157 | * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | |
158 | */ | |
159 | static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) | |
160 | { | |
161 | return cpu_to_le32((u32)(dma_addr >> 8)); | |
162 | } | |
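/*
 * Illustrative sketch (not part of the driver): the shift above assumes
 * receive buffers are at least 256-byte aligned (whole pages are), so the
 * low 8 bits of the DMA address are zero and can be dropped. The inverse
 * mapping, shown with a hypothetical helper name, would be:
 */
#if 0	/* example only */
static inline dma_addr_t example_rbd_ptr2dma_addr(__le32 rbd_ptr)
{
	return (dma_addr_t)le32_to_cpu(rbd_ptr) << 8;
}
#endif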
163 | ||
96a6497b SS |
164 | static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val) |
165 | { | |
166 | iwl_write_prph(trans, ofs, val & 0xffffffff); | |
167 | iwl_write_prph(trans, ofs + 4, val >> 32); | |
168 | } | |
169 | ||
49bd072d EG |
170 | /* |
171 | * iwl_pcie_rx_stop - stops the Rx DMA | |
172 | */ | |
9805c446 EG |
173 | int iwl_pcie_rx_stop(struct iwl_trans *trans) |
174 | { | |
9805c446 EG |
175 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); |
176 | return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, | |
177 | FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); | |
178 | } | |
179 | ||
990aa6d7 EG |
180 | /* |
181 | * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue | |
ab697a9f | 182 | */ |
78485054 SS |
183 | static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, |
184 | struct iwl_rxq *rxq) | |
ab697a9f | 185 | { |
ab697a9f EG |
186 | u32 reg; |
187 | ||
5d63f926 | 188 | lockdep_assert_held(&rxq->lock); |
ab697a9f | 189 | |
5045388c EP |
190 | /* |
191 | * explicitly wake up the NIC if: | |
192 | * 1. shadow registers aren't enabled | |
193 | * 2. there is a chance that the NIC is asleep | |
194 | */ | |
195 | if (!trans->cfg->base_params->shadow_reg_enable && | |
196 | test_bit(STATUS_TPOWER_PMI, &trans->status)) { | |
197 | reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); | |
198 | ||
199 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | |
200 | IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", | |
201 | reg); | |
202 | iwl_set_bit(trans, CSR_GP_CNTRL, | |
203 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | |
5d63f926 JB |
204 | rxq->need_update = true; |
205 | return; | |
ab697a9f EG |
206 | } |
207 | } | |
5045388c EP |
208 | |
209 | rxq->write_actual = round_down(rxq->write, 8); | |
96a6497b SS |
210 | if (trans->cfg->mq_rx_supported) |
211 | iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id), | |
212 | rxq->write_actual); | |
213 | else | |
214 | iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); | |
5d63f926 JB |
215 | } |
216 | ||
217 | static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) | |
218 | { | |
219 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
78485054 | 220 | int i; |
5d63f926 | 221 | |
78485054 SS |
222 | for (i = 0; i < trans->num_rx_queues; i++) { |
223 | struct iwl_rxq *rxq = &trans_pcie->rxq[i]; | |
ab697a9f | 224 | |
78485054 SS |
225 | if (!rxq->need_update) |
226 | continue; | |
227 | spin_lock(&rxq->lock); | |
228 | iwl_pcie_rxq_inc_wr_ptr(trans, rxq); | |
229 | rxq->need_update = false; | |
230 | spin_unlock(&rxq->lock); | |
231 | } | |
ab697a9f EG |
232 | } |
233 | ||
96a6497b SS |
234 | static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, |
235 | struct iwl_rxq *rxq) | |
236 | { | |
237 | struct iwl_rx_mem_buffer *rxb; | |
238 | ||
239 | /* | |
240 | * If the device isn't enabled - no need to try to add buffers... | |
241 | * This can happen when we stop the device and still have an interrupt | |
242 | * pending. We stop the APM before we sync the interrupts because we | |
243 | * have to (see comment there). On the other hand, since the APM is | |
244 | * stopped, we cannot access the HW (in particular not prph). | |
245 | * So don't try to restock if the APM has been already stopped. | |
246 | */ | |
247 | if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) | |
248 | return; | |
249 | ||
250 | spin_lock(&rxq->lock); | |
251 | while (rxq->free_count) { | |
252 | __le64 *bd = (__le64 *)rxq->bd; | |
253 | ||
254 | /* Get next free Rx buffer, remove from free list */ | |
255 | rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, | |
256 | list); | |
257 | list_del(&rxb->list); | |
258 | ||
259 | /* first 12 bits are expected to be empty */ |
260 | WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); | |
261 | /* Point to Rx buffer via next RBD in circular buffer */ | |
262 | bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); | |
263 | rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK; | |
264 | rxq->free_count--; | |
265 | } | |
266 | spin_unlock(&rxq->lock); | |
267 | ||
268 | /* | |
269 | * If we've added more space for the firmware to place data, tell it. | |
270 | * Increment device's write pointer in multiples of 8. | |
271 | */ | |
272 | if (rxq->write_actual != (rxq->write & ~0x7)) { | |
273 | spin_lock(&rxq->lock); | |
274 | iwl_pcie_rxq_inc_wr_ptr(trans, rxq); | |
275 | spin_unlock(&rxq->lock); | |
276 | } | |
277 | } | |
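/*
 * Illustrative sketch (not part of the driver): the 64-bit free-table
 * entry written above packs the buffer's DMA address together with its
 * pool index ("vid"). This works because pages are at least 4K aligned,
 * so the low 12 bits of page_dma are zero - exactly what the WARN_ON
 * above checks. The helper name is hypothetical.
 */
#if 0	/* example only */
static inline __le64 example_pack_mq_rbd(dma_addr_t page_dma, u16 vid)
{
	/* vid must fit in the low 12 bits left free by the alignment */
	return cpu_to_le64((u64)page_dma | vid);
}
#endif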
278 | ||
990aa6d7 EG |
279 | /* |
280 | * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool | |
ab697a9f EG |
281 | * |
282 | * If there are slots in the RX queue that need to be restocked, | |
283 | * and we have free pre-allocated buffers, fill the ranks as much | |
284 | * as we can, pulling from rx_free. | |
285 | * | |
286 | * This moves the 'write' index forward to catch up with 'processed', and | |
287 | * also updates the memory address in the firmware to reference the new | |
288 | * target buffer. | |
289 | */ | |
78485054 | 290 | static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) |
ab697a9f | 291 | { |
ab697a9f | 292 | struct iwl_rx_mem_buffer *rxb; |
ab697a9f | 293 | |
7439046d EG |
294 | /* |
295 | * If the device isn't enabled - no need to try to add buffers... |
296 | * This can happen when we stop the device and still have an interrupt | |
2bfb5092 JB |
297 | * pending. We stop the APM before we sync the interrupts because we |
298 | * have to (see comment there). On the other hand, since the APM is | |
299 | * stopped, we cannot access the HW (in particular not prph). | |
7439046d EG |
300 | * So don't try to restock if the APM has been already stopped. |
301 | */ | |
eb7ff77e | 302 | if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) |
7439046d EG |
303 | return; |
304 | ||
51232f7e | 305 | spin_lock(&rxq->lock); |
990aa6d7 | 306 | while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { |
96a6497b | 307 | __le32 *bd = (__le32 *)rxq->bd; |
ab697a9f EG |
308 | /* The overwritten rxb must be a used one */ |
309 | rxb = rxq->queue[rxq->write]; | |
310 | BUG_ON(rxb && rxb->page); | |
311 | ||
312 | /* Get next free Rx buffer, remove from free list */ | |
e2b1930e JB |
313 | rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, |
314 | list); | |
315 | list_del(&rxb->list); | |
ab697a9f EG |
316 | |
317 | /* Point to Rx buffer via next RBD in circular buffer */ | |
96a6497b | 318 | bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); |
ab697a9f EG |
319 | rxq->queue[rxq->write] = rxb; |
320 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | |
321 | rxq->free_count--; | |
322 | } | |
51232f7e | 323 | spin_unlock(&rxq->lock); |
ab697a9f | 324 | |
ab697a9f EG |
325 | /* If we've added more space for the firmware to place data, tell it. |
326 | * Increment device's write pointer in multiples of 8. */ | |
327 | if (rxq->write_actual != (rxq->write & ~0x7)) { | |
51232f7e | 328 | spin_lock(&rxq->lock); |
78485054 | 329 | iwl_pcie_rxq_inc_wr_ptr(trans, rxq); |
51232f7e | 330 | spin_unlock(&rxq->lock); |
ab697a9f EG |
331 | } |
332 | } | |
333 | ||
26d535ae SS |
334 | /* |
335 | * iwl_pcie_rx_alloc_page - allocates and returns a page. | |
336 | * | |
337 | */ | |
338 | static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, | |
339 | gfp_t priority) | |
340 | { | |
341 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
26d535ae SS |
342 | struct page *page; |
343 | gfp_t gfp_mask = priority; | |
344 | ||
26d535ae SS |
345 | if (trans_pcie->rx_page_order > 0) |
346 | gfp_mask |= __GFP_COMP; | |
347 | ||
348 | /* Alloc a new receive buffer */ | |
349 | page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); | |
350 | if (!page) { | |
351 | if (net_ratelimit()) | |
352 | IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", | |
353 | trans_pcie->rx_page_order); | |
78485054 SS |
354 | /* |
355 | * Issue an error if we don't have enough pre-allocated | |
356 | * buffers. | |
26d535ae | 357 | */ |
78485054 | 358 | if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) |
26d535ae | 359 | IWL_CRIT(trans, |
78485054 | 360 | "Failed to alloc_pages\n"); |
26d535ae SS |
361 | return NULL; |
362 | } | |
363 | return page; | |
364 | } | |
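/*
 * Illustrative usage (not part of the driver): callers choose the GFP
 * priority to match their context, e.g.:
 */
#if 0	/* example only - 'trans' and 'page' as at the real call sites */
	/* process context (init, allocator work item) - may sleep */
	page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
	/* emergency refill on the interrupt path - must not sleep */
	page = iwl_pcie_rx_alloc_page(trans, GFP_ATOMIC);
#endif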
365 | ||
358a46d4 | 366 | /* |
9805c446 | 367 | * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD |
ab697a9f | 368 | * |
358a46d4 EG |
369 | * A used RBD is an Rx buffer that has been given to the stack. To use it again |
370 | * a page must be allocated and the RBD must point to the page. This function | |
371 | * doesn't change the HW pointer but handles the list of pages that is used by | |
990aa6d7 | 372 | * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly |
358a46d4 | 373 | * allocated buffers. |
ab697a9f | 374 | */ |
78485054 SS |
375 | static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, |
376 | struct iwl_rxq *rxq) | |
ab697a9f | 377 | { |
20d3b647 | 378 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
ab697a9f EG |
379 | struct iwl_rx_mem_buffer *rxb; |
380 | struct page *page; | |
ab697a9f EG |
381 | |
382 | while (1) { | |
51232f7e | 383 | spin_lock(&rxq->lock); |
ab697a9f | 384 | if (list_empty(&rxq->rx_used)) { |
51232f7e | 385 | spin_unlock(&rxq->lock); |
ab697a9f EG |
386 | return; |
387 | } | |
51232f7e | 388 | spin_unlock(&rxq->lock); |
ab697a9f | 389 | |
ab697a9f | 390 | /* Alloc a new receive buffer */ |
26d535ae SS |
391 | page = iwl_pcie_rx_alloc_page(trans, priority); |
392 | if (!page) | |
ab697a9f | 393 | return; |
ab697a9f | 394 | |
51232f7e | 395 | spin_lock(&rxq->lock); |
ab697a9f EG |
396 | |
397 | if (list_empty(&rxq->rx_used)) { | |
51232f7e | 398 | spin_unlock(&rxq->lock); |
b2cf410c | 399 | __free_pages(page, trans_pcie->rx_page_order); |
ab697a9f EG |
400 | return; |
401 | } | |
e2b1930e JB |
402 | rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, |
403 | list); | |
404 | list_del(&rxb->list); | |
51232f7e | 405 | spin_unlock(&rxq->lock); |
ab697a9f EG |
406 | |
407 | BUG_ON(rxb->page); | |
408 | rxb->page = page; | |
409 | /* Get physical address of the RB */ | |
20d3b647 JB |
410 | rxb->page_dma = |
411 | dma_map_page(trans->dev, page, 0, | |
412 | PAGE_SIZE << trans_pcie->rx_page_order, | |
413 | DMA_FROM_DEVICE); | |
7c341582 JB |
414 | if (dma_mapping_error(trans->dev, rxb->page_dma)) { |
415 | rxb->page = NULL; | |
51232f7e | 416 | spin_lock(&rxq->lock); |
7c341582 | 417 | list_add(&rxb->list, &rxq->rx_used); |
51232f7e | 418 | spin_unlock(&rxq->lock); |
7c341582 JB |
419 | __free_pages(page, trans_pcie->rx_page_order); |
420 | return; | |
421 | } | |
ab697a9f | 422 | |
51232f7e | 423 | spin_lock(&rxq->lock); |
ab697a9f EG |
424 | |
425 | list_add_tail(&rxb->list, &rxq->rx_free); | |
426 | rxq->free_count++; | |
427 | ||
51232f7e | 428 | spin_unlock(&rxq->lock); |
ab697a9f EG |
429 | } |
430 | } | |
431 | ||
78485054 | 432 | static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) |
9805c446 EG |
433 | { |
434 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
9805c446 EG |
435 | int i; |
436 | ||
96a6497b | 437 | for (i = 0; i < MQ_RX_POOL_SIZE; i++) { |
78485054 | 438 | if (!trans_pcie->rx_pool[i].page) |
c7df1f4b | 439 | continue; |
78485054 | 440 | dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, |
c7df1f4b JB |
441 | PAGE_SIZE << trans_pcie->rx_page_order, |
442 | DMA_FROM_DEVICE); | |
78485054 SS |
443 | __free_pages(trans_pcie->rx_pool[i].page, |
444 | trans_pcie->rx_page_order); | |
445 | trans_pcie->rx_pool[i].page = NULL; | |
9805c446 EG |
446 | } |
447 | } | |
448 | ||
26d535ae SS |
449 | /* |
450 | * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues | |
451 | * | |
452 | * Allocates 8 pages for each received request |
453 | * Called as a scheduled work item. | |
454 | */ | |
455 | static void iwl_pcie_rx_allocator(struct iwl_trans *trans) | |
456 | { | |
457 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
458 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | |
459 | struct list_head local_empty; | |
460 | int pending = atomic_xchg(&rba->req_pending, 0); | |
461 | ||
462 | IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); | |
463 | ||
464 | /* If we were scheduled - there is at least one request */ | |
465 | spin_lock(&rba->lock); | |
466 | /* swap out the rba->rbd_empty to a local list */ | |
467 | list_replace_init(&rba->rbd_empty, &local_empty); | |
468 | spin_unlock(&rba->lock); | |
469 | ||
470 | while (pending) { | |
471 | int i; | |
472 | struct list_head local_allocated; | |
78485054 SS |
473 | gfp_t gfp_mask = GFP_KERNEL; |
474 | ||
475 | /* Do not post a warning if there are only a few requests */ | |
476 | if (pending < RX_PENDING_WATERMARK) | |
477 | gfp_mask |= __GFP_NOWARN; | |
26d535ae SS |
478 | |
479 | INIT_LIST_HEAD(&local_allocated); | |
480 | ||
481 | for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { | |
482 | struct iwl_rx_mem_buffer *rxb; | |
483 | struct page *page; | |
484 | ||
485 | /* List should never be empty - each reused RBD is | |
486 | * returned to the list, and initial pool covers any | |
487 | * possible gap between the time the page is allocated |
488 | * and the time the RBD is added. |
489 | */ | |
490 | BUG_ON(list_empty(&local_empty)); | |
491 | /* Get the first rxb from the rbd list */ | |
492 | rxb = list_first_entry(&local_empty, | |
493 | struct iwl_rx_mem_buffer, list); | |
494 | BUG_ON(rxb->page); | |
495 | ||
496 | /* Alloc a new receive buffer */ | |
78485054 | 497 | page = iwl_pcie_rx_alloc_page(trans, gfp_mask); |
26d535ae SS |
498 | if (!page) |
499 | continue; | |
500 | rxb->page = page; | |
501 | ||
502 | /* Get physical address of the RB */ | |
503 | rxb->page_dma = dma_map_page(trans->dev, page, 0, | |
504 | PAGE_SIZE << trans_pcie->rx_page_order, | |
505 | DMA_FROM_DEVICE); | |
506 | if (dma_mapping_error(trans->dev, rxb->page_dma)) { | |
507 | rxb->page = NULL; | |
508 | __free_pages(page, trans_pcie->rx_page_order); | |
509 | continue; | |
510 | } | |
26d535ae SS |
511 | |
512 | /* move the allocated entry to the out list */ | |
513 | list_move(&rxb->list, &local_allocated); | |
514 | i++; | |
515 | } | |
516 | ||
517 | pending--; | |
518 | if (!pending) { | |
519 | pending = atomic_xchg(&rba->req_pending, 0); | |
520 | IWL_DEBUG_RX(trans, | |
521 | "Pending allocation requests = %d\n", | |
522 | pending); | |
523 | } | |
524 | ||
525 | spin_lock(&rba->lock); | |
526 | /* add the allocated rbds to the allocator allocated list */ | |
527 | list_splice_tail(&local_allocated, &rba->rbd_allocated); | |
528 | /* get more empty RBDs for current pending requests */ | |
529 | list_splice_tail_init(&rba->rbd_empty, &local_empty); | |
530 | spin_unlock(&rba->lock); | |
531 | ||
532 | atomic_inc(&rba->req_ready); | |
533 | } | |
534 | ||
535 | spin_lock(&rba->lock); | |
536 | /* return unused rbds to the allocator empty list */ | |
537 | list_splice_tail(&local_empty, &rba->rbd_empty); | |
538 | spin_unlock(&rba->lock); | |
539 | } | |
540 | ||
541 | /* | |
542 | * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages | |
543 | * |
544 | * Called by a queue when it has posted an allocation request and |
545 | * has freed 8 RBDs in order to restock itself. |
546 | */ | |
547 | static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, | |
548 | struct iwl_rx_mem_buffer | |
549 | *out[RX_CLAIM_REQ_ALLOC]) | |
550 | { | |
551 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
552 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | |
553 | int i; | |
554 | ||
555 | /* | |
556 | * atomic_dec_if_positive returns req_ready - 1 for any scenario. | |
557 | * If req_ready is 0 atomic_dec_if_positive will return -1 and this | |
558 | * function will return -ENOMEM, as there are no ready requests. | |
559 | * atomic_dec_if_positive will perform the *actual* decrement only if |
560 | * req_ready > 0, i.e. - there are ready requests and the function | |
561 | * hands one request to the caller. | |
562 | */ | |
563 | if (atomic_dec_if_positive(&rba->req_ready) < 0) | |
564 | return -ENOMEM; | |
565 | ||
566 | spin_lock(&rba->lock); | |
567 | for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { | |
568 | /* Get next free Rx buffer, remove it from free list */ | |
569 | out[i] = list_first_entry(&rba->rbd_allocated, | |
570 | struct iwl_rx_mem_buffer, list); | |
571 | list_del(&out[i]->list); | |
572 | } | |
573 | spin_unlock(&rba->lock); | |
574 | ||
575 | return 0; | |
576 | } | |
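/*
 * Illustrative sketch (not part of the driver): the allocator handshake
 * boils down to two counters. A queue posts a request by incrementing
 * req_pending and kicking the work item; the allocator completes a batch
 * of RX_CLAIM_REQ_ALLOC pages and increments req_ready; the queue later
 * claims the batch through iwl_pcie_rx_allocator_get() above. Condensed
 * from the real call sites elsewhere in this file:
 */
#if 0	/* example only */
	/* queue side - see iwl_pcie_rx_reuse_rbd() */
	atomic_inc(&rba->req_pending);
	queue_work(rba->alloc_wq, &rba->rx_alloc);

	/* allocator side - see iwl_pcie_rx_allocator(), per request */
	atomic_inc(&rba->req_ready);

	/* queue side again - see iwl_pcie_rx_handle() */
	if (!iwl_pcie_rx_allocator_get(trans, out))
		; /* out[] now holds RX_CLAIM_REQ_ALLOC fresh buffers */
#endif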
577 | ||
578 | static void iwl_pcie_rx_allocator_work(struct work_struct *data) | |
ab697a9f | 579 | { |
26d535ae SS |
580 | struct iwl_rb_allocator *rba_p = |
581 | container_of(data, struct iwl_rb_allocator, rx_alloc); | |
5a878bf6 | 582 | struct iwl_trans_pcie *trans_pcie = |
26d535ae | 583 | container_of(rba_p, struct iwl_trans_pcie, rba); |
ab697a9f | 584 | |
26d535ae | 585 | iwl_pcie_rx_allocator(trans_pcie->trans); |
ab697a9f EG |
586 | } |
587 | ||
9805c446 EG |
588 | static int iwl_pcie_rx_alloc(struct iwl_trans *trans) |
589 | { | |
590 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
26d535ae | 591 | struct iwl_rb_allocator *rba = &trans_pcie->rba; |
9805c446 | 592 | struct device *dev = trans->dev; |
78485054 | 593 | int i; |
96a6497b SS |
594 | int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : |
595 | sizeof(__le32); | |
9805c446 | 596 | |
78485054 SS |
597 | if (WARN_ON(trans_pcie->rxq)) |
598 | return -EINVAL; | |
599 | ||
600 | trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), | |
601 | GFP_KERNEL); | |
602 | if (!trans_pcie->rxq) | |
603 | return -EINVAL; | |
9805c446 | 604 | |
26d535ae | 605 | spin_lock_init(&rba->lock); |
9805c446 | 606 | |
78485054 SS |
607 | for (i = 0; i < trans->num_rx_queues; i++) { |
608 | struct iwl_rxq *rxq = &trans_pcie->rxq[i]; | |
9805c446 | 609 | |
78485054 | 610 | spin_lock_init(&rxq->lock); |
96a6497b SS |
611 | if (trans->cfg->mq_rx_supported) |
612 | rxq->queue_size = MQ_RX_TABLE_SIZE; | |
613 | else | |
614 | rxq->queue_size = RX_QUEUE_SIZE; | |
615 | ||
78485054 SS |
616 | /* |
617 | * Allocate the circular buffer of Read Buffer Descriptors | |
618 | * (RBDs) | |
619 | */ | |
620 | rxq->bd = dma_zalloc_coherent(dev, | |
96a6497b SS |
621 | free_size * rxq->queue_size, |
622 | &rxq->bd_dma, GFP_KERNEL); | |
78485054 SS |
623 | if (!rxq->bd) |
624 | goto err; | |
9805c446 | 625 | |
96a6497b SS |
626 | if (trans->cfg->mq_rx_supported) { |
627 | rxq->used_bd = dma_zalloc_coherent(dev, | |
628 | sizeof(__le32) * | |
629 | rxq->queue_size, | |
630 | &rxq->used_bd_dma, | |
631 | GFP_KERNEL); | |
632 | if (!rxq->used_bd) | |
633 | goto err; | |
634 | } | |
9805c446 | 635 | |
78485054 SS |
636 | /*Allocate the driver's pointer to receive buffer status */ |
637 | rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), | |
638 | &rxq->rb_stts_dma, | |
639 | GFP_KERNEL); | |
640 | if (!rxq->rb_stts) | |
641 | goto err; | |
642 | } | |
9805c446 EG |
643 | return 0; |
644 | ||
78485054 SS |
645 | err: |
646 | for (i = 0; i < trans->num_rx_queues; i++) { | |
647 | struct iwl_rxq *rxq = &trans_pcie->rxq[i]; | |
648 | ||
649 | if (rxq->bd) | |
96a6497b | 650 | dma_free_coherent(dev, free_size * rxq->queue_size, |
78485054 SS |
651 | rxq->bd, rxq->bd_dma); |
652 | rxq->bd_dma = 0; | |
653 | rxq->bd = NULL; | |
654 | ||
655 | if (rxq->rb_stts) | |
656 | dma_free_coherent(trans->dev, | |
657 | sizeof(struct iwl_rb_status), | |
658 | rxq->rb_stts, rxq->rb_stts_dma); | |
96a6497b SS |
659 | |
660 | if (rxq->used_bd) | |
661 | dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, | |
662 | rxq->used_bd, rxq->used_bd_dma); | |
663 | rxq->used_bd_dma = 0; | |
664 | rxq->used_bd = NULL; | |
78485054 SS |
665 | } |
666 | kfree(trans_pcie->rxq); | |
96a6497b | 667 | |
9805c446 | 668 | return -ENOMEM; |
ab697a9f EG |
669 | } |
670 | ||
9805c446 EG |
671 | static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) |
672 | { | |
673 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
674 | u32 rb_size; | |
675 | const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ | |
676 | ||
6c4fbcbc EG |
677 | switch (trans_pcie->rx_buf_size) { |
678 | case IWL_AMSDU_4K: | |
679 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; | |
680 | break; | |
681 | case IWL_AMSDU_8K: | |
9805c446 | 682 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; |
6c4fbcbc EG |
683 | break; |
684 | case IWL_AMSDU_12K: | |
685 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K; | |
686 | break; | |
687 | default: | |
688 | WARN_ON(1); | |
9805c446 | 689 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; |
6c4fbcbc | 690 | } |
9805c446 EG |
691 | |
692 | /* Stop Rx DMA */ | |
693 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | |
ddaf5a5b JB |
694 | /* reset and flush pointers */ |
695 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); | |
696 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); | |
697 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0); | |
9805c446 EG |
698 | |
699 | /* Reset driver's Rx queue write index */ | |
700 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); | |
701 | ||
702 | /* Tell device where to find RBD circular buffer in DRAM */ | |
703 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, | |
704 | (u32)(rxq->bd_dma >> 8)); | |
705 | ||
706 | /* Tell device where in DRAM to update its Rx status */ | |
707 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, | |
708 | rxq->rb_stts_dma >> 4); | |
709 | ||
710 | /* Enable Rx DMA | |
711 | * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in | |
712 | * the credit mechanism in 5000 HW RX FIFO | |
713 | * Direct rx interrupts to hosts | |
6c4fbcbc | 714 | * Rx buffer size 4 or 8k or 12k |
9805c446 EG |
715 | * RB timeout 0x10 |
716 | * 256 RBDs | |
717 | */ | |
718 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, | |
719 | FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | | |
720 | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | | |
721 | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | | |
722 | rb_size| | |
49bd072d | 723 | (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| |
9805c446 EG |
724 | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); |
725 | ||
726 | /* Set interrupt coalescing timer to default (2048 usecs) */ | |
727 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | |
6960a059 EG |
728 | |
729 | /* W/A for interrupt coalescing bug in 7260 and 3160 */ | |
730 | if (trans->cfg->host_interrupt_operation_mode) | |
731 | iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); | |
9805c446 EG |
732 | } |
733 | ||
bce97731 | 734 | static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) |
c7df1f4b | 735 | { |
96a6497b SS |
736 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
737 | u32 rb_size, enabled = 0; | |
738 | int i; | |
c7df1f4b | 739 | |
96a6497b SS |
740 | switch (trans_pcie->rx_buf_size) { |
741 | case IWL_AMSDU_4K: | |
742 | rb_size = RFH_RXF_DMA_RB_SIZE_4K; | |
743 | break; | |
744 | case IWL_AMSDU_8K: | |
745 | rb_size = RFH_RXF_DMA_RB_SIZE_8K; | |
746 | break; | |
747 | case IWL_AMSDU_12K: | |
748 | rb_size = RFH_RXF_DMA_RB_SIZE_12K; | |
749 | break; | |
750 | default: | |
751 | WARN_ON(1); | |
752 | rb_size = RFH_RXF_DMA_RB_SIZE_4K; | |
753 | } | |
c7df1f4b | 754 | |
96a6497b SS |
755 | /* Stop Rx DMA */ |
756 | iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); | |
757 | /* disable free and used rx queue operation */ |
758 | iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0); | |
26d535ae | 759 | |
96a6497b SS |
760 | for (i = 0; i < trans->num_rx_queues; i++) { |
761 | /* Tell device where to find RBD free table in DRAM */ | |
762 | iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i), | |
bce97731 | 763 | (u64)(trans_pcie->rxq[i].bd_dma)); |
96a6497b SS |
764 | /* Tell device where to find RBD used table in DRAM */ |
765 | iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i), | |
bce97731 | 766 | (u64)(trans_pcie->rxq[i].used_bd_dma)); |
96a6497b SS |
767 | /* Tell device where in DRAM to update its Rx status */ |
768 | iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i), | |
bce97731 | 769 | trans_pcie->rxq[i].rb_stts_dma); |
96a6497b SS |
770 | /* Reset device index tables */ |
771 | iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0); | |
772 | iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0); | |
773 | iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0); | |
774 | ||
775 | enabled |= BIT(i) | BIT(i + 16); | |
776 | } | |
26d535ae | 777 | |
96a6497b SS |
778 | /* restock default queue */ |
779 | iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]); | |
780 | ||
781 | /* | |
782 | * Enable Rx DMA | |
783 | * Single frame mode | |
784 | * Rx buffer size 4 or 8k or 12k | |
785 | * Min RB size 4 or 8 | |
786 | * 512 RBDs | |
787 | */ | |
788 | iwl_write_prph(trans, RFH_RXF_DMA_CFG, | |
789 | RFH_DMA_EN_ENABLE_VAL | | |
790 | rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | | |
791 | RFH_RXF_DMA_MIN_RB_4_8 | | |
792 | RFH_RXF_DMA_RBDCB_SIZE_512); | |
793 | ||
794 | iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | | |
795 | RFH_GEN_CFG_SERVICE_DMA_SNOOP); | |
796 | iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled); | |
26d535ae | 797 | |
96a6497b SS |
798 | /* Set interrupt coalescing timer to default (2048 usecs) */ |
799 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | |
26d535ae SS |
800 | } |
801 | ||
96a6497b | 802 | static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) |
26d535ae | 803 | { |
96a6497b | 804 | lockdep_assert_held(&rxq->lock); |
26d535ae | 805 | |
96a6497b SS |
806 | INIT_LIST_HEAD(&rxq->rx_free); |
807 | INIT_LIST_HEAD(&rxq->rx_used); | |
808 | rxq->free_count = 0; | |
809 | rxq->used_count = 0; | |
26d535ae SS |
810 | } |
811 | ||
bce97731 SS |
812 | static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) |
813 | { | |
814 | WARN_ON(1); | |
815 | return 0; | |
816 | } | |
817 | ||
9805c446 EG |
818 | int iwl_pcie_rx_init(struct iwl_trans *trans) |
819 | { | |
820 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
78485054 | 821 | struct iwl_rxq *def_rxq; |
26d535ae | 822 | struct iwl_rb_allocator *rba = &trans_pcie->rba; |
96a6497b | 823 | int i, err, num_rbds, allocator_pool_size; |
9805c446 | 824 | |
78485054 | 825 | if (!trans_pcie->rxq) { |
9805c446 EG |
826 | err = iwl_pcie_rx_alloc(trans); |
827 | if (err) | |
828 | return err; | |
829 | } | |
78485054 | 830 | def_rxq = trans_pcie->rxq; |
26d535ae SS |
831 | if (!rba->alloc_wq) |
832 | rba->alloc_wq = alloc_workqueue("rb_allocator", | |
833 | WQ_HIGHPRI | WQ_UNBOUND, 1); | |
834 | INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); | |
835 | ||
836 | spin_lock(&rba->lock); | |
837 | atomic_set(&rba->req_pending, 0); | |
838 | atomic_set(&rba->req_ready, 0); | |
96a6497b SS |
839 | INIT_LIST_HEAD(&rba->rbd_allocated); |
840 | INIT_LIST_HEAD(&rba->rbd_empty); | |
26d535ae | 841 | spin_unlock(&rba->lock); |
9805c446 | 842 | |
c7df1f4b | 843 | /* free all first - we might be reconfigured for a different size */ |
78485054 | 844 | iwl_pcie_free_rbs_pool(trans); |
9805c446 EG |
845 | |
846 | for (i = 0; i < RX_QUEUE_SIZE; i++) | |
78485054 | 847 | def_rxq->queue[i] = NULL; |
9805c446 | 848 | |
78485054 SS |
849 | for (i = 0; i < trans->num_rx_queues; i++) { |
850 | struct iwl_rxq *rxq = &trans_pcie->rxq[i]; | |
851 | ||
96a6497b SS |
852 | rxq->id = i; |
853 | ||
78485054 SS |
854 | spin_lock(&rxq->lock); |
855 | /* | |
856 | * Set read write pointer to reflect that we have processed | |
857 | * and used all buffers, but have not restocked the Rx queue | |
858 | * with fresh buffers | |
859 | */ | |
860 | rxq->read = 0; | |
861 | rxq->write = 0; | |
862 | rxq->write_actual = 0; | |
863 | memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); | |
9805c446 | 864 | |
78485054 SS |
865 | iwl_pcie_rx_init_rxb_lists(rxq); |
866 | ||
bce97731 SS |
867 | if (!rxq->napi.poll) |
868 | netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, | |
869 | iwl_pcie_dummy_napi_poll, 64); | |
870 | ||
78485054 SS |
871 | spin_unlock(&rxq->lock); |
872 | } | |
9805c446 | 873 | |
96a6497b SS |
874 | /* move the pool to the default queue and allocator ownerships */ |
875 | num_rbds = trans->cfg->mq_rx_supported ? | |
876 | MQ_RX_POOL_SIZE : RX_QUEUE_SIZE; | |
877 | allocator_pool_size = trans->num_rx_queues * | |
878 | (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); | |
879 | for (i = 0; i < num_rbds; i++) { | |
880 | struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; | |
881 | ||
882 | if (i < allocator_pool_size) | |
883 | list_add(&rxb->list, &rba->rbd_empty); | |
884 | else | |
885 | list_add(&rxb->list, &def_rxq->rx_used); | |
886 | trans_pcie->global_table[i] = rxb; | |
887 | rxb->vid = (u16)i; | |
888 | } | |
9805c446 | 889 | |
78485054 | 890 | iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); |
96a6497b | 891 | if (trans->cfg->mq_rx_supported) { |
bce97731 | 892 | iwl_pcie_rx_mq_hw_init(trans); |
96a6497b SS |
893 | } else { |
894 | iwl_pcie_rxq_restock(trans, def_rxq); | |
895 | iwl_pcie_rx_hw_init(trans, def_rxq); | |
896 | } | |
78485054 SS |
897 | |
898 | spin_lock(&def_rxq->lock); | |
899 | iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); | |
900 | spin_unlock(&def_rxq->lock); | |
9805c446 EG |
901 | |
902 | return 0; | |
903 | } | |
904 | ||
905 | void iwl_pcie_rx_free(struct iwl_trans *trans) | |
906 | { | |
907 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
26d535ae | 908 | struct iwl_rb_allocator *rba = &trans_pcie->rba; |
96a6497b SS |
909 | int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : |
910 | sizeof(__le32); | |
78485054 | 911 | int i; |
9805c446 | 912 | |
78485054 SS |
913 | /* |
914 | * if rxq is NULL, it means that nothing has been allocated, | |
915 | * exit now | |
916 | */ | |
917 | if (!trans_pcie->rxq) { | |
9805c446 EG |
918 | IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); |
919 | return; | |
920 | } | |
921 | ||
26d535ae SS |
922 | cancel_work_sync(&rba->rx_alloc); |
923 | if (rba->alloc_wq) { | |
924 | destroy_workqueue(rba->alloc_wq); | |
925 | rba->alloc_wq = NULL; | |
926 | } | |
927 | ||
78485054 SS |
928 | iwl_pcie_free_rbs_pool(trans); |
929 | ||
930 | for (i = 0; i < trans->num_rx_queues; i++) { | |
931 | struct iwl_rxq *rxq = &trans_pcie->rxq[i]; | |
932 | ||
933 | if (rxq->bd) | |
934 | dma_free_coherent(trans->dev, | |
96a6497b | 935 | free_size * rxq->queue_size, |
78485054 SS |
936 | rxq->bd, rxq->bd_dma); |
937 | rxq->bd_dma = 0; | |
938 | rxq->bd = NULL; | |
939 | ||
940 | if (rxq->rb_stts) | |
941 | dma_free_coherent(trans->dev, | |
942 | sizeof(struct iwl_rb_status), | |
943 | rxq->rb_stts, rxq->rb_stts_dma); | |
944 | else | |
945 | IWL_DEBUG_INFO(trans, | |
946 | "Free rxq->rb_stts which is NULL\n"); | |
9805c446 | 947 | |
96a6497b SS |
948 | if (rxq->used_bd) |
949 | dma_free_coherent(trans->dev, | |
950 | sizeof(__le32) * rxq->queue_size, | |
951 | rxq->used_bd, rxq->used_bd_dma); | |
952 | rxq->used_bd_dma = 0; | |
953 | rxq->used_bd = NULL; | |
bce97731 SS |
954 | |
955 | if (rxq->napi.poll) | |
956 | netif_napi_del(&rxq->napi); | |
96a6497b | 957 | } |
78485054 | 958 | kfree(trans_pcie->rxq); |
9805c446 EG |
959 | } |
960 | ||
26d535ae SS |
961 | /* |
962 | * iwl_pcie_rx_reuse_rbd - Recycle used RBDs | |
963 | * | |
964 | * Called when a RBD can be reused. The RBD is transferred to the allocator. | |
965 | * When there are 2 empty RBDs - a request for allocation is posted | |
966 | */ | |
967 | static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, | |
968 | struct iwl_rx_mem_buffer *rxb, | |
969 | struct iwl_rxq *rxq, bool emergency) | |
970 | { | |
971 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
972 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | |
973 | ||
974 | /* Move the RBD to the used list, will be moved to allocator in batches | |
975 | * before claiming or posting a request */ |
976 | list_add_tail(&rxb->list, &rxq->rx_used); | |
977 | ||
978 | if (unlikely(emergency)) | |
979 | return; | |
980 | ||
981 | /* Count the allocator owned RBDs */ | |
982 | rxq->used_count++; | |
983 | ||
984 | /* If we have RX_POST_REQ_ALLOC newly released rx buffers - |
985 | * issue a request for the allocator. The modulo by RX_CLAIM_REQ_ALLOC |
986 | * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC buffers |
987 | * earlier but still need to post another request. |
988 | */ | |
989 | if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { | |
990 | /* Move the 2 RBDs to the allocator ownership. |
991 | * Allocator has another 6 from pool for the request completion */ |
992 | spin_lock(&rba->lock); | |
993 | list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); | |
994 | spin_unlock(&rba->lock); | |
995 | ||
996 | atomic_inc(&rba->req_pending); | |
997 | queue_work(rba->alloc_wq, &rba->rx_alloc); | |
998 | } | |
999 | } | |
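/*
 * Worked example (constants as described in the theory-of-operation
 * comment: requests are posted with 2 empty RBDs and completed in
 * batches of 8): with RX_CLAIM_REQ_ALLOC == 8 and RX_POST_REQ_ALLOC == 2,
 * the condition above fires when used_count reaches 2, 10, 18, ... -
 * each request is posted with 2 RBDs and the remaining 6 of the batch
 * follow when the queue later claims the completed request.
 */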
1000 | ||
9805c446 | 1001 | static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, |
78485054 | 1002 | struct iwl_rxq *rxq, |
26d535ae SS |
1003 | struct iwl_rx_mem_buffer *rxb, |
1004 | bool emergency) | |
df2f3216 JB |
1005 | { |
1006 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
990aa6d7 | 1007 | struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; |
0c19744c | 1008 | bool page_stolen = false; |
b2cf410c | 1009 | int max_len = PAGE_SIZE << trans_pcie->rx_page_order; |
0c19744c | 1010 | u32 offset = 0; |
df2f3216 JB |
1011 | |
1012 | if (WARN_ON(!rxb)) | |
1013 | return; | |
1014 | ||
0c19744c JB |
1015 | dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); |
1016 | ||
1017 | while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { | |
1018 | struct iwl_rx_packet *pkt; | |
0c19744c JB |
1019 | u16 sequence; |
1020 | bool reclaim; | |
f7e6469f | 1021 | int index, cmd_index, len; |
0c19744c JB |
1022 | struct iwl_rx_cmd_buffer rxcb = { |
1023 | ._offset = offset, | |
d13f1862 | 1024 | ._rx_page_order = trans_pcie->rx_page_order, |
0c19744c JB |
1025 | ._page = rxb->page, |
1026 | ._page_stolen = false, | |
0d6c4a2e | 1027 | .truesize = max_len, |
0c19744c JB |
1028 | }; |
1029 | ||
1030 | pkt = rxb_addr(&rxcb); | |
1031 | ||
1032 | if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) | |
1033 | break; | |
1034 | ||
9243efcc LK |
1035 | IWL_DEBUG_RX(trans, |
1036 | "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n", | |
1037 | rxcb._offset, | |
39bdb17e SD |
1038 | iwl_get_cmd_string(trans, |
1039 | iwl_cmd_id(pkt->hdr.cmd, | |
1040 | pkt->hdr.group_id, | |
1041 | 0)), | |
9243efcc | 1042 | pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); |
0c19744c | 1043 | |
65b30348 | 1044 | len = iwl_rx_packet_len(pkt); |
0c19744c | 1045 | len += sizeof(u32); /* account for status word */ |
f042c2eb JB |
1046 | trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); |
1047 | trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); | |
0c19744c JB |
1048 | |
1049 | /* Reclaim a command buffer only if this packet is a response | |
1050 | * to a (driver-originated) command. | |
1051 | * If the packet (e.g. Rx frame) originated from uCode, | |
1052 | * there is no command buffer to reclaim. | |
1053 | * Ucode should set SEQ_RX_FRAME bit if ucode-originated, | |
1054 | * but apparently a few don't get set; catch them here. */ | |
1055 | reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); | |
1056 | if (reclaim) { | |
1057 | int i; | |
1058 | ||
1059 | for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { | |
1060 | if (trans_pcie->no_reclaim_cmds[i] == | |
1061 | pkt->hdr.cmd) { | |
1062 | reclaim = false; | |
1063 | break; | |
1064 | } | |
d663ee73 JB |
1065 | } |
1066 | } | |
df2f3216 | 1067 | |
0c19744c JB |
1068 | sequence = le16_to_cpu(pkt->hdr.sequence); |
1069 | index = SEQ_TO_INDEX(sequence); | |
1070 | cmd_index = get_cmd_index(&txq->q, index); | |
1071 | ||
bce97731 SS |
1072 | if (rxq->id == 0) |
1073 | iwl_op_mode_rx(trans->op_mode, &rxq->napi, | |
1074 | &rxcb); | |
1075 | else | |
1076 | iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, | |
1077 | &rxcb, rxq->id); | |
0c19744c | 1078 | |
96791422 | 1079 | if (reclaim) { |
5d4185ae | 1080 | kzfree(txq->entries[cmd_index].free_buf); |
f4feb8ac | 1081 | txq->entries[cmd_index].free_buf = NULL; |
96791422 EG |
1082 | } |
1083 | ||
0c19744c JB |
1084 | /* |
1085 | * After here, we should always check rxcb._page_stolen, | |
1086 | * if it is true then one of the handlers took the page. | |
1087 | */ | |
1088 | ||
1089 | if (reclaim) { | |
1090 | /* Invoke any callbacks, transfer the buffer to caller, | |
1091 | * and fire off the (possibly) blocking | |
1092 | * iwl_trans_send_cmd() | |
1093 | * as we reclaim the driver command queue */ | |
1094 | if (!rxcb._page_stolen) | |
f7e6469f | 1095 | iwl_pcie_hcmd_complete(trans, &rxcb); |
0c19744c JB |
1096 | else |
1097 | IWL_WARN(trans, "Claim null rxb?\n"); | |
1098 | } | |
1099 | ||
1100 | page_stolen |= rxcb._page_stolen; | |
1101 | offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); | |
df2f3216 JB |
1102 | } |
1103 | ||
0c19744c JB |
1104 | /* page was stolen from us -- free our reference */ |
1105 | if (page_stolen) { | |
b2cf410c | 1106 | __free_pages(rxb->page, trans_pcie->rx_page_order); |
df2f3216 | 1107 | rxb->page = NULL; |
0c19744c | 1108 | } |
df2f3216 JB |
1109 | |
1110 | /* Reuse the page if possible. For notification packets and | |
1111 | * SKBs that fail to Rx correctly, add them back into the | |
1112 | * rx_free list for reuse later. */ | |
df2f3216 JB |
1113 | if (rxb->page != NULL) { |
1114 | rxb->page_dma = | |
1115 | dma_map_page(trans->dev, rxb->page, 0, | |
20d3b647 JB |
1116 | PAGE_SIZE << trans_pcie->rx_page_order, |
1117 | DMA_FROM_DEVICE); | |
7c341582 JB |
1118 | if (dma_mapping_error(trans->dev, rxb->page_dma)) { |
1119 | /* | |
1120 | * free the page(s) as well to not break | |
1121 | * the invariant that the items on the used | |
1122 | * list have no page(s) | |
1123 | */ | |
1124 | __free_pages(rxb->page, trans_pcie->rx_page_order); | |
1125 | rxb->page = NULL; | |
26d535ae | 1126 | iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); |
7c341582 JB |
1127 | } else { |
1128 | list_add_tail(&rxb->list, &rxq->rx_free); | |
1129 | rxq->free_count++; | |
1130 | } | |
df2f3216 | 1131 | } else |
26d535ae | 1132 | iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); |
df2f3216 JB |
1133 | } |
1134 | ||
990aa6d7 EG |
1135 | /* |
1136 | * iwl_pcie_rx_handle - Main entry function for receiving responses from fw | |
ab697a9f | 1137 | */ |
990aa6d7 | 1138 | static void iwl_pcie_rx_handle(struct iwl_trans *trans) |
ab697a9f | 1139 | { |
df2f3216 | 1140 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
78485054 | 1141 | struct iwl_rxq *rxq = &trans_pcie->rxq[0]; |
26d535ae SS |
1142 | u32 r, i, j, count = 0; |
1143 | bool emergency = false; | |
ab697a9f | 1144 | |
f14d6b39 JB |
1145 | restart: |
1146 | spin_lock(&rxq->lock); | |
ab697a9f EG |
1147 | /* uCode's read index (stored in shared DRAM) indicates the last Rx |
1148 | * buffer that the driver may process (last buffer filled by ucode). */ | |
52e2a99e | 1149 | r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; |
ab697a9f EG |
1150 | i = rxq->read; |
1151 | ||
1152 | /* Rx interrupt, but nothing sent from uCode */ | |
1153 | if (i == r) | |
726f23fd | 1154 | IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); |
ab697a9f | 1155 | |
ab697a9f | 1156 | while (i != r) { |
48a2d66f | 1157 | struct iwl_rx_mem_buffer *rxb; |
ab697a9f | 1158 | |
96a6497b | 1159 | if (unlikely(rxq->used_count == rxq->queue_size / 2)) |
26d535ae SS |
1160 | emergency = true; |
1161 | ||
96a6497b SS |
1162 | if (trans->cfg->mq_rx_supported) { |
1163 | /* | |
1164 | * used_bd is a 32-bit value, but only the low 12 bits are used |
1165 | * to retrieve the vid |
1166 | */ | |
1167 | u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]); | |
1168 | ||
1169 | rxb = trans_pcie->global_table[vid]; | |
1170 | } else { | |
1171 | rxb = rxq->queue[i]; | |
1172 | rxq->queue[i] = NULL; | |
1173 | } | |
ab697a9f | 1174 | |
f02d2ccd | 1175 | IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i); |
78485054 | 1176 | iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); |
ab697a9f | 1177 | |
96a6497b | 1178 | i = (i + 1) & (rxq->queue_size - 1); |
26d535ae SS |
1179 | |
1180 | /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - | |
1181 | * try to claim the pre-allocated buffers from the allocator */ | |
1182 | if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { | |
1183 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | |
1184 | struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; | |
1185 | ||
1186 | if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && | |
1187 | !emergency) { | |
1188 | /* Add the remaining 6 empty RBDs | |
1189 | * for allocator use | |
1190 | */ | |
1191 | spin_lock(&rba->lock); | |
1192 | list_splice_tail_init(&rxq->rx_used, | |
1193 | &rba->rbd_empty); | |
1194 | spin_unlock(&rba->lock); | |
1195 | } | |
1196 | ||
1197 | /* If not ready - continue, will try to reclaim later. | |
1198 | * No need to reschedule work - allocator exits only on | |
1199 | * success */ | |
1200 | if (!iwl_pcie_rx_allocator_get(trans, out)) { | |
1201 | /* If success - then RX_CLAIM_REQ_ALLOC | |
1202 | * buffers were retrieved and should be added | |
1203 | * to free list */ | |
1204 | rxq->used_count -= RX_CLAIM_REQ_ALLOC; | |
1205 | for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { | |
1206 | list_add_tail(&out[j]->list, | |
1207 | &rxq->rx_free); | |
1208 | rxq->free_count++; | |
1209 | } | |
1210 | } | |
1211 | } | |
1212 | if (emergency) { | |
255ba065 | 1213 | count++; |
26d535ae | 1214 | if (count == 8) { |
255ba065 | 1215 | count = 0; |
96a6497b | 1216 | if (rxq->used_count < rxq->queue_size / 3) |
26d535ae SS |
1217 | emergency = false; |
1218 | spin_unlock(&rxq->lock); | |
78485054 | 1219 | iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); |
26d535ae | 1220 | spin_lock(&rxq->lock); |
ab697a9f EG |
1221 | } |
1222 | } | |
26d535ae SS |
1223 | /* Handle restock for three cases (they can all apply at once): |
1224 | * - we just pulled buffers from the allocator | |
1225 | * - we have 8+ unstolen pages accumulated | |
1226 | * - we are in emergency and allocated buffers | |
1227 | */ | |
1228 | if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { | |
1229 | rxq->read = i; | |
1230 | spin_unlock(&rxq->lock); | |
96a6497b SS |
1231 | if (trans->cfg->mq_rx_supported) |
1232 | iwl_pcie_rxq_mq_restock(trans, rxq); | |
1233 | else | |
1234 | iwl_pcie_rxq_restock(trans, rxq); | |
26d535ae SS |
1235 | goto restart; |
1236 | } | |
ab697a9f EG |
1237 | } |
1238 | ||
1239 | /* Backtrack one entry */ | |
1240 | rxq->read = i; | |
f14d6b39 JB |
1241 | spin_unlock(&rxq->lock); |
1242 | ||
26d535ae SS |
1243 | /* |
1244 | * Handle a case where in emergency there are some unallocated RBDs. |
1245 | * Those RBDs are in the used list, but are not tracked by the queue's |
1246 | * used_count, which only counts allocator-owned RBDs. |
1247 | * Unallocated emergency RBDs must be allocated on exit, otherwise, |
1248 | * when called again, the function may not be in emergency mode and |
1249 | * they will be handed to the allocator with no tracking in the RBD |
1250 | * allocator counters, which will lead to them never being claimed back |
1251 | * by the queue. |
1252 | * By allocating them here, they are now in the queue free list, and |
1253 | * will be restocked by the next call of iwl_pcie_rxq_restock. |
1254 | */ | |
1255 | if (unlikely(emergency && count)) | |
78485054 | 1256 | iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); |
255ba065 | 1257 | |
bce97731 SS |
1258 | if (rxq->napi.poll) |
1259 | napi_gro_flush(&rxq->napi, false); | |
ab697a9f EG |
1260 | } |
1261 | ||
990aa6d7 EG |
1262 | /* |
1263 | * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card | |
7ff94706 | 1264 | */ |
990aa6d7 | 1265 | static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) |
7ff94706 | 1266 | { |
f946b529 | 1267 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1103323c | 1268 | int i; |
f946b529 | 1269 | |
7ff94706 | 1270 | /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ |
035f7ff2 | 1271 | if (trans->cfg->internal_wimax_coex && |
95411d04 | 1272 | !trans->cfg->apmg_not_supported && |
1042db2a | 1273 | (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & |
20d3b647 | 1274 | APMS_CLK_VAL_MRB_FUNC_MODE) || |
1042db2a | 1275 | (iwl_read_prph(trans, APMG_PS_CTRL_REG) & |
20d3b647 | 1276 | APMG_PS_CTRL_VAL_RESET_REQ))) { |
eb7ff77e | 1277 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); |
8a8bbdb4 | 1278 | iwl_op_mode_wimax_active(trans->op_mode); |
f946b529 | 1279 | wake_up(&trans_pcie->wait_command_queue); |
7ff94706 EG |
1280 | return; |
1281 | } | |
1282 | ||
990aa6d7 | 1283 | iwl_pcie_dump_csr(trans); |
313b0a29 | 1284 | iwl_dump_fh(trans, NULL); |
7ff94706 | 1285 | |
2bfb5092 | 1286 | local_bh_disable(); |
2a988e98 AN |
1287 | /* The STATUS_FW_ERROR bit is set in this function. This must happen |
1288 | * before we wake up the command caller, to ensure a proper cleanup. */ | |
1289 | iwl_trans_fw_error(trans); | |
2bfb5092 | 1290 | local_bh_enable(); |
2a988e98 | 1291 | |
1103323c EG |
1292 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) |
1293 | del_timer(&trans_pcie->txq[i].stuck_timer); | |
1294 | ||
2a988e98 AN |
1295 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); |
1296 | wake_up(&trans_pcie->wait_command_queue); | |
7ff94706 EG |
1297 | } |
1298 | ||
7117c000 | 1299 | static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) |
fc84472b | 1300 | { |
fc84472b EG |
1301 | u32 inta; |
1302 | ||
46e81af9 | 1303 | lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); |
fc84472b EG |
1304 | |
1305 | trace_iwlwifi_dev_irq(trans->dev); | |
1306 | ||
1307 | /* Discover which interrupts are active/pending */ | |
1308 | inta = iwl_read32(trans, CSR_INT); | |
1309 | ||
fc84472b | 1310 | /* the thread will service interrupts and re-enable them */ |
fe523dc9 | 1311 | return inta; |
fc84472b EG |
1312 | } |
1313 | ||
1314 | /* a device (PCI-E) page is 4096 bytes long */ | |
1315 | #define ICT_SHIFT 12 | |
1316 | #define ICT_SIZE (1 << ICT_SHIFT) | |
1317 | #define ICT_COUNT (ICT_SIZE / sizeof(u32)) | |
1318 | ||
1319 | /* Interrupt handler using the ICT table. With this mechanism the driver |
1320 | * stops reading the INTA register to learn the device's interrupt cause; |
1321 | * reading that register is expensive. Instead, the device writes the |
1322 | * interrupt causes into the ICT table in DRAM, increments its index, and |
1323 | * fires an interrupt. The driver ORs all ICT table entries from the |
1324 | * current index up to the first entry with a 0 value; the result is the |
1325 | * interrupt to service. The driver then zeroes the entries and sets the index. |
1326 | */ |
7117c000 | 1327 | static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) |
fc84472b EG |
1328 | { |
1329 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
fc84472b EG |
1330 | u32 inta; |
1331 | u32 val = 0; | |
1332 | u32 read; | |
1333 | ||
fc84472b EG |
1334 | trace_iwlwifi_dev_irq(trans->dev); |
1335 | ||
1336 | /* Ignore interrupt if there's nothing in NIC to service. | |
1337 | * This may be due to IRQ shared with another device, | |
1338 | * or due to sporadic interrupts thrown from our NIC. */ | |
1339 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); | |
1340 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); | |
7ba1faa4 EG |
1341 | if (!read) |
1342 | return 0; | |
fc84472b EG |
1343 | |
1344 | /* | |
1345 | * Collect all entries up to the first 0, starting from ict_index; | |
1346 | * note we already read at ict_index. | |
1347 | */ | |
1348 | do { | |
1349 | val |= read; | |
1350 | IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", | |
1351 | trans_pcie->ict_index, read); | |
1352 | trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; | |
1353 | trans_pcie->ict_index = | |
83f32a4b | 1354 | ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); |
fc84472b EG |
1355 | |
1356 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); | |
1357 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, | |
1358 | read); | |
1359 | } while (read); | |
1360 | ||
1361 | /* We should not get this value, just ignore it. */ | |
1362 | if (val == 0xffffffff) | |
1363 | val = 0; | |
1364 | ||
1365 | /* | |
1366 | * This is a w/a for a h/w bug: the bug may cause the Rx bit |
1367 | * (bit 15 before shifting it to 31) to clear when using interrupt |
1368 | * coalescing. Fortunately, bits 18 and 19 stay set when this happens, |
1369 | * so we use them to decide on the real state of the Rx bit. |
1370 | * In other words, bit 15 is set if bit 18 or bit 19 is set. |
1371 | */ | |
1372 | if (val & 0xC0000) | |
1373 | val |= 0x8000; | |
1374 | ||
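/* Editor's note: the remap below restores CSR_INT layout; an ICT entry
 * carries CSR_INT bits 0-7 in its low byte and CSR_INT bits 24-31 in its
 * second byte (cf. "bit 15 before shifting it to 31" above).
 */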
1375 | inta = (0xff & val) | ((0xff00 & val) << 16); | |
fe523dc9 | 1376 | return inta; |
fc84472b EG |
1377 | } |
1378 | ||
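/* Editor's sketch (not driver code): a minimal, self-contained model of
 * the ICT walk performed by iwl_pcie_int_cause_ict() above. The table and
 * index here are plain userspace stand-ins; only the
 * OR-accumulate-until-zero pattern and the power-of-two wraparound are
 * taken from the function itself.
 */
#include <stdint.h>

#define EX_ICT_COUNT 1024	/* matches ICT_SIZE / sizeof(u32) */

static uint32_t ex_ict_walk(uint32_t tbl[EX_ICT_COUNT], unsigned int *index)
{
	uint32_t val = 0;
	uint32_t read = tbl[*index];

	/* Nothing pending at the current index: spurious or shared IRQ. */
	if (!read)
		return 0;

	/* OR all entries up to the first zero, clearing them as we go. */
	do {
		val |= read;
		tbl[*index] = 0;
		*index = (*index + 1) & (EX_ICT_COUNT - 1);
		read = tbl[*index];
	} while (read);

	return val;
}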
2bfb5092 | 1379 | irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) |
ab697a9f | 1380 | { |
2bfb5092 | 1381 | struct iwl_trans *trans = dev_id; |
20d3b647 JB |
1382 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1383 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
ab697a9f EG |
1384 | u32 inta = 0; |
1385 | u32 handled = 0; | |
ab697a9f | 1386 | |
2bfb5092 JB |
1387 | lock_map_acquire(&trans->sync_cmd_lockdep_map); |
1388 | ||
7b70bd63 | 1389 | spin_lock(&trans_pcie->irq_lock); |
ab697a9f | 1390 | |
0fec9542 EG |
1391 | /* DRAM interrupt table not set yet; |
1392 | * use the legacy INTA register. |
1393 | */ |
1394 | if (likely(trans_pcie->use_ict)) | |
7117c000 | 1395 | inta = iwl_pcie_int_cause_ict(trans); |
0fec9542 | 1396 | else |
7117c000 | 1397 | inta = iwl_pcie_int_cause_non_ict(trans); |
0fec9542 | 1398 | |
7ba1faa4 EG |
1399 | if (iwl_have_debug_level(IWL_DL_ISR)) { |
1400 | IWL_DEBUG_ISR(trans, | |
1401 | "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", | |
1402 | inta, trans_pcie->inta_mask, | |
1403 | iwl_read32(trans, CSR_INT_MASK), | |
1404 | iwl_read32(trans, CSR_FH_INT_STATUS)); | |
1405 | if (inta & (~trans_pcie->inta_mask)) | |
1406 | IWL_DEBUG_ISR(trans, | |
1407 | "We got a masked interrupt (0x%08x)\n", | |
1408 | inta & (~trans_pcie->inta_mask)); | |
1409 | } | |
1410 | ||
1411 | inta &= trans_pcie->inta_mask; | |
1412 | ||
1413 | /* | |
1414 | * Ignore interrupt if there's nothing in the NIC to service. |
1415 | * This may be due to IRQ shared with another device, | |
1416 | * or due to sporadic interrupts thrown from our NIC. | |
1417 | */ | |
7117c000 | 1418 | if (unlikely(!inta)) { |
7ba1faa4 EG |
1419 | IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); |
1420 | /* | |
1421 | * Re-enable interrupts here since we don't | |
1422 | * have anything to service | |
1423 | */ | |
1424 | if (test_bit(STATUS_INT_ENABLED, &trans->status)) | |
1425 | iwl_enable_interrupts(trans); | |
7b70bd63 | 1426 | spin_unlock(&trans_pcie->irq_lock); |
7117c000 EG |
1427 | lock_map_release(&trans->sync_cmd_lockdep_map); |
1428 | return IRQ_NONE; | |
1429 | } | |
1430 | ||
7ba1faa4 EG |
1431 | if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { |
1432 | /* | |
1433 | * Hardware disappeared. It might have | |
1434 | * already raised an interrupt. | |
1435 | */ | |
1436 | IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); | |
7b70bd63 | 1437 | spin_unlock(&trans_pcie->irq_lock); |
7117c000 | 1438 | goto out; |
a0f337cc EG |
1439 | } |
1440 | ||
ab697a9f EG |
1441 | /* Ack/clear/reset pending uCode interrupts. |
1442 | * Note: some bits in CSR_INT are "OR"s of bits in CSR_FH_INT_STATUS. |
1443 | */ |
1444 | /* There is a hardware bug in the interrupt mask function: some |
1445 | * interrupts (e.g. CSR_INT_BIT_SCD) can still be generated even if |
1446 | * they are disabled in the CSR_INT_MASK register. Furthermore, the |
1447 | * ICT interrupt handling mechanism has another bug that might cause |
1448 | * these unmasked interrupts to fail to be detected. We work around |
1449 | * both hardware bugs here by ACKing all possible interrupts so that |
1450 | * interrupt coalescing can still be achieved. |
1451 | */ |
7117c000 | 1452 | iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); |
ab697a9f | 1453 | |
51cd53ad | 1454 | if (iwl_have_debug_level(IWL_DL_ISR)) |
0ca24daf | 1455 | IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", |
51cd53ad | 1456 | inta, iwl_read32(trans, CSR_INT_MASK)); |
ab697a9f | 1457 | |
7b70bd63 | 1458 | spin_unlock(&trans_pcie->irq_lock); |
b49ba04a | 1459 | |
ab697a9f EG |
1460 | /* Now service all interrupt bits discovered above. */ |
1461 | if (inta & CSR_INT_BIT_HW_ERR) { | |
0c325769 | 1462 | IWL_ERR(trans, "Hardware error detected. Restarting.\n"); |
ab697a9f EG |
1463 | |
1464 | /* Tell the device to stop sending interrupts */ | |
0c325769 | 1465 | iwl_disable_interrupts(trans); |
ab697a9f | 1466 | |
1f7b6172 | 1467 | isr_stats->hw++; |
990aa6d7 | 1468 | iwl_pcie_irq_handle_error(trans); |
ab697a9f EG |
1469 | |
1470 | handled |= CSR_INT_BIT_HW_ERR; | |
1471 | ||
2bfb5092 | 1472 | goto out; |
ab697a9f EG |
1473 | } |
1474 | ||
a8bceb39 | 1475 | if (iwl_have_debug_level(IWL_DL_ISR)) { |
ab697a9f EG |
1476 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ |
1477 | if (inta & CSR_INT_BIT_SCD) { | |
51cd53ad JB |
1478 | IWL_DEBUG_ISR(trans, |
1479 | "Scheduler finished to transmit the frame/frames.\n"); | |
1f7b6172 | 1480 | isr_stats->sch++; |
ab697a9f EG |
1481 | } |
1482 | ||
1483 | /* Alive notification via Rx interrupt will do the real work */ | |
1484 | if (inta & CSR_INT_BIT_ALIVE) { | |
0c325769 | 1485 | IWL_DEBUG_ISR(trans, "Alive interrupt\n"); |
1f7b6172 | 1486 | isr_stats->alive++; |
ab697a9f EG |
1487 | } |
1488 | } | |
51cd53ad | 1489 | |
ab697a9f EG |
1490 | /* Safely ignore these bits for debug checks below */ |
1491 | inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); | |
1492 | ||
1493 | /* HW RF KILL switch toggled */ | |
1494 | if (inta & CSR_INT_BIT_RF_KILL) { | |
c9eec95c | 1495 | bool hw_rfkill; |
ab697a9f | 1496 | |
8d425517 | 1497 | hw_rfkill = iwl_is_rfkill_set(trans); |
0c325769 | 1498 | IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", |
20d3b647 | 1499 | hw_rfkill ? "disable radio" : "enable radio"); |
ab697a9f | 1500 | |
1f7b6172 | 1501 | isr_stats->rfkill++; |
ab697a9f | 1502 | |
fa9f3281 | 1503 | mutex_lock(&trans_pcie->mutex); |
14cfca71 | 1504 | iwl_trans_pcie_rf_kill(trans, hw_rfkill); |
fa9f3281 | 1505 | mutex_unlock(&trans_pcie->mutex); |
f946b529 | 1506 | if (hw_rfkill) { |
eb7ff77e AN |
1507 | set_bit(STATUS_RFKILL, &trans->status); |
1508 | if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, | |
1509 | &trans->status)) | |
f946b529 EG |
1510 | IWL_DEBUG_RF_KILL(trans, |
1511 | "Rfkill while SYNC HCMD in flight\n"); | |
1512 | wake_up(&trans_pcie->wait_command_queue); | |
1513 | } else { | |
eb7ff77e | 1514 | clear_bit(STATUS_RFKILL, &trans->status); |
f946b529 | 1515 | } |
ab697a9f EG |
1516 | |
1517 | handled |= CSR_INT_BIT_RF_KILL; | |
1518 | } | |
1519 | ||
1520 | /* Chip got too hot and stopped itself */ | |
1521 | if (inta & CSR_INT_BIT_CT_KILL) { | |
0c325769 | 1522 | IWL_ERR(trans, "Microcode CT kill error detected.\n"); |
1f7b6172 | 1523 | isr_stats->ctkill++; |
ab697a9f EG |
1524 | handled |= CSR_INT_BIT_CT_KILL; |
1525 | } | |
1526 | ||
1527 | /* Error detected by uCode */ | |
1528 | if (inta & CSR_INT_BIT_SW_ERR) { | |
0c325769 | 1529 | IWL_ERR(trans, "Microcode SW error detected. " |
ab697a9f | 1530 | "Restarting 0x%X.\n", inta); |
1f7b6172 | 1531 | isr_stats->sw++; |
990aa6d7 | 1532 | iwl_pcie_irq_handle_error(trans); |
ab697a9f EG |
1533 | handled |= CSR_INT_BIT_SW_ERR; |
1534 | } | |
1535 | ||
1536 | /* uCode wakes up after power-down sleep */ | |
1537 | if (inta & CSR_INT_BIT_WAKEUP) { | |
0c325769 | 1538 | IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); |
5d63f926 | 1539 | iwl_pcie_rxq_check_wrptr(trans); |
ea68f460 | 1540 | iwl_pcie_txq_check_wrptrs(trans); |
ab697a9f | 1541 | |
1f7b6172 | 1542 | isr_stats->wakeup++; |
ab697a9f EG |
1543 | |
1544 | handled |= CSR_INT_BIT_WAKEUP; | |
1545 | } | |
1546 | ||
1547 | /* All uCode command responses, including Tx command responses, | |
1548 | * Rx "responses" (frame-received notification), and other | |
1549 | * notifications from uCode come through here. */ |
1550 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | | |
20d3b647 | 1551 | CSR_INT_BIT_RX_PERIODIC)) { |
0c325769 | 1552 | IWL_DEBUG_ISR(trans, "Rx interrupt\n"); |
ab697a9f EG |
1553 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { |
1554 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | |
1042db2a | 1555 | iwl_write32(trans, CSR_FH_INT_STATUS, |
ab697a9f EG |
1556 | CSR_FH_INT_RX_MASK); |
1557 | } | |
1558 | if (inta & CSR_INT_BIT_RX_PERIODIC) { | |
1559 | handled |= CSR_INT_BIT_RX_PERIODIC; | |
1042db2a | 1560 | iwl_write32(trans, |
0c325769 | 1561 | CSR_INT, CSR_INT_BIT_RX_PERIODIC); |
ab697a9f EG |
1562 | } |
1563 | /* Sending an RX interrupt requires many steps to be done in |
1564 | * the device: |
1565 | * 1- write interrupt to current index in ICT table. |
1566 | * 2- dma RX frame. |
1567 | * 3- update RX shared data to indicate last write index. |
1568 | * 4- send interrupt. |
1569 | * This could lead to an RX race: the driver could receive the RX |
1570 | * interrupt before the shared-data changes reflect it; the |
1571 | * periodic interrupt will detect any dangling Rx activity. |
1572 | */ |
1573 | ||
1574 | /* Disable periodic interrupt; we use it as just a one-shot. */ | |
1042db2a | 1575 | iwl_write8(trans, CSR_INT_PERIODIC_REG, |
ab697a9f | 1576 | CSR_INT_PERIODIC_DIS); |
6379103e | 1577 | |
ab697a9f EG |
1578 | /* |
1579 | * Enable the periodic interrupt in 8 msec only if we received |
1580 | * a real RX interrupt (instead of just the periodic int), to catch |
1581 | * any dangling Rx interrupt. If it was just the periodic | |
1582 | * interrupt, there was no dangling Rx activity, and no need | |
1583 | * to extend the periodic interrupt; one-shot is enough. | |
1584 | */ | |
1585 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) | |
1042db2a | 1586 | iwl_write8(trans, CSR_INT_PERIODIC_REG, |
20d3b647 | 1587 | CSR_INT_PERIODIC_ENA); |
ab697a9f | 1588 | |
1f7b6172 | 1589 | isr_stats->rx++; |
f14d6b39 JB |
1590 | |
1591 | local_bh_disable(); | |
1592 | iwl_pcie_rx_handle(trans); | |
1593 | local_bh_enable(); | |
ab697a9f EG |
1594 | } |
1595 | ||
1596 | /* This "Tx" DMA channel is used only for loading uCode */ | |
1597 | if (inta & CSR_INT_BIT_FH_TX) { | |
1042db2a | 1598 | iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); |
0c325769 | 1599 | IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); |
1f7b6172 | 1600 | isr_stats->tx++; |
ab697a9f EG |
1601 | handled |= CSR_INT_BIT_FH_TX; |
1602 | /* Wake up uCode load routine, now that load is complete */ | |
13df1aab JB |
1603 | trans_pcie->ucode_write_complete = true; |
1604 | wake_up(&trans_pcie->ucode_write_waitq); | |
ab697a9f EG |
1605 | } |
1606 | ||
1607 | if (inta & ~handled) { | |
0c325769 | 1608 | IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); |
1f7b6172 | 1609 | isr_stats->unhandled++; |
ab697a9f EG |
1610 | } |
1611 | ||
0c325769 EG |
1612 | if (inta & ~(trans_pcie->inta_mask)) { |
1613 | IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", | |
1614 | inta & ~trans_pcie->inta_mask); | |
ab697a9f EG |
1615 | } |
1616 | ||
a6bd005f EG |
1617 | /* We are loading the firmware; enable only the FH_TX interrupt */ |
1618 | if (handled & CSR_INT_BIT_FH_TX) | |
1619 | iwl_enable_fw_load_int(trans); | |
1620 | /* Re-enable all interrupts only if they were disabled by the irq */ |
1621 | else if (test_bit(STATUS_INT_ENABLED, &trans->status)) | |
0c325769 | 1622 | iwl_enable_interrupts(trans); |
ab697a9f | 1623 | /* Re-enable RF_KILL if it occurred */ |
8722c899 SG |
1624 | else if (handled & CSR_INT_BIT_RF_KILL) |
1625 | iwl_enable_rfkill_int(trans); | |
2bfb5092 JB |
1626 | |
1627 | out: | |
1628 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
1629 | return IRQ_HANDLED; | |
ab697a9f EG |
1630 | } |
1631 | ||
1a361cd8 EG |
1632 | /****************************************************************************** |
1633 | * | |
1634 | * ICT functions | |
1635 | * | |
1636 | ******************************************************************************/ | |
10667136 | 1637 | |
1a361cd8 | 1638 | /* Free the ICT DRAM table */ |
990aa6d7 | 1639 | void iwl_pcie_free_ict(struct iwl_trans *trans) |
1a361cd8 | 1640 | { |
20d3b647 | 1641 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
0c325769 | 1642 | |
10667136 | 1643 | if (trans_pcie->ict_tbl) { |
1042db2a | 1644 | dma_free_coherent(trans->dev, ICT_SIZE, |
10667136 | 1645 | trans_pcie->ict_tbl, |
0c325769 | 1646 | trans_pcie->ict_tbl_dma); |
10667136 JB |
1647 | trans_pcie->ict_tbl = NULL; |
1648 | trans_pcie->ict_tbl_dma = 0; | |
1a361cd8 EG |
1649 | } |
1650 | } | |
1651 | ||
10667136 JB |
1652 | /* |
1653 | * Allocate the DRAM shared table; it is an aligned memory |
1654 | * block of ICT_SIZE. |
1a361cd8 EG |
1655 | * Also reset all data related to the ICT table interrupt. |
1656 | */ | |
990aa6d7 | 1657 | int iwl_pcie_alloc_ict(struct iwl_trans *trans) |
1a361cd8 | 1658 | { |
20d3b647 | 1659 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 1660 | |
10667136 | 1661 | trans_pcie->ict_tbl = |
eef31718 | 1662 | dma_zalloc_coherent(trans->dev, ICT_SIZE, |
10667136 JB |
1663 | &trans_pcie->ict_tbl_dma, |
1664 | GFP_KERNEL); | |
1665 | if (!trans_pcie->ict_tbl) | |
1a361cd8 EG |
1666 | return -ENOMEM; |
1667 | ||
10667136 JB |
1668 | /* just an API sanity check ... it is guaranteed to be aligned */ |
1669 | if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { | |
990aa6d7 | 1670 | iwl_pcie_free_ict(trans); |
10667136 JB |
1671 | return -EINVAL; |
1672 | } | |
1a361cd8 | 1673 | |
1a361cd8 EG |
1674 | return 0; |
1675 | } | |
1676 | ||
1677 | /* Device is going up; inform it that it should use the ICT interrupt |
1678 | * table, and tell the driver to start using ICT interrupts. |
1679 | */ | |
990aa6d7 | 1680 | void iwl_pcie_reset_ict(struct iwl_trans *trans) |
1a361cd8 | 1681 | { |
20d3b647 | 1682 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 1683 | u32 val; |
1a361cd8 | 1684 | |
10667136 | 1685 | if (!trans_pcie->ict_tbl) |
ed6a3803 | 1686 | return; |
1a361cd8 | 1687 | |
7b70bd63 | 1688 | spin_lock(&trans_pcie->irq_lock); |
0c325769 | 1689 | iwl_disable_interrupts(trans); |
1a361cd8 | 1690 | |
10667136 | 1691 | memset(trans_pcie->ict_tbl, 0, ICT_SIZE); |
1a361cd8 | 1692 | |
10667136 | 1693 | val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; |
1a361cd8 | 1694 | |
18f5a374 EP |
1695 | val |= CSR_DRAM_INT_TBL_ENABLE | |
1696 | CSR_DRAM_INIT_TBL_WRAP_CHECK | | |
1697 | CSR_DRAM_INIT_TBL_WRITE_POINTER; | |
1a361cd8 | 1698 | |
10667136 | 1699 | IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); |
1a361cd8 | 1700 | |
1042db2a | 1701 | iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); |
0c325769 EG |
1702 | trans_pcie->use_ict = true; |
1703 | trans_pcie->ict_index = 0; | |
1042db2a | 1704 | iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); |
0c325769 | 1705 | iwl_enable_interrupts(trans); |
7b70bd63 | 1706 | spin_unlock(&trans_pcie->irq_lock); |
1a361cd8 EG |
1707 | } |
1708 | ||
1709 | /* Device is going down; disable ICT interrupt usage */ |
990aa6d7 | 1710 | void iwl_pcie_disable_ict(struct iwl_trans *trans) |
1a361cd8 | 1711 | { |
20d3b647 | 1712 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 1713 | |
7b70bd63 | 1714 | spin_lock(&trans_pcie->irq_lock); |
0c325769 | 1715 | trans_pcie->use_ict = false; |
7b70bd63 | 1716 | spin_unlock(&trans_pcie->irq_lock); |
1a361cd8 EG |
1717 | } |
1718 | ||
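/* Editor's note: the four helpers above suggest the following ICT
 * lifecycle. The ordering is an assumption about the surrounding
 * transport code, shown here only for orientation:
 *
 *	iwl_pcie_alloc_ict()   - once, when the transport is created
 *	iwl_pcie_reset_ict()   - on device start: zero the table, point
 *	                         CSR_DRAM_INT_TBL_REG at it, set use_ict
 *	iwl_pcie_disable_ict() - on device stop: fall back to the legacy
 *	                         INTA path
 *	iwl_pcie_free_ict()    - once, when the transport is destroyed
 */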
85bf9da1 EG |
1719 | irqreturn_t iwl_pcie_isr(int irq, void *data) |
1720 | { | |
1721 | struct iwl_trans *trans = data; | |
1722 | ||
1723 | if (!trans) | |
1724 | return IRQ_NONE; | |
1725 | ||
1726 | /* Disable (but don't clear!) interrupts here to avoid | |
1727 | * back-to-back ISRs and sporadic interrupts from our NIC. | |
1728 | * If we have something to service, the irq thread will re-enable ints. |
1729 | * If we *don't* have something, we'll re-enable before leaving here. | |
1730 | */ | |
1731 | iwl_write32(trans, CSR_INT_MASK, 0x00000000); | |
1732 | ||
a0f337cc | 1733 | return IRQ_WAKE_THREAD; |
85bf9da1 | 1734 | } |
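/* Editor's sketch (not part of this file): how a hard-IRQ/thread pair
 * like iwl_pcie_isr()/iwl_pcie_irq_handler() is typically wired up with
 * request_threaded_irq(). The real registration lives elsewhere in the
 * driver; ex_register_irq, its pdev parameter and the IRQF_SHARED flag
 * here are illustrative assumptions. Assumes <linux/interrupt.h> and
 * <linux/pci.h>.
 */
static int ex_register_irq(struct pci_dev *pdev, struct iwl_trans *trans)
{
	/* iwl_pcie_isr() masks interrupts and returns IRQ_WAKE_THREAD;
	 * iwl_pcie_irq_handler() then services them in thread context
	 * and re-enables them when done.
	 */
	return request_threaded_irq(pdev->irq, iwl_pcie_isr,
				    iwl_pcie_irq_handler, IRQF_SHARED,
				    "iwlwifi", trans);
}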