wifi: iwlwifi: pcie: point invalid TFDs to invalid data
[linux-2.6-block.git] drivers/net/wireless/intel/iwlwifi/queue/tx.c
1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2020-2023 Intel Corporation
4 */
5#include <net/tso.h>
6#include <linux/tcp.h>
7
8#include "iwl-debug.h"
9#include "iwl-io.h"
10#include "fw/api/commands.h"
11#include "fw/api/tx.h"
12#include "fw/api/datapath.h"
13#include "fw/api/debug.h"
14#include "queue/tx.h"
15#include "iwl-fh.h"
16#include "iwl-scd.h"
17#include <linux/dmapool.h>
18
19/*
20 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
21 */
22static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
23 struct iwl_txq *txq, u16 byte_cnt,
24 int num_tbs)
25{
26 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
27 u8 filled_tfd_size, num_fetch_chunks;
28 u16 len = byte_cnt;
29 __le16 bc_ent;
30
31 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
32 return;
33
34 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
35 num_tbs * sizeof(struct iwl_tfh_tb);
36 /*
37 * filled_tfd_size contains the number of filled bytes in the TFD.
38 * Dividing it by 64 gives the number of chunks to fetch
39 * to SRAM: 0 for one chunk, 1 for two, and so on.
40 * If, for example, the TFD contains only 3 TBs, then 32 bytes
41 * of the TFD are used, and only one chunk of 64 bytes should
42 * be fetched.
43 */
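 /* Editorial illustration of the calculation below, using the 3-TB
  * example from the comment above (TB sizes inferred from that
  * example, so treat the exact byte counts as approximate):
  *   3 TBs  -> filled_tfd_size = 32   -> DIV_ROUND_UP(32, 64) - 1 = 0,
  *             i.e. one 64-byte fetch chunk
  *   25 TBs -> filled_tfd_size ~= 252 -> DIV_ROUND_UP(252, 64) - 1 = 3,
  *             i.e. four 64-byte fetch chunks
  */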
44 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
45
46 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
47 struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
48
49 /* Starting from AX210, the HW expects bytes */
50 WARN_ON(trans->txqs.bc_table_dword);
51 WARN_ON(len > 0x3FFF);
52 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
53 scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
54 } else {
55 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
56
57 /* Before AX210, the HW expects DW */
58 WARN_ON(!trans->txqs.bc_table_dword);
59 len = DIV_ROUND_UP(len, 4);
60 WARN_ON(len > 0xFFF);
61 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
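 /* Editorial illustration: for byte_cnt = 1000 and a single fetch
  * chunk (num_fetch_chunks = 0), len = DIV_ROUND_UP(1000, 4) = 250
  * dwords and bc_ent = cpu_to_le16(250 | (0 << 12)); the low 12 bits
  * carry the length in dwords, bits 12+ the fetch-chunk count.
  */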
62 scd_bc_tbl->tfd_offset[idx] = bc_ent;
63 }
64}
65
66/*
67 * iwl_txq_inc_wr_ptr - Send new write index to hardware
68 */
69void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
70{
71 lockdep_assert_held(&txq->lock);
72
73 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
74
75 /*
76 * if not in power-save mode, uCode will never sleep when we're
77 * trying to tx (during RFKILL, we're not trying to tx).
78 */
79 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
80}
81
82static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
83 struct iwl_tfh_tfd *tfd)
84{
85 return le16_to_cpu(tfd->num_tbs) & 0x1f;
86}
87
88int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
89 dma_addr_t addr, u16 len)
90{
91 int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
92 struct iwl_tfh_tb *tb;
93
94 /* Only WARN here so we know about the issue, but we mess up our
95 * unmap path because not every place currently checks for errors
96 * returned from this function - it can only return an error if
97 * there's no more space, and so when we know there is enough we
98 * don't always check ...
99 */
100 WARN(iwl_txq_crosses_4g_boundary(addr, len),
101 "possible DMA problem with iova:0x%llx, len:%d\n",
102 (unsigned long long)addr, len);
103
104 if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
105 return -EINVAL;
106 tb = &tfd->tbs[idx];
107
108 /* Each TFD can point to a maximum of max_tbs Tx buffers */
109 if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
110 IWL_ERR(trans, "Error can not send more than %d chunks\n",
111 trans->txqs.tfd.max_tbs);
112 return -EINVAL;
113 }
114
115 put_unaligned_le64(addr, &tb->addr);
116 tb->tb_len = cpu_to_le16(len);
117
118 tfd->num_tbs = cpu_to_le16(idx + 1);
119
120 return idx;
121}
122
123static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
124 struct iwl_tfh_tfd *tfd)
125{
126 tfd->num_tbs = 0;
127
128 iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
129 trans->invalid_tx_cmd.size);
130}
131
132void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
133 struct iwl_tfh_tfd *tfd)
134{
135 int i, num_tbs;
136
137 /* Sanity check on number of chunks */
138 num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
139
140 if (num_tbs > trans->txqs.tfd.max_tbs) {
141 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
142 return;
143 }
144
145 /* first TB is never freed - it's the bidirectional DMA data */
146 for (i = 1; i < num_tbs; i++) {
147 if (meta->tbs & BIT(i))
148 dma_unmap_page(trans->dev,
149 le64_to_cpu(tfd->tbs[i].addr),
150 le16_to_cpu(tfd->tbs[i].tb_len),
151 DMA_TO_DEVICE);
152 else
153 dma_unmap_single(trans->dev,
154 le64_to_cpu(tfd->tbs[i].addr),
155 le16_to_cpu(tfd->tbs[i].tb_len),
156 DMA_TO_DEVICE);
157 }
158
159 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
160}
161
162void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
163{
164 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
165 * idx is bounded by n_window
166 */
167 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
168 struct sk_buff *skb;
169
170 lockdep_assert_held(&txq->lock);
171
172 if (!txq->entries)
173 return;
174
175 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
176 iwl_txq_get_tfd(trans, txq, idx));
177
178 skb = txq->entries[idx].skb;
179
180 /* Can be called from irqs-disabled context
181 * If skb is not NULL, it means that the whole queue is being
182 * freed and that the queue is not empty - free the skb
183 */
184 if (skb) {
185 iwl_op_mode_free_skb(trans->op_mode, skb);
186 txq->entries[idx].skb = NULL;
187 }
188}
189
190static struct page *get_workaround_page(struct iwl_trans *trans,
191 struct sk_buff *skb)
192{
193 struct page **page_ptr;
194 struct page *ret;
195
196 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
197
198 ret = alloc_page(GFP_ATOMIC);
199 if (!ret)
200 return NULL;
201
202 /* set the chaining pointer to the previous page if there */
203 *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
204 *page_ptr = ret;
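 /* Editorial note: the last sizeof(void *) bytes of each workaround
  * page hold a pointer to the previously attached page, so all pages
  * hung off skb->cb form a singly linked list; iwl_txq_free_tso_page()
  * later walks that list and frees every page on it.
  */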
205
206 return ret;
207}
208
209/*
210 * Add a TB and if needed apply the FH HW bug workaround;
211 * meta != NULL indicates that it's a page mapping and we
212 * need to dma_unmap_page() and set the meta->tbs bit in
213 * this case.
214 */
215static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
216 struct sk_buff *skb,
217 struct iwl_tfh_tfd *tfd,
218 dma_addr_t phys, void *virt,
219 u16 len, struct iwl_cmd_meta *meta)
220{
221 dma_addr_t oldphys = phys;
222 struct page *page;
223 int ret;
224
225 if (unlikely(dma_mapping_error(trans->dev, phys)))
226 return -ENOMEM;
227
228 if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
229 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
230
231 if (ret < 0)
232 goto unmap;
233
234 if (meta)
235 meta->tbs |= BIT(ret);
236
237 ret = 0;
238 goto trace;
239 }
240
241 /*
242 * Work around a hardware bug. If (as expressed in the
243 * condition above) the TB ends on a 32-bit boundary,
244 * then the next TB may be accessed with the wrong
245 * address.
246 * To work around it, copy the data elsewhere and make
247 * a new mapping for it so the device will not fail.
248 */
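 /* Editorial illustration: a mapping at, say, 0xfffff000 with
  * len 0x2000 spans the 2^32 mark, so iwl_txq_crosses_4g_boundary()
  * reports it and the data is bounced through a freshly mapped
  * workaround page below instead of being handed to the device
  * directly.
  */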
249
250 if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
251 ret = -ENOBUFS;
252 goto unmap;
253 }
254
255 page = get_workaround_page(trans, skb);
256 if (!page) {
257 ret = -ENOMEM;
258 goto unmap;
259 }
260
261 memcpy(page_address(page), virt, len);
262
263 phys = dma_map_single(trans->dev, page_address(page), len,
264 DMA_TO_DEVICE);
265 if (unlikely(dma_mapping_error(trans->dev, phys)))
266 return -ENOMEM;
267 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
268 if (ret < 0) {
269 /* unmap the new allocation as single */
270 oldphys = phys;
271 meta = NULL;
272 goto unmap;
273 }
274 IWL_WARN(trans,
275 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
276 len, (unsigned long long)oldphys, (unsigned long long)phys);
277
278 ret = 0;
279unmap:
280 if (meta)
281 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
282 else
283 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
284trace:
285 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
286
287 return ret;
288}
289
290#ifdef CONFIG_INET
291struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
292 struct sk_buff *skb)
293{
294 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
295 struct page **page_ptr;
296
297 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
298
299 if (WARN_ON(*page_ptr))
300 return NULL;
301
302 if (!p->page)
303 goto alloc;
304
305 /*
306 * Check if there's enough room on this page
307 *
308 * Note that we put a page chaining pointer *last* in the
309 * page - we need it somewhere, and if it's there then we
310 * avoid DMA mapping the last bits of the page which may
311 * trigger the 32-bit boundary hardware bug.
312 *
313 * (see also get_workaround_page() above)
314 */
315 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
316 sizeof(void *))
317 goto out;
318
319 /* We don't have enough room on this page, get a new one. */
320 __free_page(p->page);
321
322alloc:
323 p->page = alloc_page(GFP_ATOMIC);
324 if (!p->page)
325 return NULL;
326 p->pos = page_address(p->page);
327 /* set the chaining pointer to NULL */
328 *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
329out:
330 *page_ptr = p->page;
331 get_page(p->page);
332 return p;
333}
334#endif
335
336static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
337 struct sk_buff *skb,
338 struct iwl_tfh_tfd *tfd, int start_len,
339 u8 hdr_len,
340 struct iwl_device_tx_cmd *dev_cmd)
341{
342#ifdef CONFIG_INET
343 struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
344 struct ieee80211_hdr *hdr = (void *)skb->data;
345 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
346 unsigned int mss = skb_shinfo(skb)->gso_size;
347 u16 length, amsdu_pad;
348 u8 *start_hdr;
349 struct iwl_tso_hdr_page *hdr_page;
350 struct tso_t tso;
351
352 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
353 &dev_cmd->hdr, start_len, 0);
354
355 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
356 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
357 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
358 amsdu_pad = 0;
359
360 /* total amount of header we may need for this A-MSDU */
361 hdr_room = DIV_ROUND_UP(total_len, mss) *
362 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
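 /* Editorial illustration: with mss = 1400 and total_len = 4000 the
  * A-MSDU is split into DIV_ROUND_UP(4000, 1400) = 3 subframes, and
  * each subframe needs up to 3 bytes of padding plus its own
  * DA/SA/length header and a copy of the SNAP/IP/TCP headers - hence
  * the per-subframe (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)).
  */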
363
364 /* Our device supports 9 segments at most, so it will fit in one page */
365 hdr_page = get_page_hdr(trans, hdr_room, skb);
366 if (!hdr_page)
367 return -ENOMEM;
368
369 start_hdr = hdr_page->pos;
370
371 /*
372 * Pull the ieee80211 header to be able to use TSO core,
373 * we will restore it for the tx_status flow.
374 */
375 skb_pull(skb, hdr_len);
376
377 /*
378 * Remove the length of all the headers that we don't actually
379 * have in the MPDU by themselves, but that we duplicate into
380 * all the different MSDUs inside the A-MSDU.
381 */
382 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
383
384 tso_start(skb, &tso);
385
386 while (total_len) {
387 /* this is the data left for this subframe */
388 unsigned int data_left = min_t(unsigned int, mss, total_len);
389 unsigned int tb_len;
390 dma_addr_t tb_phys;
391 u8 *subf_hdrs_start = hdr_page->pos;
392
393 total_len -= data_left;
394
395 memset(hdr_page->pos, 0, amsdu_pad);
396 hdr_page->pos += amsdu_pad;
397 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
398 data_left)) & 0x3;
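 /* Editorial illustration: the padding keeps each following subframe
  * 4-byte aligned; e.g. if ethhdr + SNAP/IP/TCP + data_left adds up
  * to 162 bytes, (4 - 162) & 0x3 = 2 bytes of padding are inserted
  * before the next subframe header.
  */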
399 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
400 hdr_page->pos += ETH_ALEN;
401 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
402 hdr_page->pos += ETH_ALEN;
403
404 length = snap_ip_tcp_hdrlen + data_left;
405 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
406 hdr_page->pos += sizeof(length);
407
408 /*
409 * This will copy the SNAP as well which will be considered
410 * as MAC header.
411 */
412 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
413
414 hdr_page->pos += snap_ip_tcp_hdrlen;
415
416 tb_len = hdr_page->pos - start_hdr;
417 tb_phys = dma_map_single(trans->dev, start_hdr,
418 tb_len, DMA_TO_DEVICE);
419 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
420 goto out_err;
421 /*
422 * No need for _with_wa, this is from the TSO page and
423 * we leave some space at the end of it so can't hit
424 * the buggy scenario.
425 */
426 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
427 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
428 tb_phys, tb_len);
429 /* add this subframe's headers' length to the tx_cmd */
430 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
431
432 /* prepare the start_hdr for the next subframe */
433 start_hdr = hdr_page->pos;
434
435 /* put the payload */
436 while (data_left) {
437 int ret;
438
439 tb_len = min_t(unsigned int, tso.size, data_left);
440 tb_phys = dma_map_single(trans->dev, tso.data,
441 tb_len, DMA_TO_DEVICE);
442 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
443 tb_phys, tso.data,
444 tb_len, NULL);
445 if (ret)
446 goto out_err;
447
448 data_left -= tb_len;
449 tso_build_data(skb, &tso, tb_len);
450 }
451 }
452
453 /* re-add the WiFi header */
454 skb_push(skb, hdr_len);
455
456 return 0;
457
458out_err:
459#endif
460 return -EINVAL;
461}
462
463static struct
464iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
465 struct iwl_txq *txq,
466 struct iwl_device_tx_cmd *dev_cmd,
467 struct sk_buff *skb,
468 struct iwl_cmd_meta *out_meta,
469 int hdr_len,
470 int tx_cmd_len)
471{
472 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
473 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
474 dma_addr_t tb_phys;
475 int len;
476 void *tb1_addr;
477
478 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
479
480 /*
481 * No need for _with_wa, the first TB allocation is aligned up
482 * to a 64-byte boundary and thus can't be at the end or cross
483 * a page boundary (much less a 2^32 boundary).
484 */
485 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
486
487 /*
488 * The second TB (tb1) points to the remainder of the TX command
489 * and the 802.11 header - dword aligned size
490 * (This calculation modifies the TX command, so do it before the
491 * setup of the first TB)
492 */
493 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
494 IWL_FIRST_TB_SIZE;
495
496 /* do not align A-MSDU to dword as the subframe header aligns it */
497
498 /* map the data for TB1 */
499 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
500 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
501 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
502 goto out_err;
503 /*
504 * No need for _with_wa(), we ensure (via alignment) that the data
505 * here can never cross or end at a page boundary.
506 */
507 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
508
509 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
510 hdr_len, dev_cmd))
511 goto out_err;
512
513 /* building the A-MSDU might have changed this data, memcpy it now */
514 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
515 return tfd;
516
517out_err:
518 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
519 return NULL;
520}
521
522static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
523 struct sk_buff *skb,
524 struct iwl_tfh_tfd *tfd,
525 struct iwl_cmd_meta *out_meta)
526{
527 int i;
528
529 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
530 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
531 dma_addr_t tb_phys;
532 unsigned int fragsz = skb_frag_size(frag);
533 int ret;
534
535 if (!fragsz)
536 continue;
537
538 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
539 fragsz, DMA_TO_DEVICE);
540 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
541 skb_frag_address(frag),
542 fragsz, out_meta);
543 if (ret)
544 return ret;
545 }
546
547 return 0;
548}
549
550static struct
551iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
552 struct iwl_txq *txq,
553 struct iwl_device_tx_cmd *dev_cmd,
554 struct sk_buff *skb,
555 struct iwl_cmd_meta *out_meta,
556 int hdr_len,
557 int tx_cmd_len,
558 bool pad)
559{
560 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
561 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
562 dma_addr_t tb_phys;
563 int len, tb1_len, tb2_len;
564 void *tb1_addr;
565 struct sk_buff *frag;
566
567 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
568
569 /* The first TB points to bi-directional DMA data */
570 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
571
572 /*
573 * No need for _with_wa, the first TB allocation is aligned up
574 * to a 64-byte boundary and thus can't be at the end or cross
575 * a page boundary (much less a 2^32 boundary).
576 */
577 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
578
579 /*
580 * The second TB (tb1) points to the remainder of the TX command
581 * and the 802.11 header - dword aligned size
582 * (This calculation modifies the TX command, so do it before the
583 * setup of the first TB)
584 */
585 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
586 IWL_FIRST_TB_SIZE;
587
588 if (pad)
589 tb1_len = ALIGN(len, 4);
590 else
591 tb1_len = len;
592
593 /* map the data for TB1 */
594 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
595 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
596 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
597 goto out_err;
598 /*
599 * No need for _with_wa(), we ensure (via alignment) that the data
600 * here can never cross or end at a page boundary.
601 */
602 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
603 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
604 IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
605
606 /* set up TFD's third entry to point to remainder of skb's head */
607 tb2_len = skb_headlen(skb) - hdr_len;
608
609 if (tb2_len > 0) {
610 int ret;
611
612 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
613 tb2_len, DMA_TO_DEVICE);
614 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
615 skb->data + hdr_len, tb2_len,
616 NULL);
617 if (ret)
618 goto out_err;
619 }
620
621 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
622 goto out_err;
623
624 skb_walk_frags(skb, frag) {
625 int ret;
626
627 tb_phys = dma_map_single(trans->dev, frag->data,
628 skb_headlen(frag), DMA_TO_DEVICE);
629 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
630 frag->data,
631 skb_headlen(frag), NULL);
632 if (ret)
633 goto out_err;
634 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
635 goto out_err;
636 }
637
638 return tfd;
639
640out_err:
641 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
642 return NULL;
643}
644
645static
646struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
647 struct iwl_txq *txq,
648 struct iwl_device_tx_cmd *dev_cmd,
649 struct sk_buff *skb,
650 struct iwl_cmd_meta *out_meta)
651{
652 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
653 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
654 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
655 int len, hdr_len;
656 bool amsdu;
657
658 /* There must be data left over for TB1 or this code must be changed */
659 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
660 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
661 offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
662 IWL_FIRST_TB_SIZE);
663 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
664 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
665 offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
666 IWL_FIRST_TB_SIZE);
667
668 memset(tfd, 0, sizeof(*tfd));
669
670 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
671 len = sizeof(struct iwl_tx_cmd_gen2);
672 else
673 len = sizeof(struct iwl_tx_cmd_gen3);
674
675 amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
676 (*ieee80211_get_qos_ctl(hdr) &
677 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
678
679 hdr_len = ieee80211_hdrlen(hdr->frame_control);
680
681 /*
682 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
683 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
684 * built in the higher layers already.
685 */
686 if (amsdu && skb_shinfo(skb)->gso_size)
687 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
688 out_meta, hdr_len, len);
689 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
690 hdr_len, len, !amsdu);
691}
692
693int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
694{
695 unsigned int max;
696 unsigned int used;
697
698 /*
699 * To avoid ambiguity between empty and completely full queues, there
700 * should always be less than max_tfd_queue_size elements in the queue.
701 * If q->n_window is smaller than max_tfd_queue_size, there is no need
702 * to reserve any queue entries for this purpose.
703 */
704 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
705 max = q->n_window;
706 else
707 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
708
709 /*
710 * max_tfd_queue_size is a power of 2, so the following is equivalent to
711 * modulo by max_tfd_queue_size and is well defined.
712 */
713 used = (q->write_ptr - q->read_ptr) &
714 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
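 /* Editorial illustration: with max_tfd_queue_size = 256,
  * write_ptr = 5 and read_ptr = 250, the subtraction wraps and
  * (5 - 250) & 255 = 11 entries are counted as used.
  */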
715
716 if (WARN_ON(used > max))
717 return 0;
718
719 return max - used;
720}
721
722int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
723 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
724{
725 struct iwl_cmd_meta *out_meta;
726 struct iwl_txq *txq = trans->txqs.txq[txq_id];
727 u16 cmd_len;
728 int idx;
729 void *tfd;
730
731 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
732 "queue %d out of range", txq_id))
733 return -EINVAL;
734
735 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
736 "TX on unused queue %d\n", txq_id))
737 return -EINVAL;
738
739 if (skb_is_nonlinear(skb) &&
740 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
741 __skb_linearize(skb))
742 return -ENOMEM;
743
744 spin_lock(&txq->lock);
745
746 if (iwl_txq_space(trans, txq) < txq->high_mark) {
747 iwl_txq_stop(trans, txq);
748
749 /* don't put the packet on the ring, if there is no room */
750 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
751 struct iwl_device_tx_cmd **dev_cmd_ptr;
752
753 dev_cmd_ptr = (void *)((u8 *)skb->cb +
754 trans->txqs.dev_cmd_offs);
755
756 *dev_cmd_ptr = dev_cmd;
757 __skb_queue_tail(&txq->overflow_q, skb);
758 spin_unlock(&txq->lock);
759 return 0;
760 }
761 }
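 /* Editorial note: frames parked on overflow_q above are re-submitted
  * from iwl_txq_reclaim() once enough TFDs have been freed and the
  * queue space climbs back above low_mark.
  */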
762
763 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
764
765 /* Set up driver data for this TFD */
766 txq->entries[idx].skb = skb;
767 txq->entries[idx].cmd = dev_cmd;
768
769 dev_cmd->hdr.sequence =
770 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
771 INDEX_TO_SEQ(idx)));
772
773 /* Set up first empty entry in queue's array of Tx/cmd buffers */
774 out_meta = &txq->entries[idx].meta;
775 out_meta->flags = 0;
776
777 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
778 if (!tfd) {
779 spin_unlock(&txq->lock);
780 return -1;
781 }
782
783 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
784 struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
785 (void *)dev_cmd->payload;
786
787 cmd_len = le16_to_cpu(tx_cmd_gen3->len);
788 } else {
789 struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
790 (void *)dev_cmd->payload;
791
792 cmd_len = le16_to_cpu(tx_cmd_gen2->len);
793 }
794
795 /* Set up entry for this TFD in Tx byte-count array */
796 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
797 iwl_txq_gen2_get_num_tbs(trans, tfd));
798
799 /* start timer if queue currently empty */
800 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
801 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
802
803 /* Tell device the write index *just past* this latest filled TFD */
804 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
805 iwl_txq_inc_wr_ptr(trans, txq);
806 /*
807 * At this point the frame is "transmitted" successfully
808 * and we will get a TX status notification eventually.
809 */
810 spin_unlock(&txq->lock);
811 return 0;
812}
813
814/*************** HOST COMMAND QUEUE FUNCTIONS *****/
815
816/*
817 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skbs
818 */
819void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
820{
821 struct iwl_txq *txq = trans->txqs.txq[txq_id];
822
823 spin_lock_bh(&txq->lock);
824 while (txq->write_ptr != txq->read_ptr) {
825 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
826 txq_id, txq->read_ptr);
827
828 if (txq_id != trans->txqs.cmd.q_id) {
829 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
830 struct sk_buff *skb = txq->entries[idx].skb;
831
832 if (!WARN_ON_ONCE(!skb))
833 iwl_txq_free_tso_page(trans, skb);
834 }
835 iwl_txq_gen2_free_tfd(trans, txq);
836 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
837 }
838
839 while (!skb_queue_empty(&txq->overflow_q)) {
840 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
841
842 iwl_op_mode_free_skb(trans->op_mode, skb);
843 }
844
845 spin_unlock_bh(&txq->lock);
846
847 /* just in case - this queue may have been stopped */
848 iwl_wake_queue(trans, txq);
849}
850
851static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
852 struct iwl_txq *txq)
853{
854 struct device *dev = trans->dev;
855
856 /* De-alloc circular buffer of TFDs */
857 if (txq->tfds) {
858 dma_free_coherent(dev,
859 trans->txqs.tfd.size * txq->n_window,
860 txq->tfds, txq->dma_addr);
861 dma_free_coherent(dev,
862 sizeof(*txq->first_tb_bufs) * txq->n_window,
863 txq->first_tb_bufs, txq->first_tb_dma);
864 }
865
866 kfree(txq->entries);
867 if (txq->bc_tbl.addr)
868 dma_pool_free(trans->txqs.bc_pool,
869 txq->bc_tbl.addr, txq->bc_tbl.dma);
870 kfree(txq);
871}
872
873/*
874 * iwl_txq_gen2_free - Deallocate DMA queue.
875 * @txq_id: Index of the transmit queue to deallocate.
876 *
877 * Empty queue by removing and destroying all BDs.
878 * Free all buffers.
879 * 0-fill, but do not free "txq" descriptor structure.
880 */
881static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
882{
883 struct iwl_txq *txq;
884 int i;
885
886 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
887 "queue %d out of range", txq_id))
888 return;
889
890 txq = trans->txqs.txq[txq_id];
891
892 if (WARN_ON(!txq))
893 return;
894
895 iwl_txq_gen2_unmap(trans, txq_id);
896
897 /* De-alloc array of command/tx buffers */
898 if (txq_id == trans->txqs.cmd.q_id)
899 for (i = 0; i < txq->n_window; i++) {
900 kfree_sensitive(txq->entries[i].cmd);
901 kfree_sensitive(txq->entries[i].free_buf);
902 }
903 del_timer_sync(&txq->stuck_timer);
904
905 iwl_txq_gen2_free_memory(trans, txq);
906
907 trans->txqs.txq[txq_id] = NULL;
908
909 clear_bit(txq_id, trans->txqs.queue_used);
910}
911
912/*
913 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
914 */
915static int iwl_queue_init(struct iwl_txq *q, int slots_num)
916{
917 q->n_window = slots_num;
918
919 /* slots_num must be power-of-two size, otherwise
920 * iwl_txq_get_cmd_index is broken. */
921 if (WARN_ON(!is_power_of_2(slots_num)))
922 return -EINVAL;
923
924 q->low_mark = q->n_window / 4;
925 if (q->low_mark < 4)
926 q->low_mark = 4;
927
928 q->high_mark = q->n_window / 8;
929 if (q->high_mark < 2)
930 q->high_mark = 2;
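 /* Editorial illustration: for a 256-entry window this yields
  * low_mark = 64 and high_mark = 32; the queue is stopped when free
  * space drops below high_mark and woken again once it exceeds
  * low_mark (see iwl_txq_gen2_tx() and iwl_txq_reclaim()).
  */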
931
932 q->write_ptr = 0;
933 q->read_ptr = 0;
934
935 return 0;
936}
937
938int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
939 bool cmd_queue)
940{
941 int ret;
942 u32 tfd_queue_max_size =
943 trans->trans_cfg->base_params->max_tfd_queue_size;
944
945 txq->need_update = false;
946
947 /* max_tfd_queue_size must be power-of-two size, otherwise
948 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
949 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
950 "Max tfd queue size must be a power of two, but is %d",
951 tfd_queue_max_size))
952 return -EINVAL;
953
954 /* Initialize queue's high/low-water marks, and head/tail indexes */
955 ret = iwl_queue_init(txq, slots_num);
956 if (ret)
957 return ret;
958
959 spin_lock_init(&txq->lock);
960
961 if (cmd_queue) {
962 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
963
964 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
965 }
966
967 __skb_queue_head_init(&txq->overflow_q);
968
969 return 0;
970}
971
972void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
973{
974 struct page **page_ptr;
975 struct page *next;
976
977 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
978 next = *page_ptr;
979 *page_ptr = NULL;
980
981 while (next) {
982 struct page *tmp = next;
983
984 next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
985 sizeof(void *));
986 __free_page(tmp);
987 }
988}
989
990void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
991{
992 u32 txq_id = txq->id;
993 u32 status;
994 bool active;
995 u8 fifo;
996
997 if (trans->trans_cfg->gen2) {
998 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
999 txq->read_ptr, txq->write_ptr);
1000 /* TODO: access new SCD registers and dump them */
1001 return;
1002 }
1003
1004 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
1005 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
1006 active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
1007
1008 IWL_ERR(trans,
1009 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
1010 txq_id, active ? "" : "in", fifo,
1011 jiffies_to_msecs(txq->wd_timeout),
1012 txq->read_ptr, txq->write_ptr,
1013 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
1014 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1015 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
1016 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1017 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
1018}
1019
1020static void iwl_txq_stuck_timer(struct timer_list *t)
1021{
1022 struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
1023 struct iwl_trans *trans = txq->trans;
1024
1025 spin_lock(&txq->lock);
1026 /* check if triggered erroneously */
1027 if (txq->read_ptr == txq->write_ptr) {
1028 spin_unlock(&txq->lock);
1029 return;
1030 }
1031 spin_unlock(&txq->lock);
1032
1033 iwl_txq_log_scd_error(trans, txq);
1034
1035 iwl_force_nmi(trans);
1036}
1037
1038static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
1039 struct iwl_tfd *tfd)
1040{
1041 tfd->num_tbs = 0;
1042
1043 iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma,
1044 trans->invalid_tx_cmd.size);
1045}
1046
1047int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
1048 bool cmd_queue)
1049{
1050 size_t num_entries = trans->trans_cfg->gen2 ?
1051 slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
1052 size_t tfd_sz;
1053 size_t tb0_buf_sz;
1054 int i;
1055
1056 if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
1057 return -EINVAL;
1058
1059 if (WARN_ON(txq->entries || txq->tfds))
1060 return -EINVAL;
1061
1062 tfd_sz = trans->txqs.tfd.size * num_entries;
1063
1064 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
1065 txq->trans = trans;
1066
1067 txq->n_window = slots_num;
1068
1069 txq->entries = kcalloc(slots_num,
1070 sizeof(struct iwl_pcie_txq_entry),
1071 GFP_KERNEL);
1072
1073 if (!txq->entries)
1074 goto error;
1075
1076 if (cmd_queue)
1077 for (i = 0; i < slots_num; i++) {
1078 txq->entries[i].cmd =
1079 kmalloc(sizeof(struct iwl_device_cmd),
1080 GFP_KERNEL);
1081 if (!txq->entries[i].cmd)
1082 goto error;
1083 }
1084
1085 /* Circular buffer of transmit frame descriptors (TFDs),
1086 * shared with device */
1087 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
1088 &txq->dma_addr, GFP_KERNEL);
1089 if (!txq->tfds)
1090 goto error;
1091
1092 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
1093
1094 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
1095
1096 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
1097 &txq->first_tb_dma,
1098 GFP_KERNEL);
1099 if (!txq->first_tb_bufs)
1100 goto err_free_tfds;
1101
1102 for (i = 0; i < num_entries; i++) {
1103 void *tfd = iwl_txq_get_tfd(trans, txq, i);
1104
1105 if (trans->trans_cfg->gen2)
1106 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
1107 else
1108 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
1109 }
1110
1111 return 0;
1112err_free_tfds:
1113 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
1114 txq->tfds = NULL;
1115error:
1116 if (txq->entries && cmd_queue)
1117 for (i = 0; i < slots_num; i++)
1118 kfree(txq->entries[i].cmd);
1119 kfree(txq->entries);
1120 txq->entries = NULL;
1121
1122 return -ENOMEM;
1123}
1124
1125static struct iwl_txq *
1126iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
1127{
1128 size_t bc_tbl_size, bc_tbl_entries;
1129 struct iwl_txq *txq;
1130 int ret;
1131
1132 WARN_ON(!trans->txqs.bc_tbl_size);
1133
1134 bc_tbl_size = trans->txqs.bc_tbl_size;
1135 bc_tbl_entries = bc_tbl_size / sizeof(u16);
1136
1137 if (WARN_ON(size > bc_tbl_entries))
1138 return ERR_PTR(-EINVAL);
1139
1140 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1141 if (!txq)
1142 return ERR_PTR(-ENOMEM);
1143
1144 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
1145 &txq->bc_tbl.dma);
1146 if (!txq->bc_tbl.addr) {
1147 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1148 kfree(txq);
1149 return ERR_PTR(-ENOMEM);
1150 }
1151
1152 ret = iwl_txq_alloc(trans, txq, size, false);
1153 if (ret) {
1154 IWL_ERR(trans, "Tx queue alloc failed\n");
1155 goto error;
1156 }
1157 ret = iwl_txq_init(trans, txq, size, false);
1158 if (ret) {
1159 IWL_ERR(trans, "Tx queue init failed\n");
1160 goto error;
1161 }
1162
1163 txq->wd_timeout = msecs_to_jiffies(timeout);
1164
1165 return txq;
1166
1167error:
1168 iwl_txq_gen2_free_memory(trans, txq);
1169 return ERR_PTR(ret);
1170}
1171
1172static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
1173 struct iwl_host_cmd *hcmd)
1174{
1175 struct iwl_tx_queue_cfg_rsp *rsp;
1176 int ret, qid;
1177 u32 wr_ptr;
1178
1179 if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
1180 sizeof(*rsp))) {
1181 ret = -EINVAL;
1182 goto error_free_resp;
1183 }
1184
1185 rsp = (void *)hcmd->resp_pkt->data;
1186 qid = le16_to_cpu(rsp->queue_number);
1187 wr_ptr = le16_to_cpu(rsp->write_pointer);
1188
1189 if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
1190 WARN_ONCE(1, "queue index %d unsupported", qid);
1191 ret = -EIO;
1192 goto error_free_resp;
1193 }
1194
1195 if (test_and_set_bit(qid, trans->txqs.queue_used)) {
1196 WARN_ONCE(1, "queue %d already used", qid);
1197 ret = -EIO;
1198 goto error_free_resp;
1199 }
1200
1201 if (WARN_ONCE(trans->txqs.txq[qid],
1202 "queue %d already allocated\n", qid)) {
1203 ret = -EIO;
1204 goto error_free_resp;
1205 }
1206
1207 txq->id = qid;
1208 trans->txqs.txq[qid] = txq;
1209 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
1210
1211 /* Place first TFD at index corresponding to start sequence number */
1212 txq->read_ptr = wr_ptr;
1213 txq->write_ptr = wr_ptr;
1214
1215 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1216
1217 iwl_free_resp(hcmd);
1218 return qid;
1219
1220error_free_resp:
1221 iwl_free_resp(hcmd);
1222 iwl_txq_gen2_free_memory(trans, txq);
1223 return ret;
1224}
1225
1226int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
1227 u8 tid, int size, unsigned int timeout)
1228{
1229 struct iwl_txq *txq;
1230 union {
1231 struct iwl_tx_queue_cfg_cmd old;
1232 struct iwl_scd_queue_cfg_cmd new;
1233 } cmd;
1234 struct iwl_host_cmd hcmd = {
1235 .flags = CMD_WANT_SKB,
1236 };
1237 int ret;
1238
1239 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
1240 trans->hw_rev_step == SILICON_A_STEP)
1241 size = 4096;
1242
1243 txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
1244 if (IS_ERR(txq))
1245 return PTR_ERR(txq);
1246
1247 if (trans->txqs.queue_alloc_cmd_ver == 0) {
1248 memset(&cmd.old, 0, sizeof(cmd.old));
1249 cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
1250 cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1251 cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1252 cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
1253 cmd.old.tid = tid;
1254
1255 if (hweight32(sta_mask) != 1) {
1256 ret = -EINVAL;
1257 goto error;
1258 }
1259 cmd.old.sta_id = ffs(sta_mask) - 1;
1260
1261 hcmd.id = SCD_QUEUE_CFG;
1262 hcmd.len[0] = sizeof(cmd.old);
1263 hcmd.data[0] = &cmd.old;
1264 } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
1265 memset(&cmd.new, 0, sizeof(cmd.new));
1266 cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
1267 cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
1268 cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
1269 cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1270 cmd.new.u.add.flags = cpu_to_le32(flags);
1271 cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
1272 cmd.new.u.add.tid = tid;
1273
1274 hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
1275 hcmd.len[0] = sizeof(cmd.new);
1276 hcmd.data[0] = &cmd.new;
1277 } else {
1278 ret = -EOPNOTSUPP;
1279 goto error;
1280 }
1281
1282 ret = iwl_trans_send_cmd(trans, &hcmd);
1283 if (ret)
1284 goto error;
1285
1286 return iwl_txq_alloc_response(trans, txq, &hcmd);
1287
1288error:
1289 iwl_txq_gen2_free_memory(trans, txq);
1290 return ret;
1291}
1292
1293void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
1294{
1295 if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
1296 "queue %d out of range", queue))
1297 return;
1298
1299 /*
1300 * Upon HW Rfkill - we stop the device, and then stop the queues
1301 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1302 * allow the op_mode to call txq_disable after it already called
1303 * stop_device.
1304 */
1305 if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
1306 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1307 "queue %d not used", queue);
1308 return;
1309 }
1310
1311 iwl_txq_gen2_free(trans, queue);
1312
1313 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1314}
1315
1316void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
1317{
1318 int i;
1319
1320 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
1321
1322 /* Free all TX queues */
1323 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
1324 if (!trans->txqs.txq[i])
1325 continue;
1326
1327 iwl_txq_gen2_free(trans, i);
1328 }
1329}
1330
1331int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
1332{
1333 struct iwl_txq *queue;
1334 int ret;
1335
1336 /* alloc and init the tx queue */
1337 if (!trans->txqs.txq[txq_id]) {
1338 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1339 if (!queue) {
1340 IWL_ERR(trans, "Not enough memory for tx queue\n");
1341 return -ENOMEM;
1342 }
1343 trans->txqs.txq[txq_id] = queue;
1344 ret = iwl_txq_alloc(trans, queue, queue_size, true);
1345 if (ret) {
1346 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1347 goto error;
1348 }
1349 } else {
1350 queue = trans->txqs.txq[txq_id];
1351 }
1352
1353 ret = iwl_txq_init(trans, queue, queue_size,
1354 (txq_id == trans->txqs.cmd.q_id));
1355 if (ret) {
1356 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1357 goto error;
1358 }
1359 trans->txqs.txq[txq_id]->id = txq_id;
1360 set_bit(txq_id, trans->txqs.queue_used);
1361
1362 return 0;
1363
1364error:
1365 iwl_txq_gen2_tx_free(trans);
1366 return ret;
1367}
1368
1369static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
1370 struct iwl_tfd *tfd, u8 idx)
1371{
1372 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
1373 dma_addr_t addr;
1374 dma_addr_t hi_len;
1375
1376 addr = get_unaligned_le32(&tb->lo);
1377
1378 if (sizeof(dma_addr_t) <= sizeof(u32))
1379 return addr;
1380
1381 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
1382
1383 /*
1384 * shift by 16 twice to avoid warnings on 32-bit
1385 * (where this code never runs anyway due to the
1386 * if statement above)
1387 */
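 /* Editorial illustration: a legacy TB carries a 36-bit address;
  * with lo = 0x89abcdef and the low nibble of hi_n_len = 0x3, the
  * reconstructed DMA address is 0x389abcdef.
  */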
1388 return addr | ((hi_len << 16) << 16);
1389}
1390
1391void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
1392 struct iwl_cmd_meta *meta,
1393 struct iwl_txq *txq, int index)
1394{
1395 int i, num_tbs;
1396 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
1397
1398 /* Sanity check on number of chunks */
1399 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
1400
1401 if (num_tbs > trans->txqs.tfd.max_tbs) {
1402 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
1403 /* @todo issue fatal error, this is quite a serious situation */
1404 return;
1405 }
1406
1407 /* first TB is never freed - it's the bidirectional DMA data */
1408
1409 for (i = 1; i < num_tbs; i++) {
1410 if (meta->tbs & BIT(i))
1411 dma_unmap_page(trans->dev,
1412 iwl_txq_gen1_tfd_tb_get_addr(trans,
1413 tfd, i),
1414 iwl_txq_gen1_tfd_tb_get_len(trans,
1415 tfd, i),
1416 DMA_TO_DEVICE);
1417 else
1418 dma_unmap_single(trans->dev,
1419 iwl_txq_gen1_tfd_tb_get_addr(trans,
1420 tfd, i),
1421 iwl_txq_gen1_tfd_tb_get_len(trans,
1422 tfd, i),
1423 DMA_TO_DEVICE);
1424 }
1425
1426 meta->tbs = 0;
1427
1428 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
1429}
1430
1431#define IWL_TX_CRC_SIZE 4
1432#define IWL_TX_DELIMITER_SIZE 4
1433
1434/*
1435 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1436 */
1437void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
1438 struct iwl_txq *txq, u16 byte_cnt,
1439 int num_tbs)
1440{
1441 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
1442 int write_ptr = txq->write_ptr;
1443 int txq_id = txq->id;
1444 u8 sec_ctl = 0;
1445 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1446 __le16 bc_ent;
1447 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
1448 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1449 u8 sta_id = tx_cmd->sta_id;
1450
1451 scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1452
1453 sec_ctl = tx_cmd->sec_ctl;
1454
1455 switch (sec_ctl & TX_CMD_SEC_MSK) {
1456 case TX_CMD_SEC_CCM:
1457 len += IEEE80211_CCMP_MIC_LEN;
1458 break;
1459 case TX_CMD_SEC_TKIP:
1460 len += IEEE80211_TKIP_ICV_LEN;
1461 break;
1462 case TX_CMD_SEC_WEP:
1463 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
1464 break;
1465 }
1466 if (trans->txqs.bc_table_dword)
1467 len = DIV_ROUND_UP(len, 4);
1468
1469 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
1470 return;
1471
1472 bc_ent = cpu_to_le16(len | (sta_id << 12));
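 /* Editorial illustration: for byte_cnt = 1200 on a CCMP-protected
  * frame, len = 1200 + 4 + 4 + 8 = 1216 bytes (304 dwords when
  * bc_table_dword is set), packed into the low 12 bits with sta_id
  * in bits 12-15.
  */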
1473
1474 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1475
1476 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1477 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1478 bc_ent;
1479}
1480
1481void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
1482 struct iwl_txq *txq)
1483{
1484 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1485 int txq_id = txq->id;
1486 int read_ptr = txq->read_ptr;
1487 u8 sta_id = 0;
1488 __le16 bc_ent;
1489 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
1490 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1491
1492 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
1493
1494 if (txq_id != trans->txqs.cmd.q_id)
1495 sta_id = tx_cmd->sta_id;
1496
1497 bc_ent = cpu_to_le16(1 | (sta_id << 12));
1498
1499 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
1500
1501 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
1502 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
1503 bc_ent;
1504}
1505
1506/*
1507 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
1508 * @trans - transport private data
1509 * @txq - tx queue
1511 *
1512 * Does NOT advance any TFD circular buffer read/write indexes
1513 * Does NOT free the TFD itself (which is within circular buffer)
1514 */
1515void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
1516{
1517 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
1518 * idx is bounded by n_window
1519 */
1520 int rd_ptr = txq->read_ptr;
1521 int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
1522 struct sk_buff *skb;
1523
1524 lockdep_assert_held(&txq->lock);
1525
1526 if (!txq->entries)
1527 return;
1528
1529 /* We have only q->n_window txq->entries, but we use
1530 * TFD_QUEUE_SIZE_MAX tfds
1531 */
1532 if (trans->trans_cfg->gen2)
1533 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
1534 iwl_txq_get_tfd(trans, txq, rd_ptr));
1535 else
1536 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
1537 txq, rd_ptr);
1538
1539 /* free SKB */
1540 skb = txq->entries[idx].skb;
1541
1542 /* Can be called from irqs-disabled context
1543 * If skb is not NULL, it means that the whole queue is being
1544 * freed and that the queue is not empty - free the skb
1545 */
1546 if (skb) {
1547 iwl_op_mode_free_skb(trans->op_mode, skb);
1548 txq->entries[idx].skb = NULL;
1549 }
1550}
1551
1552void iwl_txq_progress(struct iwl_txq *txq)
1553{
1554 lockdep_assert_held(&txq->lock);
1555
1556 if (!txq->wd_timeout)
1557 return;
1558
1559 /*
1560 * station is asleep and we send data - that must
1561 * be uAPSD or PS-Poll. Don't rearm the timer.
1562 */
1563 if (txq->frozen)
1564 return;
1565
1566 /*
1567 * if empty delete timer, otherwise move timer forward
1568 * since we're making progress on this queue
1569 */
1570 if (txq->read_ptr == txq->write_ptr)
1571 del_timer(&txq->stuck_timer);
1572 else
1573 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1574}
1575
1576/* Frees buffers until index _not_ inclusive */
1577void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1578 struct sk_buff_head *skbs)
1579{
1580 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1581 int tfd_num, read_ptr, last_to_free;
1582
1583 /* This function is not meant to release cmd queue*/
1584 if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
1585 return;
1586
1587 if (WARN_ON(!txq))
1588 return;
1589
1590 tfd_num = iwl_txq_get_cmd_index(txq, ssn);
1591 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1592
1593 spin_lock_bh(&txq->lock);
1594
1595 if (!test_bit(txq_id, trans->txqs.queue_used)) {
1596 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
1597 txq_id, ssn);
1598 goto out;
1599 }
1600
1601 if (read_ptr == tfd_num)
1602 goto out;
1603
1604 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1605 txq_id, txq->read_ptr, tfd_num, ssn);
1606
1607 /* Since we free until the index, _not_ inclusive, the one before
1608 * the index is the last we will free. That entry must be in use */
1609 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
1610
1611 if (!iwl_txq_used(txq, last_to_free)) {
1612 IWL_ERR(trans,
1613 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1614 __func__, txq_id, last_to_free,
1615 trans->trans_cfg->base_params->max_tfd_queue_size,
1616 txq->write_ptr, txq->read_ptr);
1617
1618 iwl_op_mode_time_point(trans->op_mode,
1619 IWL_FW_INI_TIME_POINT_FAKE_TX,
1620 NULL);
1621 goto out;
1622 }
1623
1624 if (WARN_ON(!skb_queue_empty(skbs)))
1625 goto out;
1626
1627 for (;
1628 read_ptr != tfd_num;
1629 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
1630 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
1631 struct sk_buff *skb = txq->entries[read_ptr].skb;
1632
1633 if (WARN_ON_ONCE(!skb))
1634 continue;
1635
1636 iwl_txq_free_tso_page(trans, skb);
1637
1638 __skb_queue_tail(skbs, skb);
1639
1640 txq->entries[read_ptr].skb = NULL;
1641
1642 if (!trans->trans_cfg->gen2)
1643 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
1644
1645 iwl_txq_free_tfd(trans, txq);
1646 }
1647
1648 iwl_txq_progress(txq);
1649
1650 if (iwl_txq_space(trans, txq) > txq->low_mark &&
1651 test_bit(txq_id, trans->txqs.queue_stopped)) {
1652 struct sk_buff_head overflow_skbs;
1653
1654 __skb_queue_head_init(&overflow_skbs);
1655 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
1656
1657 /*
1658 * We are going to transmit from the overflow queue.
1659 * Remember this state so that wait_for_txq_empty will know we
1660 * are adding more packets to the TFD queue. It cannot rely on
1661 * the state of &txq->overflow_q, as we just emptied it, but
1662 * haven't TXed the content yet.
1663 */
1664 txq->overflow_tx = true;
1665
1666 /*
1667 * This is tricky: we are in the reclaim path, which is not
1668 * re-entrant, so no one will try to access the txq data
1669 * from that path. We also stopped tx, so no new tx can come
1670 * in either. Bottom line: we can unlock and re-lock
1671 * later.
1672 */
1673 spin_unlock_bh(&txq->lock);
1674
1675 while (!skb_queue_empty(&overflow_skbs)) {
1676 struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
1677 struct iwl_device_tx_cmd *dev_cmd_ptr;
1678
1679 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
1680 trans->txqs.dev_cmd_offs);
1681
1682 /*
1683 * Note that we can very well be overflowing again.
1684 * In that case, iwl_txq_space will be small again
1685 * and we won't wake mac80211's queue.
1686 */
1687 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
1688 }
1689
1690 if (iwl_txq_space(trans, txq) > txq->low_mark)
1691 iwl_wake_queue(trans, txq);
1692
1693 spin_lock_bh(&txq->lock);
1694 txq->overflow_tx = false;
1695 }
1696
1697out:
1698 spin_unlock_bh(&txq->lock);
1699}
1700
1701/* Set wr_ptr of specific device and txq */
1702void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
1703{
1704 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1705
1706 spin_lock_bh(&txq->lock);
1707
1708 txq->write_ptr = ptr;
1709 txq->read_ptr = txq->write_ptr;
1710
1711 spin_unlock_bh(&txq->lock);
1712}
1713
1714void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
1715 bool freeze)
1716{
1717 int queue;
1718
1719 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
1720 struct iwl_txq *txq = trans->txqs.txq[queue];
1721 unsigned long now;
1722
1723 spin_lock_bh(&txq->lock);
1724
1725 now = jiffies;
1726
1727 if (txq->frozen == freeze)
1728 goto next_queue;
1729
1730 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
1731 freeze ? "Freezing" : "Waking", queue);
1732
1733 txq->frozen = freeze;
1734
1735 if (txq->read_ptr == txq->write_ptr)
1736 goto next_queue;
1737
1738 if (freeze) {
1739 if (unlikely(time_after(now,
1740 txq->stuck_timer.expires))) {
1741 /*
1742 * The timer should have fired, maybe it is
1743 * spinning right now on the lock.
1744 */
1745 goto next_queue;
1746 }
1747 /* remember how long until the timer fires */
1748 txq->frozen_expiry_remainder =
1749 txq->stuck_timer.expires - now;
1750 del_timer(&txq->stuck_timer);
1751 goto next_queue;
1752 }
1753
1754 /*
1755 * Wake a non-empty queue -> arm timer with the
1756 * remainder before it froze
1757 */
1758 mod_timer(&txq->stuck_timer,
1759 now + txq->frozen_expiry_remainder);
1760
1761next_queue:
1762 spin_unlock_bh(&txq->lock);
1763 }
1764}
1765
1766#define HOST_COMPLETE_TIMEOUT (2 * HZ)
1767
1768static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
1769 struct iwl_host_cmd *cmd)
1770{
1771 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
1772 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1773 int cmd_idx;
1774 int ret;
1775
1776 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
1777
1778 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1779 &trans->status),
1780 "Command %s: a command is already active!\n", cmd_str))
1781 return -EIO;
1782
1783 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
1784
1785 cmd_idx = trans->ops->send_cmd(trans, cmd);
1786 if (cmd_idx < 0) {
1787 ret = cmd_idx;
1788 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1789 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
1790 cmd_str, ret);
1791 return ret;
1792 }
1793
1794 ret = wait_event_timeout(trans->wait_command_queue,
1795 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
1796 &trans->status),
1797 HOST_COMPLETE_TIMEOUT);
1798 if (!ret) {
1799 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
1800 cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1801
1802 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1803 txq->read_ptr, txq->write_ptr);
1804
1805 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1806 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1807 cmd_str);
1808 ret = -ETIMEDOUT;
1809
1810 iwl_trans_sync_nmi(trans);
1811 goto cancel;
1812 }
1813
1814 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1815 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
1816 &trans->status)) {
1817 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
1818 dump_stack();
1819 }
1820 ret = -EIO;
1821 goto cancel;
1822 }
1823
1824 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1825 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1826 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1827 ret = -ERFKILL;
1828 goto cancel;
1829 }
1830
1831 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1832 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
1833 ret = -EIO;
1834 goto cancel;
1835 }
1836
1837 return 0;
1838
1839cancel:
1840 if (cmd->flags & CMD_WANT_SKB) {
1841 /*
1842 * Cancel the CMD_WANT_SKB flag for the cmd in the
1843 * TX cmd queue. Otherwise in case the cmd comes
1844 * in later, it will possibly set an invalid
1845 * address (cmd->meta.source).
1846 */
1847 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1848 }
1849
1850 if (cmd->resp_pkt) {
1851 iwl_free_resp(cmd);
1852 cmd->resp_pkt = NULL;
1853 }
1854
1855 return ret;
1856}
1857
1858int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
1859 struct iwl_host_cmd *cmd)
1860{
1861 /* Make sure the NIC is still alive in the bus */
1862 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1863 return -ENODEV;
1864
1865 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1866 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1867 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1868 cmd->id);
1869 return -ERFKILL;
1870 }
1871
1872 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
1873 !(cmd->flags & CMD_SEND_IN_D3))) {
1874 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
1875 return -EHOSTDOWN;
1876 }
1877
1878 if (cmd->flags & CMD_ASYNC) {
1879 int ret;
1880
1881 /* An asynchronous command cannot expect an SKB to be set. */
1882 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1883 return -EINVAL;
1884
1885 ret = trans->ops->send_cmd(trans, cmd);
1886 if (ret < 0) {
1887 IWL_ERR(trans,
1888 "Error sending %s: enqueue_hcmd failed: %d\n",
1889 iwl_get_cmd_string(trans, cmd->id), ret);
1890 return ret;
1891 }
1892 return 0;
1893 }
1894
1895 return iwl_trans_txq_send_hcmd_sync(trans, cmd);
1896}
1897