[linux-2.6-block.git] drivers/net/wireless/iwlwifi/iwl-tx.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

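/*
 * Added note (interpretation, not from the original source): this table
 * maps 802.11 TIDs to the device's EDCA Tx FIFOs.  Entries 0-7 cover the
 * QoS TIDs (background/best-effort/video/voice pairs), entries 8-15 are
 * marked IWL_TX_FIFO_NONE and unused, and the final entry appears to
 * serve the non-QoS/management case on the AC3 (voice) FIFO.
 */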
static const u16 default_tid_to_tx_fifo[] = {
        IWL_TX_FIFO_AC1,
        IWL_TX_FIFO_AC0,
        IWL_TX_FIFO_AC0,
        IWL_TX_FIFO_AC1,
        IWL_TX_FIFO_AC2,
        IWL_TX_FIFO_AC2,
        IWL_TX_FIFO_AC3,
        IWL_TX_FIFO_AC3,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_AC3
};

static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
                                    struct iwl_dma_ptr *ptr, size_t size)
{
        ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
                                       GFP_KERNEL);
        if (!ptr->addr)
                return -ENOMEM;
        ptr->size = size;
        return 0;
}

static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
                                    struct iwl_dma_ptr *ptr)
{
        if (unlikely(!ptr->addr))
                return;

        dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
        memset(ptr, 0, sizeof(*ptr));
}

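/*
 * Usage sketch (added for illustration; it mirrors calls made later in
 * this file by iwl_txq_ctx_alloc() and iwl_hw_txq_ctx_free()):
 *
 *      if (iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE))
 *              goto error_kw;
 *      ...
 *      iwl_free_dma_ptr(priv, &priv->kw);
 */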
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        u32 reg = 0;
        int txq_id = txq->q.id;

        if (txq->need_update == 0)
                return;

        /* if we're trying to save power */
        if (test_bit(STATUS_POWER_PMI, &priv->status)) {
                /* wake up nic if it's powered down ...
                 * uCode will wake up, and interrupt us again, so next
                 * time we'll skip this part. */
                reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                        IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
                                       txq_id, reg);
                        iwl_set_bit(priv, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                        return;
                }

                iwl_write_direct32(priv, HBUS_TARG_WRPTR,
                                   txq->q.write_ptr | (txq_id << 8));

        /* else not in power-save mode, uCode will never sleep when we're
         * trying to tx (during RFKILL, we're not trying to tx). */
        } else
                iwl_write32(priv, HBUS_TARG_WRPTR,
                            txq->q.write_ptr | (txq_id << 8));

        txq->need_update = 0;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);

void iwl_free_tfds_in_queue(struct iwl_priv *priv,
                            int sta_id, int tid, int freed)
{
        if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
                priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
        else {
                IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
                             priv->stations[sta_id].tid[tid].tfds_in_queue,
                             freed);
                priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
        }
}
EXPORT_SYMBOL(iwl_free_tfds_in_queue);

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct device *dev = &priv->pci_dev->dev;
        int i;

        if (q->n_bd == 0)
                return;

        /* first, empty all BD's */
        for (; q->write_ptr != q->read_ptr;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
                priv->cfg->ops->lib->txq_free_tfd(priv, txq);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                dma_free_coherent(dev, priv->hw_params.tfd_size *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
        struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
        struct iwl_queue *q = &txq->q;
        struct device *dev = &priv->pci_dev->dev;
        int i;
        bool huge = false;

        if (q->n_bd == 0)
                return;

        for (; q->read_ptr != q->write_ptr;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
                /* we have no way to tell if it is a huge cmd ATM */
                i = get_cmd_index(q, q->read_ptr, 0);

                if (txq->meta[i].flags & CMD_SIZE_HUGE) {
                        huge = true;
                        continue;
                }

                pci_unmap_single(priv->pci_dev,
                                 pci_unmap_addr(&txq->meta[i], mapping),
                                 pci_unmap_len(&txq->meta[i], len),
                                 PCI_DMA_BIDIRECTIONAL);
        }
        if (huge) {
                i = q->n_window;
                pci_unmap_single(priv->pci_dev,
                                 pci_unmap_addr(&txq->meta[i], mapping),
                                 pci_unmap_len(&txq->meta[i], len),
                                 PCI_DMA_BIDIRECTIONAL);
        }

        /* De-alloc array of command/tx buffers */
        for (i = 0; i <= TFD_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
                                  txq->tfds, txq->q.dma_addr);

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space drops below the low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if free space
 * rises above the high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

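/*
 * Worked example (added for illustration, not from the original source):
 * with n_bd = 256, read_ptr = 250 and write_ptr = 10, the hardware still
 * owns entries 250..255 and 0..9, i.e. 16 TFDs are in flight.
 * iwl_queue_space() below derives the remaining room from the same two
 * indexes and then subtracts the 2-entry reserve mentioned above, so a
 * completely full ring never looks identical to an empty one.
 */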
int iwl_queue_space(const struct iwl_queue *q)
{
        int s = q->read_ptr - q->write_ptr;

        if (q->read_ptr > q->write_ptr)
                s -= q->n_bd;

        if (s <= 0)
                s += q->n_window;
        /* keep some reserve to not confuse empty and full situations */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
EXPORT_SYMBOL(iwl_queue_space);

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
                          int count, int slots_num, u32 id)
{
        q->n_bd = count;
        q->n_window = slots_num;
        q->id = id;

        /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
         * and iwl_queue_dec_wrap are broken. */
        BUG_ON(!is_power_of_2(count));

        /* slots_num must be power-of-two size, otherwise
         * get_cmd_index is broken. */
        BUG_ON(!is_power_of_2(slots_num));

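        /*
         * Illustrative note (added, assuming get_cmd_index() masks the
         * ring index with n_window - 1): the power-of-two requirement
         * lets a ring index be folded into a command slot with a simple
         * mask, e.g. with slots_num = 32, ring index 300 maps to slot
         * 300 & 31 = 12.
         */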
        q->low_mark = q->n_window / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;

        q->high_mark = q->n_window / 8;
        if (q->high_mark < 2)
                q->high_mark = 2;

        q->write_ptr = q->read_ptr = 0;

        return 0;
}

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
                              struct iwl_tx_queue *txq, u32 id)
{
        struct device *dev = &priv->pci_dev->dev;
        size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (id != IWL_CMD_QUEUE_NUM) {
                txq->txb = kmalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERR(priv, "kmalloc for auxiliary BD "
                                "structures failed\n");
                        goto error;
                }
        } else {
                txq->txb = NULL;
        }

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
                                       GFP_KERNEL);
        if (!txq->tfds) {
                IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
                goto error;
        }
        txq->q.id = id;

        return 0;

 error:
        kfree(txq->txb);
        txq->txb = NULL;

        return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                      int slots_num, u32 txq_id)
{
        int i, len;
        int ret;
        int actual_slots = slots_num;

        /*
         * Alloc buffer array for commands (Tx or other types of commands).
         * For the command queue (#4), allocate command space + one big
         * command for scan, since scan command is very huge; the system will
         * not have two scans at the same time, so only one is needed.
         * For normal Tx queues (all other queues), no super-size command
         * space is needed.
         */
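        /*
         * Illustrative note (added, assuming TFD_CMD_SLOTS is 32): for
         * the command queue this makes actual_slots = 33, where the extra
         * slot is the single IWL_MAX_CMD_SIZE buffer reserved for the
         * huge scan command.
         */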
        if (txq_id == IWL_CMD_QUEUE_NUM)
                actual_slots++;

        txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
                            GFP_KERNEL);
        txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
                           GFP_KERNEL);

        if (!txq->meta || !txq->cmd)
                goto out_free_arrays;

        len = sizeof(struct iwl_device_cmd);
        for (i = 0; i < actual_slots; i++) {
                /* only happens for cmd queue */
                if (i == slots_num)
                        len = IWL_MAX_CMD_SIZE;

                txq->cmd[i] = kmalloc(len, GFP_KERNEL);
                if (!txq->cmd[i])
                        goto err;
        }

        /* Alloc driver data array and TFD circular buffer */
        ret = iwl_tx_queue_alloc(priv, txq, txq_id);
        if (ret)
                goto err;

        txq->need_update = 0;

        /*
         * Aggregation TX queues will get their ID when aggregation begins;
         * they overwrite the setting done here. The command FIFO doesn't
         * need an swq_id, so don't set one in order to catch errors; all
         * others can be set up to the identity mapping.
         */
        if (txq_id != IWL_CMD_QUEUE_NUM)
                txq->swq_id = txq_id;

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

        /* Tell device where to find queue */
        priv->cfg->ops->lib->txq_init(priv, txq);

        return 0;
err:
        for (i = 0; i < actual_slots; i++)
                kfree(txq->cmd[i]);
out_free_arrays:
        kfree(txq->meta);
        kfree(txq->cmd);

        return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);

void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                        int slots_num, u32 txq_id)
{
        int actual_slots = slots_num;

        if (txq_id == IWL_CMD_QUEUE_NUM)
                actual_slots++;

        memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);

        txq->need_update = 0;

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

        /* Tell device where to find queue */
        priv->cfg->ops->lib->txq_init(priv, txq);
}
EXPORT_SYMBOL(iwl_tx_queue_reset);

/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
        int txq_id;

        /* Tx queues */
        if (priv->txq) {
                for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
                        if (txq_id == IWL_CMD_QUEUE_NUM)
                                iwl_cmd_queue_free(priv);
                        else
                                iwl_tx_queue_free(priv, txq_id);
        }
        iwl_free_dma_ptr(priv, &priv->kw);

        iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);

        /* free tx queue structure */
        iwl_free_txq_mem(priv);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);

/**
 * iwl_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_alloc(struct iwl_priv *priv)
{
        int ret;
        int txq_id, slots_num;
        unsigned long flags;

        /* Free all tx/cmd queues and keep-warm buffer */
        iwl_hw_txq_ctx_free(priv);

        ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
                                priv->hw_params.scd_bc_tbls_size);
        if (ret) {
                IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
                goto error_bc_tbls;
        }
        /* Alloc keep-warm buffer */
        ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
        if (ret) {
                IWL_ERR(priv, "Keep Warm allocation failed\n");
                goto error_kw;
        }

        /* allocate tx queue structure */
        ret = iwl_alloc_txq_mem(priv);
        if (ret)
                goto error;

        spin_lock_irqsave(&priv->lock, flags);

        /* Turn off all Tx DMA fifos */
        priv->cfg->ops->lib->txq_set_sched(priv, 0);

        /* Tell NIC where to find the "keep warm" buffer */
        iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

        spin_unlock_irqrestore(&priv->lock, flags);

        /* Alloc and init all Tx queues, including the command queue (#4) */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
                slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
                            TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
                                        txq_id);
                if (ret) {
                        IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
                        goto error;
                }
        }

        return ret;

 error:
        iwl_hw_txq_ctx_free(priv);
        iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
        iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
        return ret;
}

void iwl_txq_ctx_reset(struct iwl_priv *priv)
{
        int txq_id, slots_num;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        /* Turn off all Tx DMA fifos */
        priv->cfg->ops->lib->txq_set_sched(priv, 0);

        /* Tell NIC where to find the "keep warm" buffer */
        iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

        spin_unlock_irqrestore(&priv->lock, flags);

        /* Alloc and init all Tx queues, including the command queue (#4) */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
                slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
                            TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
        }
}

/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
        int ch;
        unsigned long flags;

        /* Turn off all Tx DMA fifos */
        spin_lock_irqsave(&priv->lock, flags);

        priv->cfg->ops->lib->txq_set_sched(priv, 0);

        /* Stop each Tx DMA channel, and wait for it to be idle */
        for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
                iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
                iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
                                    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
                                    1000);
        }
        spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);

/*
 * Handle building the REPLY_TX command.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
                                   struct iwl_tx_cmd *tx_cmd,
                                   struct ieee80211_tx_info *info,
                                   struct ieee80211_hdr *hdr,
                                   u8 std_id)
{
        __le16 fc = hdr->frame_control;
        __le32 tx_flags = tx_cmd->tx_flags;

        tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
                tx_flags |= TX_CMD_FLG_ACK_MSK;
                if (ieee80211_is_mgmt(fc))
                        tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
                if (ieee80211_is_probe_resp(fc) &&
                    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
                        tx_flags |= TX_CMD_FLG_TSF_MSK;
        } else {
                tx_flags &= (~TX_CMD_FLG_ACK_MSK);
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }

        if (ieee80211_is_back_req(fc))
                tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

        tx_cmd->sta_id = std_id;
        if (ieee80211_has_morefrags(fc))
                tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

        if (ieee80211_is_data_qos(fc)) {
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        } else {
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }

        priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

        if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
                tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

        tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
        if (ieee80211_is_mgmt(fc)) {
                if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
                else
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
        } else {
                tx_cmd->timeout.pm_frame_timeout = 0;
        }

        tx_cmd->driver_txop = 0;
        tx_cmd->tx_flags = tx_flags;
        tx_cmd->next_frame_len = 0;
}

#define RTS_HCCA_RETRY_LIMIT            3
#define RTS_DFAULT_RETRY_LIMIT          60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
                                  struct iwl_tx_cmd *tx_cmd,
                                  struct ieee80211_tx_info *info,
                                  __le16 fc, int is_hcca)
{
        u32 rate_flags;
        int rate_idx;
        u8 rts_retry_limit;
        u8 data_retry_limit;
        u8 rate_plcp;

        /* Set retry limit on DATA packets and Probe Responses */
        if (ieee80211_is_probe_resp(fc))
                data_retry_limit = 3;
        else
                data_retry_limit = IWL_DEFAULT_TX_RETRY;
        tx_cmd->data_retry_limit = data_retry_limit;

        /* Set retry limit on RTS packets */
        rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
                          RTS_DFAULT_RETRY_LIMIT;
        if (data_retry_limit < rts_retry_limit)
                rts_retry_limit = data_retry_limit;
        tx_cmd->rts_retry_limit = rts_retry_limit;

        /* DATA packets will use the uCode station table for rate/antenna
         * selection */
        if (ieee80211_is_data(fc)) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
                return;
        }

        /*
         * If the current TX rate stored in mac80211 has the MCS bit set, it's
         * not really a TX rate. Thus, we use the lowest supported rate for
         * this band. Also use the lowest supported rate if the stored rate
         * index is invalid.
         */
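        /*
         * Illustrative note (added): on 5 GHz the band's rate table
         * starts at the first OFDM rate, so the remap below shifts the
         * mac80211 index by IWL_FIRST_OFDM_RATE to land on the matching
         * entry of the driver's combined CCK+OFDM iwl_rates[] table.
         */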
        rate_idx = info->control.rates[0].idx;
        if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
            (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
                rate_idx = rate_lowest_index(&priv->bands[info->band],
                                             info->control.sta);
        /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
        if (info->band == IEEE80211_BAND_5GHZ)
                rate_idx += IWL_FIRST_OFDM_RATE;
        /* Get PLCP rate for tx_cmd->rate_n_flags */
        rate_plcp = iwl_rates[rate_idx].plcp;
        /* Zero out flags for this packet */
        rate_flags = 0;

        /* Set CCK flag as needed */
        if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
                rate_flags |= RATE_MCS_CCK_MSK;

        /* Set up RTS and CTS flags for certain packets */
        switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
        case cpu_to_le16(IEEE80211_STYPE_AUTH):
        case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
        case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
        case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
                if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
                        tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
                        tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
                }
                break;
        default:
                break;
        }

        /* Set up antennas */
        priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
        rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

        /* Set the rate in the TX cmd */
        tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
                                      struct ieee80211_tx_info *info,
                                      struct iwl_tx_cmd *tx_cmd,
                                      struct sk_buff *skb_frag,
                                      int sta_id)
{
        struct ieee80211_key_conf *keyconf = info->control.hw_key;

        switch (keyconf->alg) {
        case ALG_CCMP:
                tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
                memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
                if (info->flags & IEEE80211_TX_CTL_AMPDU)
                        tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
                IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
                break;

        case ALG_TKIP:
                tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
                ieee80211_get_tkip_key(keyconf, skb_frag,
                                       IEEE80211_TKIP_P2_KEY, tx_cmd->key);
                IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
                break;

        case ALG_WEP:
                tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
                        (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

                if (keyconf->keylen == WEP_KEY_LEN_128)
                        tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

                memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

                IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
                             "with key %d\n", keyconf->keyidx);
                break;

        default:
                IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
                break;
        }
}

/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = info->control.sta;
        struct iwl_station_priv *sta_priv = NULL;
        struct iwl_tx_queue *txq;
        struct iwl_queue *q;
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        struct iwl_tx_cmd *tx_cmd;
        int swq_id, txq_id;
        dma_addr_t phys_addr;
        dma_addr_t txcmd_phys;
        dma_addr_t scratch_phys;
        u16 len, len_org, firstlen, secondlen;
        u16 seq_number = 0;
        __le16 fc;
        u8 hdr_len;
        u8 sta_id;
        u8 wait_write_ptr = 0;
        u8 tid = 0;
        u8 *qc = NULL;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        if (iwl_is_rfkill(priv)) {
                IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
                goto drop_unlock;
        }

        fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
        if (ieee80211_is_auth(fc))
                IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
        else if (ieee80211_is_assoc_req(fc))
                IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
        else if (ieee80211_is_reassoc_req(fc))
                IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

        /* drop all non-injected data frames if we are not associated */
        if (ieee80211_is_data(fc) &&
            !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
            (!iwl_is_associated(priv) ||
             ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
             !priv->assoc_station_added)) {
                IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
                goto drop_unlock;
        }

        hdr_len = ieee80211_hdrlen(fc);

        /* Find (or create) index into station table for destination station */
        if (info->flags & IEEE80211_TX_CTL_INJECTED)
                sta_id = priv->hw_params.bcast_sta_id;
        else
                sta_id = iwl_get_sta_id(priv, hdr);
        if (sta_id == IWL_INVALID_STATION) {
                IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
                               hdr->addr1);
                goto drop_unlock;
        }

        IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

        if (sta)
                sta_priv = (void *)sta->drv_priv;

        if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
            sta_priv->asleep) {
                WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
                /*
                 * This sends an asynchronous command to the device,
                 * but we can rely on it being processed before the
                 * next frame is processed -- and the next frame to
                 * this station is the one that will consume this
                 * counter.
                 * For now set the counter to just 1 since we do not
                 * support uAPSD yet.
                 */
                iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
        }

        txq_id = skb_get_queue_mapping(skb);
        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);
                tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
                if (unlikely(tid >= MAX_TID_COUNT))
                        goto drop_unlock;
                seq_number = priv->stations[sta_id].tid[tid].seq_number;
                seq_number &= IEEE80211_SCTL_SEQ;
                hdr->seq_ctrl = hdr->seq_ctrl &
                                cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(seq_number);
                seq_number += 0x10;
                /* aggregation is on for this <sta,tid> */
                if (info->flags & IEEE80211_TX_CTL_AMPDU &&
                    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
                        txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
                }
        }

        txq = &priv->txq[txq_id];
        swq_id = txq->swq_id;
        q = &txq->q;

        if (unlikely(iwl_queue_space(q) < q->high_mark))
                goto drop_unlock;

        if (ieee80211_is_data_qos(fc))
                priv->stations[sta_id].tid[tid].tfds_in_queue++;

        /* Set up driver data for this TFD */
        memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
        txq->txb[q->write_ptr].skb[0] = skb;

        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_cmd = txq->cmd[q->write_ptr];
        out_meta = &txq->meta[q->write_ptr];
        tx_cmd = &out_cmd->cmd.tx;
        memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
        memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

        /*
         * Set up the Tx-command (not MAC!) header.
         * Store the chosen Tx queue and TFD index within the sequence field;
         * after Tx, uCode's Tx response will return this value so driver can
         * locate the frame within the tx queue and do post-tx processing.
         */
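        /*
         * Illustrative note (added, assuming the usual layout where
         * QUEUE_TO_SEQ() places the queue number in bits 8..12 and
         * INDEX_TO_SEQ() the ring index in bits 0..7): queue 4 with
         * write_ptr 12 would yield sequence 0x040c.
         */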
        out_cmd->hdr.cmd = REPLY_TX;
        out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                                INDEX_TO_SEQ(q->write_ptr)));

        /* Copy MAC header from skb into command buffer */
        memcpy(tx_cmd->hdr, hdr, hdr_len);

        /* Total # bytes to be transmitted */
        len = (u16)skb->len;
        tx_cmd->len = cpu_to_le16(len);

        if (info->control.hw_key)
                iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

        /* TODO need this for burst mode later on */
        iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
        iwl_dbg_log_tx_data_frame(priv, len, hdr);

        /* set is_hcca to 0; it probably will never be implemented */
        iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);

        iwl_update_stats(priv, true, fc, len);
        /*
         * Use the first empty entry in this queue's command buffer array
         * to contain the Tx command and MAC header concatenated together
         * (payload data will be in another buffer).
         * Size of this varies, due to varying MAC header length.
         * If end is not dword aligned, we'll have 2 extra bytes at the end
         * of the MAC header (device reads on dword boundaries).
         * We'll tell device about this padding later.
         */
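        /*
         * Worked example (added): with a 26-byte QoS data header, the
         * Tx command + command header + MAC header typically ends 2
         * bytes short of a dword boundary; the (len + 3) & ~3 rounding
         * below grows firstlen by 2, and len_org records that padding so
         * TX_CMD_FLG_MH_PAD_MSK gets set.
         */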
        len = sizeof(struct iwl_tx_cmd) +
              sizeof(struct iwl_cmd_header) + hdr_len;

        len_org = len;
        firstlen = len = (len + 3) & ~3;

        if (len_org != len)
                len_org = 1;
        else
                len_org = 0;

        /* Tell NIC about any 2-byte padding after MAC header */
        if (len_org)
                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
        txcmd_phys = pci_map_single(priv->pci_dev,
                                    &out_cmd->hdr, len,
                                    PCI_DMA_BIDIRECTIONAL);
        pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
        pci_unmap_len_set(out_meta, len, len);
        /* Add buffer containing Tx command and MAC(!) header to TFD's
         * first entry */
        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                   txcmd_phys, len, 1, 0);

        if (!ieee80211_has_morefrags(hdr->frame_control)) {
                txq->need_update = 1;
                if (qc)
                        priv->stations[sta_id].tid[tid].seq_number = seq_number;
        } else {
                wait_write_ptr = 1;
                txq->need_update = 0;
        }

        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
        secondlen = len = skb->len - hdr_len;
        if (len) {
                phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
                                           len, PCI_DMA_TODEVICE);
                priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                           phys_addr, len,
                                                           0, 0);
        }

        scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
                       offsetof(struct iwl_tx_cmd, scratch);

        len = sizeof(struct iwl_tx_cmd) +
              sizeof(struct iwl_cmd_header) + hdr_len;
        /* take back ownership of DMA buffer to enable update */
        pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
                                    len, PCI_DMA_BIDIRECTIONAL);
        tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
        tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

        IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
                     le16_to_cpu(out_cmd->hdr.sequence));
        IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

        /* Set up entry for this TFD in Tx byte-count array */
        if (info->flags & IEEE80211_TX_CTL_AMPDU)
                priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
                                                le16_to_cpu(tx_cmd->len));

        pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
                                       len, PCI_DMA_BIDIRECTIONAL);

        trace_iwlwifi_dev_tx(priv,
                             &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
                             sizeof(struct iwl_tfd),
                             &out_cmd->hdr, firstlen,
                             skb->data + hdr_len, secondlen);

        /* Tell device the write index *just past* this latest filled TFD */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_txq_update_write_ptr(priv, txq);
        spin_unlock_irqrestore(&priv->lock, flags);

        /*
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually,
         * regardless of the value of ret. "ret" only indicates
         * whether or not we should update the write pointer.
         */

        /* avoid atomic ops if it isn't an associated client */
        if (sta_priv && sta_priv->client)
                atomic_inc(&sta_priv->pending_frames);

        if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
                if (wait_write_ptr) {
                        spin_lock_irqsave(&priv->lock, flags);
                        txq->need_update = 1;
                        iwl_txq_update_write_ptr(priv, txq);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else {
                        iwl_stop_queue(priv, txq->swq_id);
                }
        }

        return 0;

drop_unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
        return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
        struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        dma_addr_t phys_addr;
        unsigned long flags;
        int len;
        u32 idx;
        u16 fix_size;

        cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
        fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

        /* If any of the command structures ends up being larger than
         * the TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command,
         * we will need to increase the size of the TFD entries.
         * Also check that the command buffer does not exceed the size
         * of device_cmd and IWL_MAX_CMD_SIZE. */
        BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
               !(cmd->flags & CMD_SIZE_HUGE));
        BUG_ON(fix_size > IWL_MAX_CMD_SIZE);

        if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
                IWL_WARN(priv, "Not sending command - %s KILL\n",
                         iwl_is_rfkill(priv) ? "RF" : "CT");
                return -EIO;
        }

        if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
                IWL_ERR(priv, "No space in command queue\n");
                if (iwl_within_ct_kill_margin(priv))
                        iwl_tt_enter_ct_kill(priv);
                else {
                        IWL_ERR(priv, "Restarting adapter due to queue full\n");
                        queue_work(priv->workqueue, &priv->restart);
                }
                return -ENOSPC;
        }

        spin_lock_irqsave(&priv->hcmd_lock, flags);

        /* If this is a huge cmd, mark the huge flag also on the meta.flags
         * of the _original_ cmd. This is used for DMA mapping clean up.
         */
        if (cmd->flags & CMD_SIZE_HUGE) {
                idx = get_cmd_index(q, q->write_ptr, 0);
                txq->meta[idx].flags = CMD_SIZE_HUGE;
        }

        idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
        out_cmd = txq->cmd[idx];
        out_meta = &txq->meta[idx];

        memset(out_meta, 0, sizeof(*out_meta));  /* re-initialize to NULL */
        out_meta->flags = cmd->flags;
        if (cmd->flags & CMD_WANT_SKB)
                out_meta->source = cmd;
        if (cmd->flags & CMD_ASYNC)
                out_meta->callback = cmd->callback;

        out_cmd->hdr.cmd = cmd->id;
        memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

        /* At this point, the out_cmd now has all of the incoming cmd
         * information */

        out_cmd->hdr.flags = 0;
        out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
                        INDEX_TO_SEQ(q->write_ptr));
        if (cmd->flags & CMD_SIZE_HUGE)
                out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
        len = sizeof(struct iwl_device_cmd);
        if (idx == TFD_CMD_SLOTS)
                len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_DEBUG
        switch (out_cmd->hdr.cmd) {
        case REPLY_TX_LINK_QUALITY_CMD:
        case SENSITIVITY_CMD:
                IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
                                  "%d bytes at %d[%d]:%d\n",
                                  get_cmd_string(out_cmd->hdr.cmd),
                                  out_cmd->hdr.cmd,
                                  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
                                  q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
                break;
        default:
                IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
                             "%d bytes at %d[%d]:%d\n",
                             get_cmd_string(out_cmd->hdr.cmd),
                             out_cmd->hdr.cmd,
                             le16_to_cpu(out_cmd->hdr.sequence), fix_size,
                             q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
        }
#endif
        txq->need_update = 1;

        if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
                /* Set up entry in queue's byte count circular buffer */
                priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

        phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
                                   fix_size, PCI_DMA_BIDIRECTIONAL);
        pci_unmap_addr_set(out_meta, mapping, phys_addr);
        pci_unmap_len_set(out_meta, len, fix_size);

        trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);

        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                   phys_addr, fix_size, 1,
                                                   U32_PAD(cmd->len));

        /* Increment and update queue's write index */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_txq_update_write_ptr(priv, txq);

        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
        return idx;
}

static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_sta *sta;
        struct iwl_station_priv *sta_priv;

        sta = ieee80211_find_sta(priv->vif, hdr->addr1);
        if (sta) {
                sta_priv = (void *)sta->drv_priv;
                /* avoid atomic ops if this isn't a client */
                if (sta_priv->client &&
                    atomic_dec_return(&sta_priv->pending_frames) == 0)
                        ieee80211_sta_block_awake(priv->hw, sta, false);
        }

        ieee80211_tx_status_irqsafe(priv->hw, skb);
}

int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_tx_info *tx_info;
        int nfreed = 0;
        struct ieee80211_hdr *hdr;

        if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
                        "is out of range [0-%d] %d %d.\n", txq_id,
                        index, q->n_bd, q->write_ptr, q->read_ptr);
                return 0;
        }

        for (index = iwl_queue_inc_wrap(index, q->n_bd);
             q->read_ptr != index;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                tx_info = &txq->txb[txq->q.read_ptr];
                iwl_tx_status(priv, tx_info->skb[0]);

                hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
                if (hdr && ieee80211_is_data_qos(hdr->frame_control))
                        nfreed++;
                tx_info->skb[0] = NULL;

                if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
                        priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

                priv->cfg->ops->lib->txq_free_tfd(priv, txq);
        }
        return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
                                   int idx, int cmd_idx)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        int nfreed = 0;

        if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
                        "is out of range [0-%d] %d %d.\n", txq_id,
                        idx, q->n_bd, q->write_ptr, q->read_ptr);
                return;
        }

        for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                if (nfreed++ > 0) {
                        IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
                                q->write_ptr, q->read_ptr);
                        queue_work(priv->workqueue, &priv->restart);
                }

        }
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int index = SEQ_TO_INDEX(sequence);
        int cmd_index;
        bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
        struct iwl_device_cmd *cmd;
        struct iwl_cmd_meta *meta;
        struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];

        /* If a Tx command is being handled and it isn't in the actual
         * command queue, then a command routing bug has been introduced
         * in the queue management code. */
        if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
                 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
                 txq_id, sequence,
                 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
                 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
                iwl_print_hex_error(priv, pkt, 32);
                return;
        }

        /* If this is a huge cmd, clear the huge flag on the meta.flags
         * of the _original_ cmd, so that iwl_cmd_queue_free won't unmap
         * the DMA buffer for the scan (huge) command.
         */
        if (huge) {
                cmd_index = get_cmd_index(&txq->q, index, 0);
                txq->meta[cmd_index].flags = 0;
        }
        cmd_index = get_cmd_index(&txq->q, index, huge);
        cmd = txq->cmd[cmd_index];
        meta = &txq->meta[cmd_index];

        pci_unmap_single(priv->pci_dev,
                         pci_unmap_addr(meta, mapping),
                         pci_unmap_len(meta, len),
                         PCI_DMA_BIDIRECTIONAL);

        /* Input error checking is done when commands are added to queue. */
        if (meta->flags & CMD_WANT_SKB) {
                meta->source->reply_page = (unsigned long)rxb_addr(rxb);
                rxb->page = NULL;
        } else if (meta->callback)
                meta->callback(priv, cmd, pkt);

        iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

        if (!(meta->flags & CMD_ASYNC)) {
                clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
                IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
                               get_cmd_string(cmd->hdr.cmd));
                wake_up_interruptible(&priv->wait_command_queue);
        }
        meta->flags = 0;
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
        int txq_id;

        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
                if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
                        return txq_id;
        return -1;
}

int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
        int sta_id;
        int tx_fifo;
        int txq_id;
        int ret;
        unsigned long flags;
        struct iwl_tid_data *tid_data;

        if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
                tx_fifo = default_tid_to_tx_fifo[tid];
        else
                return -EINVAL;

        IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
                 __func__, ra, tid);

        sta_id = iwl_find_station(priv, ra);
        if (sta_id == IWL_INVALID_STATION) {
                IWL_ERR(priv, "Start AGG on invalid station\n");
                return -ENXIO;
        }
        if (unlikely(tid >= MAX_TID_COUNT))
                return -EINVAL;

        if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
                IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
                return -ENXIO;
        }

        txq_id = iwl_txq_ctx_activate_free(priv);
        if (txq_id == -1) {
                IWL_ERR(priv, "No free aggregation queue available\n");
                return -ENXIO;
        }

        spin_lock_irqsave(&priv->sta_lock, flags);
        tid_data = &priv->stations[sta_id].tid[tid];
        *ssn = SEQ_TO_SN(tid_data->seq_number);
        tid_data->agg.txq_id = txq_id;
        priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
        spin_unlock_irqrestore(&priv->sta_lock, flags);

        ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
                                                  sta_id, tid, *ssn);
        if (ret)
                return ret;

        if (tid_data->tfds_in_queue == 0) {
                IWL_DEBUG_HT(priv, "HW queue is empty\n");
                tid_data->agg.state = IWL_AGG_ON;
                ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
        } else {
                IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
                             tid_data->tfds_in_queue);
                tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
        }
        return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);

int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
        int tx_fifo_id, txq_id, sta_id, ssn = -1;
        struct iwl_tid_data *tid_data;
        int write_ptr, read_ptr;
        unsigned long flags;

        if (!ra) {
                IWL_ERR(priv, "ra = NULL\n");
                return -EINVAL;
        }

        if (unlikely(tid >= MAX_TID_COUNT))
                return -EINVAL;

        if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
                tx_fifo_id = default_tid_to_tx_fifo[tid];
        else
                return -EINVAL;

        sta_id = iwl_find_station(priv, ra);

        if (sta_id == IWL_INVALID_STATION) {
                IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
                return -ENXIO;
        }

        if (priv->stations[sta_id].tid[tid].agg.state ==
            IWL_EMPTYING_HW_QUEUE_ADDBA) {
                IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
                ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
                priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
                return 0;
        }

        if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
                IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");

        tid_data = &priv->stations[sta_id].tid[tid];
        ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
        txq_id = tid_data->agg.txq_id;
        write_ptr = priv->txq[txq_id].q.write_ptr;
        read_ptr = priv->txq[txq_id].q.read_ptr;

        /* The queue is not empty */
        if (write_ptr != read_ptr) {
                IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
                priv->stations[sta_id].tid[tid].agg.state =
                        IWL_EMPTYING_HW_QUEUE_DELBA;
                return 0;
        }

        IWL_DEBUG_HT(priv, "HW queue is empty\n");
        priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

        spin_lock_irqsave(&priv->lock, flags);
        /*
         * The only reason this call can fail is a queue number out of range,
         * which can happen if uCode is reloaded and all the station
         * information is lost. If it is outside the range, there is no need
         * to deactivate the uCode queue; just return "success" to allow
         * mac80211 to clean up its own data.
         */
        priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
                                             tx_fifo_id);
        spin_unlock_irqrestore(&priv->lock, flags);

        ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);

        return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);

int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
        struct iwl_queue *q = &priv->txq[txq_id].q;
        u8 *addr = priv->stations[sta_id].sta.sta.addr;
        struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

        switch (priv->stations[sta_id].tid[tid].agg.state) {
        case IWL_EMPTYING_HW_QUEUE_DELBA:
                /* We are reclaiming the last packet of the */
                /* aggregated HW queue */
                if ((txq_id == tid_data->agg.txq_id) &&
                    (q->read_ptr == q->write_ptr)) {
                        u16 ssn = SEQ_TO_SN(tid_data->seq_number);
                        int tx_fifo = default_tid_to_tx_fifo[tid];
                        IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
                        priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
                                                             ssn, tx_fifo);
                        tid_data->agg.state = IWL_AGG_OFF;
                        ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
                }
                break;
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
                /* We are reclaiming the last packet of the queue */
                if (tid_data->tfds_in_queue == 0) {
                        IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
                        tid_data->agg.state = IWL_AGG_ON;
                        ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
                }
                break;
        }
        return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);

/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
                                 struct iwl_ht_agg *agg,
                                 struct iwl_compressed_ba_resp *ba_resp)

{
        int i, sh, ack;
        u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
        u64 bitmap;
        int successes = 0;
        struct ieee80211_tx_info *info;

        if (unlikely(!agg->wait_for_ba)) {
                IWL_ERR(priv, "Received BA when not expected\n");
                return -EINVAL;
        }

        /* Mark that the expected block-ack response arrived */
        agg->wait_for_ba = 0;
        IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

        /* Calculate shift to align block-ack bits with our Tx window bits */
        sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
        if (sh < 0) /* tbw something is wrong with indices */
                sh += 0x100;

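        /*
         * Worked example (added): if the BA window starts five frames
         * before our recorded Tx window start, sh = 5, and the shift
         * below lines bit 0 of the BA bitmap up with agg->start_idx so
         * that bit i reports on frame (start_idx + i).
         */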
        /* don't use 64-bit values for now */
        bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

        if (agg->frame_count > (64 - sh)) {
                IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
                return -1;
        }

        /* check for success or failure according to the
         * transmitted bitmap and block-ack bitmap */
        bitmap &= agg->bitmap;

        /* For each frame attempted in aggregation,
         * update driver's record of tx frame's status. */
        for (i = 0; i < agg->frame_count ; i++) {
                ack = bitmap & (1ULL << i);
                successes += !!ack;
                IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
                        ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
                        agg->start_idx + i);
        }

        info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
        memset(&info->status, 0, sizeof(info->status));
        info->flags |= IEEE80211_TX_STAT_ACK;
        info->flags |= IEEE80211_TX_STAT_AMPDU;
        info->status.ampdu_ack_map = successes;
        info->status.ampdu_ack_len = agg->frame_count;
        iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

        IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

        return 0;
}

/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
                                struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
        struct iwl_tx_queue *txq = NULL;
        struct iwl_ht_agg *agg;
        int index;
        int sta_id;
        int tid;

        /* "flow" corresponds to Tx queue */
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

        /* "ssn" is start of block-ack Tx window, corresponds to index
         * (in Tx queue's circular buffer) of first TFD/frame in window */
        u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

        if (scd_flow >= priv->hw_params.max_txq_num) {
                IWL_ERR(priv,
                        "BUG_ON scd_flow is bigger than number of queues\n");
                return;
        }

        txq = &priv->txq[scd_flow];
        sta_id = ba_resp->sta_id;
        tid = ba_resp->tid;
        agg = &priv->stations[sta_id].tid[tid].agg;

        /* Find index just before block-ack window */
        index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

        /* TODO: Need to get this copy more safely - now good for debug */

        IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
                           "sta_id = %d\n",
                           agg->wait_for_ba,
                           (u8 *) &ba_resp->sta_addr_lo32,
                           ba_resp->sta_id);
        IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
                           "%d, scd_ssn = %d\n",
                           ba_resp->tid,
                           ba_resp->seq_ctl,
                           (unsigned long long)le64_to_cpu(ba_resp->bitmap),
                           ba_resp->scd_flow,
                           ba_resp->scd_ssn);
        IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n",
                           agg->start_idx,
                           (unsigned long long)agg->bitmap);

        /* Update driver's record of ACK vs. not for each frame in window */
        iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

        /* Release all TFDs before the SSN, i.e. all TFDs in front of
         * block-ack window (we assume that they've been successfully
         * transmitted ... if not, it's too late anyway). */
        if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
                /* calculate mac80211 ampdu sw queue to wake */
                int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
                iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

                if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
                    priv->mac80211_registered &&
                    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
                        iwl_wake_queue(priv, txq->swq_id);

                iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
        }
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);

#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
        switch (status & TX_STATUS_MSK) {
        case TX_STATUS_SUCCESS:
                return "SUCCESS";
        TX_STATUS_ENTRY(SHORT_LIMIT);
        TX_STATUS_ENTRY(LONG_LIMIT);
        TX_STATUS_ENTRY(FIFO_UNDERRUN);
        TX_STATUS_ENTRY(MGMNT_ABORT);
        TX_STATUS_ENTRY(NEXT_FRAG);
        TX_STATUS_ENTRY(LIFE_EXPIRE);
        TX_STATUS_ENTRY(DEST_PS);
        TX_STATUS_ENTRY(ABORTED);
        TX_STATUS_ENTRY(BT_RETRY);
        TX_STATUS_ENTRY(STA_INVALID);
        TX_STATUS_ENTRY(FRAG_DROPPED);
        TX_STATUS_ENTRY(TID_DISABLE);
        TX_STATUS_ENTRY(FRAME_FLUSHED);
        TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
        TX_STATUS_ENTRY(TX_LOCKED);
        TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
        }

        return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */