ath9k: improve BT FTP/PAN performance
[linux-2.6-block.git] drivers/net/wireless/ath/ath9k/xmit.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

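/*
 * Data bits carried per OFDM symbol for MCS 0-7 (a single spatial
 * stream) in 20 MHz and 40 MHz channels; callers index this table with
 * (mcs % 8) and scale the result by the stream count.
 */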
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/* 0: BPSK */
	{    52,  108 },	/* 1: QPSK 1/2 */
	{    78,  162 },	/* 2: QPSK 3/4 */
	{   104,  216 },	/* 3: 16-QAM 1/2 */
	{   156,  324 },	/* 4: 16-QAM 3/4 */
	{   208,  432 },	/* 5: 64-QAM 2/3 */
	{   234,  486 },	/* 6: 64-QAM 3/4 */
	{   260,  540 },	/* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb,
					   bool dequeue);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

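/*
 * Largest frame length (in bytes) per HT mode and MCS index that still
 * fits in a 4 ms transmit duration; ath_lookup_rate() uses this to cap
 * the aggregate size.
 */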
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}

static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}

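/*
 * Like ath_txq_unlock(), but additionally hands any tx status reports
 * collected on txq->complete_q to mac80211 once the lock has been
 * dropped.
 */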
static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	ath_txq_lock(sc, txq);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	ath_txq_unlock_complete(sc, txq);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

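/*
 * Drop all frames pending on a TID: software-retried frames are
 * completed as failed and a BAR is sent to move the receiver's window
 * forward, the rest are sent out as normal, non-aggregated frames.
 */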
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
	}

	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	if (sendbar) {
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}

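/*
 * Clear a completed subframe from the block-ack window bitmap and slide
 * the window start (tid->seq_start) past any leading run of completed
 * sequence numbers.
 */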
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
	tid->bar_index = -1;
}

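/*
 * Account a software retry for a frame. On the first retry the 802.11
 * retry flag is set in the header and the change is synced back to the
 * DMA buffer.
 */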
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

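/*
 * Walk an aggregate and count its subframes, flagging as bad those that
 * the block-ack bitmap (or an outright failed transmission) left
 * unacked.
 */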
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

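/*
 * Handle the tx status of a completed aggregate: complete the subframes
 * acked by the block-ack, software-retry the rest (cloning the holding
 * descriptor when necessary), and send a BAR past any subframe that has
 * exhausted its retries.
 */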
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if ((tid->state & AGGR_CLEANUP) || !retry) {
			/*
			 * cleanup in progress, just fail
			 * the un-acked sub-frames
			 */
			txfail = 1;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it is not a
		 * holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
			    bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

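/*
 * Compute the byte limit for an aggregate on this TID: the largest
 * length that keeps every rate in the series within a 4 ms transmit
 * duration, further clamped by the BTCOEX aggregation limit and the
 * peer's advertised maximum A-MPDU size.
 */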
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiters when using RTS/CTS with aggregation
	 * on non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

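/*
 * Build an A-MPDU from the frames queued on a TID, linking subframes
 * until the block-ack window, the rate-derived length limit or the
 * subframe count limit closes the aggregate.
 */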
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, &tid->buf_q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		if (!bf_first)
			bf_first = bf;

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

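/*
 * Translate the mac80211 rate series into the hardware rate table for
 * this frame or aggregate, including RTS/CTS protection flags and the
 * per-rate packet durations.
 */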
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;

	if (tx_info->control.vif &&
	    tx_info->control.vif->bss_conf.use_short_preamble)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

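/*
 * Program the hardware descriptors for every subframe in a chain,
 * linking them together and tagging first/middle/last aggregate
 * positions.
 */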
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

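/*
 * Tear down an aggregation session. If subframes are still pending
 * inside the block-ack window, defer the teardown to tx completion by
 * setting AGGR_CLEANUP.
 */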
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_txq_lock(sc, txq);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;

	ath_tx_flush_tid(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}

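/*
 * A station entered powersave: pull its TIDs off the txq scheduler and
 * tell mac80211 which TIDs still hold buffered frames.
 */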
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		buffered = !skb_queue_empty(&tid->buf_q);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		ath_txq_unlock(sc, txq);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		ath_txq_unlock_complete(sc, txq);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	txtid = ATH_AN_2_TID(an, tid);
	txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
	txtid->state |= AGGR_ADDBA_COMPLETE;
	txtid->state &= ~AGGR_ADDBA_PROGRESS;
	ath_tx_resume_tid(sc, txtid);
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

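/*
 * Complete, with flush status, every frame on the given descriptor
 * list, returning stale holding descriptors to the free buffer pool.
 */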
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	ath_txq_lock(sc, txq);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	ath_txq_unlock_complete(sc, txq);
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!skb_queue_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

e8324357
S
1633/***********/
1634/* TX, DMA */
1635/***********/
1636
f078f209 1637/*
e8324357
S
1638 * Insert a chain of ath_buf (descriptors) on a txq and
1639 * assume the descriptors are already chained together by caller.
f078f209 1640 */
e8324357 1641static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
fce041be 1642 struct list_head *head, bool internal)
f078f209 1643{
cbe61d8a 1644 struct ath_hw *ah = sc->sc_ah;
c46917bb 1645 struct ath_common *common = ath9k_hw_common(ah);
fce041be
FF
1646 struct ath_buf *bf, *bf_last;
1647 bool puttxbuf = false;
1648 bool edma;
f078f209 1649
e8324357
S
1650 /*
1651 * Insert the frame on the outbound list and
1652 * pass it on to the hardware.
1653 */
f078f209 1654
e8324357
S
1655 if (list_empty(head))
1656 return;
f078f209 1657
fce041be 1658 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
e8324357 1659 bf = list_first_entry(head, struct ath_buf, list);
fce041be 1660 bf_last = list_entry(head->prev, struct ath_buf, list);
f078f209 1661
d2182b69
JP
1662 ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
1663 txq->axq_qnum, txq->axq_depth);
f078f209 1664
fce041be
FF
1665 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1666 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
e5003249 1667 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
fce041be 1668 puttxbuf = true;
e8324357 1669 } else {
e5003249
VT
1670 list_splice_tail_init(head, &txq->axq_q);
1671
fce041be
FF
1672 if (txq->axq_link) {
1673 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
d2182b69 1674 ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
226afe68
JP
1675 txq->axq_qnum, txq->axq_link,
1676 ito64(bf->bf_daddr), bf->bf_desc);
fce041be
FF
1677 } else if (!edma)
1678 puttxbuf = true;
1679
1680 txq->axq_link = bf_last->bf_desc;
1681 }
1682
1683 if (puttxbuf) {
1684 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1685 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
d2182b69 1686 ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
fce041be
FF
1687 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1688 }
1689
1690 if (!edma) {
8d8d3fdc 1691 TX_STAT_INC(txq->axq_qnum, txstart);
e5003249 1692 ath9k_hw_txstart(ah, txq->axq_qnum);
e8324357 1693 }
fce041be
FF
1694
1695 if (!internal) {
1696 txq->axq_depth++;
1697 if (bf_is_ampdu_not_probing(bf))
1698 txq->axq_ampdu_depth++;
1699 }
e8324357 1700}
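/*
 * Editor's sketch, not part of xmit.c: the legacy (non-EDMA) queueing
 * step of ath_tx_txqaddbuf() in miniature. A running queue is extended
 * by patching the previous tail descriptor's link field (axq_link);
 * only an idle queue needs the head pointer handed to hardware (the
 * ath9k_hw_puttxbuf() step). Toy types, no DMA.
 */
#include <stdio.h>

struct toy_desc { struct toy_desc *link; int id; };

struct toy_txq {
	struct toy_desc *head;		/* stand-in for the TXDP register */
	struct toy_desc **tail_link;	/* stand-in for txq->axq_link */
};

static void toy_addbuf(struct toy_txq *q, struct toy_desc *first,
		       struct toy_desc *last)
{
	if (q->tail_link)
		*q->tail_link = first;	/* chain onto the running queue */
	else
		q->head = first;	/* idle queue: write the head pointer */
	q->tail_link = &last->link;
}

int main(void)
{
	struct toy_txq q = { 0 };
	struct toy_desc a = { .id = 1 }, b = { .id = 2 };

	toy_addbuf(&q, &a, &a);		/* idle: sets head */
	toy_addbuf(&q, &b, &b);		/* running: links a -> b */

	for (struct toy_desc *d = q.head; d; d = d->link)
		printf("desc %d\n", d->id);
	return 0;
}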
f078f209 1701
e8324357 1702static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
44f1d26c 1703 struct sk_buff *skb, struct ath_tx_control *txctl)
f078f209 1704{
44f1d26c 1705 struct ath_frame_info *fi = get_frame_info(skb);
04caf863 1706 struct list_head bf_head;
44f1d26c 1707 struct ath_buf *bf;
f078f209 1708
e8324357
S
1709 /*
1710 * Do not queue to h/w when any of the following conditions is true:
1711 * - there are pending frames in software queue
1712 * - the TID is currently paused for ADDBA/BAR request
1713 * - seqno is not within block-ack window
1714 * - h/w queue depth exceeds low water mark
1715 */
56dc6336 1716 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
44f1d26c 1717 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
4b3ba66a 1718 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
f078f209 1719 /*
e8324357
S
1720 * Add this frame to software queue for scheduling later
1721 * for aggregation.
f078f209 1722 */
bda8adda 1723 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
44f1d26c 1724 __skb_queue_tail(&tid->buf_q, skb);
9af73cf7
FF
1725 if (!txctl->an || !txctl->an->sleeping)
1726 ath_tx_queue_tid(txctl->txq, tid);
e8324357
S
1727 return;
1728 }
1729
81357a28 1730 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
44f1d26c
FF
1731 if (!bf)
1732 return;
1733
399c6489 1734 bf->bf_state.bf_type = BUF_AMPDU;
04caf863
FF
1735 INIT_LIST_HEAD(&bf_head);
1736 list_add(&bf->list, &bf_head);
1737
e8324357 1738 /* Add sub-frame to BAW */
44f1d26c 1739 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
e8324357
S
1740
1741 /* Queue to h/w without aggregation */
bda8adda 1742 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
d43f3015 1743 bf->bf_lastbf = bf;
493cf04f 1744 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
fce041be 1745 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
e8324357
S
1746}
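/*
 * Editor's sketch, not part of xmit.c: the block-ack-window test that
 * gates ath_tx_send_ampdu() above (the BAW_WITHIN check). 802.11
 * sequence numbers are 12 bits, so the distance from the window start
 * is taken modulo 4096 before comparing against the window size.
 * baw_within() is a hypothetical stand-in for the driver macro.
 */
#include <assert.h>

#define SEQ_MAX 4096	/* 12-bit sequence space */

static int baw_within(int seq_start, int baw_size, int seqno)
{
	return ((seqno - seq_start) & (SEQ_MAX - 1)) < baw_size;
}

int main(void)
{
	assert(baw_within(10, 64, 20));		/* inside the window */
	assert(!baw_within(10, 64, 100));	/* beyond the window */
	assert(baw_within(4090, 64, 5));	/* window wraps past 4095 */
	return 0;
}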
1747
82b873af 1748static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
44f1d26c 1749 struct ath_atx_tid *tid, struct sk_buff *skb)
e8324357 1750{
44f1d26c
FF
1751 struct ath_frame_info *fi = get_frame_info(skb);
1752 struct list_head bf_head;
e8324357
S
1753 struct ath_buf *bf;
1754
44f1d26c
FF
1755 bf = fi->bf;
1756 if (!bf)
81357a28 1757 bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
44f1d26c
FF
1758
1759 if (!bf)
1760 return;
1761
1762 INIT_LIST_HEAD(&bf_head);
1763 list_add_tail(&bf->list, &bf_head);
399c6489 1764 bf->bf_state.bf_type = 0;
e8324357 1765
d43f3015 1766 bf->bf_lastbf = bf;
493cf04f 1767 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
44f1d26c 1768 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
fec247c0 1769 TX_STAT_INC(txq->axq_qnum, queued);
e8324357
S
1770}
1771
2d42efc4
FF
1772static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1773 int framelen)
e8324357
S
1774{
1775 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2d42efc4
FF
1776 struct ieee80211_sta *sta = tx_info->control.sta;
1777 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
6a0ddaef 1778 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2d42efc4 1779 struct ath_frame_info *fi = get_frame_info(skb);
93ae2dd2 1780 struct ath_node *an = NULL;
2d42efc4 1781 enum ath9k_key_type keytype;
e8324357 1782
2d42efc4 1783 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
e8324357 1784
93ae2dd2
FF
1785 if (sta)
1786 an = (struct ath_node *) sta->drv_priv;
1787
2d42efc4
FF
1788 memset(fi, 0, sizeof(*fi));
1789 if (hw_key)
1790 fi->keyix = hw_key->hw_key_idx;
93ae2dd2
FF
1791 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1792 fi->keyix = an->ps_key;
2d42efc4
FF
1793 else
1794 fi->keyix = ATH9K_TXKEYIX_INVALID;
1795 fi->keytype = keytype;
1796 fi->framelen = framelen;
e8324357
S
1797}
1798
ea066d5a
MSS
1799u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1800{
1801 struct ath_hw *ah = sc->sc_ah;
1802 struct ath9k_channel *curchan = ah->curchan;
d77bf3eb
RM
1803 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1804 (curchan->channelFlags & CHANNEL_5GHZ) &&
1805 (chainmask == 0x7) && (rate < 0x90))
ea066d5a
MSS
1806 return 0x3;
1807 else
1808 return chainmask;
1809}
1810
44f1d26c
FF
1811/*
 1812 * Assign a descriptor (and a sequence number if necessary)
 1813 * and map the buffer for DMA. Frees the skb on error.
1814 */
fa05f87a 1815static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
04caf863 1816 struct ath_txq *txq,
fa05f87a 1817 struct ath_atx_tid *tid,
81357a28
FF
1818 struct sk_buff *skb,
1819 bool dequeue)
f078f209 1820{
82b873af 1821 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2d42efc4 1822 struct ath_frame_info *fi = get_frame_info(skb);
fa05f87a 1823 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
82b873af 1824 struct ath_buf *bf;
fd09c85f 1825 int fragno;
fa05f87a 1826 u16 seqno;
82b873af
FF
1827
1828 bf = ath_tx_get_buffer(sc);
1829 if (!bf) {
d2182b69 1830 ath_dbg(common, XMIT, "TX buffers are full\n");
44f1d26c 1831 goto error;
82b873af 1832 }
e022edbd 1833
528f0c6b 1834 ATH_TXBUF_RESET(bf);
f078f209 1835
fa05f87a 1836 if (tid) {
fd09c85f 1837 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
fa05f87a
FF
1838 seqno = tid->seq_next;
1839 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
fd09c85f
SM
1840
1841 if (fragno)
1842 hdr->seq_ctrl |= cpu_to_le16(fragno);
1843
1844 if (!ieee80211_has_morefrags(hdr->frame_control))
1845 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1846
fa05f87a
FF
1847 bf->bf_state.seqno = seqno;
1848 }
1849
f078f209 1850 bf->bf_mpdu = skb;
f8316df1 1851
c1739eb3
BG
1852 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1853 skb->len, DMA_TO_DEVICE);
1854 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
f8316df1 1855 bf->bf_mpdu = NULL;
6cf9e995 1856 bf->bf_buf_addr = 0;
3800276a
JP
1857 ath_err(ath9k_hw_common(sc->sc_ah),
1858 "dma_mapping_error() on TX\n");
82b873af 1859 ath_tx_return_buffer(sc, bf);
44f1d26c 1860 goto error;
f8316df1
LR
1861 }
1862
56dc6336 1863 fi->bf = bf;
04caf863
FF
1864
1865 return bf;
44f1d26c
FF
1866
1867error:
81357a28
FF
1868 if (dequeue)
1869 __skb_unlink(skb, &tid->buf_q);
44f1d26c
FF
1870 dev_kfree_skb_any(skb);
1871 return NULL;
04caf863
FF
1872}
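/*
 * Editor's sketch, not part of xmit.c: the sequence-control packing
 * performed in ath_tx_setup_buffer() above. The 12-bit sequence number
 * occupies bits 4..15 of seq_ctrl (IEEE80211_SEQ_SEQ_SHIFT == 4) and
 * the 4-bit fragment number bits 0..3. pack_seq_ctrl() is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_SEQ_SHIFT 4
#define TOY_SCTL_FRAG 0x000F

static uint16_t pack_seq_ctrl(uint16_t seqno, uint16_t fragno)
{
	return (uint16_t)((seqno << TOY_SEQ_SHIFT) | (fragno & TOY_SCTL_FRAG));
}

int main(void)
{
	/* sequence 100, fragment 2 -> 0x0642 */
	printf("seq_ctrl = 0x%04x\n", pack_seq_ctrl(100, 2));
	return 0;
}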
1873
1874/* FIXME: tx power */
44f1d26c 1875static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
04caf863
FF
1876 struct ath_tx_control *txctl)
1877{
04caf863
FF
1878 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1879 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
248a38d0 1880 struct ath_atx_tid *tid = NULL;
fa05f87a 1881 struct ath_buf *bf;
04caf863 1882 u8 tidno;
f078f209 1883
3d4e20f2 1884 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
61e1b0b0 1885 ieee80211_is_data_qos(hdr->frame_control)) {
5daefbd0
FF
1886 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1887 IEEE80211_QOS_CTL_TID_MASK;
2d42efc4 1888 tid = ATH_AN_2_TID(txctl->an, tidno);
5daefbd0 1889
066dae93 1890 WARN_ON(tid->ac->txq != txctl->txq);
248a38d0
FF
1891 }
1892
1893 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
04caf863
FF
1894 /*
1895 * Try aggregation if it's a unicast data frame
1896 * and the destination is HT capable.
1897 */
44f1d26c 1898 ath_tx_send_ampdu(sc, tid, skb, txctl);
f078f209 1899 } else {
81357a28 1900 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
44f1d26c 1901 if (!bf)
3ad29529 1902 return;
04caf863 1903
82b873af
FF
1904 bf->bf_state.bfs_paprd = txctl->paprd;
1905
9cf04dcc
MSS
1906 if (txctl->paprd)
1907 bf->bf_state.bfs_paprd_timestamp = jiffies;
1908
44f1d26c 1909 ath_tx_send_normal(sc, txctl->txq, tid, skb);
f078f209 1910 }
f078f209
LR
1911}
1912
f8316df1 1913/* Upon failure caller should free skb */
c52f33d0 1914int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1915 struct ath_tx_control *txctl)
f078f209 1916{
28d16708
FF
1917 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1918 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2d42efc4 1919 struct ieee80211_sta *sta = info->control.sta;
f59a59fe 1920 struct ieee80211_vif *vif = info->control.vif;
9ac58615 1921 struct ath_softc *sc = hw->priv;
84642d6b 1922 struct ath_txq *txq = txctl->txq;
4d91f9f3 1923 int padpos, padsize;
04caf863 1924 int frmlen = skb->len + FCS_LEN;
28d16708 1925 int q;
f078f209 1926
a9927ba3
BG
1927 /* NOTE: sta can be NULL according to net/mac80211.h */
1928 if (sta)
1929 txctl->an = (struct ath_node *)sta->drv_priv;
1930
04caf863
FF
1931 if (info->control.hw_key)
1932 frmlen += info->control.hw_key->icv_len;
1933
f078f209 1934 /*
e8324357
S
1935 * As a temporary workaround, assign seq# here; this will likely need
1936 * to be cleaned up to work better with Beacon transmission and virtual
1937 * BSSes.
f078f209 1938 */
e8324357 1939 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1940 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1941 sc->tx.seq_no += 0x10;
1942 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1943 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1944 }
f078f209 1945
42cecc34
JL
1946 /* Add the padding after the header if this is not already done */
1947 padpos = ath9k_cmn_padpos(hdr->frame_control);
1948 padsize = padpos & 3;
1949 if (padsize && skb->len > padpos) {
1950 if (skb_headroom(skb) < padsize)
1951 return -ENOMEM;
28d16708 1952
42cecc34
JL
1953 skb_push(skb, padsize);
1954 memmove(skb->data, skb->data + padsize, padpos);
6e82bc4a 1955 hdr = (struct ieee80211_hdr *) skb->data;
f078f209 1956 }
f078f209 1957
f59a59fe
FF
1958 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1959 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1960 !ieee80211_is_data(hdr->frame_control))
1961 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1962
2d42efc4
FF
1963 setup_frame_info(hw, skb, frmlen);
1964
1965 /*
1966 * At this point, the vif, hw_key and sta pointers in the tx control
 1967 * info are no longer valid (overwritten by the ath_frame_info data).
1968 */
1969
28d16708 1970 q = skb_get_queue_mapping(skb);
23de5dc9
FF
1971
1972 ath_txq_lock(sc, txq);
28d16708
FF
1973 if (txq == sc->tx.txq_map[q] &&
1974 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
7545daf4 1975 ieee80211_stop_queue(sc->hw, q);
3db1cd5c 1976 txq->stopped = true;
f078f209 1977 }
f078f209 1978
44f1d26c 1979 ath_tx_start_dma(sc, skb, txctl);
3ad29529 1980
23de5dc9 1981 ath_txq_unlock(sc, txq);
3ad29529 1982
44f1d26c 1983 return 0;
f078f209
LR
1984}
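/*
 * Editor's sketch, not part of xmit.c: the header-padding arithmetic
 * from ath_tx_start() above. padpos is the 802.11 header length and
 * padsize = padpos & 3 is how many bytes must be inserted so that the
 * frame body stays 4-byte aligned. The header lengths below are the
 * common cases (data, QoS data, 4-address data, 4-address QoS data).
 */
#include <stdio.h>

int main(void)
{
	int hdrlens[] = { 24, 26, 30, 32 };

	for (int i = 0; i < 4; i++) {
		int padpos = hdrlens[i];
		int padsize = padpos & 3;

		printf("hdr %2d bytes -> %d pad byte(s)\n", padpos, padsize);
	}
	return 0;
}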
1985
e8324357
S
1986/*****************/
1987/* TX Completion */
1988/*****************/
528f0c6b 1989
e8324357 1990static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
0f9dc298 1991 int tx_flags, struct ath_txq *txq)
528f0c6b 1992{
e8324357 1993 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1994 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 1995 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
97923b14 1996 int q, padpos, padsize;
528f0c6b 1997
d2182b69 1998 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1999
55797b1a 2000 if (!(tx_flags & ATH_TX_ERROR))
e8324357
S
2001 /* Frame was ACKed */
2002 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b 2003
42cecc34
JL
2004 padpos = ath9k_cmn_padpos(hdr->frame_control);
2005 padsize = padpos & 3;
 2006 if (padsize && skb->len > padpos + padsize) {
2007 /*
2008 * Remove MAC header padding before giving the frame back to
2009 * mac80211.
2010 */
2011 memmove(skb->data + padsize, skb->data, padpos);
2012 skb_pull(skb, padsize);
e8324357 2013 }
528f0c6b 2014
c8e8868e 2015 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
1b04b930 2016 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
d2182b69 2017 ath_dbg(common, PS,
226afe68 2018 "Going back to sleep after having received TX status (0x%lx)\n",
1b04b930
S
2019 sc->ps_flags & (PS_WAIT_FOR_BEACON |
2020 PS_WAIT_FOR_CAB |
2021 PS_WAIT_FOR_PSPOLL_DATA |
2022 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
2023 }
2024
7545daf4
FF
2025 q = skb_get_queue_mapping(skb);
2026 if (txq == sc->tx.txq_map[q]) {
7545daf4
FF
2027 if (WARN_ON(--txq->pending_frames < 0))
2028 txq->pending_frames = 0;
92460412 2029
7545daf4
FF
2030 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2031 ieee80211_wake_queue(sc->hw, q);
3db1cd5c 2032 txq->stopped = false;
066dae93 2033 }
97923b14 2034 }
7545daf4 2035
23de5dc9 2036 __skb_queue_tail(&txq->complete_q, skb);
e8324357 2037}
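/*
 * Editor's sketch, not part of xmit.c: the stop/wake flow control that
 * ath_tx_start() and ath_tx_complete() implement between them. The
 * queue is stopped once pending_frames crosses a high-water mark and
 * woken when completions bring it back below; the stopped flag keeps
 * the stop/wake calls balanced. Toy types, no locking.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_QDEPTH 8

struct toy_q { int pending; bool stopped; };

static void toy_enqueue(struct toy_q *q)
{
	if (++q->pending > TOY_MAX_QDEPTH && !q->stopped) {
		q->stopped = true;	/* ieee80211_stop_queue() */
		printf("stopped at %d pending\n", q->pending);
	}
}

static void toy_complete(struct toy_q *q)
{
	if (--q->pending < 0)
		q->pending = 0;
	if (q->stopped && q->pending < TOY_MAX_QDEPTH) {
		q->stopped = false;	/* ieee80211_wake_queue() */
		printf("woken at %d pending\n", q->pending);
	}
}

int main(void)
{
	struct toy_q q = { 0, false };

	for (int i = 0; i < 10; i++)
		toy_enqueue(&q);
	for (int i = 0; i < 10; i++)
		toy_complete(&q);
	return 0;
}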
f078f209 2038
e8324357 2039static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b 2040 struct ath_txq *txq, struct list_head *bf_q,
156369fa 2041 struct ath_tx_status *ts, int txok)
f078f209 2042{
e8324357 2043 struct sk_buff *skb = bf->bf_mpdu;
3afd21e7 2044 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
e8324357 2045 unsigned long flags;
6b2c4032 2046 int tx_flags = 0;
f078f209 2047
55797b1a 2048 if (!txok)
6b2c4032 2049 tx_flags |= ATH_TX_ERROR;
f078f209 2050
3afd21e7
FF
2051 if (ts->ts_status & ATH9K_TXERR_FILT)
2052 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2053
c1739eb3 2054 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
6cf9e995 2055 bf->bf_buf_addr = 0;
9f42c2b6
FF
2056
2057 if (bf->bf_state.bfs_paprd) {
9cf04dcc
MSS
2058 if (time_after(jiffies,
2059 bf->bf_state.bfs_paprd_timestamp +
2060 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
ca369eb4 2061 dev_kfree_skb_any(skb);
78a18172 2062 else
ca369eb4 2063 complete(&sc->paprd_complete);
9f42c2b6 2064 } else {
55797b1a 2065 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
0f9dc298 2066 ath_tx_complete(sc, skb, tx_flags, txq);
9f42c2b6 2067 }
6cf9e995
BG
 2068 /* At this point, skb (bf->bf_mpdu) is consumed; make sure we don't
2069 * accidentally reference it later.
2070 */
2071 bf->bf_mpdu = NULL;
e8324357
S
2072
2073 /*
 2074 * Return this mpdu's list of ath_buf to the free queue.
2075 */
2076 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2077 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2078 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
2079}
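/*
 * Editor's sketch, not part of xmit.c: the wraparound-safe timeout
 * test behind the PAPRD check above. The kernel's time_after(a, b) is
 * essentially (long)((b) - (a)) < 0, which stays correct when the
 * jiffies counter wraps. toy_time_after() is a userspace stand-in.
 */
#include <stdio.h>

typedef unsigned long jiffies_t;

static int toy_time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t stamp = (jiffies_t)-5;	/* just before wraparound */

	printf("%d\n", toy_time_after(stamp + 2, stamp));  /* 1: later */
	printf("%d\n", toy_time_after(stamp, stamp + 2));  /* 0: earlier */
	printf("%d\n", toy_time_after(stamp + 10, stamp)); /* 1: wrapped past 0 */
	return 0;
}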
2080
0cdd5c60
FF
2081static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2082 struct ath_tx_status *ts, int nframes, int nbad,
3afd21e7 2083 int txok)
f078f209 2084{
a22be22a 2085 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 2086 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 2087 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
0cdd5c60 2088 struct ieee80211_hw *hw = sc->hw;
f0c255a0 2089 struct ath_hw *ah = sc->sc_ah;
8a92e2ee 2090 u8 i, tx_rateindex;
f078f209 2091
95e4acb7 2092 if (txok)
db1a052b 2093 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 2094
db1a052b 2095 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
2096 WARN_ON(tx_rateindex >= hw->max_rates);
2097
3afd21e7 2098 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
d969847c 2099 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 2100
b572d033 2101 BUG_ON(nbad > nframes);
ebd02287 2102 }
185d1589
RM
2103 tx_info->status.ampdu_len = nframes;
2104 tx_info->status.ampdu_ack_len = nframes - nbad;
ebd02287 2105
db1a052b 2106 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
3afd21e7 2107 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
f0c255a0
FF
2108 /*
 2109 * If an underrun error is seen, treat it as an excessive
 2110 * retry only if the max frame trigger level has been reached
 2111 * (2 KB for single stream, and 4 KB for dual stream).
 2112 * Adjust the long retry as if the frame was tried
 2113 * hw->max_rate_tries times to affect how rate control updates
 2114 * PER for the failed rate.
 2115 * In case of congestion on the bus, penalizing these underruns
 2116 * should help the hardware actually transmit new frames
 2117 * successfully by eventually preferring slower rates.
 2118 * This itself should also alleviate congestion on the bus.
2119 */
3afd21e7
FF
2120 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2121 ATH9K_TX_DELIM_UNDERRUN)) &&
2122 ieee80211_is_data(hdr->frame_control) &&
83860c59 2123 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
f0c255a0
FF
2124 tx_info->status.rates[tx_rateindex].count =
2125 hw->max_rate_tries;
f078f209 2126 }
8a92e2ee 2127
545750d3 2128 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
8a92e2ee 2129 tx_info->status.rates[i].count = 0;
545750d3
FF
2130 tx_info->status.rates[i].idx = -1;
2131 }
8a92e2ee 2132
78c4653a 2133 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
f078f209
LR
2134}
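/*
 * Editor's sketch, not part of xmit.c: the rate-table fixup done by
 * ath_tx_rc_status() above. Slots after the rate the hardware finally
 * used are invalidated (idx = -1, count = 0) and the used slot's count
 * becomes the long-retry count plus the initial attempt. Toy types.
 */
#include <stdio.h>

#define TOY_MAX_RATES 4

struct toy_rate { int idx, count; };

int main(void)
{
	struct toy_rate rates[TOY_MAX_RATES] = {
		{ 7, 2 }, { 5, 2 }, { 3, 2 }, { 0, 2 }
	};
	int tx_rateindex = 1, ts_longretry = 3;

	rates[tx_rateindex].count = ts_longretry + 1;
	for (int i = tx_rateindex + 1; i < TOY_MAX_RATES; i++) {
		rates[i].count = 0;
		rates[i].idx = -1;
	}

	for (int i = 0; i < TOY_MAX_RATES; i++)
		printf("rate[%d]: idx=%d count=%d\n",
		       i, rates[i].idx, rates[i].count);
	return 0;
}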
2135
fce041be
FF
2136static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2137 struct ath_tx_status *ts, struct ath_buf *bf,
2138 struct list_head *bf_head)
2139{
2140 int txok;
2141
2142 txq->axq_depth--;
2143 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2144 txq->axq_tx_inprogress = false;
2145 if (bf_is_ampdu_not_probing(bf))
2146 txq->axq_ampdu_depth--;
2147
fce041be 2148 if (!bf_isampdu(bf)) {
3afd21e7 2149 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
156369fa 2150 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
fce041be
FF
2151 } else
2152 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2153
3d4e20f2 2154 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
fce041be
FF
2155 ath_txq_schedule(sc, txq);
2156}
2157
e8324357 2158static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 2159{
cbe61d8a 2160 struct ath_hw *ah = sc->sc_ah;
c46917bb 2161 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2162 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2163 struct list_head bf_head;
e8324357 2164 struct ath_desc *ds;
29bffa96 2165 struct ath_tx_status ts;
e8324357 2166 int status;
f078f209 2167
d2182b69 2168 ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
226afe68
JP
2169 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2170 txq->axq_link);
f078f209 2171
23de5dc9 2172 ath_txq_lock(sc, txq);
f078f209 2173 for (;;) {
236de514
FF
2174 if (work_pending(&sc->hw_reset_work))
2175 break;
2176
f078f209
LR
2177 if (list_empty(&txq->axq_q)) {
2178 txq->axq_link = NULL;
3d4e20f2 2179 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
082f6536 2180 ath_txq_schedule(sc, txq);
f078f209
LR
2181 break;
2182 }
f078f209
LR
2183 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2184
e8324357
S
2185 /*
 2186 * There is a race condition in which a BH gets scheduled
 2187 * after sw writes TxE and before hw re-loads the last
2188 * descriptor to get the newly chained one.
2189 * Software must keep the last DONE descriptor as a
2190 * holding descriptor - software does so by marking
2191 * it with the STALE flag.
2192 */
2193 bf_held = NULL;
a119cc49 2194 if (bf->bf_stale) {
e8324357 2195 bf_held = bf;
fce041be 2196 if (list_is_last(&bf_held->list, &txq->axq_q))
e8324357 2197 break;
fce041be
FF
2198
2199 bf = list_entry(bf_held->list.next, struct ath_buf,
2200 list);
f078f209
LR
2201 }
2202
2203 lastbf = bf->bf_lastbf;
e8324357 2204 ds = lastbf->bf_desc;
f078f209 2205
29bffa96
FF
2206 memset(&ts, 0, sizeof(ts));
2207 status = ath9k_hw_txprocdesc(ah, ds, &ts);
fce041be 2208 if (status == -EINPROGRESS)
e8324357 2209 break;
fce041be 2210
2dac4fb9 2211 TX_STAT_INC(txq->axq_qnum, txprocdesc);
f078f209 2212
e8324357
S
2213 /*
2214 * Remove ath_buf's of the same transmit unit from txq,
2215 * however leave the last descriptor back as the holding
2216 * descriptor for hw.
2217 */
a119cc49 2218 lastbf->bf_stale = true;
e8324357 2219 INIT_LIST_HEAD(&bf_head);
e8324357
S
2220 if (!list_is_singular(&lastbf->list))
2221 list_cut_position(&bf_head,
2222 &txq->axq_q, lastbf->list.prev);
f078f209 2223
fce041be 2224 if (bf_held) {
0a8cea84 2225 list_del(&bf_held->list);
0a8cea84 2226 ath_tx_return_buffer(sc, bf_held);
e8324357 2227 }
f078f209 2228
fce041be 2229 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
8469cdef 2230 }
23de5dc9 2231 ath_txq_unlock_complete(sc, txq);
8469cdef
S
2232}
2233
305fe47f 2234static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2235{
2236 struct ath_softc *sc = container_of(work, struct ath_softc,
2237 tx_complete_work.work);
2238 struct ath_txq *txq;
2239 int i;
2240 bool needreset = false;
60f2d1d5
BG
2241#ifdef CONFIG_ATH9K_DEBUGFS
2242 sc->tx_complete_poll_work_seen++;
2243#endif
164ace38
SB
2244
2245 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2246 if (ATH_TXQ_SETUP(sc, i)) {
2247 txq = &sc->tx.txq[i];
23de5dc9 2248 ath_txq_lock(sc, txq);
164ace38
SB
2249 if (txq->axq_depth) {
2250 if (txq->axq_tx_inprogress) {
2251 needreset = true;
23de5dc9 2252 ath_txq_unlock(sc, txq);
164ace38
SB
2253 break;
2254 } else {
2255 txq->axq_tx_inprogress = true;
2256 }
2257 }
23de5dc9 2258 ath_txq_unlock_complete(sc, txq);
164ace38
SB
2259 }
2260
2261 if (needreset) {
d2182b69 2262 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
226afe68 2263 "tx hung, resetting the chip\n");
030d6294 2264 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
236de514 2265 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
164ace38
SB
2266 }
2267
42935eca 2268 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2269 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2270}
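/*
 * Editor's sketch, not part of xmit.c: the two-pass hang detection in
 * ath_tx_complete_poll_work() above. The poll arms a per-queue flag
 * whenever work is pending; any completion clears it. If the flag is
 * still set on the next poll, nothing completed in a whole interval
 * and the queue is declared hung. Toy types, no locking.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_txq { int depth; bool inprogress; };

static void toy_complete(struct toy_txq *q)
{
	q->depth--;
	q->inprogress = false;	/* progress observed */
}

static bool toy_poll(struct toy_txq *q)
{
	if (q->depth) {
		if (q->inprogress)
			return true;	/* no progress since last poll */
		q->inprogress = true;	/* arm for the next interval */
	}
	return false;
}

int main(void)
{
	struct toy_txq q = { 2, false };

	toy_poll(&q);		/* arms the flag */
	toy_complete(&q);	/* progress clears it */
	printf("hung after progress? %d\n", toy_poll(&q));	/* 0 */
	printf("hung with no progress? %d\n", toy_poll(&q));	/* 1 */
	return 0;
}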
2271
2272
f078f209 2273
e8324357 2274void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2275{
239c795d
FF
2276 struct ath_hw *ah = sc->sc_ah;
2277 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
e8324357 2278 int i;
f078f209 2279
e8324357
S
2280 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2281 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2282 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2283 }
2284}
2285
e5003249
VT
2286void ath_tx_edma_tasklet(struct ath_softc *sc)
2287{
fce041be 2288 struct ath_tx_status ts;
e5003249
VT
2289 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2290 struct ath_hw *ah = sc->sc_ah;
2291 struct ath_txq *txq;
2292 struct ath_buf *bf, *lastbf;
2293 struct list_head bf_head;
2294 int status;
e5003249
VT
2295
2296 for (;;) {
236de514
FF
2297 if (work_pending(&sc->hw_reset_work))
2298 break;
2299
fce041be 2300 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
e5003249
VT
2301 if (status == -EINPROGRESS)
2302 break;
2303 if (status == -EIO) {
d2182b69 2304 ath_dbg(common, XMIT, "Error processing tx status\n");
e5003249
VT
2305 break;
2306 }
2307
4e0ad259
FF
2308 /* Process beacon completions separately */
2309 if (ts.qid == sc->beacon.beaconq) {
2310 sc->beacon.tx_processed = true;
2311 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
e5003249 2312 continue;
4e0ad259 2313 }
e5003249 2314
fce041be 2315 txq = &sc->tx.txq[ts.qid];
e5003249 2316
23de5dc9 2317 ath_txq_lock(sc, txq);
fce041be 2318
e5003249 2319 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
23de5dc9 2320 ath_txq_unlock(sc, txq);
e5003249
VT
2321 return;
2322 }
2323
2324 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2325 struct ath_buf, list);
2326 lastbf = bf->bf_lastbf;
2327
2328 INIT_LIST_HEAD(&bf_head);
2329 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2330 &lastbf->list);
e5003249 2331
fce041be
FF
2332 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2333 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
e5003249 2334
fce041be
FF
2335 if (!list_empty(&txq->axq_q)) {
2336 struct list_head bf_q;
60f2d1d5 2337
fce041be
FF
2338 INIT_LIST_HEAD(&bf_q);
2339 txq->axq_link = NULL;
2340 list_splice_tail_init(&txq->axq_q, &bf_q);
2341 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2342 }
2343 }
86271e46 2344
fce041be 2345 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
23de5dc9 2346 ath_txq_unlock_complete(sc, txq);
e5003249
VT
2347 }
2348}
2349
e8324357
S
2350/*****************/
2351/* Init, Cleanup */
2352/*****************/
f078f209 2353
5088c2f1
VT
2354static int ath_txstatus_setup(struct ath_softc *sc, int size)
2355{
2356 struct ath_descdma *dd = &sc->txsdma;
2357 u8 txs_len = sc->sc_ah->caps.txs_len;
2358
2359 dd->dd_desc_len = size * txs_len;
2360 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2361 &dd->dd_desc_paddr, GFP_KERNEL);
2362 if (!dd->dd_desc)
2363 return -ENOMEM;
2364
2365 return 0;
2366}
2367
2368static int ath_tx_edma_init(struct ath_softc *sc)
2369{
2370 int err;
2371
2372 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2373 if (!err)
2374 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2375 sc->txsdma.dd_desc_paddr,
2376 ATH_TXSTATUS_RING_SIZE);
2377
2378 return err;
2379}
2380
2381static void ath_tx_edma_cleanup(struct ath_softc *sc)
2382{
2383 struct ath_descdma *dd = &sc->txsdma;
2384
2385 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2386 dd->dd_desc_paddr);
2387}
2388
e8324357 2389int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2390{
c46917bb 2391 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2392 int error = 0;
f078f209 2393
797fe5cb 2394 spin_lock_init(&sc->tx.txbuflock);
f078f209 2395
797fe5cb 2396 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2397 "tx", nbufs, 1, 1);
797fe5cb 2398 if (error != 0) {
3800276a
JP
2399 ath_err(common,
2400 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2401 goto err;
2402 }
f078f209 2403
797fe5cb 2404 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2405 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2406 if (error != 0) {
3800276a
JP
2407 ath_err(common,
2408 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2409 goto err;
2410 }
f078f209 2411
164ace38
SB
2412 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2413
5088c2f1
VT
2414 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2415 error = ath_tx_edma_init(sc);
2416 if (error)
2417 goto err;
2418 }
2419
797fe5cb 2420err:
e8324357
S
2421 if (error != 0)
2422 ath_tx_cleanup(sc);
f078f209 2423
e8324357 2424 return error;
f078f209
LR
2425}
2426
797fe5cb 2427void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2428{
2429 if (sc->beacon.bdma.dd_desc_len != 0)
2430 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2431
2432 if (sc->tx.txdma.dd_desc_len != 0)
2433 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2434
2435 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2436 ath_tx_edma_cleanup(sc);
e8324357 2437}
f078f209
LR
2438
2439void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2440{
c5170163
S
2441 struct ath_atx_tid *tid;
2442 struct ath_atx_ac *ac;
2443 int tidno, acno;
f078f209 2444
8ee5afbc 2445 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2446 tidno < WME_NUM_TID;
2447 tidno++, tid++) {
2448 tid->an = an;
2449 tid->tidno = tidno;
2450 tid->seq_start = tid->seq_next = 0;
2451 tid->baw_size = WME_MAX_BA;
2452 tid->baw_head = tid->baw_tail = 0;
2453 tid->sched = false;
e8324357 2454 tid->paused = false;
a37c2c79 2455 tid->state &= ~AGGR_CLEANUP;
56dc6336 2456 __skb_queue_head_init(&tid->buf_q);
c5170163 2457 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2458 tid->ac = &an->ac[acno];
a37c2c79
S
2459 tid->state &= ~AGGR_ADDBA_COMPLETE;
2460 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2461 }
f078f209 2462
8ee5afbc 2463 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2464 acno < WME_NUM_AC; acno++, ac++) {
2465 ac->sched = false;
066dae93 2466 ac->txq = sc->tx.txq_map[acno];
c5170163 2467 INIT_LIST_HEAD(&ac->tid_q);
f078f209
LR
2468 }
2469}
2470
b5aa9bf9 2471void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209 2472{
2b40994c
FF
2473 struct ath_atx_ac *ac;
2474 struct ath_atx_tid *tid;
f078f209 2475 struct ath_txq *txq;
066dae93 2476 int tidno;
e8324357 2477
2b40994c
FF
2478 for (tidno = 0, tid = &an->tid[tidno];
2479 tidno < WME_NUM_TID; tidno++, tid++) {
f078f209 2480
2b40994c 2481 ac = tid->ac;
066dae93 2482 txq = ac->txq;
f078f209 2483
23de5dc9 2484 ath_txq_lock(sc, txq);
2b40994c
FF
2485
2486 if (tid->sched) {
2487 list_del(&tid->list);
2488 tid->sched = false;
2489 }
2490
2491 if (ac->sched) {
2492 list_del(&ac->list);
2493 tid->ac->sched = false;
f078f209 2494 }
2b40994c
FF
2495
2496 ath_tid_drain(sc, txq, tid);
2497 tid->state &= ~AGGR_ADDBA_COMPLETE;
2498 tid->state &= ~AGGR_CLEANUP;
2499
23de5dc9 2500 ath_txq_unlock(sc, txq);
f078f209
LR
2501 }
2502}