ath9k: fix initial sequence number after starting an ampdu session
linux-2.6-block.git: drivers/net/wireless/ath/ath9k/xmit.c
f078f209 1/*
cee075a2 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
394cf0a1 17#include "ath9k.h"
b622a720 18#include "ar9003_mac.h"
19
20#define BITS_PER_BYTE 8
21#define OFDM_PLCP_BITS 22
7817e4ce 22#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
23#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
24#define L_STF 8
25#define L_LTF 8
26#define L_SIG 4
27#define HT_SIG 8
28#define HT_STF 4
29#define HT_LTF(_ns) (4 * (_ns))
30#define SYMBOL_TIME(_ns) ((_ns) << 2) /* n symbols * 4 us */
31#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* n symbols * 3.6 us */
32#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
33#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
34
35#define OFDM_SIFS_TIME 16
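/*
 * Worked example (editor's note, not in the original source): with the
 * macros above, a long-GI symbol lasts 4 us and a short-GI symbol 3.6 us.
 * For 10 symbols, SYMBOL_TIME(10) = 10 << 2 = 40 us, while
 * SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us. Going the other
 * way, NUM_SYMBOLS_PER_USEC(40) = 40 >> 2 = 10 symbols and
 * NUM_SYMBOLS_PER_USEC_HALFGI(40) = (40 * 5 - 4) / 18 = 10 symbols.
 */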
36
c6663876 37static u16 bits_per_symbol[][2] = {
38 /* 20MHz 40MHz */
39 { 26, 54 }, /* 0: BPSK */
40 { 52, 108 }, /* 1: QPSK 1/2 */
41 { 78, 162 }, /* 2: QPSK 3/4 */
42 { 104, 216 }, /* 3: 16-QAM 1/2 */
43 { 156, 324 }, /* 4: 16-QAM 3/4 */
44 { 208, 432 }, /* 5: 64-QAM 2/3 */
45 { 234, 486 }, /* 6: 64-QAM 3/4 */
46 { 260, 540 }, /* 7: 64-QAM 5/6 */
47};
48
49#define IS_HT_RATE(_rate) ((_rate) & 0x80)
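/*
 * Worked example (editor's note, not in the original source): the table
 * above holds data bits per OFDM symbol for a single spatial stream.
 * For MCS 15 (rix = 15) on a 40 MHz channel: HT_RC_2_STREAMS(15) = 2 and
 * bits_per_symbol[15 % 8][1] = 540, so the frame carries
 * 540 * 2 = 1080 bits per 4 us symbol, i.e. 270 Mbit/s at long GI,
 * matching the nominal MCS 15 rate.
 */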
50
51static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
52 struct ath_atx_tid *tid,
2d42efc4 53 struct list_head *bf_head);
e8324357 54static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55 struct ath_txq *txq, struct list_head *bf_q,
56 struct ath_tx_status *ts, int txok, int sendbar);
102e0572 57static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
e8324357 58 struct list_head *head);
269c44bc 59static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
db1a052b 60static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
b572d033 61 int nframes, int nbad, int txok, bool update_rc);
62static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
63 int seqno);
c4288390 64
545750d3 65enum {
66 MCS_HT20,
67 MCS_HT20_SGI,
68 MCS_HT40,
69 MCS_HT40_SGI,
70};
71
72static int ath_max_4ms_framelen[4][32] = {
73 [MCS_HT20] = {
74 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
75 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
76 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
77 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
78 },
79 [MCS_HT20_SGI] = {
80 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
81 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
82 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
83 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
84 },
85 [MCS_HT40] = {
86 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
87 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
88 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
89 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
90 },
91 [MCS_HT40_SGI] = {
92 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
93 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
94 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
95 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
96 }
97};
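/*
 * Editor's note (not in the original source): each entry above is the
 * largest frame length that still fits a 4 ms transmit duration at that
 * MCS. Sanity check for [MCS_HT20][0]: MCS 0 carries 26 bits per 4 us
 * symbol, i.e. 6.5 Mbit/s, and 4 ms at that rate is 26000 bits =
 * 3250 bytes; the table value of 3212 stays slightly below that,
 * presumably leaving headroom for PLCP/framing overhead. Entries cap at
 * 65532 because the hardware length fields are 16 bits wide.
 */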
98
99/*********************/
100/* Aggregation logic */
101/*********************/
f078f209 102
e8324357 103static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
ff37e337 104{
e8324357 105 struct ath_atx_ac *ac = tid->ac;
ff37e337 106
107 if (tid->paused)
108 return;
ff37e337 109
110 if (tid->sched)
111 return;
ff37e337 112
113 tid->sched = true;
114 list_add_tail(&tid->list, &ac->tid_q);
528f0c6b 115
116 if (ac->sched)
117 return;
f078f209 118
119 ac->sched = true;
120 list_add_tail(&ac->list, &txq->axq_acq);
121}
f078f209 122
e8324357 123static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
f078f209 124{
066dae93 125 struct ath_txq *txq = tid->ac->txq;
e6a9854b 126
75401849 127 WARN_ON(!tid->paused);
f078f209 128
129 spin_lock_bh(&txq->axq_lock);
130 tid->paused = false;
f078f209 131
132 if (list_empty(&tid->buf_q))
133 goto unlock;
f078f209 134
135 ath_tx_queue_tid(txq, tid);
136 ath_txq_schedule(sc, txq);
137unlock:
138 spin_unlock_bh(&txq->axq_lock);
528f0c6b 139}
f078f209 140
2d42efc4 141static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
142{
143 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
144 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
145 sizeof(tx_info->rate_driver_data));
146 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
147}
148
e8324357 149static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
528f0c6b 150{
066dae93 151 struct ath_txq *txq = tid->ac->txq;
152 struct ath_buf *bf;
153 struct list_head bf_head;
90fa539c 154 struct ath_tx_status ts;
2d42efc4 155 struct ath_frame_info *fi;
f078f209 156
90fa539c 157 INIT_LIST_HEAD(&bf_head);
e6a9854b 158
90fa539c 159 memset(&ts, 0, sizeof(ts));
75401849 160 spin_lock_bh(&txq->axq_lock);
f078f209 161
162 while (!list_empty(&tid->buf_q)) {
163 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
d43f3015 164 list_move_tail(&bf->list, &bf_head);
90fa539c 165
e1566d1f 166 spin_unlock_bh(&txq->axq_lock);
167 fi = get_frame_info(bf->bf_mpdu);
168 if (fi->retries) {
169 ath_tx_update_baw(sc, tid, fi->seqno);
170 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
171 } else {
a9e99a0c 172 ath_tx_send_normal(sc, txq, NULL, &bf_head);
90fa539c 173 }
e1566d1f 174 spin_lock_bh(&txq->axq_lock);
528f0c6b 175 }
f078f209 176
e8324357 177 spin_unlock_bh(&txq->axq_lock);
528f0c6b 178}
f078f209 179
180static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
181 int seqno)
528f0c6b 182{
e8324357 183 int index, cindex;
f078f209 184
185 index = ATH_BA_INDEX(tid->seq_start, seqno);
186 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
f078f209 187
81ee13ba 188 __clear_bit(cindex, tid->tx_buf);
528f0c6b 189
81ee13ba 190 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
191 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
192 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
193 }
528f0c6b 194}
f078f209 195
e8324357 196static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
2d3bcba0 197 u16 seqno)
528f0c6b 198{
e8324357 199 int index, cindex;
528f0c6b 200
2d3bcba0 201 index = ATH_BA_INDEX(tid->seq_start, seqno);
e8324357 202 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
81ee13ba 203 __set_bit(cindex, tid->tx_buf);
f078f209 204
205 if (index >= ((tid->baw_tail - tid->baw_head) &
206 (ATH_TID_MAX_BUFS - 1))) {
207 tid->baw_tail = cindex;
208 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
f078f209 209 }
210}
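/*
 * Worked example (editor's note, not in the original source), assuming
 * ATH_BA_INDEX() returns the modulo-4096 distance from seq_start: with
 * tid->seq_start = 100 and baw_head = 0, queueing seqno 103 gives
 * index = 3 and sets bit cindex = 3 in tid->tx_buf. When seqno 100
 * completes, ath_tx_update_baw() clears bit 0 and slides seq_start and
 * baw_head forward until the next still-outstanding subframe is reached.
 */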
211
212/*
213 * TODO: For frame(s) that are in the retry state, we will reuse the
214 * sequence number(s) without setting the retry bit. The
215 * alternative is to give up on these and BAR the receiver's window
216 * forward.
f078f209 217 */
218static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
219 struct ath_atx_tid *tid)
f078f209 220
f078f209 221{
222 struct ath_buf *bf;
223 struct list_head bf_head;
db1a052b 224 struct ath_tx_status ts;
2d42efc4 225 struct ath_frame_info *fi;
226
227 memset(&ts, 0, sizeof(ts));
e8324357 228 INIT_LIST_HEAD(&bf_head);
f078f209 229
230 for (;;) {
231 if (list_empty(&tid->buf_q))
232 break;
f078f209 233
234 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
235 list_move_tail(&bf->list, &bf_head);
f078f209 236
237 fi = get_frame_info(bf->bf_mpdu);
238 if (fi->retries)
239 ath_tx_update_baw(sc, tid, fi->seqno);
f078f209 240
e8324357 241 spin_unlock(&txq->axq_lock);
db1a052b 242 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
243 spin_lock(&txq->axq_lock);
244 }
f078f209 245
246 tid->seq_next = tid->seq_start;
247 tid->baw_tail = tid->baw_head;
248}
249
fec247c0 250static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
2d42efc4 251 struct sk_buff *skb)
f078f209 252{
8b7f8532 253 struct ath_frame_info *fi = get_frame_info(skb);
e8324357 254 struct ieee80211_hdr *hdr;
f078f209 255
fec247c0 256 TX_STAT_INC(txq->axq_qnum, a_retries);
8b7f8532 257 if (fi->retries++ > 0)
2d42efc4 258 return;
f078f209 259
260 hdr = (struct ieee80211_hdr *)skb->data;
261 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
262}
263
0a8cea84 264static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
d43f3015 265{
0a8cea84 266 struct ath_buf *bf = NULL;
267
268 spin_lock_bh(&sc->tx.txbuflock);
269
270 if (unlikely(list_empty(&sc->tx.txbuf))) {
271 spin_unlock_bh(&sc->tx.txbuflock);
272 return NULL;
273 }
274
275 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
276 list_del(&bf->list);
277
278 spin_unlock_bh(&sc->tx.txbuflock);
279
280 return bf;
281}
282
283static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
284{
285 spin_lock_bh(&sc->tx.txbuflock);
286 list_add_tail(&bf->list, &sc->tx.txbuf);
287 spin_unlock_bh(&sc->tx.txbuflock);
288}
289
290static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
291{
292 struct ath_buf *tbf;
293
294 tbf = ath_tx_get_buffer(sc);
295 if (WARN_ON(!tbf))
296 return NULL;
297
298 ATH_TXBUF_RESET(tbf);
299
827e69bf 300 tbf->aphy = bf->aphy;
301 tbf->bf_mpdu = bf->bf_mpdu;
302 tbf->bf_buf_addr = bf->bf_buf_addr;
d826c832 303 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
d43f3015 304 tbf->bf_state = bf->bf_state;
305
306 return tbf;
307}
308
309static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
310 struct ath_tx_status *ts, int txok,
311 int *nframes, int *nbad)
312{
2d42efc4 313 struct ath_frame_info *fi;
314 u16 seq_st = 0;
315 u32 ba[WME_BA_BMP_SIZE >> 5];
316 int ba_index;
317 int isaggr = 0;
318
319 *nbad = 0;
320 *nframes = 0;
321
322 isaggr = bf_isaggr(bf);
323 if (isaggr) {
324 seq_st = ts->ts_seqnum;
325 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
326 }
327
328 while (bf) {
329 fi = get_frame_info(bf->bf_mpdu);
330 ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
331
332 (*nframes)++;
333 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
334 (*nbad)++;
335
336 bf = bf->bf_next;
337 }
338}
339
340
341static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
342 struct ath_buf *bf, struct list_head *bf_q,
c5992618 343 struct ath_tx_status *ts, int txok, bool retry)
f078f209 344{
345 struct ath_node *an = NULL;
346 struct sk_buff *skb;
1286ec6d 347 struct ieee80211_sta *sta;
76d5a9e8 348 struct ieee80211_hw *hw;
1286ec6d 349 struct ieee80211_hdr *hdr;
76d5a9e8 350 struct ieee80211_tx_info *tx_info;
e8324357 351 struct ath_atx_tid *tid = NULL;
d43f3015 352 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
e8324357 353 struct list_head bf_head, bf_pending;
0934af23 354 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
f078f209 355 u32 ba[WME_BA_BMP_SIZE >> 5];
356 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
357 bool rc_update = true;
78c4653a 358 struct ieee80211_tx_rate rates[4];
2d42efc4 359 struct ath_frame_info *fi;
ebd02287 360 int nframes;
5daefbd0 361 u8 tidno;
f078f209 362
a22be22a 363 skb = bf->bf_mpdu;
364 hdr = (struct ieee80211_hdr *)skb->data;
365
76d5a9e8 366 tx_info = IEEE80211_SKB_CB(skb);
827e69bf 367 hw = bf->aphy->hw;
76d5a9e8 368
369 memcpy(rates, tx_info->control.rates, sizeof(rates));
370
1286ec6d 371 rcu_read_lock();
f078f209 372
686b9cb9 373 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
374 if (!sta) {
375 rcu_read_unlock();
73e19463 376
377 INIT_LIST_HEAD(&bf_head);
378 while (bf) {
379 bf_next = bf->bf_next;
380
381 bf->bf_state.bf_type |= BUF_XRETRY;
382 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
383 !bf->bf_stale || bf_next != NULL)
384 list_move_tail(&bf->list, &bf_head);
385
b572d033 386 ath_tx_rc_status(bf, ts, 1, 1, 0, false);
387 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
388 0, 0);
389
390 bf = bf_next;
391 }
1286ec6d 392 return;
393 }
394
1286ec6d 395 an = (struct ath_node *)sta->drv_priv;
396 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
397 tid = ATH_AN_2_TID(an, tidno);
1286ec6d 398
399 /*
400 * The hardware occasionally sends a tx status for the wrong TID.
401 * In this case, the BA status cannot be considered valid and all
402 * subframes need to be retransmitted
403 */
5daefbd0 404 if (tidno != ts->tid)
405 txok = false;
406
e8324357 407 isaggr = bf_isaggr(bf);
d43f3015 408 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
f078f209 409
d43f3015 410 if (isaggr && txok) {
411 if (ts->ts_flags & ATH9K_TX_BA) {
412 seq_st = ts->ts_seqnum;
413 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
e8324357 414 } else {
415 /*
416 * AR5416 can become deaf/mute when a BA
417 * issue occurs. The chip needs to be reset.
418 * But the AP code may have synchronization issues
419 * when performing an internal reset in this routine.
420 * Only enable reset in STA mode for now.
421 */
2660b81a 422 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
d43f3015 423 needreset = 1;
e8324357 424 }
425 }
426
427 INIT_LIST_HEAD(&bf_pending);
428 INIT_LIST_HEAD(&bf_head);
f078f209 429
b572d033 430 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
431 while (bf) {
432 txfail = txpending = 0;
433 bf_next = bf->bf_next;
f078f209 434
435 skb = bf->bf_mpdu;
436 tx_info = IEEE80211_SKB_CB(skb);
2d42efc4 437 fi = get_frame_info(skb);
78c4653a 438
2d42efc4 439 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
440 /* transmit completion, subframe is
441 * acked by block ack */
0934af23 442 acked_cnt++;
443 } else if (!isaggr && txok) {
444 /* transmit completion */
0934af23 445 acked_cnt++;
e8324357 446 } else {
c5992618 447 if (!(tid->state & AGGR_CLEANUP) && retry) {
448 if (fi->retries < ATH_MAX_SW_RETRIES) {
449 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
450 txpending = 1;
451 } else {
452 bf->bf_state.bf_type |= BUF_XRETRY;
453 txfail = 1;
454 sendbar = 1;
0934af23 455 txfail_cnt++;
456 }
457 } else {
458 /*
459 * cleanup in progress, just fail
460 * the un-acked sub-frames
461 */
462 txfail = 1;
463 }
464 }
f078f209 465
466 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
467 bf_next == NULL) {
468 /*
469 * Make sure the last desc is reclaimed if it
470 * is not a holding desc.
471 */
472 if (!bf_last->bf_stale)
473 list_move_tail(&bf->list, &bf_head);
474 else
475 INIT_LIST_HEAD(&bf_head);
e8324357 476 } else {
9680e8a3 477 BUG_ON(list_empty(bf_q));
d43f3015 478 list_move_tail(&bf->list, &bf_head);
e8324357 479 }
f078f209 480
90fa539c 481 if (!txpending || (tid->state & AGGR_CLEANUP)) {
482 /*
483 * complete the acked-ones/xretried ones; update
484 * block-ack window
485 */
486 spin_lock_bh(&txq->axq_lock);
2d42efc4 487 ath_tx_update_baw(sc, tid, fi->seqno);
e8324357 488 spin_unlock_bh(&txq->axq_lock);
f078f209 489
8a92e2ee 490 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
78c4653a 491 memcpy(tx_info->control.rates, rates, sizeof(rates));
b572d033 492 ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
493 rc_update = false;
494 } else {
b572d033 495 ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
496 }
497
498 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
499 !txfail, sendbar);
e8324357 500 } else {
d43f3015 501 /* retry the un-acked ones */
502 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
503 if (bf->bf_next == NULL && bf_last->bf_stale) {
504 struct ath_buf *tbf;
505
506 tbf = ath_clone_txbuf(sc, bf_last);
507 /*
508 * Update tx baw and complete the
509 * frame with failed status if we
510 * run out of tx buf.
511 */
512 if (!tbf) {
513 spin_lock_bh(&txq->axq_lock);
2d42efc4 514 ath_tx_update_baw(sc, tid, fi->seqno);
515 spin_unlock_bh(&txq->axq_lock);
516
517 bf->bf_state.bf_type |=
518 BUF_XRETRY;
519 ath_tx_rc_status(bf, ts, nframes,
520 nbad, 0, false);
521 ath_tx_complete_buf(sc, bf, txq,
522 &bf_head,
523 ts, 0, 0);
524 break;
525 }
526
527 ath9k_hw_cleartxdesc(sc->sc_ah,
528 tbf->bf_desc);
529 list_add_tail(&tbf->list, &bf_head);
530 } else {
531 /*
532 * Clear descriptor status words for
533 * software retry
534 */
535 ath9k_hw_cleartxdesc(sc->sc_ah,
536 bf->bf_desc);
c41d92dc 537 }
538 }
539
540 /*
541 * Put this buffer to the temporary pending
542 * queue to retain ordering
543 */
544 list_splice_tail_init(&bf_head, &bf_pending);
545 }
546
547 bf = bf_next;
f078f209 548 }
f078f209 549
550 /* prepend un-acked frames to the beginning of the pending frame queue */
551 if (!list_empty(&bf_pending)) {
552 spin_lock_bh(&txq->axq_lock);
553 list_splice(&bf_pending, &tid->buf_q);
554 ath_tx_queue_tid(txq, tid);
555 spin_unlock_bh(&txq->axq_lock);
556 }
557
e8324357 558 if (tid->state & AGGR_CLEANUP) {
559 ath_tx_flush_tid(sc, tid);
560
561 if (tid->baw_head == tid->baw_tail) {
562 tid->state &= ~AGGR_ADDBA_COMPLETE;
e8324357 563 tid->state &= ~AGGR_CLEANUP;
d43f3015 564 }
e8324357 565 }
f078f209 566
567 rcu_read_unlock();
568
569 if (needreset)
570 ath_reset(sc, false);
e8324357 571}
f078f209 572
573static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
574 struct ath_atx_tid *tid)
f078f209 575{
576 struct sk_buff *skb;
577 struct ieee80211_tx_info *tx_info;
a8efee4f 578 struct ieee80211_tx_rate *rates;
d43f3015 579 u32 max_4ms_framelen, frmlen;
4ef70841 580 u16 aggr_limit, legacy = 0;
e8324357 581 int i;
528f0c6b 582
a22be22a 583 skb = bf->bf_mpdu;
528f0c6b 584 tx_info = IEEE80211_SKB_CB(skb);
e63835b0 585 rates = tx_info->control.rates;
528f0c6b 586
587 /*
588 * Find the lowest frame length among the rate series that will have a
589 * 4ms transmit duration.
590 * TODO - TXOP limit needs to be considered.
591 */
592 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
e63835b0 593
594 for (i = 0; i < 4; i++) {
595 if (rates[i].count) {
596 int modeidx;
597 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
598 legacy = 1;
599 break;
600 }
601
0e668cde 602 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
603 modeidx = MCS_HT40;
604 else
605 modeidx = MCS_HT20;
606
607 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
608 modeidx++;
609
610 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
d43f3015 611 max_4ms_framelen = min(max_4ms_framelen, frmlen);
612 }
613 }
e63835b0 614
f078f209 615 /*
616 * Limit aggregate size by the minimum rate if the selected rate is
617 * not a probe rate; if the selected rate is a probe rate, then
618 * avoid aggregation of this packet.
f078f209 619 */
620 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
621 return 0;
f078f209 622
623 if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
624 aggr_limit = min((max_4ms_framelen * 3) / 8,
625 (u32)ATH_AMPDU_LIMIT_MAX);
626 else
627 aggr_limit = min(max_4ms_framelen,
628 (u32)ATH_AMPDU_LIMIT_MAX);
f078f209 629
630 /*
631 * h/w can accept aggregates up to 16-bit lengths (65535).
632 * The IE, however, can hold up to 65536, which shows up here
633 * as zero. Ignore 65536 since we are constrained by hw.
f078f209 634 */
635 if (tid->an->maxampdu)
636 aggr_limit = min(aggr_limit, tid->an->maxampdu);
f078f209 637
638 return aggr_limit;
639}
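/*
 * Editor's note (not in the original source): the 3/8 scaling in the
 * SC_OP_BT_PRIORITY_DETECTED branch above shrinks the aggregate airtime
 * budget from roughly 4 ms to about 1.5 ms (4 ms * 3/8), which is
 * presumably meant to leave more airtime for Bluetooth coexistence
 * traffic on the shared antenna.
 */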
f078f209 640
e8324357 641/*
d43f3015 642 * Returns the number of delimiters to be added to
e8324357 643 * meet the minimum required mpdudensity.
644 */
645static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
646 struct ath_buf *bf, u16 frmlen)
647{
648 struct sk_buff *skb = bf->bf_mpdu;
649 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
4ef70841 650 u32 nsymbits, nsymbols;
e8324357 651 u16 minlen;
545750d3 652 u8 flags, rix;
c6663876 653 int width, streams, half_gi, ndelim, mindelim;
2d42efc4 654 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
655
656 /* Select standard number of delimiters based on frame length alone */
657 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
658
659 /*
660 * If encryption is enabled, the hardware requires some more padding
661 * between subframes.
662 * TODO - this could be improved to be dependent on the rate.
663 * The hardware can keep up at lower rates, but not higher rates.
f078f209 664 */
2d42efc4 665 if (fi->keyix != ATH9K_TXKEYIX_INVALID)
e8324357 666 ndelim += ATH_AGGR_ENCRYPTDELIM;
f078f209 667
668 /*
669 * Convert desired mpdu density from microseconds to bytes based
670 * on highest rate in rate series (i.e. first rate) to determine
671 * required minimum length for subframe. Take into account
672 * whether high rate is 20 or 40 MHz and half or full GI.
4ef70841 673 *
674 * If there is no mpdu density restriction, no further calculation
675 * is needed.
676 */
677
678 if (tid->an->mpdudensity == 0)
e8324357 679 return ndelim;
f078f209 680
681 rix = tx_info->control.rates[0].idx;
682 flags = tx_info->control.rates[0].flags;
683 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
684 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
f078f209 685
e8324357 686 if (half_gi)
4ef70841 687 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
e8324357 688 else
4ef70841 689 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
f078f209 690
691 if (nsymbols == 0)
692 nsymbols = 1;
f078f209 693
694 streams = HT_RC_2_STREAMS(rix);
695 nsymbits = bits_per_symbol[rix % 8][width] * streams;
e8324357 696 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
f078f209 697
e8324357 698 if (frmlen < minlen) {
699 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
700 ndelim = max(mindelim, ndelim);
701 }
702
e8324357 703 return ndelim;
704}
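/*
 * Worked example (editor's note, not in the original source), assuming
 * ATH_AGGR_DELIM_SZ is 4 bytes per delimiter: for an mpdudensity of
 * 8 us at MCS 7, HT20, full GI, nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2
 * and nsymbits = 260, so minlen = (2 * 260) / 8 = 65 bytes. A 50-byte
 * subframe then needs mindelim = (65 - 50) / 4 = 3 extra delimiters to
 * stretch it to the required minimum spacing.
 */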
705
e8324357 706static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
fec247c0 707 struct ath_txq *txq,
d43f3015 708 struct ath_atx_tid *tid,
709 struct list_head *bf_q,
710 int *aggr_len)
f078f209 711{
e8324357 712#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
713 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
714 int rl = 0, nframes = 0, ndelim, prev_al = 0;
715 u16 aggr_limit = 0, al = 0, bpad = 0,
716 al_delta, h_baw = tid->baw_size / 2;
717 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
0299a50a 718 struct ieee80211_tx_info *tx_info;
2d42efc4 719 struct ath_frame_info *fi;
f078f209 720
e8324357 721 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
f078f209 722
723 do {
724 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
2d42efc4 725 fi = get_frame_info(bf->bf_mpdu);
f078f209 726
d43f3015 727 /* do not step over block-ack window */
2d42efc4 728 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
729 status = ATH_AGGR_BAW_CLOSED;
730 break;
731 }
f078f209 732
733 if (!rl) {
734 aggr_limit = ath_lookup_rate(sc, bf, tid);
735 rl = 1;
736 }
f078f209 737
d43f3015 738 /* do not exceed aggregation limit */
2d42efc4 739 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
f078f209 740
741 if (nframes &&
742 (aggr_limit < (al + bpad + al_delta + prev_al))) {
743 status = ATH_AGGR_LIMITED;
744 break;
745 }
f078f209 746
747 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
748 if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
749 !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
750 break;
751
752 /* do not exceed subframe limit */
753 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
754 status = ATH_AGGR_LIMITED;
755 break;
756 }
d43f3015 757 nframes++;
f078f209 758
d43f3015 759 /* add padding for previous frame to aggregation length */
e8324357 760 al += bpad + al_delta;
f078f209 761
762 /*
763 * Get the delimiters needed to meet the MPDU
764 * density for this node.
765 */
2d42efc4 766 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
e8324357 767 bpad = PADBYTES(al_delta) + (ndelim << 2);
f078f209 768
e8324357 769 bf->bf_next = NULL;
87d5efbb 770 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
f078f209 771
d43f3015 772 /* link buffers of this frame to the aggregate */
773 if (!fi->retries)
774 ath_tx_addto_baw(sc, tid, fi->seqno);
775 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
776 list_move_tail(&bf->list, bf_q);
777 if (bf_prev) {
778 bf_prev->bf_next = bf;
779 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
780 bf->bf_daddr);
781 }
782 bf_prev = bf;
fec247c0 783
e8324357 784 } while (!list_empty(&tid->buf_q));
f078f209 785
269c44bc 786 *aggr_len = al;
d43f3015 787
788 return status;
789#undef PADBYTES
790}
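/*
 * Worked example (editor's note, not in the original source): PADBYTES()
 * rounds a subframe up to a 4-byte boundary, so PADBYTES(1537) =
 * (4 - 1537 % 4) % 4 = 3 and PADBYTES(1536) = 0. The per-subframe cost
 * charged to the aggregate is then bpad = PADBYTES(al_delta) +
 * (ndelim << 2), i.e. the alignment padding plus 4 bytes per delimiter.
 */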
f078f209 791
792static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
793 struct ath_atx_tid *tid)
794{
d43f3015 795 struct ath_buf *bf;
e8324357 796 enum ATH_AGGR_STATUS status;
2d42efc4 797 struct ath_frame_info *fi;
e8324357 798 struct list_head bf_q;
269c44bc 799 int aggr_len;
f078f209 800
801 do {
802 if (list_empty(&tid->buf_q))
803 return;
f078f209 804
805 INIT_LIST_HEAD(&bf_q);
806
269c44bc 807 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
f078f209 808
f078f209 809 /*
810 * no frames picked up to be aggregated;
811 * block-ack window is not open.
f078f209 812 */
813 if (list_empty(&bf_q))
814 break;
f078f209 815
e8324357 816 bf = list_first_entry(&bf_q, struct ath_buf, list);
d43f3015 817 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
f078f209 818
d43f3015 819 /* if only one frame, send as non-aggregate */
b572d033 820 if (bf == bf->bf_lastbf) {
821 fi = get_frame_info(bf->bf_mpdu);
822
e8324357 823 bf->bf_state.bf_type &= ~BUF_AGGR;
d43f3015 824 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
2d42efc4 825 ath_buf_set_rate(sc, bf, fi->framelen);
826 ath_tx_txqaddbuf(sc, txq, &bf_q);
827 continue;
828 }
f078f209 829
d43f3015 830 /* setup first desc of aggregate */
e8324357 831 bf->bf_state.bf_type |= BUF_AGGR;
832 ath_buf_set_rate(sc, bf, aggr_len);
833 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
f078f209 834
835 /* anchor last desc of aggregate */
836 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
f078f209 837
e8324357 838 ath_tx_txqaddbuf(sc, txq, &bf_q);
fec247c0 839 TX_STAT_INC(txq->axq_qnum, a_aggr);
f078f209 840
4b3ba66a 841 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
842 status != ATH_AGGR_BAW_CLOSED);
843}
844
845int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
846 u16 tid, u16 *ssn)
847{
848 struct ath_atx_tid *txtid;
849 struct ath_node *an;
850
851 an = (struct ath_node *)sta->drv_priv;
f83da965 852 txtid = ATH_AN_2_TID(an, tid);
853
854 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
855 return -EAGAIN;
856
f83da965 857 txtid->state |= AGGR_ADDBA_PROGRESS;
75401849 858 txtid->paused = true;
49447f2f 859 *ssn = txtid->seq_start = txtid->seq_next;
860
861 return 0;
e8324357 862}
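/*
 * Editor's note (not in the original source): the assignment of
 * *ssn = txtid->seq_start = txtid->seq_next above appears to be the
 * change referenced in the commit title. Resetting seq_start to
 * seq_next when the ADDBA request is issued makes the advertised
 * starting sequence number and the driver's block-ack window both
 * begin at the sequence number the next MPDU will actually use.
 */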
f078f209 863
f83da965 864void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
865{
866 struct ath_node *an = (struct ath_node *)sta->drv_priv;
867 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
066dae93 868 struct ath_txq *txq = txtid->ac->txq;
f078f209 869
e8324357 870 if (txtid->state & AGGR_CLEANUP)
f83da965 871 return;
f078f209 872
e8324357 873 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
5eae6592 874 txtid->state &= ~AGGR_ADDBA_PROGRESS;
f83da965 875 return;
e8324357 876 }
f078f209 877
e8324357 878 spin_lock_bh(&txq->axq_lock);
75401849 879 txtid->paused = true;
f078f209 880
881 /*
882 * If frames are still being transmitted for this TID, they will be
883 * cleaned up during tx completion. To prevent race conditions, this
884 * TID can only be reused after all in-progress subframes have been
885 * completed.
886 */
887 if (txtid->baw_head != txtid->baw_tail)
e8324357 888 txtid->state |= AGGR_CLEANUP;
90fa539c 889 else
e8324357 890 txtid->state &= ~AGGR_ADDBA_COMPLETE;
891 spin_unlock_bh(&txq->axq_lock);
892
893 ath_tx_flush_tid(sc, txtid);
e8324357 894}
f078f209 895
896void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
897{
898 struct ath_atx_tid *txtid;
899 struct ath_node *an;
900
901 an = (struct ath_node *)sta->drv_priv;
902
903 if (sc->sc_flags & SC_OP_TXAGGR) {
904 txtid = ATH_AN_2_TID(an, tid);
905 txtid->baw_size =
906 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
907 txtid->state |= AGGR_ADDBA_COMPLETE;
908 txtid->state &= ~AGGR_ADDBA_PROGRESS;
909 ath_tx_resume_tid(sc, txtid);
910 }
911}
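/*
 * Worked example (editor's note, not in the original source): with
 * IEEE80211_MIN_AMPDU_BUF = 8, a peer advertising the maximum
 * ampdu_factor of 3 yields baw_size = 8 << 3 = 64 subframes, the
 * largest block-ack window 802.11n allows; a factor of 0 keeps the
 * minimum window of 8.
 */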
912
913/********************/
914/* Queue Management */
915/********************/
f078f209 916
917static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
918 struct ath_txq *txq)
f078f209 919{
920 struct ath_atx_ac *ac, *ac_tmp;
921 struct ath_atx_tid *tid, *tid_tmp;
f078f209 922
923 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
924 list_del(&ac->list);
925 ac->sched = false;
926 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
927 list_del(&tid->list);
928 tid->sched = false;
929 ath_tid_drain(sc, txq, tid);
930 }
931 }
932}
933
e8324357 934struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
f078f209 935{
cbe61d8a 936 struct ath_hw *ah = sc->sc_ah;
c46917bb 937 struct ath_common *common = ath9k_hw_common(ah);
e8324357 938 struct ath9k_tx_queue_info qi;
939 static const int subtype_txq_to_hwq[] = {
940 [WME_AC_BE] = ATH_TXQ_AC_BE,
941 [WME_AC_BK] = ATH_TXQ_AC_BK,
942 [WME_AC_VI] = ATH_TXQ_AC_VI,
943 [WME_AC_VO] = ATH_TXQ_AC_VO,
944 };
e5003249 945 int qnum, i;
f078f209 946
e8324357 947 memset(&qi, 0, sizeof(qi));
066dae93 948 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
949 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
950 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
951 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
952 qi.tqi_physCompBuf = 0;
953
954 /*
955 * Enable interrupts only for EOL and DESC conditions.
956 * We mark tx descriptors to receive a DESC interrupt
957 * when a tx queue gets deep; otherwise waiting for the
958 * EOL to reap descriptors. Note that this is done to
959 * reduce interrupt load and this only defers reaping
960 * descriptors, never transmitting frames. Aside from
961 * reducing interrupts this also permits more concurrency.
962 * The only potential downside is if the tx queue backs
963 * up, in which case the top half of the kernel may back up
964 * due to a lack of tx descriptors.
965 *
966 * The UAPSD queue is an exception, since we take a desc-
967 * based intr on the EOSP frames.
f078f209 968 */
969 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
970 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
971 TXQ_FLAG_TXERRINT_ENABLE;
972 } else {
973 if (qtype == ATH9K_TX_QUEUE_UAPSD)
974 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
975 else
976 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
977 TXQ_FLAG_TXDESCINT_ENABLE;
978 }
979 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
980 if (qnum == -1) {
f078f209 981 /*
982 * NB: don't print a message, this happens
983 * normally on parts with too few tx queues
f078f209 984 */
e8324357 985 return NULL;
f078f209 986 }
e8324357 987 if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
988 ath_err(common, "qnum %u out of range, max %zu!\n",
989 qnum, ARRAY_SIZE(sc->tx.txq));
990 ath9k_hw_releasetxqueue(ah, qnum);
991 return NULL;
992 }
993 if (!ATH_TXQ_SETUP(sc, qnum)) {
994 struct ath_txq *txq = &sc->tx.txq[qnum];
f078f209 995
996 txq->axq_qnum = qnum;
997 txq->axq_link = NULL;
998 INIT_LIST_HEAD(&txq->axq_q);
999 INIT_LIST_HEAD(&txq->axq_acq);
1000 spin_lock_init(&txq->axq_lock);
1001 txq->axq_depth = 0;
4b3ba66a 1002 txq->axq_ampdu_depth = 0;
164ace38 1003 txq->axq_tx_inprogress = false;
e8324357 1004 sc->tx.txqsetup |= 1<<qnum;
1005
1006 txq->txq_headidx = txq->txq_tailidx = 0;
1007 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1008 INIT_LIST_HEAD(&txq->txq_fifo[i]);
1009 INIT_LIST_HEAD(&txq->txq_fifo_pending);
1010 }
1011 return &sc->tx.txq[qnum];
1012}
1013
1014int ath_txq_update(struct ath_softc *sc, int qnum,
1015 struct ath9k_tx_queue_info *qinfo)
1016{
cbe61d8a 1017 struct ath_hw *ah = sc->sc_ah;
1018 int error = 0;
1019 struct ath9k_tx_queue_info qi;
1020
1021 if (qnum == sc->beacon.beaconq) {
1022 /*
1023 * XXX: for beacon queue, we just save the parameter.
1024 * It will be picked up by ath_beaconq_config when
1025 * it's necessary.
1026 */
1027 sc->beacon.beacon_qi = *qinfo;
f078f209 1028 return 0;
e8324357 1029 }
f078f209 1030
9680e8a3 1031 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
1032
1033 ath9k_hw_get_txq_props(ah, qnum, &qi);
1034 qi.tqi_aifs = qinfo->tqi_aifs;
1035 qi.tqi_cwmin = qinfo->tqi_cwmin;
1036 qi.tqi_cwmax = qinfo->tqi_cwmax;
1037 qi.tqi_burstTime = qinfo->tqi_burstTime;
1038 qi.tqi_readyTime = qinfo->tqi_readyTime;
1039
1040 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
1041 ath_err(ath9k_hw_common(sc->sc_ah),
1042 "Unable to update hardware queue %u!\n", qnum);
1043 error = -EIO;
1044 } else {
1045 ath9k_hw_resettxqueue(ah, qnum);
1046 }
1047
1048 return error;
1049}
1050
1051int ath_cabq_update(struct ath_softc *sc)
1052{
1053 struct ath9k_tx_queue_info qi;
1054 int qnum = sc->beacon.cabq->axq_qnum;
f078f209 1055
e8324357 1056 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
f078f209 1057 /*
e8324357 1058 * Ensure the readytime % is within the bounds.
f078f209 1059 */
1060 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1061 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1062 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1063 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
f078f209 1064
57c4d7b4 1065 qi.tqi_readyTime = (sc->beacon_interval *
fdbf7335 1066 sc->config.cabqReadytime) / 100;
1067 ath_txq_update(sc, qnum, &qi);
1068
1069 return 0;
1070}
1071
1072static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1073{
1074 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1075 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1076}
1077
1078/*
1079 * Drain a given TX queue (could be Beacon or Data)
1080 *
1081 * This assumes output has been stopped and
1082 * we do not need to block ath_tx_tasklet.
1083 */
1084void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
f078f209 1085{
1086 struct ath_buf *bf, *lastbf;
1087 struct list_head bf_head;
1088 struct ath_tx_status ts;
1089
1090 memset(&ts, 0, sizeof(ts));
e8324357 1091 INIT_LIST_HEAD(&bf_head);
f078f209 1092
1093 for (;;) {
1094 spin_lock_bh(&txq->axq_lock);
f078f209 1095
1096 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1097 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1098 txq->txq_headidx = txq->txq_tailidx = 0;
1099 spin_unlock_bh(&txq->axq_lock);
1100 break;
1101 } else {
1102 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1103 struct ath_buf, list);
1104 }
1105 } else {
1106 if (list_empty(&txq->axq_q)) {
1107 txq->axq_link = NULL;
1108 spin_unlock_bh(&txq->axq_lock);
1109 break;
1110 }
1111 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1112 list);
f078f209 1113
1114 if (bf->bf_stale) {
1115 list_del(&bf->list);
1116 spin_unlock_bh(&txq->axq_lock);
f078f209 1117
0a8cea84 1118 ath_tx_return_buffer(sc, bf);
1119 continue;
1120 }
e8324357 1121 }
f078f209 1122
e8324357 1123 lastbf = bf->bf_lastbf;
f078f209 1124
1125 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1126 list_cut_position(&bf_head,
1127 &txq->txq_fifo[txq->txq_tailidx],
1128 &lastbf->list);
1129 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1130 } else {
1131 /* remove ath_buf's of the same mpdu from txq */
1132 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1133 }
1134
e8324357 1135 txq->axq_depth--;
1136 if (bf_is_ampdu_not_probing(bf))
1137 txq->axq_ampdu_depth--;
1138 spin_unlock_bh(&txq->axq_lock);
1139
1140 if (bf_isampdu(bf))
1141 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1142 retry_tx);
e8324357 1143 else
db1a052b 1144 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
1145 }
1146
1147 spin_lock_bh(&txq->axq_lock);
1148 txq->axq_tx_inprogress = false;
1149 spin_unlock_bh(&txq->axq_lock);
1150
1151 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1152 spin_lock_bh(&txq->axq_lock);
1153 while (!list_empty(&txq->txq_fifo_pending)) {
1154 bf = list_first_entry(&txq->txq_fifo_pending,
1155 struct ath_buf, list);
1156 list_cut_position(&bf_head,
1157 &txq->txq_fifo_pending,
1158 &bf->bf_lastbf->list);
1159 spin_unlock_bh(&txq->axq_lock);
1160
1161 if (bf_isampdu(bf))
1162 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
c5992618 1163 &ts, 0, retry_tx);
1164 else
1165 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1166 &ts, 0, 0);
1167 spin_lock_bh(&txq->axq_lock);
1168 }
1169 spin_unlock_bh(&txq->axq_lock);
1170 }
1171
1172 /* flush any pending frames if aggregation is enabled */
1173 if (sc->sc_flags & SC_OP_TXAGGR) {
1174 if (!retry_tx) {
1175 spin_lock_bh(&txq->axq_lock);
1176 ath_txq_drain_pending_buffers(sc, txq);
1177 spin_unlock_bh(&txq->axq_lock);
1178 }
1179 }
1180}
1181
080e1a25 1182bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
f078f209 1183{
cbe61d8a 1184 struct ath_hw *ah = sc->sc_ah;
c46917bb 1185 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1186 struct ath_txq *txq;
1187 int i, npend = 0;
1188
1189 if (sc->sc_flags & SC_OP_INVALID)
080e1a25 1190 return true;
1191
1192 /* Stop beacon queue */
1193 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1194
1195 /* Stop data queues */
1196 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1197 if (ATH_TXQ_SETUP(sc, i)) {
1198 txq = &sc->tx.txq[i];
1199 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1200 npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
1201 }
1202 }
1203
080e1a25 1204 if (npend)
393934c6 1205 ath_err(common, "Failed to stop TX DMA!\n");
1206
1207 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1208 if (ATH_TXQ_SETUP(sc, i))
1209 ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
1210 }
1211
1212 return !npend;
e8324357 1213}
f078f209 1214
043a0405 1215void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
e8324357 1216{
1217 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1218 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
e8324357 1219}
f078f209 1220
1221void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1222{
1223 struct ath_atx_ac *ac;
1224 struct ath_atx_tid *tid;
f078f209 1225
1226 if (list_empty(&txq->axq_acq))
1227 return;
f078f209 1228
1229 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1230 list_del(&ac->list);
1231 ac->sched = false;
f078f209 1232
1233 do {
1234 if (list_empty(&ac->tid_q))
1235 return;
f078f209 1236
1237 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
1238 list_del(&tid->list);
1239 tid->sched = false;
f078f209 1240
1241 if (tid->paused)
1242 continue;
f078f209 1243
164ace38 1244 ath_tx_sched_aggr(sc, txq, tid);
1245
1246 /*
1247 * add tid to round-robin queue if more frames
1248 * are pending for the tid
f078f209 1249 */
1250 if (!list_empty(&tid->buf_q))
1251 ath_tx_queue_tid(txq, tid);
f078f209 1252
1253 break;
1254 } while (!list_empty(&ac->tid_q));
f078f209 1255
1256 if (!list_empty(&ac->tid_q)) {
1257 if (!ac->sched) {
1258 ac->sched = true;
1259 list_add_tail(&ac->list, &txq->axq_acq);
f078f209 1260 }
1261 }
1262}
f078f209 1263
1264/***********/
1265/* TX, DMA */
1266/***********/
1267
f078f209 1268/*
1269 * Insert a chain of ath_buf (descriptors) on a txq and
1270 * assume the descriptors are already chained together by caller.
f078f209 1271 */
1272static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1273 struct list_head *head)
f078f209 1274{
cbe61d8a 1275 struct ath_hw *ah = sc->sc_ah;
c46917bb 1276 struct ath_common *common = ath9k_hw_common(ah);
e8324357 1277 struct ath_buf *bf;
f078f209 1278
1279 /*
1280 * Insert the frame on the outbound list and
1281 * pass it on to the hardware.
1282 */
f078f209 1283
1284 if (list_empty(head))
1285 return;
f078f209 1286
e8324357 1287 bf = list_first_entry(head, struct ath_buf, list);
f078f209 1288
1289 ath_dbg(common, ATH_DBG_QUEUE,
1290 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
f078f209 1291
1292 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1293 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1294 list_splice_tail_init(head, &txq->txq_fifo_pending);
1295 return;
1296 }
1297 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
1298 ath_dbg(common, ATH_DBG_XMIT,
1299 "Initializing tx fifo %d which is non-empty\n",
1300 txq->txq_headidx);
1301 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1302 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1303 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
e8324357 1304 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1305 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1306 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
e8324357 1307 } else {
1308 list_splice_tail_init(head, &txq->axq_q);
1309
1310 if (txq->axq_link == NULL) {
1311 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1312 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1313 txq->axq_qnum, ito64(bf->bf_daddr),
1314 bf->bf_desc);
1315 } else {
1316 *txq->axq_link = bf->bf_daddr;
1317 ath_dbg(common, ATH_DBG_XMIT,
1318 "link[%u] (%p)=%llx (%p)\n",
1319 txq->axq_qnum, txq->axq_link,
1320 ito64(bf->bf_daddr), bf->bf_desc);
1321 }
1322 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1323 &txq->axq_link);
1324 ath9k_hw_txstart(ah, txq->axq_qnum);
e8324357 1325 }
e5003249 1326 txq->axq_depth++;
1327 if (bf_is_ampdu_not_probing(bf))
1328 txq->axq_ampdu_depth++;
e8324357 1329}
f078f209 1330
e8324357 1331static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
04caf863 1332 struct ath_buf *bf, struct ath_tx_control *txctl)
f078f209 1333{
2d42efc4 1334 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
04caf863 1335 struct list_head bf_head;
f078f209 1336
e8324357 1337 bf->bf_state.bf_type |= BUF_AMPDU;
fec247c0 1338 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
f078f209 1339
1340 /*
1341 * Do not queue to h/w when any of the following conditions is true:
1342 * - there are pending frames in software queue
1343 * - the TID is currently paused for ADDBA/BAR request
1344 * - seqno is not within block-ack window
1345 * - h/w queue depth exceeds low water mark
1346 */
1347 if (!list_empty(&tid->buf_q) || tid->paused ||
2d42efc4 1348 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
4b3ba66a 1349 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
f078f209 1350 /*
1351 * Add this frame to software queue for scheduling later
1352 * for aggregation.
f078f209 1353 */
04caf863 1354 list_add_tail(&bf->list, &tid->buf_q);
1355 ath_tx_queue_tid(txctl->txq, tid);
1356 return;
1357 }
1358
1359 INIT_LIST_HEAD(&bf_head);
1360 list_add(&bf->list, &bf_head);
1361
e8324357 1362 /* Add sub-frame to BAW */
1363 if (!fi->retries)
1364 ath_tx_addto_baw(sc, tid, fi->seqno);
1365
1366 /* Queue to h/w without aggregation */
d43f3015 1367 bf->bf_lastbf = bf;
2d42efc4 1368 ath_buf_set_rate(sc, bf, fi->framelen);
04caf863 1369 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
1370}
1371
1372static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1373 struct ath_atx_tid *tid,
2d42efc4 1374 struct list_head *bf_head)
e8324357 1375{
2d42efc4 1376 struct ath_frame_info *fi;
1377 struct ath_buf *bf;
1378
1379 bf = list_first_entry(bf_head, struct ath_buf, list);
1380 bf->bf_state.bf_type &= ~BUF_AMPDU;
1381
1382 /* update starting sequence number for subsequent ADDBA request */
1383 if (tid)
1384 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
e8324357 1385
d43f3015 1386 bf->bf_lastbf = bf;
1387 fi = get_frame_info(bf->bf_mpdu);
1388 ath_buf_set_rate(sc, bf, fi->framelen);
e8324357 1389 ath_tx_txqaddbuf(sc, txq, bf_head);
fec247c0 1390 TX_STAT_INC(txq->axq_qnum, queued);
1391}
1392
1393static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1394{
1395 struct ieee80211_hdr *hdr;
1396 enum ath9k_pkt_type htype;
1397 __le16 fc;
1398
1399 hdr = (struct ieee80211_hdr *)skb->data;
1400 fc = hdr->frame_control;
1401
1402 if (ieee80211_is_beacon(fc))
1403 htype = ATH9K_PKT_TYPE_BEACON;
1404 else if (ieee80211_is_probe_resp(fc))
1405 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1406 else if (ieee80211_is_atim(fc))
1407 htype = ATH9K_PKT_TYPE_ATIM;
1408 else if (ieee80211_is_pspoll(fc))
1409 htype = ATH9K_PKT_TYPE_PSPOLL;
1410 else
1411 htype = ATH9K_PKT_TYPE_NORMAL;
1412
1413 return htype;
1414}
1415
1416static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1417 int framelen)
e8324357 1418{
1419 struct ath_wiphy *aphy = hw->priv;
1420 struct ath_softc *sc = aphy->sc;
e8324357 1421 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1422 struct ieee80211_sta *sta = tx_info->control.sta;
1423 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
e8324357 1424 struct ieee80211_hdr *hdr;
2d42efc4 1425 struct ath_frame_info *fi = get_frame_info(skb);
1426 struct ath_node *an;
1427 struct ath_atx_tid *tid;
1428 enum ath9k_key_type keytype;
1429 u16 seqno = 0;
5daefbd0 1430 u8 tidno;
e8324357 1431
2d42efc4 1432 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
e8324357 1433
e8324357 1434 hdr = (struct ieee80211_hdr *)skb->data;
1435 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1436 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
e8324357 1437
1438 an = (struct ath_node *) sta->drv_priv;
1439 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1440
1441 /*
1442 * Override seqno set by upper layer with the one
1443 * in tx aggregation state.
1444 */
1445 tid = ATH_AN_2_TID(an, tidno);
1446 seqno = tid->seq_next;
1447 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1448 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1449 }
1450
1451 memset(fi, 0, sizeof(*fi));
1452 if (hw_key)
1453 fi->keyix = hw_key->hw_key_idx;
1454 else
1455 fi->keyix = ATH9K_TXKEYIX_INVALID;
1456 fi->keytype = keytype;
1457 fi->framelen = framelen;
1458 fi->seqno = seqno;
1459}
1460
82b873af 1461static int setup_tx_flags(struct sk_buff *skb)
1462{
1463 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1464 int flags = 0;
1465
1466 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1467 flags |= ATH9K_TXDESC_INTREQ;
1468
1469 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1470 flags |= ATH9K_TXDESC_NOACK;
e8324357 1471
82b873af 1472 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1473 flags |= ATH9K_TXDESC_LDPC;
1474
1475 return flags;
1476}
1477
1478/*
1479 * rix - rate index
1480 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1481 * width - 0 for 20 MHz, 1 for 40 MHz
1482 * half_gi - to use 4us v/s 3.6 us for symbol time
1483 */
269c44bc 1484static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
1485 int width, int half_gi, bool shortPreamble)
1486{
e8324357 1487 u32 nbits, nsymbits, duration, nsymbols;
269c44bc 1488 int streams;
1489
1490 /* find number of symbols: PLCP + data */
c6663876 1491 streams = HT_RC_2_STREAMS(rix);
e8324357 1492 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
c6663876 1493 nsymbits = bits_per_symbol[rix % 8][width] * streams;
1494 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1495
1496 if (!half_gi)
1497 duration = SYMBOL_TIME(nsymbols);
1498 else
1499 duration = SYMBOL_TIME_HALFGI(nsymbols);
1500
1501 /* add up duration for legacy/ht training and signal fields */
1502 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1503
1504 return duration;
1505}
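/*
 * Worked example (editor's note, not in the original source): a 1500-byte
 * MPDU at MCS 7 (one stream), 20 MHz, long GI gives
 * nbits = 1500 * 8 + 22 = 12022 and nsymbits = 260, so
 * nsymbols = ceil(12022 / 260) = 47 and the data portion lasts
 * 47 * 4 = 188 us. The preamble adds 8 + 8 + 4 + 8 + 4 + 4 = 36 us
 * (L-STF, L-LTF, L-SIG, HT-SIG, HT-STF, one HT-LTF), for 224 us total.
 */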
1506
1507u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1508{
1509 struct ath_hw *ah = sc->sc_ah;
1510 struct ath9k_channel *curchan = ah->curchan;
1511 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1512 (curchan->channelFlags & CHANNEL_5GHZ) &&
1513 (chainmask == 0x7) && (rate < 0x90))
1514 return 0x3;
1515 else
1516 return chainmask;
1517}
1518
269c44bc 1519static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
e8324357 1520{
43c27613 1521 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1522 struct ath9k_11n_rate_series series[4];
1523 struct sk_buff *skb;
1524 struct ieee80211_tx_info *tx_info;
1525 struct ieee80211_tx_rate *rates;
545750d3 1526 const struct ieee80211_rate *rate;
254ad0ff 1527 struct ieee80211_hdr *hdr;
1528 int i, flags = 0;
1529 u8 rix = 0, ctsrate = 0;
254ad0ff 1530 bool is_pspoll;
1531
1532 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
1533
a22be22a 1534 skb = bf->bf_mpdu;
1535 tx_info = IEEE80211_SKB_CB(skb);
1536 rates = tx_info->control.rates;
1537 hdr = (struct ieee80211_hdr *)skb->data;
1538 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
e8324357 1539
e8324357 1540 /*
1541 * We check if Short Preamble is needed for the CTS rate by
1542 * checking the BSS's global flag.
1543 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
e8324357 1544 */
1545 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1546 ctsrate = rate->hw_value;
c89424df 1547 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
545750d3 1548 ctsrate |= rate->hw_value_short;
e8324357 1549
e8324357 1550 for (i = 0; i < 4; i++) {
1551 bool is_40, is_sgi, is_sp;
1552 int phy;
1553
1554 if (!rates[i].count || (rates[i].idx < 0))
1555 continue;
1556
1557 rix = rates[i].idx;
1558 series[i].Tries = rates[i].count;
1559
1560 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1561 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
c89424df 1562 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1563 flags |= ATH9K_TXDESC_RTSENA;
1564 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1565 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1566 flags |= ATH9K_TXDESC_CTSENA;
1567 }
1568
1569 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1570 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1571 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1572 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
e8324357 1573
1574 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1575 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1576 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1577
1578 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1579 /* MCS rates */
1580 series[i].Rate = rix | 0x80;
1581 series[i].ChSel = ath_txchainmask_reduction(sc,
1582 common->tx_chainmask, series[i].Rate);
269c44bc 1583 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
545750d3 1584 is_40, is_sgi, is_sp);
1585 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1586 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
1587 continue;
1588 }
1589
ea066d5a 1590 /* legacy rates */
1591 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1592 !(rate->flags & IEEE80211_RATE_ERP_G))
1593 phy = WLAN_RC_PHY_CCK;
1594 else
1595 phy = WLAN_RC_PHY_OFDM;
1596
1597 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1598 series[i].Rate = rate->hw_value;
1599 if (rate->hw_value_short) {
1600 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1601 series[i].Rate |= rate->hw_value_short;
1602 } else {
1603 is_sp = false;
1604 }
1605
1606 if (bf->bf_state.bfs_paprd)
1607 series[i].ChSel = common->tx_chainmask;
1608 else
1609 series[i].ChSel = ath_txchainmask_reduction(sc,
1610 common->tx_chainmask, series[i].Rate);
1611
545750d3 1612 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
269c44bc 1613 phy, rate->bitrate * 100, len, rix, is_sp);
1614 }
1615
27032059 1616 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
269c44bc 1617 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
1618 flags &= ~ATH9K_TXDESC_RTSENA;
1619
1620 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1621 if (flags & ATH9K_TXDESC_RTSENA)
1622 flags &= ~ATH9K_TXDESC_CTSENA;
1623
e8324357 1624 /* set dur_update_en for l-sig computation except for PS-Poll frames */
c89424df
S
1625 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1626 bf->bf_lastbf->bf_desc,
254ad0ff 1627 !is_pspoll, ctsrate,
c89424df 1628 0, series, 4, flags);
f078f209 1629
17d7904d 1630 if (sc->config.ath_aggr_prot && flags)
c89424df 1631 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
1632}
1633
82b873af 1634static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
04caf863 1635 struct ath_txq *txq,
2d42efc4 1636 struct sk_buff *skb)
f078f209 1637{
1638 struct ath_wiphy *aphy = hw->priv;
1639 struct ath_softc *sc = aphy->sc;
04caf863 1640 struct ath_hw *ah = sc->sc_ah;
82b873af 1641 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2d42efc4 1642 struct ath_frame_info *fi = get_frame_info(skb);
82b873af 1643 struct ath_buf *bf;
04caf863 1644 struct ath_desc *ds;
04caf863 1645 int frm_type;
1646
1647 bf = ath_tx_get_buffer(sc);
1648 if (!bf) {
226afe68 1649 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
82b873af
FF
1650 return NULL;
1651 }
e022edbd 1652
528f0c6b 1653 ATH_TXBUF_RESET(bf);
f078f209 1654
04caf863 1655 bf->aphy = aphy;
82b873af 1656 bf->bf_flags = setup_tx_flags(skb);
f078f209 1657 bf->bf_mpdu = skb;
f8316df1 1658
c1739eb3
BG
1659 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1660 skb->len, DMA_TO_DEVICE);
1661 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
f8316df1 1662 bf->bf_mpdu = NULL;
6cf9e995 1663 bf->bf_buf_addr = 0;
3800276a
JP
1664 ath_err(ath9k_hw_common(sc->sc_ah),
1665 "dma_mapping_error() on TX\n");
82b873af
FF
1666 ath_tx_return_buffer(sc, bf);
1667 return NULL;
f8316df1
LR
1668 }
1669
528f0c6b 1670 frm_type = get_hw_packet_type(skb);
f078f209 1671
f078f209 1672 ds = bf->bf_desc;
87d5efbb 1673 ath9k_hw_set_desc_link(ah, ds, 0);
f078f209 1674
2d42efc4
FF
1675 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1676 fi->keyix, fi->keytype, bf->bf_flags);
528f0c6b
S
1677
1678 ath9k_hw_filltxdesc(ah, ds,
8f93b8b3
S
1679 skb->len, /* segment length */
1680 true, /* first segment */
1681 true, /* last segment */
3f3a1c80 1682 ds, /* first descriptor */
cc610ac0 1683 bf->bf_buf_addr,
04caf863
FF
1684 txq->axq_qnum);
1685
1686
1687 return bf;
1688}

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct list_head bf_head;
	struct ath_atx_tid *tid = NULL;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);

	if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, bf, txctl);
	} else {
		INIT_LIST_HEAD(&bf_head);
		list_add_tail(&bf->list, &bf_head);

		bf->bf_state.bfs_ftype = txctl->frame_type;
		bf->bf_state.bfs_paprd = txctl->paprd;

		if (bf->bf_state.bfs_paprd)
			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
						   bf->bf_state.bfs_paprd);

		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}
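
/*
 * Illustrative sketch (not driver code): the TID used above comes from
 * the low four bits of the first QoS control byte returned by
 * ieee80211_get_qos_ctl().  A minimal standalone model, assuming the
 * 0x0f mask that IEEE80211_QOS_CTL_TID_MASK expands to, plus the
 * standard 802.1D user-priority to access-category table (mac80211
 * numbering, where 0 is voice); both helper names are hypothetical:
 */
static unsigned char qos_ctl_to_tid_model(unsigned char qos_ctl0)
{
	/* TID 0..15 lives in bits 0-3 of the QoS control field */
	return qos_ctl0 & 0x0f;
}

static int tid_to_ac_model(unsigned char tid)
{
	/* 802.1D UP -> AC: 0=VO, 1=VI, 2=BE, 3=BK */
	static const int up_to_ac[8] = {
		2 /* BE */, 3 /* BK */, 3 /* BK */, 2 /* BE */,
		1 /* VI */, 1 /* VI */, 0 /* VO */, 0 /* VO */,
	};
	return up_to_ac[tid & 0x7];
}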

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
	if (unlikely(!bf))
		return -ENOMEM;

	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
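
/*
 * Illustrative sketch (not driver code): the padding logic above keeps
 * the frame body 4-byte aligned by inserting 'padpos & 3' bytes after
 * the 802.11 header.  The '& 3' shortcut works because 802.11 header
 * lengths are always even, so the remainder is either 0 or 2 and
 * equals the pad needed to reach the next 4-byte boundary.  A
 * standalone model (the helper name is hypothetical):
 */
static unsigned int pad_for_header_model(unsigned int hdrlen)
{
	/*
	 * hdrlen == 26 (QoS data header) -> pad 2 bytes so the payload
	 * starts on a 4-byte boundary; hdrlen == 24 -> pad 0.
	 */
	return hdrlen & 3;
}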

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags, int ftype,
			    struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(ftype))
		ath9k_tx_status(hw, skb, ftype);
	else {
		q = skb_get_queue_mapping(skb);
		if (txq == sc->tx.txq_map[q]) {
			spin_lock_bh(&txq->axq_lock);
			if (WARN_ON(--txq->pending_frames < 0))
				txq->pending_frames = 0;
			spin_unlock_bh(&txq->axq_lock);
		}

		ieee80211_tx_status(hw, skb);
	}
}

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		if (!sc->paprd_pending)
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts);
		ath_tx_complete(sc, skb, bf->aphy, tx_flags,
				bf->bf_state.bfs_ftype, txq);
	}
	/*
	 * At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nframes, int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	struct ath_softc *sc = bf->aphy->sc;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);

		tx_info->status.ampdu_len = nframes;
		tx_info->status.ampdu_ack_len = nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
				     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
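
/*
 * Illustrative sketch (not driver code): rate control consumes the
 * ampdu_len/ampdu_ack_len pair filled in above as a per-rate packet
 * error rate sample.  A hypothetical model of that computation:
 */
static unsigned int ampdu_per_model(unsigned int nframes, unsigned int nbad)
{
	/* PER in percent: share of subframes that were not block-acked */
	if (nframes == 0)
		return 0;
	return (nbad * 100) / nframes;
}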

static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
{
	struct ath_txq *txq;

	txq = sc->tx.txq_map[qnum];
	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
		if (ath_mac80211_start_queue(sc, qnum))
			txq->stopped = 0;
	}
	spin_unlock_bh(&txq->axq_lock);
}
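
/*
 * Illustrative sketch (not driver code): together with the stop logic
 * in ath_tx_start(), this implements simple watermark flow control --
 * stop the mac80211 queue once more than ATH_MAX_QDEPTH frames are
 * pending, wake it when completions bring the count back under the
 * limit.  A standalone model of the counter discipline (all names
 * hypothetical):
 */
struct queue_gate_model {
	int pending;
	int stopped;
};

#define QUEUE_GATE_MAX 128	/* stand-in for ATH_MAX_QDEPTH */

static void queue_gate_enqueue(struct queue_gate_model *q)
{
	if (++q->pending > QUEUE_GATE_MAX && !q->stopped)
		q->stopped = 1;	/* driver would stop the mac80211 queue */
}

static void queue_gate_complete(struct queue_gate_model *q)
{
	if (--q->pending < 0)
		q->pending = 0;
	if (q->stopped && q->pending < QUEUE_GATE_MAX)
		q->stopped = 0;	/* driver would wake the mac80211 queue */
}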

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;
	int qnum;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);

		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
		}

		qnum = skb_get_queue_mapping(bf->bf_mpdu);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
					     true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		if (txq == sc->tx.txq_map[qnum])
			ath_wake_mac80211_queue(sc, qnum);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
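
/*
 * Illustrative sketch (not driver code): a minimal model of the
 * "holding descriptor" discipline described in the comments above.
 * The consumer never unlinks the most recently completed node, so the
 * DMA engine always has a valid link field to re-read; the old holder
 * is only reclaimed once a newer completion replaces it.  All names
 * here are hypothetical.
 */
struct hold_node_model {
	struct hold_node_model *next;
	int done;	/* set by the "hardware" on completion */
	int stale;	/* software marker: kept back as holder */
};

/* Reap one completed node from the head of the chain; returns the node
 * that may be recycled, or 0 when nothing can be reclaimed yet. */
static struct hold_node_model *reap_one_model(struct hold_node_model **head)
{
	struct hold_node_model *bf = *head, *held = 0;

	if (!bf)
		return 0;

	if (bf->stale) {
		if (!bf->next)
			return 0;	/* only the holder left: keep it */
		held = bf;		/* old holder can go once bf is done */
		bf = bf->next;
	}

	if (!bf->done)
		return 0;		/* hardware not finished yet */

	bf->stale = 1;			/* bf becomes the new holder */
	if (held)
		*head = bf;		/* unlink the old holder */
	return held;
}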

static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, true);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}
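
/*
 * Illustrative sketch (not driver code): the poll work above is a
 * classic two-pass watchdog.  One pass arms a flag; the completion
 * path clears it whenever progress is made; if the next pass still
 * finds the flag armed with work outstanding, the queue is declared
 * hung.  A standalone model (names are hypothetical):
 */
struct watchdog_model {
	int depth;		/* outstanding work items */
	int armed;		/* set by the watchdog, cleared on progress */
};

/* Called from the completion path whenever an item finishes. */
static void watchdog_progress(struct watchdog_model *w)
{
	w->armed = 0;
}

/* Called periodically; returns 1 when a hang is detected. */
static int watchdog_poll(struct watchdog_model *w)
{
	if (w->depth && w->armed)
		return 1;	/* no completion since the last poll */
	if (w->depth)
		w->armed = 1;
	return 0;
}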

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;
	int qnum;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
		}

		qnum = skb_get_queue_mapping(bf->bf_mpdu);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
					     txok, true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		if (txq == sc->tx.txq_map[qnum])
			ath_wake_mac80211_queue(sc, qnum);

		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
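
/*
 * Illustrative sketch (not driver code): the EDMA path above consumes
 * frames from a fixed-depth tx FIFO, advancing the tail index with the
 * INCR() wraparound macro.  A standalone model of that index
 * arithmetic, assuming a plain modulo wrap (depth value is a stand-in
 * for ATH_TXFIFO_DEPTH):
 */
#define TXFIFO_DEPTH_MODEL 8

static void fifo_incr_model(int *idx)
{
	/* conceptually: INCR(*idx, TXFIFO_DEPTH_MODEL) */
	*idx = (*idx + 1) % TXFIFO_DEPTH_MODEL;
}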

/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}
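
/*
 * Illustrative sketch (not driver code): ath_tx_init() uses the common
 * kernel "goto err" idiom -- every failure path funnels into a single
 * exit that undoes whatever was set up, which is safe here because
 * ath_tx_cleanup() checks each resource before freeing it.  A minimal
 * standalone model of the same shape (all names hypothetical):
 */
static int init_two_resources_model(int fail_first, int fail_second)
{
	int error = 0;
	int a = 0, b = 0;	/* stand-ins for allocated resources */

	a = !fail_first;
	if (!a) {
		error = -1;
		goto err;
	}

	b = !fail_second;
	if (!b) {
		error = -1;
		goto err;
	}

err:
	if (error) {
		/* cleanup checks each resource, like ath_tx_cleanup() */
		if (b)
			b = 0;
		if (a)
			a = 0;
	}
	return error;
}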

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}
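
/*
 * Illustrative sketch (not driver code): the seq_start/seq_next and
 * baw_head/baw_tail fields initialized above track the block-ack
 * window for a TID.  A standalone model of the 12-bit sequence-number
 * arithmetic, assuming the usual 4096-value 802.11 sequence space:
 */
#define SEQ_SPACE_MODEL 4096	/* 12-bit 802.11 sequence numbers */

/* Advance to the next sequence number, wrapping at 4095 -> 0. */
static unsigned int seq_next_model(unsigned int seq)
{
	return (seq + 1) % SEQ_SPACE_MODEL;
}

/* True when seqno falls inside the window [start, start + bawsz),
 * computed modulo the sequence space so wraparound is handled. */
static int baw_within_model(unsigned int seqno, unsigned int start,
			    unsigned int bawsz)
{
	return ((seqno - start) % SEQ_SPACE_MODEL) < bawsz;
}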

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}