/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
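
/*
 * Worked example (illustrative, not part of the driver): a 1000-byte MPDU
 * sent at MCS 7 on a 40 MHz channel occupies 540 coded bits per OFDM
 * symbol (see bits_per_symbol[] below), so:
 *
 *   nsymbols = (1000 * 8 + OFDM_PLCP_BITS + 539) / 540 = 15
 *   SYMBOL_TIME(15)        = 60 us   (long GI, 4 us per symbol)
 *   SYMBOL_TIME_HALFGI(15) = 54 us   (short GI, 3.6 us per symbol)
 *
 * This is the same arithmetic ath_pkt_duration() performs further down.
 */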

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952, 32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852, 64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532, 65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172, 35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288, 65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532, 65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532, 65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140, 65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532, 65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532, 65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532, 65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532, 65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532, 65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532, 65532,
	}
};
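
/*
 * Illustrative note: the table above is indexed as
 * ath_max_4ms_framelen[mode][MCS index] and holds the largest frame that
 * fits in roughly 4 ms of airtime at each rate.  For MCS 0 at HT20/long GI
 * (6.5 Mb/s), 4 ms corresponds to about 3250 bytes; the entry (3212) is
 * slightly lower, presumably leaving room for PLCP and preamble overhead.
 * Entries are clamped to 65532, just under the 16-bit hardware length
 * limit noted in ath_lookup_rate() below.
 */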

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
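
/*
 * Worked example (illustrative, treating ATH_BA_INDEX() as the offset of
 * seqno from the window start): with seq_start = 100 and baw_head = 0,
 * queueing seqno 103 via ath_tx_addto_baw() sets bit
 * ATH_BA_INDEX(100, 103) = 3 of tid->tx_buf.  When seqno 100 later
 * completes, ath_tx_update_baw() clears bit 0 and then slides
 * seq_start/baw_head forward across every already-cleared slot, so the
 * window origin always points at the oldest still-outstanding subframe.
 */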

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}
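
/*
 * Illustrative note: for an aggregate whose block-ack starts at
 * ts_seqnum = 200, a subframe with seqno 205 is checked against bit
 * ATH_BA_INDEX(200, 205) = 5 of the bitmap copied from ts->ba_low (and
 * the adjacent ba_high word).  A clear bit, or txok == 0, counts the
 * subframe as bad; ath_tx_complete_aggr() below then retries or fails it.
 */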

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter = false;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				bf->bf_state.bf_type |= BUF_XRETRY;
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		spin_unlock_bh(&sc->sc_pcu_lock);
		ath_reset(sc, false);
		spin_lock_bh(&sc->sc_pcu_lock);
	}
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
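
/*
 * Illustrative numbers: if the worst rate in the series is MCS 0 at
 * HT20/long GI, max_4ms_framelen is 3212 bytes, so the aggregate is
 * capped at 3212 bytes, and at (3212 * 3) / 8 = 1204 bytes when
 * SC_OP_BT_PRIORITY_DETECTED is set, trading A-MPDU airtime for
 * Bluetooth coexistence.
 */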

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
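
/*
 * Worked example (illustrative): an 8 us MPDU density at MCS 15 on
 * 40 MHz with long GI gives nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2 and
 * nsymbits = 540 * 2 = 1080, so minlen = (2 * 1080) / 8 = 270 bytes.
 * A 100-byte subframe would then need
 * mindelim = (270 - 100) / ATH_AGGR_DELIM_SZ = 42 extra 4-byte
 * delimiters in front of it.
 */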

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
		    !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}
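
/*
 * Illustrative note: PADBYTES() above rounds each subframe up to a
 * 4-byte boundary.  For a 1534-byte frame, al_delta = 4 + 1534 = 1538
 * and PADBYTES(1538) = 2, and each MPDU-density delimiter contributes
 * another 4 bytes (ndelim << 2) of padding ahead of the next subframe.
 */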

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!list_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!list_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}
e8324357
S
991/********************/
992/* Queue Management */
993/********************/
f078f209 994
e8324357
S
995static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
996 struct ath_txq *txq)
f078f209 997{
e8324357
S
998 struct ath_atx_ac *ac, *ac_tmp;
999 struct ath_atx_tid *tid, *tid_tmp;
f078f209 1000
e8324357
S
1001 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1002 list_del(&ac->list);
1003 ac->sched = false;
1004 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1005 list_del(&tid->list);
1006 tid->sched = false;
1007 ath_tid_drain(sc, txq, tid);
1008 }
f078f209
LR
1009 }
1010}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0, retry_tx);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!list_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_dbg(common, ATH_DBG_XMIT,
				"Initializing tx fifo %d which is non-empty\n",
				txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			TX_STAT_INC(txq->axq_qnum, puttxbuf);
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
				txq->axq_qnum, ito64(bf->bf_daddr),
				bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct ath_buf *bf, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	struct list_head bf_head;

	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		list_add_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	if (!fi->retries)
		ath_tx_addto_baw(sc, tid, fi->seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_frame_info *fi;
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	fi = get_frame_info(bf->bf_mpdu);
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ath_softc *sc = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	struct ath_atx_tid *tid;
	enum ath9k_key_type keytype;
	u16 seqno = 0;
	u8 tidno;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	hdr = (struct ieee80211_hdr *)skb->data;
	if (an && ieee80211_is_data_qos(hdr->frame_control) &&
	    conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {

		tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;

		/*
		 * Override seqno set by upper layer with the one
		 * in tx aggregation state.
		 */
		tid = ATH_AN_2_TID(an, tidno);
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
	fi->seqno = seqno;
}

static int setup_tx_flags(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
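
/*
 * Worked example (illustrative): a 1500-byte MPDU at MCS 15 (rix = 15,
 * two streams) on 40 MHz with long GI:
 *
 *   nbits    = 1500 * 8 + OFDM_PLCP_BITS = 12022
 *   nsymbits = 540 * 2 = 1080
 *   nsymbols = (12022 + 1079) / 1080 = 12
 *   duration = SYMBOL_TIME(12) + L_STF + L_LTF + L_SIG + HT_SIG
 *              + HT_STF + HT_LTF(2) = 48 + 40 = 88 us
 */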

u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;
	if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else
		return chainmask;
}
1624
269c44bc 1625static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
e8324357 1626{
43c27613 1627 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357
S
1628 struct ath9k_11n_rate_series series[4];
1629 struct sk_buff *skb;
1630 struct ieee80211_tx_info *tx_info;
1631 struct ieee80211_tx_rate *rates;
545750d3 1632 const struct ieee80211_rate *rate;
254ad0ff 1633 struct ieee80211_hdr *hdr;
c89424df
S
1634 int i, flags = 0;
1635 u8 rix = 0, ctsrate = 0;
254ad0ff 1636 bool is_pspoll;
e8324357
S
1637
1638 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
1639
a22be22a 1640 skb = bf->bf_mpdu;
e8324357
S
1641 tx_info = IEEE80211_SKB_CB(skb);
1642 rates = tx_info->control.rates;
254ad0ff
S
1643 hdr = (struct ieee80211_hdr *)skb->data;
1644 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
e8324357 1645
e8324357 1646 /*
c89424df
S
1647 * We check if Short Preamble is needed for the CTS rate by
1648 * checking the BSS's global flag.
1649 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
e8324357 1650 */
545750d3
FF
1651 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1652 ctsrate = rate->hw_value;
c89424df 1653 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
545750d3 1654 ctsrate |= rate->hw_value_short;
e8324357 1655
e8324357 1656 for (i = 0; i < 4; i++) {
545750d3
FF
1657 bool is_40, is_sgi, is_sp;
1658 int phy;
1659
e8324357
S
1660 if (!rates[i].count || (rates[i].idx < 0))
1661 continue;
1662
1663 rix = rates[i].idx;
e8324357
S
1664 series[i].Tries = rates[i].count;
1665
cbe8c735 1666 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
c89424df 1667 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
27032059
FF
1668 flags |= ATH9K_TXDESC_RTSENA;
1669 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1670 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1671 flags |= ATH9K_TXDESC_CTSENA;
1672 }
1673
c89424df
S
1674 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1675 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1676 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1677 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
e8324357 1678
545750d3
FF
1679 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1680 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1681 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1682
1683 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1684 /* MCS rates */
1685 series[i].Rate = rix | 0x80;
ea066d5a
MSS
1686 series[i].ChSel = ath_txchainmask_reduction(sc,
1687 common->tx_chainmask, series[i].Rate);
269c44bc 1688 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
545750d3 1689 is_40, is_sgi, is_sp);
074a8c0d
FF
1690 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1691 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
545750d3
FF
1692 continue;
1693 }
1694
ea066d5a 1695 /* legacy rates */
545750d3
FF
1696 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1697 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1698 !(rate->flags & IEEE80211_RATE_ERP_G))
1699 phy = WLAN_RC_PHY_CCK;
1700 else
1701 phy = WLAN_RC_PHY_OFDM;
1702
1703 series[i].Rate = rate->hw_value;
1704 if (rate->hw_value_short) {
1705 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1706 series[i].Rate |= rate->hw_value_short;
1707 } else {
1708 is_sp = false;
1709 }
1710
ea066d5a
MSS
1711 if (bf->bf_state.bfs_paprd)
1712 series[i].ChSel = common->tx_chainmask;
1713 else
1714 series[i].ChSel = ath_txchainmask_reduction(sc,
1715 common->tx_chainmask, series[i].Rate);
1716
545750d3 1717 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
269c44bc 1718 phy, rate->bitrate * 100, len, rix, is_sp);
f078f209
LR
1719 }
1720
27032059 1721 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
269c44bc 1722 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
27032059
FF
1723 flags &= ~ATH9K_TXDESC_RTSENA;
1724
1725 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1726 if (flags & ATH9K_TXDESC_RTSENA)
1727 flags &= ~ATH9K_TXDESC_CTSENA;
1728
e8324357 1729 /* set dur_update_en for l-sig computation except for PS-Poll frames */
c89424df
S
1730 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1731 bf->bf_lastbf->bf_desc,
254ad0ff 1732 !is_pspoll, ctsrate,
c89424df 1733 0, series, 4, flags);
f078f209 1734
f078f209
LR
1735}
1736
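/*
 * A minimal sketch of what ath_buf_set_rate() produces, assuming
 * mac80211 handed down MCS 7 for 4 tries with a fallback to MCS 4 for
 * 2 tries (all other entries empty):
 *
 *   series[0] = { .Rate = 0x87, .Tries = 4, ... }   (0x80 | rix)
 *   series[1] = { .Rate = 0x84, .Tries = 2, ... }
 *   series[2], series[3]: skipped (rates[i].count == 0)
 *
 * The hardware tries each series entry in order, spending Tries
 * attempts on it before falling back to the next one.
 */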
82b873af 1737static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
04caf863 1738 struct ath_txq *txq,
2d42efc4 1739 struct sk_buff *skb)
f078f209 1740{
9ac58615 1741 struct ath_softc *sc = hw->priv;
04caf863 1742 struct ath_hw *ah = sc->sc_ah;
82b873af 1743 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2d42efc4 1744 struct ath_frame_info *fi = get_frame_info(skb);
82b873af 1745 struct ath_buf *bf;
04caf863 1746 struct ath_desc *ds;
04caf863 1747 int frm_type;
82b873af
FF
1748
1749 bf = ath_tx_get_buffer(sc);
1750 if (!bf) {
226afe68 1751 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
82b873af
FF
1752 return NULL;
1753 }
e022edbd 1754
528f0c6b 1755 ATH_TXBUF_RESET(bf);
f078f209 1756
82b873af 1757 bf->bf_flags = setup_tx_flags(skb);
f078f209 1758 bf->bf_mpdu = skb;
f8316df1 1759
c1739eb3
BG
1760 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1761 skb->len, DMA_TO_DEVICE);
1762 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
f8316df1 1763 bf->bf_mpdu = NULL;
6cf9e995 1764 bf->bf_buf_addr = 0;
3800276a
JP
1765 ath_err(ath9k_hw_common(sc->sc_ah),
1766 "dma_mapping_error() on TX\n");
82b873af
FF
1767 ath_tx_return_buffer(sc, bf);
1768 return NULL;
f8316df1
LR
1769 }
1770
528f0c6b 1771 frm_type = get_hw_packet_type(skb);
f078f209 1772
f078f209 1773 ds = bf->bf_desc;
87d5efbb 1774 ath9k_hw_set_desc_link(ah, ds, 0);
f078f209 1775
2d42efc4
FF
1776 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1777 fi->keyix, fi->keytype, bf->bf_flags);
528f0c6b
S
1778
1779 ath9k_hw_filltxdesc(ah, ds,
8f93b8b3
S
1780 skb->len, /* segment length */
1781 true, /* first segment */
1782 true, /* last segment */
3f3a1c80 1783 ds, /* first descriptor */
cc610ac0 1784 bf->bf_buf_addr,
04caf863
FF
1785 txq->axq_qnum);
1786
1787
1788 return bf;
1789}
1790
1791/* FIXME: tx power */
1792static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1793 struct ath_tx_control *txctl)
1794{
1795 struct sk_buff *skb = bf->bf_mpdu;
1796 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1797 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
04caf863 1798 struct list_head bf_head;
248a38d0 1799 struct ath_atx_tid *tid = NULL;
04caf863 1800 u8 tidno;
f078f209 1801
528f0c6b 1802 spin_lock_bh(&txctl->txq->axq_lock);
61e1b0b0
MSS
1803 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1804 ieee80211_is_data_qos(hdr->frame_control)) {
5daefbd0
FF
1805 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1806 IEEE80211_QOS_CTL_TID_MASK;
2d42efc4 1807 tid = ATH_AN_2_TID(txctl->an, tidno);
5daefbd0 1808
066dae93 1809 WARN_ON(tid->ac->txq != txctl->txq);
248a38d0
FF
1810 }
1811
1812 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
04caf863
FF
1813 /*
1814 * Try aggregation if it's a unicast data frame
1815 * and the destination is HT capable.
1816 */
1817 ath_tx_send_ampdu(sc, tid, bf, txctl);
f078f209 1818 } else {
04caf863
FF
1819 INIT_LIST_HEAD(&bf_head);
1820 list_add_tail(&bf->list, &bf_head);
1821
61117f01 1822 bf->bf_state.bfs_ftype = txctl->frame_type;
82b873af
FF
1823 bf->bf_state.bfs_paprd = txctl->paprd;
1824
9a6b8270 1825 if (bf->bf_state.bfs_paprd)
04caf863
FF
1826 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1827 bf->bf_state.bfs_paprd);
9a6b8270 1828
9cf04dcc
MSS
1829 if (txctl->paprd)
1830 bf->bf_state.bfs_paprd_timestamp = jiffies;
1831
5519541d
FF
1832 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1833 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1834
248a38d0 1835 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
f078f209 1836 }
528f0c6b
S
1837
1838 spin_unlock_bh(&txctl->txq->axq_lock);
f078f209
LR
1839}
1840
f8316df1 1841/* Upon failure caller should free skb */
c52f33d0 1842int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1843 struct ath_tx_control *txctl)
f078f209 1844{
28d16708
FF
1845 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1846 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2d42efc4 1847 struct ieee80211_sta *sta = info->control.sta;
f59a59fe 1848 struct ieee80211_vif *vif = info->control.vif;
9ac58615 1849 struct ath_softc *sc = hw->priv;
84642d6b 1850 struct ath_txq *txq = txctl->txq;
528f0c6b 1851 struct ath_buf *bf;
4d91f9f3 1852 int padpos, padsize;
04caf863 1853 int frmlen = skb->len + FCS_LEN;
28d16708 1854 int q;
f078f209 1855
a9927ba3
BG
1856 /* NOTE: sta can be NULL according to net/mac80211.h */
1857 if (sta)
1858 txctl->an = (struct ath_node *)sta->drv_priv;
1859
04caf863
FF
1860 if (info->control.hw_key)
1861 frmlen += info->control.hw_key->icv_len;
1862
f078f209 1863 /*
e8324357
S
1864 * As a temporary workaround, assign seq# here; this will likely need
1865 * to be cleaned up to work better with Beacon transmission and virtual
1866 * BSSes.
f078f209 1867 */
e8324357 1868 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1869 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1870 sc->tx.seq_no += 0x10;
1871 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1872 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1873 }
f078f209 1874
e8324357 1875 /* Add the padding after the header if this is not already done */
4d91f9f3
BP
1876 padpos = ath9k_cmn_padpos(hdr->frame_control);
1877 padsize = padpos & 3;
28d16708
FF
1878 if (padsize && skb->len > padpos) {
1879 if (skb_headroom(skb) < padsize)
1880 return -ENOMEM;
1881
e8324357 1882 skb_push(skb, padsize);
4d91f9f3 1883 memmove(skb->data, skb->data + padsize, padpos);
f078f209 1884 }
f078f209 1885
f59a59fe
FF
1886 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1887 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1888 !ieee80211_is_data(hdr->frame_control))
1889 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1890
2d42efc4
FF
1891 setup_frame_info(hw, skb, frmlen);
1892
1893 /*
1894 * At this point, the vif, hw_key and sta pointers in the tx control
1895 * info are no longer valid (overwritten by the ath_frame_info data).
1896 */
1897
1898 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
28d16708
FF
1899 if (unlikely(!bf))
1900 return -ENOMEM;
f078f209 1901
28d16708
FF
1902 q = skb_get_queue_mapping(skb);
1903 spin_lock_bh(&txq->axq_lock);
1904 if (txq == sc->tx.txq_map[q] &&
1905 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
7545daf4 1906 ieee80211_stop_queue(sc->hw, q);
28d16708 1907 txq->stopped = 1;
f078f209 1908 }
28d16708 1909 spin_unlock_bh(&txq->axq_lock);
f078f209 1910
28d16708
FF
1911 ath_tx_start_dma(sc, bf, txctl);
1912
1913 return 0;
f078f209
LR
1914}
1915
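/*
 * Worked example of the padding math in ath_tx_start(), assuming a QoS
 * data frame whose 802.11 header is 26 bytes long:
 *
 *   padpos  = ath9k_cmn_padpos(fc) = 26
 *   padsize = padpos & 3           = 2
 *
 * skb_push(skb, 2) grows the buffer at the front and the memmove()
 * shifts the 26 header bytes up, leaving 2 pad bytes between header
 * and payload so the payload starts 4-byte aligned for the hardware.
 */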
e8324357
S
1916/*****************/
1917/* TX Completion */
1918/*****************/
528f0c6b 1919
e8324357 1920static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
0cdd5c60 1921 int tx_flags, int ftype, struct ath_txq *txq)
528f0c6b 1922{
e8324357
S
1923 struct ieee80211_hw *hw = sc->hw;
1924 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1925 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 1926 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
97923b14 1927 int q, padpos, padsize;
528f0c6b 1928
226afe68 1929 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1930
6b2c4032 1931 if (tx_flags & ATH_TX_BAR)
e8324357 1932 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
e8324357 1933
6b2c4032 1934 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
e8324357
S
1935 /* Frame was ACKed */
1936 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b
S
1937 }
1938
4d91f9f3
BP
1939 padpos = ath9k_cmn_padpos(hdr->frame_control);
1940 padsize = padpos & 3;
1941 if (padsize && skb->len > padpos + padsize) {
e8324357
S
1942 /*
1943 * Remove MAC header padding before giving the frame back to
1944 * mac80211.
1945 */
4d91f9f3 1946 memmove(skb->data + padsize, skb->data, padpos);
e8324357
S
1947 skb_pull(skb, padsize);
1948 }
528f0c6b 1949
1b04b930
S
1950 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1951 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
226afe68
JP
1952 ath_dbg(common, ATH_DBG_PS,
1953 "Going back to sleep after having received TX status (0x%lx)\n",
1b04b930
S
1954 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1955 PS_WAIT_FOR_CAB |
1956 PS_WAIT_FOR_PSPOLL_DATA |
1957 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
1958 }
1959
7545daf4
FF
1960 q = skb_get_queue_mapping(skb);
1961 if (txq == sc->tx.txq_map[q]) {
1962 spin_lock_bh(&txq->axq_lock);
1963 if (WARN_ON(--txq->pending_frames < 0))
1964 txq->pending_frames = 0;
92460412 1965
7545daf4
FF
1966 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1967 ieee80211_wake_queue(sc->hw, q);
1968 txq->stopped = 0;
066dae93 1969 }
7545daf4 1970 spin_unlock_bh(&txq->axq_lock);
97923b14 1971 }
7545daf4
FF
1972
1973 ieee80211_tx_status(hw, skb);
e8324357 1974}
f078f209 1975
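/*
 * Flow-control sketch: ath_tx_start() increments txq->pending_frames
 * and stops the matching mac80211 queue once the count exceeds
 * ATH_MAX_QDEPTH; the completion path above decrements it and wakes
 * the queue again as soon as it drops back below the threshold, which
 * keeps the per-queue backlog bounded.
 */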
e8324357 1976static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b
FF
1977 struct ath_txq *txq, struct list_head *bf_q,
1978 struct ath_tx_status *ts, int txok, int sendbar)
f078f209 1979{
e8324357 1980 struct sk_buff *skb = bf->bf_mpdu;
e8324357 1981 unsigned long flags;
6b2c4032 1982 int tx_flags = 0;
f078f209 1983
e8324357 1984 if (sendbar)
6b2c4032 1985 tx_flags = ATH_TX_BAR;
f078f209 1986
e8324357 1987 if (!txok) {
6b2c4032 1988 tx_flags |= ATH_TX_ERROR;
f078f209 1989
e8324357 1990 if (bf_isxretried(bf))
6b2c4032 1991 tx_flags |= ATH_TX_XRETRY;
f078f209
LR
1992 }
1993
c1739eb3 1994 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
6cf9e995 1995 bf->bf_buf_addr = 0;
9f42c2b6
FF
1996
1997 if (bf->bf_state.bfs_paprd) {
9cf04dcc
MSS
1998 if (time_after(jiffies,
1999 bf->bf_state.bfs_paprd_timestamp +
2000 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
ca369eb4 2001 dev_kfree_skb_any(skb);
78a18172 2002 else
ca369eb4 2003 complete(&sc->paprd_complete);
9f42c2b6 2004 } else {
5bec3e5a 2005 ath_debug_stat_tx(sc, bf, ts, txq);
0cdd5c60 2006 ath_tx_complete(sc, skb, tx_flags,
61117f01 2007 bf->bf_state.bfs_ftype, txq);
9f42c2b6 2008 }
6cf9e995
BG
2009 /* At this point, skb (bf->bf_mpdu) is consumed, so make sure we don't
2010 * accidentally reference it later.
2011 */
2012 bf->bf_mpdu = NULL;
e8324357
S
2013
2014 /*
2015 * Return this mpdu's list of ath_bufs to the free queue
2016 */
2017 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2018 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2019 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
2020}
2021
0cdd5c60
FF
2022static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2023 struct ath_tx_status *ts, int nframes, int nbad,
2024 int txok, bool update_rc)
f078f209 2025{
a22be22a 2026 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 2027 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 2028 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
0cdd5c60 2029 struct ieee80211_hw *hw = sc->hw;
f0c255a0 2030 struct ath_hw *ah = sc->sc_ah;
8a92e2ee 2031 u8 i, tx_rateindex;
f078f209 2032
95e4acb7 2033 if (txok)
db1a052b 2034 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 2035
db1a052b 2036 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
2037 WARN_ON(tx_rateindex >= hw->max_rates);
2038
db1a052b 2039 if (ts->ts_status & ATH9K_TXERR_FILT)
e8324357 2040 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
ebd02287 2041 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
d969847c 2042 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 2043
b572d033 2044 BUG_ON(nbad > nframes);
ebd02287 2045
b572d033
FF
2046 tx_info->status.ampdu_len = nframes;
2047 tx_info->status.ampdu_ack_len = nframes - nbad;
ebd02287
BS
2048 }
2049
db1a052b 2050 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
8a92e2ee 2051 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
f0c255a0
FF
2052 /*
2053 * If an underrun error is seen, treat it as an excessive
2054 * retry only if the max frame trigger level has been reached
2055 * (2 KB for single stream, and 4 KB for dual stream).
2056 * Adjust the long retry as if the frame was tried
2057 * hw->max_rate_tries times to affect how rate control updates
2058 * PER for the failed rate.
2059 * In case of bus congestion, penalizing this type of
2060 * underrun should help the hardware actually transmit new
2061 * frames successfully by eventually preferring slower rates.
2062 * This itself should also alleviate congestion on the bus.
2063 */
2064 if (ieee80211_is_data(hdr->frame_control) &&
2065 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2066 ATH9K_TX_DELIM_UNDERRUN)) &&
83860c59 2067 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
f0c255a0
FF
2068 tx_info->status.rates[tx_rateindex].count =
2069 hw->max_rate_tries;
f078f209 2070 }
8a92e2ee 2071
545750d3 2072 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
8a92e2ee 2073 tx_info->status.rates[i].count = 0;
545750d3
FF
2074 tx_info->status.rates[i].idx = -1;
2075 }
8a92e2ee 2076
78c4653a 2077 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
f078f209
LR
2078}
2079
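/*
 * Example of the A-MPDU accounting above (numbers assumed): for an
 * aggregate of 16 subframes of which 3 missed the block-ack,
 * nframes = 16 and nbad = 3, so mac80211 sees ampdu_len = 16 and
 * ampdu_ack_len = 13 when it updates its rate statistics.
 */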
e8324357 2080static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 2081{
cbe61d8a 2082 struct ath_hw *ah = sc->sc_ah;
c46917bb 2083 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2084 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2085 struct list_head bf_head;
e8324357 2086 struct ath_desc *ds;
29bffa96 2087 struct ath_tx_status ts;
0934af23 2088 int txok;
e8324357 2089 int status;
f078f209 2090
226afe68
JP
2091 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2092 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2093 txq->axq_link);
f078f209 2094
f078f209
LR
2095 for (;;) {
2096 spin_lock_bh(&txq->axq_lock);
f078f209
LR
2097 if (list_empty(&txq->axq_q)) {
2098 txq->axq_link = NULL;
86271e46 2099 if (sc->sc_flags & SC_OP_TXAGGR)
082f6536 2100 ath_txq_schedule(sc, txq);
f078f209
LR
2101 spin_unlock_bh(&txq->axq_lock);
2102 break;
2103 }
f078f209
LR
2104 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2105
e8324357
S
2106 /*
2107 * There is a race condition that a BH gets scheduled
2108 * after sw writes TxE and before hw re-load the last
2109 * descriptor to get the newly chained one.
2110 * Software must keep the last DONE descriptor as a
2111 * holding descriptor - software does so by marking
2112 * it with the STALE flag.
2113 */
2114 bf_held = NULL;
a119cc49 2115 if (bf->bf_stale) {
e8324357
S
2116 bf_held = bf;
2117 if (list_is_last(&bf_held->list, &txq->axq_q)) {
6ef9b13d 2118 spin_unlock_bh(&txq->axq_lock);
e8324357
S
2119 break;
2120 } else {
2121 bf = list_entry(bf_held->list.next,
6ef9b13d 2122 struct ath_buf, list);
e8324357 2123 }
f078f209
LR
2124 }
2125
2126 lastbf = bf->bf_lastbf;
e8324357 2127 ds = lastbf->bf_desc;
f078f209 2128
29bffa96
FF
2129 memset(&ts, 0, sizeof(ts));
2130 status = ath9k_hw_txprocdesc(ah, ds, &ts);
e8324357 2131 if (status == -EINPROGRESS) {
f078f209 2132 spin_unlock_bh(&txq->axq_lock);
e8324357 2133 break;
f078f209 2134 }
2dac4fb9 2135 TX_STAT_INC(txq->axq_qnum, txprocdesc);
f078f209 2136
e8324357
S
2137 /*
2138 * Remove the ath_bufs of this transmit unit from txq,
2139 * but leave the last descriptor behind as the holding
2140 * descriptor for hw.
2141 */
a119cc49 2142 lastbf->bf_stale = true;
e8324357 2143 INIT_LIST_HEAD(&bf_head);
e8324357
S
2144 if (!list_is_singular(&lastbf->list))
2145 list_cut_position(&bf_head,
2146 &txq->axq_q, lastbf->list.prev);
f078f209 2147
e8324357 2148 txq->axq_depth--;
29bffa96 2149 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
164ace38 2150 txq->axq_tx_inprogress = false;
0a8cea84
FF
2151 if (bf_held)
2152 list_del(&bf_held->list);
4b3ba66a
FF
2153
2154 if (bf_is_ampdu_not_probing(bf))
2155 txq->axq_ampdu_depth--;
69081624 2156
e8324357 2157 spin_unlock_bh(&txq->axq_lock);
f078f209 2158
0a8cea84
FF
2159 if (bf_held)
2160 ath_tx_return_buffer(sc, bf_held);
f078f209 2161
e8324357
S
2162 if (!bf_isampdu(bf)) {
2163 /*
2164 * This frame is sent out as a single frame.
2165 * Use hardware retry status for this frame.
2166 */
29bffa96 2167 if (ts.ts_status & ATH9K_TXERR_XRETRY)
e8324357 2168 bf->bf_state.bf_type |= BUF_XRETRY;
0cdd5c60 2169 ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
e8324357 2170 }
f078f209 2171
e8324357 2172 if (bf_isampdu(bf))
c5992618
FF
2173 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2174 true);
e8324357 2175 else
29bffa96 2176 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
8469cdef 2177
059d806c 2178 spin_lock_bh(&txq->axq_lock);
60f2d1d5 2179
86271e46 2180 if (sc->sc_flags & SC_OP_TXAGGR)
e8324357
S
2181 ath_txq_schedule(sc, txq);
2182 spin_unlock_bh(&txq->axq_lock);
8469cdef
S
2183 }
2184}
2185
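/*
 * A sketch of the holding-descriptor scheme in ath_tx_processq().
 * After a batch completes, the last DONE descriptor stays on axq_q,
 * marked bf_stale:
 *
 *   axq_q: [stale holder] -> [bf ... bf_lastbf] -> ...
 *
 * The next pass finds the holder first, steps to list.next for the
 * frame actually being completed, and only returns the holder to the
 * free list once something has completed behind it, so the hardware
 * can still re-read the old link pointer safely.
 */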
305fe47f 2186static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2187{
2188 struct ath_softc *sc = container_of(work, struct ath_softc,
2189 tx_complete_work.work);
2190 struct ath_txq *txq;
2191 int i;
2192 bool needreset = false;
60f2d1d5
BG
2193#ifdef CONFIG_ATH9K_DEBUGFS
2194 sc->tx_complete_poll_work_seen++;
2195#endif
164ace38
SB
2196
2197 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2198 if (ATH_TXQ_SETUP(sc, i)) {
2199 txq = &sc->tx.txq[i];
2200 spin_lock_bh(&txq->axq_lock);
2201 if (txq->axq_depth) {
2202 if (txq->axq_tx_inprogress) {
2203 needreset = true;
2204 spin_unlock_bh(&txq->axq_lock);
2205 break;
2206 } else {
2207 txq->axq_tx_inprogress = true;
2208 }
2209 }
2210 spin_unlock_bh(&txq->axq_lock);
2211 }
2212
2213 if (needreset) {
226afe68
JP
2214 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2215 "tx hung, resetting the chip\n");
fac6b6a0 2216 ath_reset(sc, true);
164ace38
SB
2217 }
2218
42935eca 2219 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2220 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2221}
2222
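/*
 * The poll work above is a two-pass watchdog: each run flags every
 * busy queue with axq_tx_inprogress, and ath_tx_processq() clears the
 * flag whenever a descriptor completes. A queue found still flagged
 * on the next run has made no progress for a whole poll interval,
 * which is treated as a hung chip and triggers ath_reset().
 */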
2223
f078f209 2224
e8324357 2225void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2226{
e8324357
S
2227 int i;
2228 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
f078f209 2229
e8324357 2230 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
f078f209 2231
e8324357
S
2232 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2233 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2234 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2235 }
2236}
2237
e5003249
VT
2238void ath_tx_edma_tasklet(struct ath_softc *sc)
2239{
2240 struct ath_tx_status txs;
2241 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2242 struct ath_hw *ah = sc->sc_ah;
2243 struct ath_txq *txq;
2244 struct ath_buf *bf, *lastbf;
2245 struct list_head bf_head;
2246 int status;
2247 int txok;
2248
2249 for (;;) {
2250 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2251 if (status == -EINPROGRESS)
2252 break;
2253 if (status == -EIO) {
226afe68
JP
2254 ath_dbg(common, ATH_DBG_XMIT,
2255 "Error processing tx status\n");
e5003249
VT
2256 break;
2257 }
2258
2259 /* Skip beacon completions */
2260 if (txs.qid == sc->beacon.beaconq)
2261 continue;
2262
2263 txq = &sc->tx.txq[txs.qid];
2264
2265 spin_lock_bh(&txq->axq_lock);
2266 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2267 spin_unlock_bh(&txq->axq_lock);
2268 return;
2269 }
2270
2271 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2272 struct ath_buf, list);
2273 lastbf = bf->bf_lastbf;
2274
2275 INIT_LIST_HEAD(&bf_head);
2276 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2277 &lastbf->list);
2278 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2279 txq->axq_depth--;
2280 txq->axq_tx_inprogress = false;
4b3ba66a
FF
2281 if (bf_is_ampdu_not_probing(bf))
2282 txq->axq_ampdu_depth--;
e5003249
VT
2283 spin_unlock_bh(&txq->axq_lock);
2284
2285 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2286
2287 if (!bf_isampdu(bf)) {
e5003249
VT
2288 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2289 bf->bf_state.bf_type |= BUF_XRETRY;
0cdd5c60 2290 ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
e5003249
VT
2291 }
2292
2293 if (bf_isampdu(bf))
c5992618
FF
2294 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2295 txok, true);
e5003249
VT
2296 else
2297 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2298 &txs, txok, 0);
2299
2300 spin_lock_bh(&txq->axq_lock);
60f2d1d5 2301
86271e46
FF
2302 if (!list_empty(&txq->txq_fifo_pending)) {
2303 INIT_LIST_HEAD(&bf_head);
2304 bf = list_first_entry(&txq->txq_fifo_pending,
2305 struct ath_buf, list);
2306 list_cut_position(&bf_head,
2307 &txq->txq_fifo_pending,
2308 &bf->bf_lastbf->list);
2309 ath_tx_txqaddbuf(sc, txq, &bf_head);
2310 } else if (sc->sc_flags & SC_OP_TXAGGR)
2311 ath_txq_schedule(sc, txq);
2312
e5003249
VT
2313 spin_unlock_bh(&txq->axq_lock);
2314 }
2315}
2316
e8324357
S
2317/*****************/
2318/* Init, Cleanup */
2319/*****************/
f078f209 2320
5088c2f1
VT
2321static int ath_txstatus_setup(struct ath_softc *sc, int size)
2322{
2323 struct ath_descdma *dd = &sc->txsdma;
2324 u8 txs_len = sc->sc_ah->caps.txs_len;
2325
2326 dd->dd_desc_len = size * txs_len;
2327 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2328 &dd->dd_desc_paddr, GFP_KERNEL);
2329 if (!dd->dd_desc)
2330 return -ENOMEM;
2331
2332 return 0;
2333}
2334
2335static int ath_tx_edma_init(struct ath_softc *sc)
2336{
2337 int err;
2338
2339 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2340 if (!err)
2341 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2342 sc->txsdma.dd_desc_paddr,
2343 ATH_TXSTATUS_RING_SIZE);
2344
2345 return err;
2346}
2347
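/*
 * Sizing note for the EDMA status ring set up above: the coherent
 * buffer holds ATH_TXSTATUS_RING_SIZE entries of caps.txs_len bytes
 * each; the hardware writes one TX status entry per completed frame
 * and ath_tx_edma_tasklet() drains them through ath9k_hw_txprocdesc().
 */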
2348static void ath_tx_edma_cleanup(struct ath_softc *sc)
2349{
2350 struct ath_descdma *dd = &sc->txsdma;
2351
2352 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2353 dd->dd_desc_paddr);
2354}
2355
e8324357 2356int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2357{
c46917bb 2358 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2359 int error = 0;
f078f209 2360
797fe5cb 2361 spin_lock_init(&sc->tx.txbuflock);
f078f209 2362
797fe5cb 2363 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2364 "tx", nbufs, 1, 1);
797fe5cb 2365 if (error != 0) {
3800276a
JP
2366 ath_err(common,
2367 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2368 goto err;
2369 }
f078f209 2370
797fe5cb 2371 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2372 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2373 if (error != 0) {
3800276a
JP
2374 ath_err(common,
2375 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2376 goto err;
2377 }
f078f209 2378
164ace38
SB
2379 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2380
5088c2f1
VT
2381 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2382 error = ath_tx_edma_init(sc);
2383 if (error)
2384 goto err;
2385 }
2386
797fe5cb 2387err:
e8324357
S
2388 if (error != 0)
2389 ath_tx_cleanup(sc);
f078f209 2390
e8324357 2391 return error;
f078f209
LR
2392}
2393
797fe5cb 2394void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2395{
2396 if (sc->beacon.bdma.dd_desc_len != 0)
2397 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2398
2399 if (sc->tx.txdma.dd_desc_len != 0)
2400 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2401
2402 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2403 ath_tx_edma_cleanup(sc);
e8324357 2404}
f078f209
LR
2405
2406void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2407{
c5170163
S
2408 struct ath_atx_tid *tid;
2409 struct ath_atx_ac *ac;
2410 int tidno, acno;
f078f209 2411
8ee5afbc 2412 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2413 tidno < WME_NUM_TID;
2414 tidno++, tid++) {
2415 tid->an = an;
2416 tid->tidno = tidno;
2417 tid->seq_start = tid->seq_next = 0;
2418 tid->baw_size = WME_MAX_BA;
2419 tid->baw_head = tid->baw_tail = 0;
2420 tid->sched = false;
e8324357 2421 tid->paused = false;
a37c2c79 2422 tid->state &= ~AGGR_CLEANUP;
c5170163 2423 INIT_LIST_HEAD(&tid->buf_q);
c5170163 2424 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2425 tid->ac = &an->ac[acno];
a37c2c79
S
2426 tid->state &= ~AGGR_ADDBA_COMPLETE;
2427 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2428 }
f078f209 2429
8ee5afbc 2430 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2431 acno < WME_NUM_AC; acno++, ac++) {
2432 ac->sched = false;
066dae93 2433 ac->txq = sc->tx.txq_map[acno];
c5170163 2434 INIT_LIST_HEAD(&ac->tid_q);
f078f209
LR
2435 }
2436}
2437
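/*
 * Example of the TID/AC wiring above, assuming the standard WMM
 * mapping in TID_TO_WME_AC(): TIDs 0 and 3 map to AC_BE, 1 and 2 to
 * AC_BK, 4 and 5 to AC_VI, and 6 and 7 to AC_VO, so several tids
 * share each of the four entries in an->ac[] and thus a single
 * hardware queue via ac->txq.
 */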
b5aa9bf9 2438void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209 2439{
2b40994c
FF
2440 struct ath_atx_ac *ac;
2441 struct ath_atx_tid *tid;
f078f209 2442 struct ath_txq *txq;
066dae93 2443 int tidno;
e8324357 2444
2b40994c
FF
2445 for (tidno = 0, tid = &an->tid[tidno];
2446 tidno < WME_NUM_TID; tidno++, tid++) {
f078f209 2447
2b40994c 2448 ac = tid->ac;
066dae93 2449 txq = ac->txq;
f078f209 2450
2b40994c
FF
2451 spin_lock_bh(&txq->axq_lock);
2452
2453 if (tid->sched) {
2454 list_del(&tid->list);
2455 tid->sched = false;
2456 }
2457
2458 if (ac->sched) {
2459 list_del(&ac->list);
2460 tid->ac->sched = false;
f078f209 2461 }
2b40994c
FF
2462
2463 ath_tid_drain(sc, txq, tid);
2464 tid->state &= ~AGGR_ADDBA_COMPLETE;
2465 tid->state &= ~AGGR_CLEANUP;
2466
2467 spin_unlock_bh(&txq->axq_lock);
f078f209
LR
2468 }
2469}