Fix common misspellings
[linux-2.6-block.git] / drivers / net / wireless / ath / ath9k / xmit.c
CommitLineData
f078f209 1/*
cee075a2 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
f078f209
LR
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
394cf0a1 17#include "ath9k.h"
b622a720 18#include "ar9003_mac.h"
f078f209
LR
19
20#define BITS_PER_BYTE 8
21#define OFDM_PLCP_BITS 22
f078f209
LR
22#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
23#define L_STF 8
24#define L_LTF 8
25#define L_SIG 4
26#define HT_SIG 8
27#define HT_STF 4
28#define HT_LTF(_ns) (4 * (_ns))
29#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
30#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
31#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
32#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
33
f078f209 34
c6663876 35static u16 bits_per_symbol[][2] = {
f078f209
LR
36 /* 20MHz 40MHz */
37 { 26, 54 }, /* 0: BPSK */
38 { 52, 108 }, /* 1: QPSK 1/2 */
39 { 78, 162 }, /* 2: QPSK 3/4 */
40 { 104, 216 }, /* 3: 16-QAM 1/2 */
41 { 156, 324 }, /* 4: 16-QAM 3/4 */
42 { 208, 432 }, /* 5: 64-QAM 2/3 */
43 { 234, 486 }, /* 6: 64-QAM 3/4 */
44 { 260, 540 }, /* 7: 64-QAM 5/6 */
f078f209
LR
45};
46
47#define IS_HT_RATE(_rate) ((_rate) & 0x80)
48
82b873af
FF
49static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
50 struct ath_atx_tid *tid,
2d42efc4 51 struct list_head *bf_head);
e8324357 52static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b
FF
53 struct ath_txq *txq, struct list_head *bf_q,
54 struct ath_tx_status *ts, int txok, int sendbar);
102e0572 55static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
e8324357 56 struct list_head *head);
269c44bc 57static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
0cdd5c60
FF
58static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
59 struct ath_tx_status *ts, int nframes, int nbad,
60 int txok, bool update_rc);
90fa539c
FF
61static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
62 int seqno);
c4288390 63
545750d3 64enum {
0e668cde
FF
65 MCS_HT20,
66 MCS_HT20_SGI,
545750d3
FF
67 MCS_HT40,
68 MCS_HT40_SGI,
69};
70
0e668cde
FF
71static int ath_max_4ms_framelen[4][32] = {
72 [MCS_HT20] = {
73 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
74 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
75 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
76 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
77 },
78 [MCS_HT20_SGI] = {
79 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
80 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
81 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
82 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
545750d3
FF
83 },
84 [MCS_HT40] = {
0e668cde
FF
85 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
86 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
87 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
88 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
545750d3
FF
89 },
90 [MCS_HT40_SGI] = {
0e668cde
FF
91 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
92 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
93 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
94 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
545750d3
FF
95 }
96};
97
e8324357
S
98/*********************/
99/* Aggregation logic */
100/*********************/
f078f209 101
e8324357 102static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
ff37e337 103{
e8324357 104 struct ath_atx_ac *ac = tid->ac;
ff37e337 105
e8324357
S
106 if (tid->paused)
107 return;
ff37e337 108
e8324357
S
109 if (tid->sched)
110 return;
ff37e337 111
e8324357
S
112 tid->sched = true;
113 list_add_tail(&tid->list, &ac->tid_q);
528f0c6b 114
e8324357
S
115 if (ac->sched)
116 return;
f078f209 117
e8324357
S
118 ac->sched = true;
119 list_add_tail(&ac->list, &txq->axq_acq);
120}
f078f209 121
e8324357 122static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
f078f209 123{
066dae93 124 struct ath_txq *txq = tid->ac->txq;
e6a9854b 125
75401849 126 WARN_ON(!tid->paused);
f078f209 127
75401849
LB
128 spin_lock_bh(&txq->axq_lock);
129 tid->paused = false;
f078f209 130
e8324357
S
131 if (list_empty(&tid->buf_q))
132 goto unlock;
f078f209 133
e8324357
S
134 ath_tx_queue_tid(txq, tid);
135 ath_txq_schedule(sc, txq);
136unlock:
137 spin_unlock_bh(&txq->axq_lock);
528f0c6b 138}
f078f209 139
2d42efc4 140static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
76e45221
FF
141{
142 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2d42efc4
FF
143 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
144 sizeof(tx_info->rate_driver_data));
145 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
76e45221
FF
146}
147
e8324357 148static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
528f0c6b 149{
066dae93 150 struct ath_txq *txq = tid->ac->txq;
e8324357
S
151 struct ath_buf *bf;
152 struct list_head bf_head;
90fa539c 153 struct ath_tx_status ts;
2d42efc4 154 struct ath_frame_info *fi;
f078f209 155
90fa539c 156 INIT_LIST_HEAD(&bf_head);
e6a9854b 157
90fa539c 158 memset(&ts, 0, sizeof(ts));
75401849 159 spin_lock_bh(&txq->axq_lock);
f078f209 160
e8324357
S
161 while (!list_empty(&tid->buf_q)) {
162 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
d43f3015 163 list_move_tail(&bf->list, &bf_head);
90fa539c 164
e1566d1f 165 spin_unlock_bh(&txq->axq_lock);
2d42efc4
FF
166 fi = get_frame_info(bf->bf_mpdu);
167 if (fi->retries) {
168 ath_tx_update_baw(sc, tid, fi->seqno);
7d2c16be 169 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
90fa539c 170 } else {
a9e99a0c 171 ath_tx_send_normal(sc, txq, NULL, &bf_head);
90fa539c 172 }
e1566d1f 173 spin_lock_bh(&txq->axq_lock);
528f0c6b 174 }
f078f209 175
e8324357 176 spin_unlock_bh(&txq->axq_lock);
528f0c6b 177}
f078f209 178
e8324357
S
179static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
180 int seqno)
528f0c6b 181{
e8324357 182 int index, cindex;
f078f209 183
e8324357
S
184 index = ATH_BA_INDEX(tid->seq_start, seqno);
185 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
f078f209 186
81ee13ba 187 __clear_bit(cindex, tid->tx_buf);
528f0c6b 188
81ee13ba 189 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
e8324357
S
190 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
191 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
192 }
528f0c6b 193}
f078f209 194
e8324357 195static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
2d3bcba0 196 u16 seqno)
528f0c6b 197{
e8324357 198 int index, cindex;
528f0c6b 199
2d3bcba0 200 index = ATH_BA_INDEX(tid->seq_start, seqno);
e8324357 201 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
81ee13ba 202 __set_bit(cindex, tid->tx_buf);
f078f209 203
e8324357
S
204 if (index >= ((tid->baw_tail - tid->baw_head) &
205 (ATH_TID_MAX_BUFS - 1))) {
206 tid->baw_tail = cindex;
207 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
f078f209 208 }
f078f209
LR
209}
210
211/*
e8324357
S
212 * TODO: For frame(s) that are in the retry state, we will reuse the
213 * sequence number(s) without setting the retry bit. The
214 * alternative is to give up on these and BAR the receiver's window
215 * forward.
f078f209 216 */
e8324357
S
217static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
218 struct ath_atx_tid *tid)
f078f209 219
f078f209 220{
e8324357
S
221 struct ath_buf *bf;
222 struct list_head bf_head;
db1a052b 223 struct ath_tx_status ts;
2d42efc4 224 struct ath_frame_info *fi;
db1a052b
FF
225
226 memset(&ts, 0, sizeof(ts));
e8324357 227 INIT_LIST_HEAD(&bf_head);
f078f209 228
e8324357
S
229 for (;;) {
230 if (list_empty(&tid->buf_q))
231 break;
f078f209 232
d43f3015
S
233 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
234 list_move_tail(&bf->list, &bf_head);
f078f209 235
2d42efc4
FF
236 fi = get_frame_info(bf->bf_mpdu);
237 if (fi->retries)
238 ath_tx_update_baw(sc, tid, fi->seqno);
f078f209 239
e8324357 240 spin_unlock(&txq->axq_lock);
db1a052b 241 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
e8324357
S
242 spin_lock(&txq->axq_lock);
243 }
f078f209 244
e8324357
S
245 tid->seq_next = tid->seq_start;
246 tid->baw_tail = tid->baw_head;
f078f209
LR
247}
248
fec247c0 249static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
2d42efc4 250 struct sk_buff *skb)
f078f209 251{
8b7f8532 252 struct ath_frame_info *fi = get_frame_info(skb);
e8324357 253 struct ieee80211_hdr *hdr;
f078f209 254
fec247c0 255 TX_STAT_INC(txq->axq_qnum, a_retries);
8b7f8532 256 if (fi->retries++ > 0)
2d42efc4 257 return;
f078f209 258
e8324357
S
259 hdr = (struct ieee80211_hdr *)skb->data;
260 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
f078f209
LR
261}
262
0a8cea84 263static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
d43f3015 264{
0a8cea84 265 struct ath_buf *bf = NULL;
d43f3015
S
266
267 spin_lock_bh(&sc->tx.txbuflock);
0a8cea84
FF
268
269 if (unlikely(list_empty(&sc->tx.txbuf))) {
8a46097a
VT
270 spin_unlock_bh(&sc->tx.txbuflock);
271 return NULL;
272 }
0a8cea84
FF
273
274 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
275 list_del(&bf->list);
276
d43f3015
S
277 spin_unlock_bh(&sc->tx.txbuflock);
278
0a8cea84
FF
279 return bf;
280}
281
282static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
283{
284 spin_lock_bh(&sc->tx.txbuflock);
285 list_add_tail(&bf->list, &sc->tx.txbuf);
286 spin_unlock_bh(&sc->tx.txbuflock);
287}
288
289static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
290{
291 struct ath_buf *tbf;
292
293 tbf = ath_tx_get_buffer(sc);
294 if (WARN_ON(!tbf))
295 return NULL;
296
d43f3015
S
297 ATH_TXBUF_RESET(tbf);
298
299 tbf->bf_mpdu = bf->bf_mpdu;
300 tbf->bf_buf_addr = bf->bf_buf_addr;
d826c832 301 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
d43f3015 302 tbf->bf_state = bf->bf_state;
d43f3015
S
303
304 return tbf;
305}
306
b572d033
FF
307static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
308 struct ath_tx_status *ts, int txok,
309 int *nframes, int *nbad)
310{
2d42efc4 311 struct ath_frame_info *fi;
b572d033
FF
312 u16 seq_st = 0;
313 u32 ba[WME_BA_BMP_SIZE >> 5];
314 int ba_index;
315 int isaggr = 0;
316
317 *nbad = 0;
318 *nframes = 0;
319
b572d033
FF
320 isaggr = bf_isaggr(bf);
321 if (isaggr) {
322 seq_st = ts->ts_seqnum;
323 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
324 }
325
326 while (bf) {
2d42efc4
FF
327 fi = get_frame_info(bf->bf_mpdu);
328 ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
b572d033
FF
329
330 (*nframes)++;
331 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
332 (*nbad)++;
333
334 bf = bf->bf_next;
335 }
336}
337
338
d43f3015
S
339static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
340 struct ath_buf *bf, struct list_head *bf_q,
c5992618 341 struct ath_tx_status *ts, int txok, bool retry)
f078f209 342{
e8324357
S
343 struct ath_node *an = NULL;
344 struct sk_buff *skb;
1286ec6d 345 struct ieee80211_sta *sta;
0cdd5c60 346 struct ieee80211_hw *hw = sc->hw;
1286ec6d 347 struct ieee80211_hdr *hdr;
76d5a9e8 348 struct ieee80211_tx_info *tx_info;
e8324357 349 struct ath_atx_tid *tid = NULL;
d43f3015 350 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
e8324357 351 struct list_head bf_head, bf_pending;
0934af23 352 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
f078f209 353 u32 ba[WME_BA_BMP_SIZE >> 5];
0934af23
VT
354 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
355 bool rc_update = true;
78c4653a 356 struct ieee80211_tx_rate rates[4];
2d42efc4 357 struct ath_frame_info *fi;
ebd02287 358 int nframes;
5daefbd0 359 u8 tidno;
f078f209 360
a22be22a 361 skb = bf->bf_mpdu;
1286ec6d
S
362 hdr = (struct ieee80211_hdr *)skb->data;
363
76d5a9e8 364 tx_info = IEEE80211_SKB_CB(skb);
76d5a9e8 365
78c4653a
FF
366 memcpy(rates, tx_info->control.rates, sizeof(rates));
367
1286ec6d 368 rcu_read_lock();
f078f209 369
686b9cb9 370 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
1286ec6d
S
371 if (!sta) {
372 rcu_read_unlock();
73e19463 373
31e79a59
FF
374 INIT_LIST_HEAD(&bf_head);
375 while (bf) {
376 bf_next = bf->bf_next;
377
378 bf->bf_state.bf_type |= BUF_XRETRY;
379 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
380 !bf->bf_stale || bf_next != NULL)
381 list_move_tail(&bf->list, &bf_head);
382
0cdd5c60 383 ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
31e79a59
FF
384 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
385 0, 0);
386
387 bf = bf_next;
388 }
1286ec6d 389 return;
f078f209
LR
390 }
391
1286ec6d 392 an = (struct ath_node *)sta->drv_priv;
5daefbd0
FF
393 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
394 tid = ATH_AN_2_TID(an, tidno);
1286ec6d 395
b11b160d
FF
396 /*
397 * The hardware occasionally sends a tx status for the wrong TID.
398 * In this case, the BA status cannot be considered valid and all
399 * subframes need to be retransmitted
400 */
5daefbd0 401 if (tidno != ts->tid)
b11b160d
FF
402 txok = false;
403
e8324357 404 isaggr = bf_isaggr(bf);
d43f3015 405 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
f078f209 406
d43f3015 407 if (isaggr && txok) {
db1a052b
FF
408 if (ts->ts_flags & ATH9K_TX_BA) {
409 seq_st = ts->ts_seqnum;
410 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
e8324357 411 } else {
d43f3015
S
412 /*
413 * AR5416 can become deaf/mute when BA
414 * issue happens. Chip needs to be reset.
415 * But AP code may have sychronization issues
416 * when perform internal reset in this routine.
417 * Only enable reset in STA mode for now.
418 */
2660b81a 419 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
d43f3015 420 needreset = 1;
e8324357 421 }
f078f209
LR
422 }
423
e8324357
S
424 INIT_LIST_HEAD(&bf_pending);
425 INIT_LIST_HEAD(&bf_head);
f078f209 426
b572d033 427 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
e8324357 428 while (bf) {
f0b8220c 429 txfail = txpending = sendbar = 0;
e8324357 430 bf_next = bf->bf_next;
f078f209 431
78c4653a
FF
432 skb = bf->bf_mpdu;
433 tx_info = IEEE80211_SKB_CB(skb);
2d42efc4 434 fi = get_frame_info(skb);
78c4653a 435
2d42efc4 436 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
e8324357
S
437 /* transmit completion, subframe is
438 * acked by block ack */
0934af23 439 acked_cnt++;
e8324357
S
440 } else if (!isaggr && txok) {
441 /* transmit completion */
0934af23 442 acked_cnt++;
e8324357 443 } else {
c5992618 444 if (!(tid->state & AGGR_CLEANUP) && retry) {
2d42efc4
FF
445 if (fi->retries < ATH_MAX_SW_RETRIES) {
446 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
e8324357
S
447 txpending = 1;
448 } else {
449 bf->bf_state.bf_type |= BUF_XRETRY;
450 txfail = 1;
451 sendbar = 1;
0934af23 452 txfail_cnt++;
e8324357
S
453 }
454 } else {
455 /*
456 * cleanup in progress, just fail
457 * the un-acked sub-frames
458 */
459 txfail = 1;
460 }
461 }
f078f209 462
e5003249
VT
463 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
464 bf_next == NULL) {
cbfe89c6
VT
465 /*
466 * Make sure the last desc is reclaimed if it
467 * not a holding desc.
468 */
469 if (!bf_last->bf_stale)
470 list_move_tail(&bf->list, &bf_head);
471 else
472 INIT_LIST_HEAD(&bf_head);
e8324357 473 } else {
9680e8a3 474 BUG_ON(list_empty(bf_q));
d43f3015 475 list_move_tail(&bf->list, &bf_head);
e8324357 476 }
f078f209 477
90fa539c 478 if (!txpending || (tid->state & AGGR_CLEANUP)) {
e8324357
S
479 /*
480 * complete the acked-ones/xretried ones; update
481 * block-ack window
482 */
483 spin_lock_bh(&txq->axq_lock);
2d42efc4 484 ath_tx_update_baw(sc, tid, fi->seqno);
e8324357 485 spin_unlock_bh(&txq->axq_lock);
f078f209 486
8a92e2ee 487 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
78c4653a 488 memcpy(tx_info->control.rates, rates, sizeof(rates));
0cdd5c60 489 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
8a92e2ee
VT
490 rc_update = false;
491 } else {
0cdd5c60 492 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
8a92e2ee
VT
493 }
494
db1a052b
FF
495 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
496 !txfail, sendbar);
e8324357 497 } else {
d43f3015 498 /* retry the un-acked ones */
e5003249
VT
499 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
500 if (bf->bf_next == NULL && bf_last->bf_stale) {
501 struct ath_buf *tbf;
502
503 tbf = ath_clone_txbuf(sc, bf_last);
504 /*
505 * Update tx baw and complete the
506 * frame with failed status if we
507 * run out of tx buf.
508 */
509 if (!tbf) {
510 spin_lock_bh(&txq->axq_lock);
2d42efc4 511 ath_tx_update_baw(sc, tid, fi->seqno);
e5003249
VT
512 spin_unlock_bh(&txq->axq_lock);
513
514 bf->bf_state.bf_type |=
515 BUF_XRETRY;
0cdd5c60 516 ath_tx_rc_status(sc, bf, ts, nframes,
b572d033 517 nbad, 0, false);
e5003249
VT
518 ath_tx_complete_buf(sc, bf, txq,
519 &bf_head,
520 ts, 0, 0);
521 break;
522 }
523
524 ath9k_hw_cleartxdesc(sc->sc_ah,
525 tbf->bf_desc);
526 list_add_tail(&tbf->list, &bf_head);
527 } else {
528 /*
529 * Clear descriptor status words for
530 * software retry
531 */
532 ath9k_hw_cleartxdesc(sc->sc_ah,
533 bf->bf_desc);
c41d92dc 534 }
e8324357
S
535 }
536
537 /*
538 * Put this buffer to the temporary pending
539 * queue to retain ordering
540 */
541 list_splice_tail_init(&bf_head, &bf_pending);
542 }
543
544 bf = bf_next;
f078f209 545 }
f078f209 546
4cee7861
FF
547 /* prepend un-acked frames to the beginning of the pending frame queue */
548 if (!list_empty(&bf_pending)) {
549 spin_lock_bh(&txq->axq_lock);
550 list_splice(&bf_pending, &tid->buf_q);
551 ath_tx_queue_tid(txq, tid);
552 spin_unlock_bh(&txq->axq_lock);
553 }
554
e8324357 555 if (tid->state & AGGR_CLEANUP) {
90fa539c
FF
556 ath_tx_flush_tid(sc, tid);
557
e8324357
S
558 if (tid->baw_head == tid->baw_tail) {
559 tid->state &= ~AGGR_ADDBA_COMPLETE;
e8324357 560 tid->state &= ~AGGR_CLEANUP;
d43f3015 561 }
e8324357 562 }
f078f209 563
1286ec6d
S
564 rcu_read_unlock();
565
bdd62c06
VN
566 if (needreset) {
567 spin_unlock_bh(&sc->sc_pcu_lock);
e8324357 568 ath_reset(sc, false);
bdd62c06
VN
569 spin_lock_bh(&sc->sc_pcu_lock);
570 }
e8324357 571}
f078f209 572
e8324357
S
573static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
574 struct ath_atx_tid *tid)
f078f209 575{
528f0c6b
S
576 struct sk_buff *skb;
577 struct ieee80211_tx_info *tx_info;
a8efee4f 578 struct ieee80211_tx_rate *rates;
d43f3015 579 u32 max_4ms_framelen, frmlen;
4ef70841 580 u16 aggr_limit, legacy = 0;
e8324357 581 int i;
528f0c6b 582
a22be22a 583 skb = bf->bf_mpdu;
528f0c6b 584 tx_info = IEEE80211_SKB_CB(skb);
e63835b0 585 rates = tx_info->control.rates;
528f0c6b 586
e8324357
S
587 /*
588 * Find the lowest frame length among the rate series that will have a
589 * 4ms transmit duration.
590 * TODO - TXOP limit needs to be considered.
591 */
592 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
e63835b0 593
e8324357
S
594 for (i = 0; i < 4; i++) {
595 if (rates[i].count) {
545750d3
FF
596 int modeidx;
597 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
e8324357
S
598 legacy = 1;
599 break;
600 }
601
0e668cde 602 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
545750d3
FF
603 modeidx = MCS_HT40;
604 else
0e668cde
FF
605 modeidx = MCS_HT20;
606
607 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
608 modeidx++;
545750d3
FF
609
610 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
d43f3015 611 max_4ms_framelen = min(max_4ms_framelen, frmlen);
f078f209
LR
612 }
613 }
e63835b0 614
f078f209 615 /*
e8324357
S
616 * limit aggregate size by the minimum rate if rate selected is
617 * not a probe rate, if rate selected is a probe rate then
618 * avoid aggregation of this packet.
f078f209 619 */
e8324357
S
620 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
621 return 0;
f078f209 622
1773912b
VT
623 if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
624 aggr_limit = min((max_4ms_framelen * 3) / 8,
625 (u32)ATH_AMPDU_LIMIT_MAX);
626 else
627 aggr_limit = min(max_4ms_framelen,
628 (u32)ATH_AMPDU_LIMIT_MAX);
f078f209 629
e8324357 630 /*
25985edc
LDM
631 * h/w can accept aggregates up to 16 bit lengths (65535).
632 * The IE, however can hold up to 65536, which shows up here
e8324357 633 * as zero. Ignore 65536 since we are constrained by hw.
f078f209 634 */
4ef70841
S
635 if (tid->an->maxampdu)
636 aggr_limit = min(aggr_limit, tid->an->maxampdu);
f078f209 637
e8324357
S
638 return aggr_limit;
639}
f078f209 640
e8324357 641/*
d43f3015 642 * Returns the number of delimiters to be added to
e8324357 643 * meet the minimum required mpdudensity.
e8324357
S
644 */
645static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
646 struct ath_buf *bf, u16 frmlen)
647{
e8324357
S
648 struct sk_buff *skb = bf->bf_mpdu;
649 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
4ef70841 650 u32 nsymbits, nsymbols;
e8324357 651 u16 minlen;
545750d3 652 u8 flags, rix;
c6663876 653 int width, streams, half_gi, ndelim, mindelim;
2d42efc4 654 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
e8324357
S
655
656 /* Select standard number of delimiters based on frame length alone */
657 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
f078f209
LR
658
659 /*
e8324357
S
660 * If encryption enabled, hardware requires some more padding between
661 * subframes.
662 * TODO - this could be improved to be dependent on the rate.
663 * The hardware can keep up at lower rates, but not higher rates
f078f209 664 */
2d42efc4 665 if (fi->keyix != ATH9K_TXKEYIX_INVALID)
e8324357 666 ndelim += ATH_AGGR_ENCRYPTDELIM;
f078f209 667
e8324357
S
668 /*
669 * Convert desired mpdu density from microeconds to bytes based
670 * on highest rate in rate series (i.e. first rate) to determine
671 * required minimum length for subframe. Take into account
672 * whether high rate is 20 or 40Mhz and half or full GI.
4ef70841 673 *
e8324357
S
674 * If there is no mpdu density restriction, no further calculation
675 * is needed.
676 */
4ef70841
S
677
678 if (tid->an->mpdudensity == 0)
e8324357 679 return ndelim;
f078f209 680
e8324357
S
681 rix = tx_info->control.rates[0].idx;
682 flags = tx_info->control.rates[0].flags;
e8324357
S
683 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
684 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
f078f209 685
e8324357 686 if (half_gi)
4ef70841 687 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
e8324357 688 else
4ef70841 689 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
f078f209 690
e8324357
S
691 if (nsymbols == 0)
692 nsymbols = 1;
f078f209 693
c6663876
FF
694 streams = HT_RC_2_STREAMS(rix);
695 nsymbits = bits_per_symbol[rix % 8][width] * streams;
e8324357 696 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
f078f209 697
e8324357 698 if (frmlen < minlen) {
e8324357
S
699 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
700 ndelim = max(mindelim, ndelim);
f078f209
LR
701 }
702
e8324357 703 return ndelim;
f078f209
LR
704}
705
e8324357 706static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
fec247c0 707 struct ath_txq *txq,
d43f3015 708 struct ath_atx_tid *tid,
269c44bc
FF
709 struct list_head *bf_q,
710 int *aggr_len)
f078f209 711{
e8324357 712#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
d43f3015
S
713 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
714 int rl = 0, nframes = 0, ndelim, prev_al = 0;
e8324357
S
715 u16 aggr_limit = 0, al = 0, bpad = 0,
716 al_delta, h_baw = tid->baw_size / 2;
717 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
0299a50a 718 struct ieee80211_tx_info *tx_info;
2d42efc4 719 struct ath_frame_info *fi;
f078f209 720
e8324357 721 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
f078f209 722
e8324357
S
723 do {
724 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
2d42efc4 725 fi = get_frame_info(bf->bf_mpdu);
f078f209 726
d43f3015 727 /* do not step over block-ack window */
2d42efc4 728 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
e8324357
S
729 status = ATH_AGGR_BAW_CLOSED;
730 break;
731 }
f078f209 732
e8324357
S
733 if (!rl) {
734 aggr_limit = ath_lookup_rate(sc, bf, tid);
735 rl = 1;
736 }
f078f209 737
d43f3015 738 /* do not exceed aggregation limit */
2d42efc4 739 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
f078f209 740
d43f3015
S
741 if (nframes &&
742 (aggr_limit < (al + bpad + al_delta + prev_al))) {
e8324357
S
743 status = ATH_AGGR_LIMITED;
744 break;
745 }
f078f209 746
0299a50a
FF
747 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
748 if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
749 !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
750 break;
751
d43f3015
S
752 /* do not exceed subframe limit */
753 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
e8324357
S
754 status = ATH_AGGR_LIMITED;
755 break;
756 }
d43f3015 757 nframes++;
f078f209 758
d43f3015 759 /* add padding for previous frame to aggregation length */
e8324357 760 al += bpad + al_delta;
f078f209 761
e8324357
S
762 /*
763 * Get the delimiters needed to meet the MPDU
764 * density for this node.
765 */
2d42efc4 766 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
e8324357 767 bpad = PADBYTES(al_delta) + (ndelim << 2);
f078f209 768
e8324357 769 bf->bf_next = NULL;
87d5efbb 770 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
f078f209 771
d43f3015 772 /* link buffers of this frame to the aggregate */
2d42efc4
FF
773 if (!fi->retries)
774 ath_tx_addto_baw(sc, tid, fi->seqno);
d43f3015
S
775 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
776 list_move_tail(&bf->list, bf_q);
e8324357
S
777 if (bf_prev) {
778 bf_prev->bf_next = bf;
87d5efbb
VT
779 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
780 bf->bf_daddr);
e8324357
S
781 }
782 bf_prev = bf;
fec247c0 783
e8324357 784 } while (!list_empty(&tid->buf_q));
f078f209 785
269c44bc 786 *aggr_len = al;
d43f3015 787
e8324357
S
788 return status;
789#undef PADBYTES
790}
f078f209 791
e8324357
S
792static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
793 struct ath_atx_tid *tid)
794{
d43f3015 795 struct ath_buf *bf;
e8324357 796 enum ATH_AGGR_STATUS status;
2d42efc4 797 struct ath_frame_info *fi;
e8324357 798 struct list_head bf_q;
269c44bc 799 int aggr_len;
f078f209 800
e8324357
S
801 do {
802 if (list_empty(&tid->buf_q))
803 return;
f078f209 804
e8324357
S
805 INIT_LIST_HEAD(&bf_q);
806
269c44bc 807 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
f078f209 808
f078f209 809 /*
d43f3015
S
810 * no frames picked up to be aggregated;
811 * block-ack window is not open.
f078f209 812 */
e8324357
S
813 if (list_empty(&bf_q))
814 break;
f078f209 815
e8324357 816 bf = list_first_entry(&bf_q, struct ath_buf, list);
d43f3015 817 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
f078f209 818
d43f3015 819 /* if only one frame, send as non-aggregate */
b572d033 820 if (bf == bf->bf_lastbf) {
2d42efc4
FF
821 fi = get_frame_info(bf->bf_mpdu);
822
e8324357 823 bf->bf_state.bf_type &= ~BUF_AGGR;
d43f3015 824 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
2d42efc4 825 ath_buf_set_rate(sc, bf, fi->framelen);
e8324357
S
826 ath_tx_txqaddbuf(sc, txq, &bf_q);
827 continue;
828 }
f078f209 829
d43f3015 830 /* setup first desc of aggregate */
e8324357 831 bf->bf_state.bf_type |= BUF_AGGR;
269c44bc
FF
832 ath_buf_set_rate(sc, bf, aggr_len);
833 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
f078f209 834
d43f3015
S
835 /* anchor last desc of aggregate */
836 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
f078f209 837
e8324357 838 ath_tx_txqaddbuf(sc, txq, &bf_q);
fec247c0 839 TX_STAT_INC(txq->axq_qnum, a_aggr);
f078f209 840
4b3ba66a 841 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
e8324357
S
842 status != ATH_AGGR_BAW_CLOSED);
843}
844
231c3a1f
FF
845int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
846 u16 tid, u16 *ssn)
e8324357
S
847{
848 struct ath_atx_tid *txtid;
849 struct ath_node *an;
850
851 an = (struct ath_node *)sta->drv_priv;
f83da965 852 txtid = ATH_AN_2_TID(an, tid);
231c3a1f
FF
853
854 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
855 return -EAGAIN;
856
f83da965 857 txtid->state |= AGGR_ADDBA_PROGRESS;
75401849 858 txtid->paused = true;
49447f2f 859 *ssn = txtid->seq_start = txtid->seq_next;
231c3a1f 860
2ed72229
FF
861 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
862 txtid->baw_head = txtid->baw_tail = 0;
863
231c3a1f 864 return 0;
e8324357 865}
f078f209 866
f83da965 867void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
e8324357
S
868{
869 struct ath_node *an = (struct ath_node *)sta->drv_priv;
870 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
066dae93 871 struct ath_txq *txq = txtid->ac->txq;
f078f209 872
e8324357 873 if (txtid->state & AGGR_CLEANUP)
f83da965 874 return;
f078f209 875
e8324357 876 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
5eae6592 877 txtid->state &= ~AGGR_ADDBA_PROGRESS;
f83da965 878 return;
e8324357 879 }
f078f209 880
e8324357 881 spin_lock_bh(&txq->axq_lock);
75401849 882 txtid->paused = true;
f078f209 883
90fa539c
FF
884 /*
885 * If frames are still being transmitted for this TID, they will be
886 * cleaned up during tx completion. To prevent race conditions, this
887 * TID can only be reused after all in-progress subframes have been
888 * completed.
889 */
890 if (txtid->baw_head != txtid->baw_tail)
e8324357 891 txtid->state |= AGGR_CLEANUP;
90fa539c 892 else
e8324357 893 txtid->state &= ~AGGR_ADDBA_COMPLETE;
90fa539c
FF
894 spin_unlock_bh(&txq->axq_lock);
895
896 ath_tx_flush_tid(sc, txtid);
e8324357 897}
f078f209 898
e8324357
S
899void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
900{
901 struct ath_atx_tid *txtid;
902 struct ath_node *an;
903
904 an = (struct ath_node *)sta->drv_priv;
905
906 if (sc->sc_flags & SC_OP_TXAGGR) {
907 txtid = ATH_AN_2_TID(an, tid);
908 txtid->baw_size =
909 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
910 txtid->state |= AGGR_ADDBA_COMPLETE;
911 txtid->state &= ~AGGR_ADDBA_PROGRESS;
912 ath_tx_resume_tid(sc, txtid);
913 }
f078f209
LR
914}
915
e8324357
S
916/********************/
917/* Queue Management */
918/********************/
f078f209 919
e8324357
S
920static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
921 struct ath_txq *txq)
f078f209 922{
e8324357
S
923 struct ath_atx_ac *ac, *ac_tmp;
924 struct ath_atx_tid *tid, *tid_tmp;
f078f209 925
e8324357
S
926 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
927 list_del(&ac->list);
928 ac->sched = false;
929 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
930 list_del(&tid->list);
931 tid->sched = false;
932 ath_tid_drain(sc, txq, tid);
933 }
f078f209
LR
934 }
935}
936
e8324357 937struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
f078f209 938{
cbe61d8a 939 struct ath_hw *ah = sc->sc_ah;
c46917bb 940 struct ath_common *common = ath9k_hw_common(ah);
e8324357 941 struct ath9k_tx_queue_info qi;
066dae93
FF
942 static const int subtype_txq_to_hwq[] = {
943 [WME_AC_BE] = ATH_TXQ_AC_BE,
944 [WME_AC_BK] = ATH_TXQ_AC_BK,
945 [WME_AC_VI] = ATH_TXQ_AC_VI,
946 [WME_AC_VO] = ATH_TXQ_AC_VO,
947 };
60f2d1d5 948 int axq_qnum, i;
f078f209 949
e8324357 950 memset(&qi, 0, sizeof(qi));
066dae93 951 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
e8324357
S
952 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
953 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
954 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
955 qi.tqi_physCompBuf = 0;
f078f209
LR
956
957 /*
e8324357
S
958 * Enable interrupts only for EOL and DESC conditions.
959 * We mark tx descriptors to receive a DESC interrupt
960 * when a tx queue gets deep; otherwise waiting for the
961 * EOL to reap descriptors. Note that this is done to
962 * reduce interrupt load and this only defers reaping
963 * descriptors, never transmitting frames. Aside from
964 * reducing interrupts this also permits more concurrency.
965 * The only potential downside is if the tx queue backs
966 * up in which case the top half of the kernel may backup
967 * due to a lack of tx descriptors.
968 *
969 * The UAPSD queue is an exception, since we take a desc-
970 * based intr on the EOSP frames.
f078f209 971 */
afe754d6
VT
972 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
973 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
974 TXQ_FLAG_TXERRINT_ENABLE;
975 } else {
976 if (qtype == ATH9K_TX_QUEUE_UAPSD)
977 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
978 else
979 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
980 TXQ_FLAG_TXDESCINT_ENABLE;
981 }
60f2d1d5
BG
982 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
983 if (axq_qnum == -1) {
f078f209 984 /*
e8324357
S
985 * NB: don't print a message, this happens
986 * normally on parts with too few tx queues
f078f209 987 */
e8324357 988 return NULL;
f078f209 989 }
60f2d1d5 990 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
3800276a 991 ath_err(common, "qnum %u out of range, max %zu!\n",
60f2d1d5
BG
992 axq_qnum, ARRAY_SIZE(sc->tx.txq));
993 ath9k_hw_releasetxqueue(ah, axq_qnum);
e8324357
S
994 return NULL;
995 }
60f2d1d5
BG
996 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
997 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
f078f209 998
60f2d1d5
BG
999 txq->axq_qnum = axq_qnum;
1000 txq->mac80211_qnum = -1;
e8324357
S
1001 txq->axq_link = NULL;
1002 INIT_LIST_HEAD(&txq->axq_q);
1003 INIT_LIST_HEAD(&txq->axq_acq);
1004 spin_lock_init(&txq->axq_lock);
1005 txq->axq_depth = 0;
4b3ba66a 1006 txq->axq_ampdu_depth = 0;
164ace38 1007 txq->axq_tx_inprogress = false;
60f2d1d5 1008 sc->tx.txqsetup |= 1<<axq_qnum;
e5003249
VT
1009
1010 txq->txq_headidx = txq->txq_tailidx = 0;
1011 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1012 INIT_LIST_HEAD(&txq->txq_fifo[i]);
1013 INIT_LIST_HEAD(&txq->txq_fifo_pending);
e8324357 1014 }
60f2d1d5 1015 return &sc->tx.txq[axq_qnum];
f078f209
LR
1016}
1017
e8324357
S
1018int ath_txq_update(struct ath_softc *sc, int qnum,
1019 struct ath9k_tx_queue_info *qinfo)
1020{
cbe61d8a 1021 struct ath_hw *ah = sc->sc_ah;
e8324357
S
1022 int error = 0;
1023 struct ath9k_tx_queue_info qi;
1024
1025 if (qnum == sc->beacon.beaconq) {
1026 /*
1027 * XXX: for beacon queue, we just save the parameter.
1028 * It will be picked up by ath_beaconq_config when
1029 * it's necessary.
1030 */
1031 sc->beacon.beacon_qi = *qinfo;
f078f209 1032 return 0;
e8324357 1033 }
f078f209 1034
9680e8a3 1035 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
e8324357
S
1036
1037 ath9k_hw_get_txq_props(ah, qnum, &qi);
1038 qi.tqi_aifs = qinfo->tqi_aifs;
1039 qi.tqi_cwmin = qinfo->tqi_cwmin;
1040 qi.tqi_cwmax = qinfo->tqi_cwmax;
1041 qi.tqi_burstTime = qinfo->tqi_burstTime;
1042 qi.tqi_readyTime = qinfo->tqi_readyTime;
1043
1044 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
3800276a
JP
1045 ath_err(ath9k_hw_common(sc->sc_ah),
1046 "Unable to update hardware queue %u!\n", qnum);
e8324357
S
1047 error = -EIO;
1048 } else {
1049 ath9k_hw_resettxqueue(ah, qnum);
1050 }
1051
1052 return error;
1053}
1054
1055int ath_cabq_update(struct ath_softc *sc)
1056{
1057 struct ath9k_tx_queue_info qi;
9814f6b3 1058 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
e8324357 1059 int qnum = sc->beacon.cabq->axq_qnum;
f078f209 1060
e8324357 1061 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
f078f209 1062 /*
e8324357 1063 * Ensure the readytime % is within the bounds.
f078f209 1064 */
17d7904d
S
1065 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1066 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1067 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1068 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
f078f209 1069
9814f6b3 1070 qi.tqi_readyTime = (cur_conf->beacon_interval *
fdbf7335 1071 sc->config.cabqReadytime) / 100;
e8324357
S
1072 ath_txq_update(sc, qnum, &qi);
1073
1074 return 0;
f078f209
LR
1075}
1076
4b3ba66a
FF
1077static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1078{
1079 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1080 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1081}
1082
043a0405
S
1083/*
1084 * Drain a given TX queue (could be Beacon or Data)
1085 *
1086 * This assumes output has been stopped and
1087 * we do not need to block ath_tx_tasklet.
1088 */
1089void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
f078f209 1090{
e8324357
S
1091 struct ath_buf *bf, *lastbf;
1092 struct list_head bf_head;
db1a052b
FF
1093 struct ath_tx_status ts;
1094
1095 memset(&ts, 0, sizeof(ts));
e8324357 1096 INIT_LIST_HEAD(&bf_head);
f078f209 1097
e8324357
S
1098 for (;;) {
1099 spin_lock_bh(&txq->axq_lock);
f078f209 1100
e5003249
VT
1101 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1102 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1103 txq->txq_headidx = txq->txq_tailidx = 0;
1104 spin_unlock_bh(&txq->axq_lock);
1105 break;
1106 } else {
1107 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1108 struct ath_buf, list);
1109 }
1110 } else {
1111 if (list_empty(&txq->axq_q)) {
1112 txq->axq_link = NULL;
1113 spin_unlock_bh(&txq->axq_lock);
1114 break;
1115 }
1116 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1117 list);
f078f209 1118
e5003249
VT
1119 if (bf->bf_stale) {
1120 list_del(&bf->list);
1121 spin_unlock_bh(&txq->axq_lock);
f078f209 1122
0a8cea84 1123 ath_tx_return_buffer(sc, bf);
e5003249
VT
1124 continue;
1125 }
e8324357 1126 }
f078f209 1127
e8324357 1128 lastbf = bf->bf_lastbf;
f078f209 1129
e5003249
VT
1130 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1131 list_cut_position(&bf_head,
1132 &txq->txq_fifo[txq->txq_tailidx],
1133 &lastbf->list);
1134 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1135 } else {
1136 /* remove ath_buf's of the same mpdu from txq */
1137 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1138 }
1139
e8324357 1140 txq->axq_depth--;
4b3ba66a
FF
1141 if (bf_is_ampdu_not_probing(bf))
1142 txq->axq_ampdu_depth--;
e8324357
S
1143 spin_unlock_bh(&txq->axq_lock);
1144
1145 if (bf_isampdu(bf))
c5992618
FF
1146 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1147 retry_tx);
e8324357 1148 else
db1a052b 1149 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
f078f209
LR
1150 }
1151
164ace38
SB
1152 spin_lock_bh(&txq->axq_lock);
1153 txq->axq_tx_inprogress = false;
1154 spin_unlock_bh(&txq->axq_lock);
1155
e5003249
VT
1156 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1157 spin_lock_bh(&txq->axq_lock);
1158 while (!list_empty(&txq->txq_fifo_pending)) {
1159 bf = list_first_entry(&txq->txq_fifo_pending,
1160 struct ath_buf, list);
1161 list_cut_position(&bf_head,
1162 &txq->txq_fifo_pending,
1163 &bf->bf_lastbf->list);
1164 spin_unlock_bh(&txq->axq_lock);
1165
1166 if (bf_isampdu(bf))
1167 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
c5992618 1168 &ts, 0, retry_tx);
e5003249
VT
1169 else
1170 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1171 &ts, 0, 0);
1172 spin_lock_bh(&txq->axq_lock);
1173 }
1174 spin_unlock_bh(&txq->axq_lock);
1175 }
e609e2ea
FF
1176
1177 /* flush any pending frames if aggregation is enabled */
1178 if (sc->sc_flags & SC_OP_TXAGGR) {
1179 if (!retry_tx) {
1180 spin_lock_bh(&txq->axq_lock);
1181 ath_txq_drain_pending_buffers(sc, txq);
1182 spin_unlock_bh(&txq->axq_lock);
1183 }
1184 }
f078f209
LR
1185}
1186
080e1a25 1187bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
f078f209 1188{
cbe61d8a 1189 struct ath_hw *ah = sc->sc_ah;
c46917bb 1190 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
043a0405
S
1191 struct ath_txq *txq;
1192 int i, npend = 0;
1193
1194 if (sc->sc_flags & SC_OP_INVALID)
080e1a25 1195 return true;
043a0405 1196
0d51cccc 1197 ath9k_hw_abort_tx_dma(ah);
043a0405 1198
0d51cccc 1199 /* Check if any queue remains active */
043a0405 1200 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
0d51cccc
FF
1201 if (!ATH_TXQ_SETUP(sc, i))
1202 continue;
1203
1204 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
043a0405
S
1205 }
1206
080e1a25 1207 if (npend)
393934c6 1208 ath_err(common, "Failed to stop TX DMA!\n");
043a0405
S
1209
1210 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
92460412
FF
1211 if (!ATH_TXQ_SETUP(sc, i))
1212 continue;
1213
1214 /*
1215 * The caller will resume queues with ieee80211_wake_queues.
1216 * Mark the queue as not stopped to prevent ath_tx_complete
1217 * from waking the queue too early.
1218 */
1219 txq = &sc->tx.txq[i];
1220 txq->stopped = false;
1221 ath_draintxq(sc, txq, retry_tx);
043a0405 1222 }
080e1a25
FF
1223
1224 return !npend;
e8324357 1225}
f078f209 1226
043a0405 1227void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
e8324357 1228{
043a0405
S
1229 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1230 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
e8324357 1231}
f078f209 1232
7755bad9
BG
1233/* For each axq_acq entry, for each tid, try to schedule packets
1234 * for transmit until ampdu_depth has reached min Q depth.
1235 */
e8324357
S
1236void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1237{
7755bad9
BG
1238 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1239 struct ath_atx_tid *tid, *last_tid;
f078f209 1240
21f28e6f
FF
1241 if (list_empty(&txq->axq_acq) ||
1242 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
e8324357 1243 return;
f078f209 1244
e8324357 1245 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
7755bad9 1246 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
f078f209 1247
7755bad9
BG
1248 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1249 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1250 list_del(&ac->list);
1251 ac->sched = false;
f078f209 1252
7755bad9
BG
1253 while (!list_empty(&ac->tid_q)) {
1254 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1255 list);
1256 list_del(&tid->list);
1257 tid->sched = false;
f078f209 1258
7755bad9
BG
1259 if (tid->paused)
1260 continue;
f078f209 1261
7755bad9 1262 ath_tx_sched_aggr(sc, txq, tid);
f078f209 1263
7755bad9
BG
1264 /*
1265 * add tid to round-robin queue if more frames
1266 * are pending for the tid
1267 */
1268 if (!list_empty(&tid->buf_q))
1269 ath_tx_queue_tid(txq, tid);
f078f209 1270
7755bad9
BG
1271 if (tid == last_tid ||
1272 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1273 break;
1274 }
f078f209 1275
7755bad9
BG
1276 if (!list_empty(&ac->tid_q)) {
1277 if (!ac->sched) {
1278 ac->sched = true;
1279 list_add_tail(&ac->list, &txq->axq_acq);
1280 }
f078f209 1281 }
7755bad9
BG
1282
1283 if (ac == last_ac ||
1284 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1285 return;
e8324357
S
1286 }
1287}
f078f209 1288
e8324357
S
1289/***********/
1290/* TX, DMA */
1291/***********/
1292
f078f209 1293/*
e8324357
S
1294 * Insert a chain of ath_buf (descriptors) on a txq and
1295 * assume the descriptors are already chained together by caller.
f078f209 1296 */
e8324357
S
1297static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1298 struct list_head *head)
f078f209 1299{
cbe61d8a 1300 struct ath_hw *ah = sc->sc_ah;
c46917bb 1301 struct ath_common *common = ath9k_hw_common(ah);
e8324357 1302 struct ath_buf *bf;
f078f209 1303
e8324357
S
1304 /*
1305 * Insert the frame on the outbound list and
1306 * pass it on to the hardware.
1307 */
f078f209 1308
e8324357
S
1309 if (list_empty(head))
1310 return;
f078f209 1311
e8324357 1312 bf = list_first_entry(head, struct ath_buf, list);
f078f209 1313
226afe68
JP
1314 ath_dbg(common, ATH_DBG_QUEUE,
1315 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
f078f209 1316
e5003249
VT
1317 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1318 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1319 list_splice_tail_init(head, &txq->txq_fifo_pending);
1320 return;
1321 }
1322 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
226afe68
JP
1323 ath_dbg(common, ATH_DBG_XMIT,
1324 "Initializing tx fifo %d which is non-empty\n",
1325 txq->txq_headidx);
e5003249
VT
1326 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1327 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1328 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
8d8d3fdc 1329 TX_STAT_INC(txq->axq_qnum, puttxbuf);
e8324357 1330 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
226afe68
JP
1331 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1332 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
e8324357 1333 } else {
e5003249
VT
1334 list_splice_tail_init(head, &txq->axq_q);
1335
1336 if (txq->axq_link == NULL) {
8d8d3fdc 1337 TX_STAT_INC(txq->axq_qnum, puttxbuf);
e5003249 1338 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
226afe68
JP
1339 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1340 txq->axq_qnum, ito64(bf->bf_daddr),
1341 bf->bf_desc);
e5003249
VT
1342 } else {
1343 *txq->axq_link = bf->bf_daddr;
226afe68
JP
1344 ath_dbg(common, ATH_DBG_XMIT,
1345 "link[%u] (%p)=%llx (%p)\n",
1346 txq->axq_qnum, txq->axq_link,
1347 ito64(bf->bf_daddr), bf->bf_desc);
e5003249
VT
1348 }
1349 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1350 &txq->axq_link);
8d8d3fdc 1351 TX_STAT_INC(txq->axq_qnum, txstart);
e5003249 1352 ath9k_hw_txstart(ah, txq->axq_qnum);
e8324357 1353 }
e5003249 1354 txq->axq_depth++;
4b3ba66a
FF
1355 if (bf_is_ampdu_not_probing(bf))
1356 txq->axq_ampdu_depth++;
e8324357 1357}
f078f209 1358
e8324357 1359static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
04caf863 1360 struct ath_buf *bf, struct ath_tx_control *txctl)
f078f209 1361{
2d42efc4 1362 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
04caf863 1363 struct list_head bf_head;
f078f209 1364
e8324357 1365 bf->bf_state.bf_type |= BUF_AMPDU;
f078f209 1366
e8324357
S
1367 /*
1368 * Do not queue to h/w when any of the following conditions is true:
1369 * - there are pending frames in software queue
1370 * - the TID is currently paused for ADDBA/BAR request
1371 * - seqno is not within block-ack window
1372 * - h/w queue depth exceeds low water mark
1373 */
1374 if (!list_empty(&tid->buf_q) || tid->paused ||
2d42efc4 1375 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
4b3ba66a 1376 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
f078f209 1377 /*
e8324357
S
1378 * Add this frame to software queue for scheduling later
1379 * for aggregation.
f078f209 1380 */
bda8adda 1381 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
04caf863 1382 list_add_tail(&bf->list, &tid->buf_q);
e8324357
S
1383 ath_tx_queue_tid(txctl->txq, tid);
1384 return;
1385 }
1386
04caf863
FF
1387 INIT_LIST_HEAD(&bf_head);
1388 list_add(&bf->list, &bf_head);
1389
e8324357 1390 /* Add sub-frame to BAW */
2d42efc4
FF
1391 if (!fi->retries)
1392 ath_tx_addto_baw(sc, tid, fi->seqno);
e8324357
S
1393
1394 /* Queue to h/w without aggregation */
bda8adda 1395 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
d43f3015 1396 bf->bf_lastbf = bf;
2d42efc4 1397 ath_buf_set_rate(sc, bf, fi->framelen);
04caf863 1398 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
e8324357
S
1399}
1400
82b873af
FF
1401static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1402 struct ath_atx_tid *tid,
2d42efc4 1403 struct list_head *bf_head)
e8324357 1404{
2d42efc4 1405 struct ath_frame_info *fi;
e8324357
S
1406 struct ath_buf *bf;
1407
e8324357
S
1408 bf = list_first_entry(bf_head, struct ath_buf, list);
1409 bf->bf_state.bf_type &= ~BUF_AMPDU;
1410
1411 /* update starting sequence number for subsequent ADDBA request */
82b873af
FF
1412 if (tid)
1413 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
e8324357 1414
d43f3015 1415 bf->bf_lastbf = bf;
2d42efc4
FF
1416 fi = get_frame_info(bf->bf_mpdu);
1417 ath_buf_set_rate(sc, bf, fi->framelen);
e8324357 1418 ath_tx_txqaddbuf(sc, txq, bf_head);
fec247c0 1419 TX_STAT_INC(txq->axq_qnum, queued);
e8324357
S
1420}
1421
1422static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1423{
1424 struct ieee80211_hdr *hdr;
1425 enum ath9k_pkt_type htype;
1426 __le16 fc;
1427
1428 hdr = (struct ieee80211_hdr *)skb->data;
1429 fc = hdr->frame_control;
1430
1431 if (ieee80211_is_beacon(fc))
1432 htype = ATH9K_PKT_TYPE_BEACON;
1433 else if (ieee80211_is_probe_resp(fc))
1434 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1435 else if (ieee80211_is_atim(fc))
1436 htype = ATH9K_PKT_TYPE_ATIM;
1437 else if (ieee80211_is_pspoll(fc))
1438 htype = ATH9K_PKT_TYPE_PSPOLL;
1439 else
1440 htype = ATH9K_PKT_TYPE_NORMAL;
1441
1442 return htype;
1443}
1444
2d42efc4
FF
1445static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1446 int framelen)
e8324357 1447{
9ac58615 1448 struct ath_softc *sc = hw->priv;
e8324357 1449 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2d42efc4
FF
1450 struct ieee80211_sta *sta = tx_info->control.sta;
1451 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
e8324357 1452 struct ieee80211_hdr *hdr;
2d42efc4 1453 struct ath_frame_info *fi = get_frame_info(skb);
e8324357
S
1454 struct ath_node *an;
1455 struct ath_atx_tid *tid;
2d42efc4
FF
1456 enum ath9k_key_type keytype;
1457 u16 seqno = 0;
5daefbd0 1458 u8 tidno;
e8324357 1459
2d42efc4 1460 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
e8324357 1461
e8324357 1462 hdr = (struct ieee80211_hdr *)skb->data;
2d42efc4
FF
1463 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1464 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
e8324357 1465
2d42efc4
FF
1466 an = (struct ath_node *) sta->drv_priv;
1467 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1468
1469 /*
1470 * Override seqno set by upper layer with the one
1471 * in tx aggregation state.
1472 */
1473 tid = ATH_AN_2_TID(an, tidno);
1474 seqno = tid->seq_next;
1475 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1476 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1477 }
1478
1479 memset(fi, 0, sizeof(*fi));
1480 if (hw_key)
1481 fi->keyix = hw_key->hw_key_idx;
1482 else
1483 fi->keyix = ATH9K_TXKEYIX_INVALID;
1484 fi->keytype = keytype;
1485 fi->framelen = framelen;
1486 fi->seqno = seqno;
e8324357
S
1487}
1488
82b873af 1489static int setup_tx_flags(struct sk_buff *skb)
e8324357
S
1490{
1491 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1492 int flags = 0;
1493
1494 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1495 flags |= ATH9K_TXDESC_INTREQ;
1496
1497 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1498 flags |= ATH9K_TXDESC_NOACK;
e8324357 1499
82b873af 1500 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
b0a33448
LR
1501 flags |= ATH9K_TXDESC_LDPC;
1502
e8324357
S
1503 return flags;
1504}
1505
1506/*
1507 * rix - rate index
1508 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1509 * width - 0 for 20 MHz, 1 for 40 MHz
1510 * half_gi - to use 4us v/s 3.6 us for symbol time
1511 */
269c44bc 1512static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
e8324357
S
1513 int width, int half_gi, bool shortPreamble)
1514{
e8324357 1515 u32 nbits, nsymbits, duration, nsymbols;
269c44bc 1516 int streams;
e8324357
S
1517
1518 /* find number of symbols: PLCP + data */
c6663876 1519 streams = HT_RC_2_STREAMS(rix);
e8324357 1520 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
c6663876 1521 nsymbits = bits_per_symbol[rix % 8][width] * streams;
e8324357
S
1522 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1523
1524 if (!half_gi)
1525 duration = SYMBOL_TIME(nsymbols);
1526 else
1527 duration = SYMBOL_TIME_HALFGI(nsymbols);
1528
1529 /* addup duration for legacy/ht training and signal fields */
e8324357
S
1530 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1531
1532 return duration;
1533}
1534
ea066d5a
MSS
1535u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1536{
1537 struct ath_hw *ah = sc->sc_ah;
1538 struct ath9k_channel *curchan = ah->curchan;
1539 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1540 (curchan->channelFlags & CHANNEL_5GHZ) &&
1541 (chainmask == 0x7) && (rate < 0x90))
1542 return 0x3;
1543 else
1544 return chainmask;
1545}
1546
269c44bc 1547static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
e8324357 1548{
43c27613 1549 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357
S
1550 struct ath9k_11n_rate_series series[4];
1551 struct sk_buff *skb;
1552 struct ieee80211_tx_info *tx_info;
1553 struct ieee80211_tx_rate *rates;
545750d3 1554 const struct ieee80211_rate *rate;
254ad0ff 1555 struct ieee80211_hdr *hdr;
c89424df
S
1556 int i, flags = 0;
1557 u8 rix = 0, ctsrate = 0;
254ad0ff 1558 bool is_pspoll;
e8324357
S
1559
1560 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
1561
a22be22a 1562 skb = bf->bf_mpdu;
e8324357
S
1563 tx_info = IEEE80211_SKB_CB(skb);
1564 rates = tx_info->control.rates;
254ad0ff
S
1565 hdr = (struct ieee80211_hdr *)skb->data;
1566 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
e8324357 1567
e8324357 1568 /*
c89424df
S
1569 * We check if Short Preamble is needed for the CTS rate by
1570 * checking the BSS's global flag.
1571 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
e8324357 1572 */
545750d3
FF
1573 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1574 ctsrate = rate->hw_value;
c89424df 1575 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
545750d3 1576 ctsrate |= rate->hw_value_short;
e8324357 1577
e8324357 1578 for (i = 0; i < 4; i++) {
545750d3
FF
1579 bool is_40, is_sgi, is_sp;
1580 int phy;
1581
e8324357
S
1582 if (!rates[i].count || (rates[i].idx < 0))
1583 continue;
1584
1585 rix = rates[i].idx;
e8324357
S
1586 series[i].Tries = rates[i].count;
1587
27032059
FF
1588 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1589 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
c89424df 1590 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
27032059
FF
1591 flags |= ATH9K_TXDESC_RTSENA;
1592 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1593 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1594 flags |= ATH9K_TXDESC_CTSENA;
1595 }
1596
c89424df
S
1597 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1598 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1599 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1600 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
e8324357 1601
545750d3
FF
1602 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1603 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1604 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1605
1606 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1607 /* MCS rates */
1608 series[i].Rate = rix | 0x80;
ea066d5a
MSS
1609 series[i].ChSel = ath_txchainmask_reduction(sc,
1610 common->tx_chainmask, series[i].Rate);
269c44bc 1611 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
545750d3 1612 is_40, is_sgi, is_sp);
074a8c0d
FF
1613 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1614 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
545750d3
FF
1615 continue;
1616 }
1617
ea066d5a 1618 /* legacy rates */
545750d3
FF
1619 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1620 !(rate->flags & IEEE80211_RATE_ERP_G))
1621 phy = WLAN_RC_PHY_CCK;
1622 else
1623 phy = WLAN_RC_PHY_OFDM;
1624
1625 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1626 series[i].Rate = rate->hw_value;
1627 if (rate->hw_value_short) {
1628 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1629 series[i].Rate |= rate->hw_value_short;
1630 } else {
1631 is_sp = false;
1632 }
1633
ea066d5a
MSS
1634 if (bf->bf_state.bfs_paprd)
1635 series[i].ChSel = common->tx_chainmask;
1636 else
1637 series[i].ChSel = ath_txchainmask_reduction(sc,
1638 common->tx_chainmask, series[i].Rate);
1639
545750d3 1640 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
269c44bc 1641 phy, rate->bitrate * 100, len, rix, is_sp);
f078f209
LR
1642 }
1643
27032059 1644 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
269c44bc 1645 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
27032059
FF
1646 flags &= ~ATH9K_TXDESC_RTSENA;
1647
1648 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1649 if (flags & ATH9K_TXDESC_RTSENA)
1650 flags &= ~ATH9K_TXDESC_CTSENA;
1651
e8324357 1652 /* set dur_update_en for l-sig computation except for PS-Poll frames */
c89424df
S
1653 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1654 bf->bf_lastbf->bf_desc,
254ad0ff 1655 !is_pspoll, ctsrate,
c89424df 1656 0, series, 4, flags);
f078f209 1657
17d7904d 1658 if (sc->config.ath_aggr_prot && flags)
c89424df 1659 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
f078f209
LR
1660}
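
/*
 * [Editor's sketch] For MCS rates, series[i].PktDuration above comes from
 * ath_pkt_duration(). A minimal sketch of that HT airtime computation,
 * assuming the timing macros and bits_per_symbol[] table declared at the
 * top of this file; the driver's actual helper may differ in detail:
 */
static u32 sketch_pkt_duration(u8 rix, int pktlen, bool is_40, bool is_sgi)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams = HT_RC_2_STREAMS(rix);

	/* number of symbols: PLCP overhead plus payload bits */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][is_40] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	duration = is_sgi ? SYMBOL_TIME_HALFGI(nsymbols)
			  : SYMBOL_TIME(nsymbols);

	/* add the legacy/HT training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}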
1661
82b873af 1662static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
04caf863 1663 struct ath_txq *txq,
2d42efc4 1664 struct sk_buff *skb)
f078f209 1665{
9ac58615 1666 struct ath_softc *sc = hw->priv;
04caf863 1667 struct ath_hw *ah = sc->sc_ah;
82b873af 1668 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2d42efc4 1669 struct ath_frame_info *fi = get_frame_info(skb);
82b873af 1670 struct ath_buf *bf;
04caf863 1671 struct ath_desc *ds;
04caf863 1672 int frm_type;
82b873af
FF
1673
1674 bf = ath_tx_get_buffer(sc);
1675 if (!bf) {
226afe68 1676 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
82b873af
FF
1677 return NULL;
1678 }
e022edbd 1679
528f0c6b 1680 ATH_TXBUF_RESET(bf);
f078f209 1681
82b873af 1682 bf->bf_flags = setup_tx_flags(skb);
f078f209 1683 bf->bf_mpdu = skb;
f8316df1 1684
c1739eb3
BG
1685 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1686 skb->len, DMA_TO_DEVICE);
1687 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
f8316df1 1688 bf->bf_mpdu = NULL;
6cf9e995 1689 bf->bf_buf_addr = 0;
3800276a
JP
1690 ath_err(ath9k_hw_common(sc->sc_ah),
1691 "dma_mapping_error() on TX\n");
82b873af
FF
1692 ath_tx_return_buffer(sc, bf);
1693 return NULL;
f8316df1
LR
1694 }
1695
528f0c6b 1696 frm_type = get_hw_packet_type(skb);
f078f209 1697
f078f209 1698 ds = bf->bf_desc;
87d5efbb 1699 ath9k_hw_set_desc_link(ah, ds, 0);
f078f209 1700
2d42efc4
FF
1701 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1702 fi->keyix, fi->keytype, bf->bf_flags);
528f0c6b
S
1703
1704 ath9k_hw_filltxdesc(ah, ds,
8f93b8b3
S
1705 skb->len, /* segment length */
1706 true, /* first segment */
1707 true, /* last segment */
3f3a1c80 1708 ds, /* first descriptor */
cc610ac0 1709 bf->bf_buf_addr,
04caf863
FF
1710 txq->axq_qnum);
1711
1712
1713 return bf;
1714}
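
/*
 * [Editor's note] ath_tx_setup_buffer() above uses the standard streaming
 * DMA idiom: dma_map_single() can fail (e.g. IOMMU exhaustion), so the
 * result must be checked with dma_mapping_error() before it reaches the
 * hardware. A minimal sketch of the pattern, with 0 as the "unmapped"
 * sentinel the driver itself uses for bf_buf_addr:
 */
static dma_addr_t sketch_map_tx_buf(struct device *dev, void *buf, size_t len)
{
	dma_addr_t paddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* on error the caller must unwind, as ath_tx_setup_buffer() does */
	if (unlikely(dma_mapping_error(dev, paddr)))
		return 0;

	return paddr;
}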
1715
1716/* FIXME: tx power */
1717static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1718 struct ath_tx_control *txctl)
1719{
1720 struct sk_buff *skb = bf->bf_mpdu;
1721 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1722 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
04caf863 1723 struct list_head bf_head;
248a38d0 1724 struct ath_atx_tid *tid = NULL;
04caf863 1725 u8 tidno;
f078f209 1726
528f0c6b 1727 spin_lock_bh(&txctl->txq->axq_lock);
61e1b0b0
MSS
1728 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1729 ieee80211_is_data_qos(hdr->frame_control)) {
5daefbd0
FF
1730 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1731 IEEE80211_QOS_CTL_TID_MASK;
2d42efc4 1732 tid = ATH_AN_2_TID(txctl->an, tidno);
5daefbd0 1733
066dae93 1734 WARN_ON(tid->ac->txq != txctl->txq);
248a38d0
FF
1735 }
1736
1737 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
04caf863
FF
1738 /*
1739 * Try aggregation if it's a unicast data frame
1740 * and the destination is HT capable.
1741 */
1742 ath_tx_send_ampdu(sc, tid, bf, txctl);
f078f209 1743 } else {
04caf863
FF
1744 INIT_LIST_HEAD(&bf_head);
1745 list_add_tail(&bf->list, &bf_head);
1746
61117f01 1747 bf->bf_state.bfs_ftype = txctl->frame_type;
82b873af
FF
1748 bf->bf_state.bfs_paprd = txctl->paprd;
1749
9a6b8270 1750 if (bf->bf_state.bfs_paprd)
04caf863
FF
1751 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1752 bf->bf_state.bfs_paprd);
9a6b8270 1753
9cf04dcc
MSS
1754 if (txctl->paprd)
1755 bf->bf_state.bfs_paprd_timestamp = jiffies;
1756
248a38d0 1757 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
f078f209 1758 }
528f0c6b
S
1759
1760 spin_unlock_bh(&txctl->txq->axq_lock);
f078f209
LR
1761}
1762
f8316df1 1763/* Upon failure caller should free skb */
c52f33d0 1764int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1765 struct ath_tx_control *txctl)
f078f209 1766{
28d16708
FF
1767 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1768 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2d42efc4 1769 struct ieee80211_sta *sta = info->control.sta;
9ac58615 1770 struct ath_softc *sc = hw->priv;
84642d6b 1771 struct ath_txq *txq = txctl->txq;
528f0c6b 1772 struct ath_buf *bf;
4d91f9f3 1773 int padpos, padsize;
04caf863 1774 int frmlen = skb->len + FCS_LEN;
28d16708 1775 int q;
f078f209 1776
a9927ba3
BG
1777 /* NOTE: sta can be NULL according to net/mac80211.h */
1778 if (sta)
1779 txctl->an = (struct ath_node *)sta->drv_priv;
1780
04caf863
FF
1781 if (info->control.hw_key)
1782 frmlen += info->control.hw_key->icv_len;
1783
f078f209 1784 /*
e8324357
S
1785 * As a temporary workaround, assign seq# here; this will likely need
1786 * to be cleaned up to work better with Beacon transmission and virtual
1787 * BSSes.
f078f209 1788 */
e8324357 1789 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1790 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1791 sc->tx.seq_no += 0x10;
1792 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1793 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1794 }
f078f209 1795
e8324357 1796 /* Add the padding after the header if this is not already done */
4d91f9f3
BP
1797 padpos = ath9k_cmn_padpos(hdr->frame_control);
1798 padsize = padpos & 3;
28d16708
FF
1799 if (padsize && skb->len > padpos) {
1800 if (skb_headroom(skb) < padsize)
1801 return -ENOMEM;
1802
e8324357 1803 skb_push(skb, padsize);
4d91f9f3 1804 memmove(skb->data, skb->data + padsize, padpos);
f078f209 1805 }
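	/*
	 * [Editor's worked example] ath9k_cmn_padpos() returns the offset
	 * where padding belongs (the end of the 802.11 header). For a QoS
	 * data header padpos is 26, so padsize = 26 & 3 = 2: skb_push()
	 * grows the buffer by two bytes and memmove() shifts the header
	 * forward, leaving two pad bytes between header and payload so the
	 * frame body starts on a 4-byte boundary.
	 */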
f078f209 1806
2d42efc4
FF
1807 setup_frame_info(hw, skb, frmlen);
1808
1809 /*
1810 * At this point, the vif, hw_key and sta pointers in the tx control
1811 * info are no longer valid (overwritten by the ath_frame_info data).
1812 */
1813
1814 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
28d16708
FF
1815 if (unlikely(!bf))
1816 return -ENOMEM;
f078f209 1817
28d16708
FF
1818 q = skb_get_queue_mapping(skb);
1819 spin_lock_bh(&txq->axq_lock);
1820 if (txq == sc->tx.txq_map[q] &&
1821 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
7545daf4 1822 ieee80211_stop_queue(sc->hw, q);
28d16708 1823 txq->stopped = 1;
f078f209 1824 }
28d16708 1825 spin_unlock_bh(&txq->axq_lock);
f078f209 1826
28d16708
FF
1827 ath_tx_start_dma(sc, bf, txctl);
1828
1829 return 0;
f078f209
LR
1830}
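
/*
 * [Editor's sketch] ath_tx_start() above stops the mac80211 queue once
 * pending_frames exceeds ATH_MAX_QDEPTH; ath_tx_complete() below wakes it
 * again once completions bring the count back under the threshold. The
 * flow-control logic in isolation (caller holds axq_lock; 'enqueue'
 * distinguishes the submit and complete paths):
 */
static void sketch_txq_flow_control(struct ath_softc *sc, struct ath_txq *txq,
				    int q, bool enqueue)
{
	if (enqueue) {
		if (++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
			ieee80211_stop_queue(sc->hw, q);
			txq->stopped = 1;
		}
	} else {
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = 0;
		}
	}
}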
1831
e8324357
S
1832/*****************/
1833/* TX Completion */
1834/*****************/
528f0c6b 1835
e8324357 1836static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
0cdd5c60 1837 int tx_flags, int ftype, struct ath_txq *txq)
528f0c6b 1838{
e8324357
S
1839 struct ieee80211_hw *hw = sc->hw;
1840 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1841 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 1842 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
97923b14 1843 int q, padpos, padsize;
528f0c6b 1844
226afe68 1845 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1846
6b2c4032 1847 if (tx_flags & ATH_TX_BAR)
e8324357 1848 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
e8324357 1849
6b2c4032 1850 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
e8324357
S
1851 /* Frame was ACKed */
1852 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b
S
1853 }
1854
4d91f9f3
BP
1855 padpos = ath9k_cmn_padpos(hdr->frame_control);
1856 padsize = padpos & 3;
1857 if (padsize && skb->len > padpos + padsize) {
e8324357
S
1858 /*
1859 * Remove MAC header padding before giving the frame back to
1860 * mac80211.
1861 */
4d91f9f3 1862 memmove(skb->data + padsize, skb->data, padpos);
e8324357
S
1863 skb_pull(skb, padsize);
1864 }
528f0c6b 1865
1b04b930
S
1866 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1867 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
226afe68
JP
1868 ath_dbg(common, ATH_DBG_PS,
1869 "Going back to sleep after having received TX status (0x%lx)\n",
1b04b930
S
1870 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1871 PS_WAIT_FOR_CAB |
1872 PS_WAIT_FOR_PSPOLL_DATA |
1873 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
1874 }
1875
7545daf4
FF
1876 q = skb_get_queue_mapping(skb);
1877 if (txq == sc->tx.txq_map[q]) {
1878 spin_lock_bh(&txq->axq_lock);
1879 if (WARN_ON(--txq->pending_frames < 0))
1880 txq->pending_frames = 0;
92460412 1881
7545daf4
FF
1882 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1883 ieee80211_wake_queue(sc->hw, q);
1884 txq->stopped = 0;
066dae93 1885 }
7545daf4 1886 spin_unlock_bh(&txq->axq_lock);
97923b14 1887 }
7545daf4
FF
1888
1889 ieee80211_tx_status(hw, skb);
e8324357 1890}
f078f209 1891
e8324357 1892static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b
FF
1893 struct ath_txq *txq, struct list_head *bf_q,
1894 struct ath_tx_status *ts, int txok, int sendbar)
f078f209 1895{
e8324357 1896 struct sk_buff *skb = bf->bf_mpdu;
e8324357 1897 unsigned long flags;
6b2c4032 1898 int tx_flags = 0;
f078f209 1899
e8324357 1900 if (sendbar)
6b2c4032 1901 tx_flags = ATH_TX_BAR;
f078f209 1902
e8324357 1903 if (!txok) {
6b2c4032 1904 tx_flags |= ATH_TX_ERROR;
f078f209 1905
e8324357 1906 if (bf_isxretried(bf))
6b2c4032 1907 tx_flags |= ATH_TX_XRETRY;
f078f209
LR
1908 }
1909
c1739eb3 1910 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
6cf9e995 1911 bf->bf_buf_addr = 0;
9f42c2b6
FF
1912
1913 if (bf->bf_state.bfs_paprd) {
9cf04dcc
MSS
1914 if (time_after(jiffies,
1915 bf->bf_state.bfs_paprd_timestamp +
1916 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
ca369eb4 1917 dev_kfree_skb_any(skb);
78a18172 1918 else
ca369eb4 1919 complete(&sc->paprd_complete);
9f42c2b6 1920 } else {
5bec3e5a 1921 ath_debug_stat_tx(sc, bf, ts, txq);
0cdd5c60 1922 ath_tx_complete(sc, skb, tx_flags,
61117f01 1923 bf->bf_state.bfs_ftype, txq);
9f42c2b6 1924 }
6cf9e995
BG
1925 /* At this point, skb (bf->bf_mpdu) has been consumed; make sure we don't
1926 * accidentally reference it later.
1927 */
1928 bf->bf_mpdu = NULL;
e8324357
S
1929
1930 /*
1931 * Return this mpdu's list of ath_bufs to the free queue.
1932 */
1933 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1934 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1935 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
1936}
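
/*
 * [Editor's note] The PAPRD branch above uses the kernel's wrap-safe
 * jiffies comparison: time_after(a, b) stays correct even when jiffies
 * wraps around. The timeout test in isolation (ATH_PAPRD_TIMEOUT is in
 * milliseconds):
 */
static bool sketch_paprd_timed_out(unsigned long start)
{
	return time_after(jiffies,
			  start + msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
}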
1937
0cdd5c60
FF
1938static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1939 struct ath_tx_status *ts, int nframes, int nbad,
1940 int txok, bool update_rc)
f078f209 1941{
a22be22a 1942 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 1943 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 1944 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
0cdd5c60 1945 struct ieee80211_hw *hw = sc->hw;
f0c255a0 1946 struct ath_hw *ah = sc->sc_ah;
8a92e2ee 1947 u8 i, tx_rateindex;
f078f209 1948
95e4acb7 1949 if (txok)
db1a052b 1950 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 1951
db1a052b 1952 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
1953 WARN_ON(tx_rateindex >= hw->max_rates);
1954
db1a052b 1955 if (ts->ts_status & ATH9K_TXERR_FILT)
e8324357 1956 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
ebd02287 1957 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
d969847c 1958 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 1959
b572d033 1960 BUG_ON(nbad > nframes);
ebd02287 1961
b572d033
FF
1962 tx_info->status.ampdu_len = nframes;
1963 tx_info->status.ampdu_ack_len = nframes - nbad;
ebd02287
BS
1964 }
1965
db1a052b 1966 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
8a92e2ee 1967 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
f0c255a0
FF
1968 /*
1969 * If an underrun error is seen, treat it as an excessive
1970 * retry only if the max frame trigger level has been reached
1971 * (2 KB for single stream, and 4 KB for dual stream).
1972 * Adjust the long retry as if the frame was tried
1973 * hw->max_rate_tries times to affect how rate control updates
1974 * PER for the failed rate.
1975 * In case of congestion on the bus, penalizing this type of
1976 * underrun should help the hardware actually transmit new
1977 * frames successfully by eventually preferring slower rates.
1978 * This itself should also alleviate congestion on the bus.
1979 */
1980 if (ieee80211_is_data(hdr->frame_control) &&
1981 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1982 ATH9K_TX_DELIM_UNDERRUN)) &&
1983 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1984 tx_info->status.rates[tx_rateindex].count =
1985 hw->max_rate_tries;
f078f209 1986 }
8a92e2ee 1987
545750d3 1988 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
8a92e2ee 1989 tx_info->status.rates[i].count = 0;
545750d3
FF
1990 tx_info->status.rates[i].idx = -1;
1991 }
8a92e2ee 1992
78c4653a 1993 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
f078f209
LR
1994}
1995
e8324357 1996static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 1997{
cbe61d8a 1998 struct ath_hw *ah = sc->sc_ah;
c46917bb 1999 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2000 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2001 struct list_head bf_head;
e8324357 2002 struct ath_desc *ds;
29bffa96 2003 struct ath_tx_status ts;
0934af23 2004 int txok;
e8324357 2005 int status;
f078f209 2006
226afe68
JP
2007 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2008 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2009 txq->axq_link);
f078f209 2010
f078f209
LR
2011 for (;;) {
2012 spin_lock_bh(&txq->axq_lock);
f078f209
LR
2013 if (list_empty(&txq->axq_q)) {
2014 txq->axq_link = NULL;
86271e46 2015 if (sc->sc_flags & SC_OP_TXAGGR)
082f6536 2016 ath_txq_schedule(sc, txq);
f078f209
LR
2017 spin_unlock_bh(&txq->axq_lock);
2018 break;
2019 }
f078f209
LR
2020 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2021
e8324357
S
2022 /*
2023 * There is a race condition in which a BH gets scheduled
2024 * after sw writes TxE and before hw re-loads the last
2025 * descriptor to get the newly chained one.
2026 * Software must keep the last DONE descriptor as a
2027 * holding descriptor - software does so by marking
2028 * it with the STALE flag.
2029 */
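		/*
		 * [Editor's illustration] With the holding descriptor in
		 * place the queue looks like:
		 *
		 *   axq_q: [STALE holder] -> [bf] -> ... -> [lastbf]
		 *
		 * The STALE buffer has already been completed; it stays at
		 * the head only so the hardware can still follow its link
		 * pointer, and it is returned to the free list once its
		 * successor has been processed.
		 */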
2030 bf_held = NULL;
a119cc49 2031 if (bf->bf_stale) {
e8324357
S
2032 bf_held = bf;
2033 if (list_is_last(&bf_held->list, &txq->axq_q)) {
6ef9b13d 2034 spin_unlock_bh(&txq->axq_lock);
e8324357
S
2035 break;
2036 } else {
2037 bf = list_entry(bf_held->list.next,
6ef9b13d 2038 struct ath_buf, list);
e8324357 2039 }
f078f209
LR
2040 }
2041
2042 lastbf = bf->bf_lastbf;
e8324357 2043 ds = lastbf->bf_desc;
f078f209 2044
29bffa96
FF
2045 memset(&ts, 0, sizeof(ts));
2046 status = ath9k_hw_txprocdesc(ah, ds, &ts);
e8324357 2047 if (status == -EINPROGRESS) {
f078f209 2048 spin_unlock_bh(&txq->axq_lock);
e8324357 2049 break;
f078f209 2050 }
2dac4fb9 2051 TX_STAT_INC(txq->axq_qnum, txprocdesc);
f078f209 2052
e8324357
S
2053 /*
2054 * Remove ath_buf's of the same transmit unit from txq,
2055 * however leave the last descriptor back as the holding
2056 * descriptor for hw.
2057 */
a119cc49 2058 lastbf->bf_stale = true;
e8324357 2059 INIT_LIST_HEAD(&bf_head);
e8324357
S
2060 if (!list_is_singular(&lastbf->list))
2061 list_cut_position(&bf_head,
2062 &txq->axq_q, lastbf->list.prev);
f078f209 2063
e8324357 2064 txq->axq_depth--;
29bffa96 2065 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
164ace38 2066 txq->axq_tx_inprogress = false;
0a8cea84
FF
2067 if (bf_held)
2068 list_del(&bf_held->list);
4b3ba66a
FF
2069
2070 if (bf_is_ampdu_not_probing(bf))
2071 txq->axq_ampdu_depth--;
69081624 2072
e8324357 2073 spin_unlock_bh(&txq->axq_lock);
f078f209 2074
0a8cea84
FF
2075 if (bf_held)
2076 ath_tx_return_buffer(sc, bf_held);
f078f209 2077
e8324357
S
2078 if (!bf_isampdu(bf)) {
2079 /*
2080 * This frame is sent out as a single frame.
2081 * Use hardware retry status for this frame.
2082 */
29bffa96 2083 if (ts.ts_status & ATH9K_TXERR_XRETRY)
e8324357 2084 bf->bf_state.bf_type |= BUF_XRETRY;
0cdd5c60 2085 ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
e8324357 2086 }
f078f209 2087
e8324357 2088 if (bf_isampdu(bf))
c5992618
FF
2089 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2090 true);
e8324357 2091 else
29bffa96 2092 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
8469cdef 2093
059d806c 2094 spin_lock_bh(&txq->axq_lock);
60f2d1d5 2095
86271e46 2096 if (sc->sc_flags & SC_OP_TXAGGR)
e8324357
S
2097 ath_txq_schedule(sc, txq);
2098 spin_unlock_bh(&txq->axq_lock);
8469cdef
S
2099 }
2100}
2101
181fb18d
VN
2102static void ath_hw_pll_work(struct work_struct *work)
2103{
2104 struct ath_softc *sc = container_of(work, struct ath_softc,
2105 hw_pll_work.work);
2106 static int count;
2107
2108 if (AR_SREV_9485(sc->sc_ah)) {
2109 if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
2110 count++;
2111
2112 if (count == 3) {
2113 /* Rx is hung for more than 500ms. Reset it */
2114 ath_reset(sc, true);
2115 count = 0;
2116 }
2117 } else
2118 count = 0;
2119
2120 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
2121 }
2122}
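
/*
 * [Editor's sketch] ath_hw_pll_work() above is a consecutive-threshold
 * detector: it samples the PLL every HZ/5 (200 ms) and resets the chip
 * after three bad readings in a row. The detection logic in isolation
 * (the 0x40000 limit follows the function above):
 */
static bool sketch_pll_hung(u32 pll_sqsum, int *count)
{
	if (pll_sqsum >= 0x40000) {
		if (++(*count) == 3) {
			*count = 0;
			return true;	/* caller resets the chip */
		}
	} else {
		*count = 0;
	}

	return false;
}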
2123
305fe47f 2124static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2125{
2126 struct ath_softc *sc = container_of(work, struct ath_softc,
2127 tx_complete_work.work);
2128 struct ath_txq *txq;
2129 int i;
2130 bool needreset = false;
60f2d1d5
BG
2131#ifdef CONFIG_ATH9K_DEBUGFS
2132 sc->tx_complete_poll_work_seen++;
2133#endif
164ace38
SB
2134
2135 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2136 if (ATH_TXQ_SETUP(sc, i)) {
2137 txq = &sc->tx.txq[i];
2138 spin_lock_bh(&txq->axq_lock);
2139 if (txq->axq_depth) {
2140 if (txq->axq_tx_inprogress) {
2141 needreset = true;
2142 spin_unlock_bh(&txq->axq_lock);
2143 break;
2144 } else {
2145 txq->axq_tx_inprogress = true;
2146 }
60f2d1d5
BG
2147 } else {
2148 /* If the queue has pending buffers, then it
2149 * should be doing tx work (and have axq_depth).
2150 * We shouldn't get to this state,
2151 * but in practice we do.
2152 */
2153 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
2154 (txq->pending_frames > 0 ||
2155 !list_empty(&txq->axq_acq) ||
2156 txq->stopped)) {
2157 ath_err(ath9k_hw_common(sc->sc_ah),
2158 "txq: %p axq_qnum: %u,"
2159 " mac80211_qnum: %i"
2160 " axq_link: %p"
2161 " pending frames: %i"
2162 " axq_acq empty: %i"
2163 " stopped: %i"
2164 " axq_depth: 0 Attempting to"
2165 " restart tx logic.\n",
2166 txq, txq->axq_qnum,
2167 txq->mac80211_qnum,
2168 txq->axq_link,
2169 txq->pending_frames,
2170 list_empty(&txq->axq_acq),
2171 txq->stopped);
60f2d1d5
BG
2172 ath_txq_schedule(sc, txq);
2173 }
164ace38
SB
2174 }
2175 spin_unlock_bh(&txq->axq_lock);
2176 }
2177
2178 if (needreset) {
226afe68
JP
2179 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2180 "tx hung, resetting the chip\n");
fac6b6a0 2181 ath_reset(sc, true);
164ace38
SB
2182 }
2183
42935eca 2184 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2185 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2186}
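
/*
 * [Editor's note] The poll above implements a two-pass watchdog: each
 * pass sets axq_tx_inprogress on any queue with descriptors outstanding,
 * and ath_tx_processq() clears the flag whenever it reaps a completion.
 * If the flag is still set one ATH_TX_COMPLETE_POLL_INT later, no
 * completion happened in between and the chip is reset.
 */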
2187
2188
f078f209 2189
e8324357 2190void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2191{
e8324357
S
2192 int i;
2193 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
f078f209 2194
e8324357 2195 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
f078f209 2196
e8324357
S
2197 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2198 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2199 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2200 }
2201}
2202
e5003249
VT
2203void ath_tx_edma_tasklet(struct ath_softc *sc)
2204{
2205 struct ath_tx_status txs;
2206 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2207 struct ath_hw *ah = sc->sc_ah;
2208 struct ath_txq *txq;
2209 struct ath_buf *bf, *lastbf;
2210 struct list_head bf_head;
2211 int status;
2212 int txok;
2213
2214 for (;;) {
2215 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2216 if (status == -EINPROGRESS)
2217 break;
2218 if (status == -EIO) {
226afe68
JP
2219 ath_dbg(common, ATH_DBG_XMIT,
2220 "Error processing tx status\n");
e5003249
VT
2221 break;
2222 }
2223
2224 /* Skip beacon completions */
2225 if (txs.qid == sc->beacon.beaconq)
2226 continue;
2227
2228 txq = &sc->tx.txq[txs.qid];
2229
2230 spin_lock_bh(&txq->axq_lock);
2231 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2232 spin_unlock_bh(&txq->axq_lock);
2233 return;
2234 }
2235
2236 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2237 struct ath_buf, list);
2238 lastbf = bf->bf_lastbf;
2239
2240 INIT_LIST_HEAD(&bf_head);
2241 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2242 &lastbf->list);
2243 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2244 txq->axq_depth--;
2245 txq->axq_tx_inprogress = false;
4b3ba66a
FF
2246 if (bf_is_ampdu_not_probing(bf))
2247 txq->axq_ampdu_depth--;
e5003249
VT
2248 spin_unlock_bh(&txq->axq_lock);
2249
2250 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2251
2252 if (!bf_isampdu(bf)) {
e5003249
VT
2253 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2254 bf->bf_state.bf_type |= BUF_XRETRY;
0cdd5c60 2255 ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
e5003249
VT
2256 }
2257
2258 if (bf_isampdu(bf))
c5992618
FF
2259 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2260 txok, true);
e5003249
VT
2261 else
2262 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2263 &txs, txok, 0);
2264
2265 spin_lock_bh(&txq->axq_lock);
60f2d1d5 2266
86271e46
FF
2267 if (!list_empty(&txq->txq_fifo_pending)) {
2268 INIT_LIST_HEAD(&bf_head);
2269 bf = list_first_entry(&txq->txq_fifo_pending,
2270 struct ath_buf, list);
2271 list_cut_position(&bf_head,
2272 &txq->txq_fifo_pending,
2273 &bf->bf_lastbf->list);
2274 ath_tx_txqaddbuf(sc, txq, &bf_head);
2275 } else if (sc->sc_flags & SC_OP_TXAGGR)
2276 ath_txq_schedule(sc, txq);
2277
e5003249
VT
2278 spin_unlock_bh(&txq->axq_lock);
2279 }
2280}
2281
e8324357
S
2282/*****************/
2283/* Init, Cleanup */
2284/*****************/
f078f209 2285
5088c2f1
VT
2286static int ath_txstatus_setup(struct ath_softc *sc, int size)
2287{
2288 struct ath_descdma *dd = &sc->txsdma;
2289 u8 txs_len = sc->sc_ah->caps.txs_len;
2290
2291 dd->dd_desc_len = size * txs_len;
2292 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2293 &dd->dd_desc_paddr, GFP_KERNEL);
2294 if (!dd->dd_desc)
2295 return -ENOMEM;
2296
2297 return 0;
2298}
2299
2300static int ath_tx_edma_init(struct ath_softc *sc)
2301{
2302 int err;
2303
2304 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2305 if (!err)
2306 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2307 sc->txsdma.dd_desc_paddr,
2308 ATH_TXSTATUS_RING_SIZE);
2309
2310 return err;
2311}
2312
2313static void ath_tx_edma_cleanup(struct ath_softc *sc)
2314{
2315 struct ath_descdma *dd = &sc->txsdma;
2316
2317 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2318 dd->dd_desc_paddr);
2319}
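
/*
 * [Editor's sketch] The status ring above is coherent DMA: one
 * dma_alloc_coherent() at init, mirrored by a dma_free_coherent() with
 * the same length and handle at cleanup. The allocation half in
 * isolation (fields follow struct ath_descdma as used above):
 */
static int sketch_statusring_alloc(struct device *dev, struct ath_descdma *dd,
				   size_t len)
{
	dd->dd_desc_len = len;
	dd->dd_desc = dma_alloc_coherent(dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	return dd->dd_desc ? 0 : -ENOMEM;
}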
2320
e8324357 2321int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2322{
c46917bb 2323 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2324 int error = 0;
f078f209 2325
797fe5cb 2326 spin_lock_init(&sc->tx.txbuflock);
f078f209 2327
797fe5cb 2328 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2329 "tx", nbufs, 1, 1);
797fe5cb 2330 if (error != 0) {
3800276a
JP
2331 ath_err(common,
2332 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2333 goto err;
2334 }
f078f209 2335
797fe5cb 2336 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2337 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2338 if (error != 0) {
3800276a
JP
2339 ath_err(common,
2340 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2341 goto err;
2342 }
f078f209 2343
164ace38 2344 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
181fb18d 2345 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
164ace38 2346
5088c2f1
VT
2347 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2348 error = ath_tx_edma_init(sc);
2349 if (error)
2350 goto err;
2351 }
2352
797fe5cb 2353err:
e8324357
S
2354 if (error != 0)
2355 ath_tx_cleanup(sc);
f078f209 2356
e8324357 2357 return error;
f078f209
LR
2358}
2359
797fe5cb 2360void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2361{
2362 if (sc->beacon.bdma.dd_desc_len != 0)
2363 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2364
2365 if (sc->tx.txdma.dd_desc_len != 0)
2366 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2367
2368 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2369 ath_tx_edma_cleanup(sc);
e8324357 2370}
f078f209
LR
2371
2372void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2373{
c5170163
S
2374 struct ath_atx_tid *tid;
2375 struct ath_atx_ac *ac;
2376 int tidno, acno;
f078f209 2377
8ee5afbc 2378 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2379 tidno < WME_NUM_TID;
2380 tidno++, tid++) {
2381 tid->an = an;
2382 tid->tidno = tidno;
2383 tid->seq_start = tid->seq_next = 0;
2384 tid->baw_size = WME_MAX_BA;
2385 tid->baw_head = tid->baw_tail = 0;
2386 tid->sched = false;
e8324357 2387 tid->paused = false;
a37c2c79 2388 tid->state &= ~AGGR_CLEANUP;
c5170163 2389 INIT_LIST_HEAD(&tid->buf_q);
c5170163 2390 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2391 tid->ac = &an->ac[acno];
a37c2c79
S
2392 tid->state &= ~AGGR_ADDBA_COMPLETE;
2393 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2394 }
f078f209 2395
8ee5afbc 2396 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2397 acno < WME_NUM_AC; acno++, ac++) {
2398 ac->sched = false;
066dae93 2399 ac->txq = sc->tx.txq_map[acno];
c5170163 2400 INIT_LIST_HEAD(&ac->tid_q);
f078f209
LR
2401 }
2402}
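
/*
 * [Editor's worked example] TID_TO_WME_AC() above collapses the eight
 * QoS TIDs onto the four WME access categories, so several tid->ac
 * pointers share one ath_atx_ac. Under the usual 802.11e mapping:
 *
 *   TID 0,3 -> AC_BE   TID 1,2 -> AC_BK
 *   TID 4,5 -> AC_VI   TID 6,7 -> AC_VO
 */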
2403
b5aa9bf9 2404void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209 2405{
2b40994c
FF
2406 struct ath_atx_ac *ac;
2407 struct ath_atx_tid *tid;
f078f209 2408 struct ath_txq *txq;
066dae93 2409 int tidno;
e8324357 2410
2b40994c
FF
2411 for (tidno = 0, tid = &an->tid[tidno];
2412 tidno < WME_NUM_TID; tidno++, tid++) {
f078f209 2413
2b40994c 2414 ac = tid->ac;
066dae93 2415 txq = ac->txq;
f078f209 2416
2b40994c
FF
2417 spin_lock_bh(&txq->axq_lock);
2418
2419 if (tid->sched) {
2420 list_del(&tid->list);
2421 tid->sched = false;
2422 }
2423
2424 if (ac->sched) {
2425 list_del(&ac->list);
2426 tid->ac->sched = false;
f078f209 2427 }
2b40994c
FF
2428
2429 ath_tid_drain(sc, txq, tid);
2430 tid->state &= ~AGGR_ADDBA_COMPLETE;
2431 tid->state &= ~AGGR_CLEANUP;
2432
2433 spin_unlock_bh(&txq->axq_lock);
f078f209
LR
2434 }
2435}