[linux-2.6-block.git] drivers/net/wireless/ath/ath9k/xmit.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

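/*
 * Example: one OFDM symbol lasts 4 us with the full guard interval and
 * 3.6 us with the short GI, so SYMBOL_TIME(10) = 40 us while
 * SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us (the +4 makes the
 * integer division round up when 3.6 * _ns is fractional).
 */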
#define OFDM_SIFS_TIME          16

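/*
 * Data bits carried by one OFDM symbol for MCS 0-7 (single stream) at
 * 20 and 40 MHz; callers index this with (rix % 8) and multiply by the
 * stream count from HT_RC_2_STREAMS() to cover MCS 8-31.
 */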
static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {    26,   54 },     /*  0: BPSK */
        {    52,  108 },     /*  1: QPSK 1/2 */
        {    78,  162 },     /*  2: QPSK 3/4 */
        {   104,  216 },     /*  3: 16-QAM 1/2 */
        {   156,  324 },     /*  4: 16-QAM 3/4 */
        {   208,  432 },     /*  5: 64-QAM 2/3 */
        {   234,  486 },     /*  6: 64-QAM 3/4 */
        {   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_atx_tid *tid,
                                  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
                              struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
                             int nbad, int txok, bool update_rc);

enum {
        MCS_HT20,
        MCS_HT20_SGI,
        MCS_HT40,
        MCS_HT40_SGI,
};

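/*
 * Largest frame length (in bytes, capped at 65532) that still fits in
 * a 4 ms transmit burst, indexed by HT mode and MCS 0-31; used by
 * ath_lookup_rate() to bound the size of an aggregate.
 */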
static int ath_max_4ms_framelen[4][32] = {
        [MCS_HT20] = {
                3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
                6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
                9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
                12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
        },
        [MCS_HT20_SGI] = {
                3572,  7144, 10720, 14296,  21444,  28596,  32172,  35744,
                7140, 14284, 21428, 28568,  42856,  57144,  64288,  65532,
                10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
                14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
        },
        [MCS_HT40] = {
                6680, 13360, 20044, 26724,  40092,  53456,  60140,  65532,
                13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
                20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
                26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
        },
        [MCS_HT40_SGI] = {
                7420, 14844, 22272, 29696,  44544,  59396,  65532,  65532,
                14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
                22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
                29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
        }
};

/*********************/
/* Aggregation logic */
/*********************/

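/*
 * Mark a TID as having pending work: put it on its access category's
 * list and, if needed, put the AC on the queue's schedule list. Paused
 * or already-scheduled TIDs are left alone.
 */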
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        if (tid->paused)
                return;

        if (tid->sched)
                return;

        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);

        if (ac->sched)
                return;

        ac->sched = true;
        list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

        WARN_ON(!tid->paused);

        spin_lock_bh(&txq->axq_lock);
        tid->paused = false;

        if (list_empty(&tid->buf_q))
                goto unlock;

        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
        struct ath_buf *bf;
        struct list_head bf_head;
        INIT_LIST_HEAD(&bf_head);

        WARN_ON(!tid->paused);

        spin_lock_bh(&txq->axq_lock);
        tid->paused = false;

        while (!list_empty(&tid->buf_q)) {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                BUG_ON(bf_isretried(bf));
                list_move_tail(&bf->list, &bf_head);
                ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
        }

        spin_unlock_bh(&txq->axq_lock);
}

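/*
 * Release the block-ack window slot held by 'seqno' and slide
 * seq_start/baw_head forward past any leading slots that are now free.
 */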
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        tid->tx_buf[cindex] = NULL;

        while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
        }
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             struct ath_buf *bf)
{
        int index, cindex;

        if (bf_isretried(bf))
                return;

        index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        BUG_ON(tid->tx_buf[cindex] != NULL);
        tid->tx_buf[cindex] = bf;

        if (index >= ((tid->baw_tail - tid->baw_head) &
                      (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)

{
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                if (list_empty(&tid->buf_q))
                        break;

                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                list_move_tail(&bf->list, &bf_head);

                if (bf_isretried(bf))
                        ath_tx_update_baw(sc, tid, bf->bf_seqno);

                spin_unlock(&txq->axq_lock);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                spin_lock(&txq->axq_lock);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct ath_buf *bf)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;

        bf->bf_state.bf_type |= BUF_RETRY;
        bf->bf_retries++;
        TX_STAT_INC(txq->axq_qnum, a_retries);

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
        spin_lock_bh(&sc->tx.txbuflock);
        list_add_tail(&bf->list, &sc->tx.txbuf);
        spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        tbf = ath_tx_get_buffer(sc);
        if (WARN_ON(!tbf))
                return NULL;

        ATH_TXBUF_RESET(tbf);

        tbf->aphy = bf->aphy;
        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;
        tbf->bf_dmacontext = bf->bf_dmacontext;

        return tbf;
}

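/*
 * Completion handler for an A-MPDU: walk the subframe chain, complete
 * the subframes covered by the block-ack bitmap, software-retry or
 * fail the rest, and requeue anything still pending on the TID.
 */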
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head, bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;
        struct ieee80211_tx_rate rates[4];

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);
        hw = bf->aphy->hw;

        memcpy(rates, tx_info->control.rates, sizeof(rates));

        rcu_read_lock();

        /* XXX: use ieee80211_find_sta! */
        sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
        if (!sta) {
                rcu_read_unlock();

                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;

                        bf->bf_state.bf_type |= BUF_XRETRY;
                        if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
                            !bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);

                        ath_tx_rc_status(bf, ts, 0, 0, false);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            0, 0);

                        bf = bf_next;
                }
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tid = ATH_AN_2_TID(an, bf->bf_tidno);

        /*
         * The hardware occasionally sends a tx status for the wrong TID.
         * In this case, the BA status cannot be considered valid and all
         * subframes need to be retransmitted
         */
        if (bf->bf_tidno != ts->tid)
                txok = false;

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when a BA
                         * issue happens. The chip needs to be reset,
                         * but AP code may have synchronization issues
                         * when performing an internal reset in this
                         * routine. Only enable reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        INIT_LIST_HEAD(&bf_pending);
        INIT_LIST_HEAD(&bf_head);

        nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
        while (bf) {
                txfail = txpending = 0;
                bf_next = bf->bf_next;

                skb = bf->bf_mpdu;
                tx_info = IEEE80211_SKB_CB(skb);

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else {
                        if (!(tid->state & AGGR_CLEANUP) &&
                            !bf_last->bf_tx_aborted) {
                                if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
                                        ath_tx_set_retry(sc, txq, bf);
                                        txpending = 1;
                                } else {
                                        bf->bf_state.bf_type |= BUF_XRETRY;
                                        txfail = 1;
                                        sendbar = 1;
                                        txfail_cnt++;
                                }
                        } else {
                                /*
                                 * cleanup in progress, just fail
                                 * the un-acked sub-frames
                                 */
                                txfail = 1;
                        }
                }

                if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
                    bf_next == NULL) {
                        /*
                         * Make sure the last desc is reclaimed if it
                         * is not a holding desc.
                         */
                        if (!bf_last->bf_stale)
                                list_move_tail(&bf->list, &bf_head);
                        else
                                INIT_LIST_HEAD(&bf_head);
                } else {
                        BUG_ON(list_empty(bf_q));
                        list_move_tail(&bf->list, &bf_head);
                }

                if (!txpending) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        spin_lock_bh(&txq->axq_lock);
                        ath_tx_update_baw(sc, tid, bf->bf_seqno);
                        spin_unlock_bh(&txq->axq_lock);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
                                ath_tx_rc_status(bf, ts, nbad, txok, true);
                                rc_update = false;
                        } else {
                                ath_tx_rc_status(bf, ts, nbad, txok, false);
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            !txfail, sendbar);
                } else {
                        /* retry the un-acked ones */
                        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
                                if (bf->bf_next == NULL && bf_last->bf_stale) {
                                        struct ath_buf *tbf;

                                        tbf = ath_clone_txbuf(sc, bf_last);
                                        /*
                                         * Update tx baw and complete the
                                         * frame with failed status if we
                                         * run out of tx buf.
                                         */
                                        if (!tbf) {
                                                spin_lock_bh(&txq->axq_lock);
                                                ath_tx_update_baw(sc, tid,
                                                                  bf->bf_seqno);
                                                spin_unlock_bh(&txq->axq_lock);

                                                bf->bf_state.bf_type |=
                                                        BUF_XRETRY;
                                                ath_tx_rc_status(bf, ts, nbad,
                                                                 0, false);
                                                ath_tx_complete_buf(sc, bf, txq,
                                                                    &bf_head,
                                                                    ts, 0, 0);
                                                break;
                                        }

                                        ath9k_hw_cleartxdesc(sc->sc_ah,
                                                             tbf->bf_desc);
                                        list_add_tail(&tbf->list, &bf_head);
                                } else {
                                        /*
                                         * Clear descriptor status words for
                                         * software retry
                                         */
                                        ath9k_hw_cleartxdesc(sc->sc_ah,
                                                             bf->bf_desc);
                                }
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        list_splice_tail_init(&bf_head, &bf_pending);
                }

                bf = bf_next;
        }

        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!list_empty(&bf_pending)) {
                spin_lock_bh(&txq->axq_lock);
                list_splice(&bf_pending, &tid->buf_q);
                ath_tx_queue_tid(txq, tid);
                spin_unlock_bh(&txq->axq_lock);
        }

        if (tid->state & AGGR_CLEANUP) {
                if (tid->baw_head == tid->baw_tail) {
                        tid->state &= ~AGGR_ADDBA_COMPLETE;
                        tid->state &= ~AGGR_CLEANUP;

                        /* send buffered frames as singles */
                        ath_tx_flush_tid(sc, tid);
                }
                rcu_read_unlock();
                return;
        }

        rcu_read_unlock();

        if (needreset)
                ath_reset(sc, false);
}

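/*
 * Example: a single-stream rate of MCS 7, HT20, full GI allows at most
 * ath_max_4ms_framelen[MCS_HT20][7] = 32172 bytes per 4 ms burst;
 * ath_lookup_rate() below takes the minimum such bound over the whole
 * rate series and then clamps it to the peer's maximum A-MPDU size.
 */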
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, legacy = 0;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms transmit duration.
         * TODO - TXOP limit needs to be considered.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                if (rates[i].count) {
                        int modeidx;
                        if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                                legacy = 1;
                                break;
                        }

                        if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                                modeidx = MCS_HT40;
                        else
                                modeidx = MCS_HT20;

                        if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                                modeidx++;

                        frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
                        max_4ms_framelen = min(max_4ms_framelen, frmlen);
                }
        }

        /*
         * Limit aggregate size by the minimum rate if the selected rate is
         * not a probe rate; if it is a probe rate, avoid aggregation of
         * this packet.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
                aggr_limit = min((max_4ms_framelen * 3) / 8,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        else
                aggr_limit = min(max_4ms_framelen,
                                 (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * h/w can accept aggregates up to 16 bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, streams, half_gi, ndelim, mindelim;

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption is enabled, hardware requires some more padding
         * between subframes.
         * TODO - this could be improved to be dependent on the rate.
         *        The hardware can keep up at lower rates, but not higher rates
         */
        if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Convert desired mpdu density from microseconds to bytes based
         * on highest rate in rate series (i.e. first rate) to determine
         * required minimum length for subframe. Take into account
         * whether high rate is 20 or 40Mhz and half or full GI.
         *
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */

        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        streams = HT_RC_2_STREAMS(rix);
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        if (frmlen < minlen) {
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}

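/*
 * Example: an MPDU density of 8 us with a first rate of MCS 7, HT20,
 * full GI gives nsymbols = 8 / 4 = 2 and nsymbits = 260, so each
 * subframe must span at least (2 * 260) / 8 = 65 bytes; anything
 * shorter is padded out with extra delimiters.
 */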
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *bf_first, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

        bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

        do {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

                if (nframes &&
                    (aggr_limit < (al + bpad + al_delta + prev_al))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }
                nframes++;

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                bf->bf_next = NULL;
                ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

                /* link buffers of this frame to the aggregate */
                ath_tx_addto_baw(sc, tid, bf);
                ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
                list_move_tail(&bf->list, bf_q);
                if (bf_prev) {
                        bf_prev->bf_next = bf;
                        ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
                                               bf->bf_daddr);
                }
                bf_prev = bf;

        } while (!list_empty(&tid->buf_q));

        bf_first->bf_al = al;
        bf_first->bf_nframes = nframes;

        return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct list_head bf_q;

        do {
                if (list_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

                /* if only one frame, send as non-aggregate */
                if (bf->bf_nframes == 1) {
                        bf->bf_state.bf_type &= ~BUF_AGGR;
                        ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
                        ath_buf_set_rate(sc, bf);
                        ath_tx_txqaddbuf(sc, txq, &bf_q);
                        continue;
                }

                /* setup first desc of aggregate */
                bf->bf_state.bf_type |= BUF_AGGR;
                ath_buf_set_rate(sc, bf);
                ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

                /* anchor last desc of aggregate */
                ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

                ath_tx_txqaddbuf(sc, txq, &bf_q);
                TX_STAT_INC(txq->axq_qnum, a_aggr);

        } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

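/*
 * The three entry points below are driven by mac80211's ADDBA
 * handling: ath_tx_aggr_start() pauses the TID and reports the
 * starting sequence number, ath_tx_aggr_stop() drains software-retried
 * frames and marks the TID for cleanup, and ath_tx_aggr_resume()
 * re-enables transmission once the exchange has completed.
 */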
void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                       u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);
        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
        *ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
        struct ath_tx_status ts;
        struct ath_buf *bf;
        struct list_head bf_head;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        /* drop all software retried frames and mark this TID */
        spin_lock_bh(&txq->axq_lock);
        txtid->paused = true;
        while (!list_empty(&txtid->buf_q)) {
                bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
                if (!bf_isretried(bf)) {
                        /*
                         * NB: it's based on the assumption that
                         * software retried frame will always stay
                         * at the head of software queue.
                         */
                        break;
                }
                list_move_tail(&bf->list, &bf_head);
                ath_tx_update_baw(sc, txtid, bf->bf_seqno);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }
        spin_unlock_bh(&txq->axq_lock);

        if (txtid->baw_head != txtid->baw_tail) {
                txtid->state |= AGGR_CLEANUP;
        } else {
                txtid->state &= ~AGGR_ADDBA_COMPLETE;
                ath_tx_flush_tid(sc, txtid);
        }
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        if (sc->sc_flags & SC_OP_TXAGGR) {
                txtid = ATH_AN_2_TID(an, tid);
                txtid->baw_size =
                        IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
                txtid->state |= AGGR_ADDBA_COMPLETE;
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                ath_tx_resume_tid(sc, txtid);
        }
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
        struct ath_atx_tid *txtid;

        if (!(sc->sc_flags & SC_OP_TXAGGR))
                return false;

        txtid = ATH_AN_2_TID(an, tidno);

        if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
                return true;
        return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
                                          struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp;
        struct ath_atx_tid *tid, *tid_tmp;

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                list_del(&ac->list);
                ac->sched = false;
                list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
                        list_del(&tid->list);
                        tid->sched = false;
                        ath_tid_drain(sc, txq, tid);
                }
        }
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info qi;
        int qnum, i;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype;
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_physCompBuf = 0;

        /*
         * Enable interrupts only for EOL and DESC conditions.
         * We mark tx descriptors to receive a DESC interrupt
         * when a tx queue gets deep; otherwise waiting for the
         * EOL to reap descriptors. Note that this is done to
         * reduce interrupt load and this only defers reaping
         * descriptors, never transmitting frames. Aside from
         * reducing interrupts this also permits more concurrency.
         * The only potential downside is if the tx queue backs
         * up in which case the top half of the kernel may backup
         * due to a lack of tx descriptors.
         *
         * The UAPSD queue is an exception, since we take a desc-
         * based intr on the EOSP frames.
         */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
                                TXQ_FLAG_TXERRINT_ENABLE;
        } else {
                if (qtype == ATH9K_TX_QUEUE_UAPSD)
                        qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
                else
                        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                        TXQ_FLAG_TXDESCINT_ENABLE;
        }
        qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
        if (qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
        if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
                ath_print(common, ATH_DBG_FATAL,
                          "qnum %u out of range, max %u!\n",
                          qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
                ath9k_hw_releasetxqueue(ah, qnum);
                return NULL;
        }
        if (!ATH_TXQ_SETUP(sc, qnum)) {
                struct ath_txq *txq = &sc->tx.txq[qnum];

                txq->axq_class = subtype;
                txq->axq_qnum = qnum;
                txq->axq_link = NULL;
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
                spin_lock_init(&txq->axq_lock);
                txq->axq_depth = 0;
                txq->axq_tx_inprogress = false;
                sc->tx.txqsetup |= 1<<qnum;

                txq->txq_headidx = txq->txq_tailidx = 0;
                for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
                        INIT_LIST_HEAD(&txq->txq_fifo[i]);
                INIT_LIST_HEAD(&txq->txq_fifo_pending);
        }
        return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *qinfo)
{
        struct ath_hw *ah = sc->sc_ah;
        int error = 0;
        struct ath9k_tx_queue_info qi;

        if (qnum == sc->beacon.beaconq) {
                /*
                 * XXX: for beacon queue, we just save the parameter.
                 * It will be picked up by ath_beaconq_config when
                 * it's necessary.
                 */
                sc->beacon.beacon_qi = *qinfo;
                return 0;
        }

        BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

        ath9k_hw_get_txq_props(ah, qnum, &qi);
        qi.tqi_aifs = qinfo->tqi_aifs;
        qi.tqi_cwmin = qinfo->tqi_cwmin;
        qi.tqi_cwmax = qinfo->tqi_cwmax;
        qi.tqi_burstTime = qinfo->tqi_burstTime;
        qi.tqi_readyTime = qinfo->tqi_readyTime;

        if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                          "Unable to update hardware queue %u!\n", qnum);
                error = -EIO;
        } else {
                ath9k_hw_resettxqueue(ah, qnum);
        }

        return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
        struct ath9k_tx_queue_info qi;
        int qnum = sc->beacon.cabq->axq_qnum;

        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
        /*
         * Ensure the readytime % is within the bounds.
         */
        if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
        else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

        qi.tqi_readyTime = (sc->beacon_interval *
                            sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);

        return 0;
}

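/*
 * Example: with a beacon interval of 100 TU and cabqReadytime at 10%,
 * the CAB queue gets (100 * 10) / 100 = 10 TU after each beacon to
 * drain buffered multicast/broadcast frames.
 */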
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                spin_lock_bh(&txq->axq_lock);

                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                        if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
                                txq->txq_headidx = txq->txq_tailidx = 0;
                                spin_unlock_bh(&txq->axq_lock);
                                break;
                        } else {
                                bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
                                                      struct ath_buf, list);
                        }
                } else {
                        if (list_empty(&txq->axq_q)) {
                                txq->axq_link = NULL;
                                spin_unlock_bh(&txq->axq_lock);
                                break;
                        }
                        bf = list_first_entry(&txq->axq_q, struct ath_buf,
                                              list);

                        if (bf->bf_stale) {
                                list_del(&bf->list);
                                spin_unlock_bh(&txq->axq_lock);

                                ath_tx_return_buffer(sc, bf);
                                continue;
                        }
                }

                lastbf = bf->bf_lastbf;
                if (!retry_tx)
                        lastbf->bf_tx_aborted = true;

                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                        list_cut_position(&bf_head,
                                          &txq->txq_fifo[txq->txq_tailidx],
                                          &lastbf->list);
                        INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
                } else {
                        /* remove ath_buf's of the same mpdu from txq */
                        list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
                }

                txq->axq_depth--;

                spin_unlock_bh(&txq->axq_lock);

                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }

        spin_lock_bh(&txq->axq_lock);
        txq->axq_tx_inprogress = false;
        spin_unlock_bh(&txq->axq_lock);

        /* flush any pending frames if aggregation is enabled */
        if (sc->sc_flags & SC_OP_TXAGGR) {
                if (!retry_tx) {
                        spin_lock_bh(&txq->axq_lock);
                        ath_txq_drain_pending_buffers(sc, txq);
                        spin_unlock_bh(&txq->axq_lock);
                }
        }

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                spin_lock_bh(&txq->axq_lock);
                while (!list_empty(&txq->txq_fifo_pending)) {
                        bf = list_first_entry(&txq->txq_fifo_pending,
                                              struct ath_buf, list);
                        list_cut_position(&bf_head,
                                          &txq->txq_fifo_pending,
                                          &bf->bf_lastbf->list);
                        spin_unlock_bh(&txq->axq_lock);

                        if (bf_isampdu(bf))
                                ath_tx_complete_aggr(sc, txq, bf, &bf_head,
                                                     &ts, 0);
                        else
                                ath_tx_complete_buf(sc, bf, txq, &bf_head,
                                                    &ts, 0, 0);
                        spin_lock_bh(&txq->axq_lock);
                }
                spin_unlock_bh(&txq->axq_lock);
        }
}

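/*
 * Stop DMA on the beacon and data queues; if any queue refuses to
 * stop, reset the chip, then hand every configured queue to
 * ath_draintxq() for software cleanup.
 */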
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        int i, npend = 0;

        if (sc->sc_flags & SC_OP_INVALID)
                return;

        /* Stop beacon queue */
        ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

        /* Stop data queues */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i)) {
                        txq = &sc->tx.txq[i];
                        ath9k_hw_stoptxdma(ah, txq->axq_qnum);
                        npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
                }
        }

        if (npend) {
                int r;

                ath_print(common, ATH_DBG_FATAL,
                          "Failed to stop TX DMA. Resetting hardware!\n");

                spin_lock_bh(&sc->sc_resetlock);
                r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
                if (r)
                        ath_print(common, ATH_DBG_FATAL,
                                  "Unable to reset hardware; reset status %d\n",
                                  r);
                spin_unlock_bh(&sc->sc_resetlock);
        }

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i))
                        ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
        }
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
        ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_atx_ac *ac;
        struct ath_atx_tid *tid;

        if (list_empty(&txq->axq_acq))
                return;

        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        list_del(&ac->list);
        ac->sched = false;

        do {
                if (list_empty(&ac->tid_q))
                        return;

                tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
                list_del(&tid->list);
                tid->sched = false;

                if (tid->paused)
                        continue;

                ath_tx_sched_aggr(sc, txq, tid);

                /*
                 * add tid to round-robin queue if more frames
                 * are pending for the tid
                 */
                if (!list_empty(&tid->buf_q))
                        ath_tx_queue_tid(txq, tid);

                break;
        } while (!list_empty(&ac->tid_q));

        if (!list_empty(&ac->tid_q)) {
                if (!ac->sched) {
                        ac->sched = true;
                        list_add_tail(&ac->list, &txq->axq_acq);
                }
        }
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
        struct ath_txq *txq;

        if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                          "HAL AC %u out of range, max %zu!\n",
                          haltype, ARRAY_SIZE(sc->tx.hwq_map));
                return 0;
        }
        txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
        if (txq != NULL) {
                sc->tx.hwq_map[haltype] = txq->axq_qnum;
                return 1;
        } else
                return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf;

        /*
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */

        if (list_empty(head))
                return;

        bf = list_first_entry(head, struct ath_buf, list);

        ath_print(common, ATH_DBG_QUEUE,
                  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
                        list_splice_tail_init(head, &txq->txq_fifo_pending);
                        return;
                }
                if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
                        ath_print(common, ATH_DBG_XMIT,
                                  "Initializing tx fifo %d which "
                                  "is non-empty\n",
                                  txq->txq_headidx);
                INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
                list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
                INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_print(common, ATH_DBG_XMIT,
                          "TXDP[%u] = %llx (%p)\n",
                          txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        } else {
                list_splice_tail_init(head, &txq->axq_q);

                if (txq->axq_link == NULL) {
                        ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                        ath_print(common, ATH_DBG_XMIT,
                                  "TXDP[%u] = %llx (%p)\n",
                                  txq->axq_qnum, ito64(bf->bf_daddr),
                                  bf->bf_desc);
                } else {
                        *txq->axq_link = bf->bf_daddr;
                        ath_print(common, ATH_DBG_XMIT,
                                  "link[%u] (%p)=%llx (%p)\n",
                                  txq->axq_qnum, txq->axq_link,
                                  ito64(bf->bf_daddr), bf->bf_desc);
                }
                ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
                                       &txq->axq_link);
                ath9k_hw_txstart(ah, txq->axq_qnum);
        }
        txq->axq_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                              struct list_head *bf_head,
                              struct ath_tx_control *txctl)
{
        struct ath_buf *bf;

        bf = list_first_entry(bf_head, struct ath_buf, list);
        bf->bf_state.bf_type |= BUF_AMPDU;
        TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

        /*
         * Do not queue to h/w when any of the following conditions is true:
         * - there are pending frames in software queue
         * - the TID is currently paused for ADDBA/BAR request
         * - seqno is not within block-ack window
         * - h/w queue depth exceeds low water mark
         */
        if (!list_empty(&tid->buf_q) || tid->paused ||
            !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
            txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
                /*
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
                list_move_tail(&bf->list, &tid->buf_q);
                ath_tx_queue_tid(txctl->txq, tid);
                return;
        }

        /* Add sub-frame to BAW */
        ath_tx_addto_baw(sc, tid, bf);

        /* Queue to h/w without aggregation */
        bf->bf_nframes = 1;
        bf->bf_lastbf = bf;
        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_atx_tid *tid,
                                  struct list_head *bf_head)
{
        struct ath_buf *bf;

        bf = list_first_entry(bf_head, struct ath_buf, list);
        bf->bf_state.bf_type &= ~BUF_AMPDU;

        /* update starting sequence number for subsequent ADDBA request */
        INCR(tid->seq_start, IEEE80211_SEQ_MAX);

        bf->bf_nframes = 1;
        bf->bf_lastbf = bf;
        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txq, bf_head);
        TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct list_head *bf_head)
{
        struct ath_buf *bf;

        bf = list_first_entry(bf_head, struct ath_buf, list);

        bf->bf_lastbf = bf;
        bf->bf_nframes = 1;
        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txq, bf_head);
        TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        enum ath9k_pkt_type htype;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_beacon(fc))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if (ieee80211_is_probe_resp(fc))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else if (ieee80211_is_atim(fc))
                htype = ATH9K_PKT_TYPE_ATIM;
        else if (ieee80211_is_pspoll(fc))
                htype = ATH9K_PKT_TYPE_PSPOLL;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
                                  struct ath_buf *bf)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr;
        struct ath_node *an;
        struct ath_atx_tid *tid;
        __le16 fc;
        u8 *qc;

        if (!tx_info->control.sta)
                return;

        an = (struct ath_node *)tx_info->control.sta->drv_priv;
        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);
                bf->bf_tidno = qc[0] & 0xf;
        }

        /*
         * For HT capable stations, we save tidno for later use.
         * We also override seqno set by upper layer with the one
         * in tx aggregation state.
         */
        tid = ATH_AN_2_TID(an, bf->bf_tidno);
        hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
        bf->bf_seqno = tid->seq_next;
        INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        int flags = 0;

        flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
        flags |= ATH9K_TXDESC_INTREQ;

        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                flags |= ATH9K_TXDESC_NOACK;

        if (use_ldpc)
                flags |= ATH9K_TXDESC_LDPC;

        return flags;
}

1460/*
1461 * rix - rate index
1462 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1463 * width - 0 for 20 MHz, 1 for 40 MHz
1464 * half_gi - to use 4us v/s 3.6 us for symbol time
1465 */
1466static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1467 int width, int half_gi, bool shortPreamble)
1468{
e8324357 1469 u32 nbits, nsymbits, duration, nsymbols;
e8324357
S
1470 int streams, pktlen;
1471
1472 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
e8324357
S
1473
1474 /* find number of symbols: PLCP + data */
c6663876 1475 streams = HT_RC_2_STREAMS(rix);
e8324357 1476 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
c6663876 1477 nsymbits = bits_per_symbol[rix % 8][width] * streams;
e8324357
S
1478 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1479
1480 if (!half_gi)
1481 duration = SYMBOL_TIME(nsymbols);
1482 else
1483 duration = SYMBOL_TIME_HALFGI(nsymbols);
1484
1485 /* addup duration for legacy/ht training and signal fields */
e8324357
S
1486 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1487
1488 return duration;
1489}
1490
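/*
 * Example: a 1500-byte frame at MCS 7, HT20, full GI, one stream:
 * nbits = 1500 * 8 + 22 = 12022, nsymbits = 260, nsymbols = 47, so the
 * payload takes SYMBOL_TIME(47) = 188 us; the 36 us of training and
 * signal fields brings the packet duration to 224 us.
 */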
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath9k_11n_rate_series series[4];
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
        int i, flags = 0;
        u8 rix = 0, ctsrate = 0;
        bool is_pspoll;

        memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;
        hdr = (struct ieee80211_hdr *)skb->data;
        is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

        /*
         * We check if Short Preamble is needed for the CTS rate by
         * checking the BSS's global flag.
         * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
         */
        rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
        ctsrate = rate->hw_value;
        if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
                ctsrate |= rate->hw_value_short;

        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
                int phy;

                if (!rates[i].count || (rates[i].idx < 0))
                        continue;

                rix = rates[i].idx;
                series[i].Tries = rates[i].count;
                series[i].ChSel = common->tx_chainmask;

                if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
                    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
                        series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        flags |= ATH9K_TXDESC_RTSENA;
                } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                        series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        flags |= ATH9K_TXDESC_CTSENA;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        series[i].RateFlags |= ATH9K_RATESERIES_2040;
                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

                is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
                is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
                is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

                if (rates[i].flags & IEEE80211_TX_RC_MCS) {
                        /* MCS rates */
                        series[i].Rate = rix | 0x80;
                        series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
                                 is_40, is_sgi, is_sp);
                        if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
                                series[i].RateFlags |= ATH9K_RATESERIES_STBC;
                        continue;
                }

                /* legacy rates */
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;

                rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                series[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                                series[i].Rate |= rate->hw_value_short;
                } else {
                        is_sp = false;
                }

                series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
                        phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
        }

        /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
        if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
                flags &= ~ATH9K_TXDESC_RTSENA;

        /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
        if (flags & ATH9K_TXDESC_RTSENA)
                flags &= ~ATH9K_TXDESC_CTSENA;

        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
                                     bf->bf_lastbf->bf_desc,
                                     !is_pspoll, ctsrate,
                                     0, series, 4, flags);

        if (sc->config.ath_aggr_prot && flags)
                ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
                               struct sk_buff *skb,
                               struct ath_tx_control *txctl)
{
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        int hdrlen;
        __le16 fc;
        int padpos, padsize;
        bool use_ldpc = false;

        tx_info->pad[0] = 0;
        switch (txctl->frame_type) {
        case ATH9K_IFT_NOT_INTERNAL:
                break;
        case ATH9K_IFT_PAUSE:
                tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
                /* fall through */
        case ATH9K_IFT_UNPAUSE:
                tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
                break;
        }
        hdrlen = ieee80211_get_hdrlen_from_skb(skb);
        fc = hdr->frame_control;

        ATH_TXBUF_RESET(bf);

        bf->aphy = aphy;
        bf->bf_frmlen = skb->len + FCS_LEN;
        /* Remove the padding size from bf_frmlen, if any */
        padpos = ath9k_cmn_padpos(hdr->frame_control);
        padsize = padpos & 3;
        if (padsize && skb->len > padpos + padsize) {
                bf->bf_frmlen -= padsize;
        }

        if (!txctl->paprd && conf_is_ht(&hw->conf)) {
                bf->bf_state.bf_type |= BUF_HT;
                if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
                        use_ldpc = true;
        }

        bf->bf_state.bfs_paprd = txctl->paprd;
        if (txctl->paprd)
                bf->bf_state.bfs_paprd_timestamp = jiffies;
        bf->bf_flags = setup_tx_flags(skb, use_ldpc);

        bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
        if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
                bf->bf_frmlen += tx_info->control.hw_key->icv_len;
                bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
        } else {
                bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
        }

        if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
            (sc->sc_flags & SC_OP_TXAGGR))
                assign_aggr_tid_seqno(skb, bf);

        bf->bf_mpdu = skb;

        bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
                                           skb->len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
                bf->bf_mpdu = NULL;
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                          "dma_mapping_error() on TX\n");
                return -ENOMEM;
        }

        bf->bf_buf_addr = bf->bf_dmacontext;

        /* tag if this is a nullfunc frame to enable PS when AP acks it */
        if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
                bf->bf_isnullfunc = true;
                sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
        } else
                bf->bf_isnullfunc = false;

        bf->bf_tx_aborted = false;

        return 0;
}

1685/* FIXME: tx power */
1686static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
528f0c6b
S
1687 struct ath_tx_control *txctl)
1688{
a22be22a 1689 struct sk_buff *skb = bf->bf_mpdu;
528f0c6b 1690 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c37452b0 1691 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
528f0c6b
S
1692 struct ath_node *an = NULL;
1693 struct list_head bf_head;
1694 struct ath_desc *ds;
1695 struct ath_atx_tid *tid;
cbe61d8a 1696 struct ath_hw *ah = sc->sc_ah;
528f0c6b 1697 int frm_type;
c37452b0 1698 __le16 fc;
528f0c6b 1699
528f0c6b 1700 frm_type = get_hw_packet_type(skb);
c37452b0 1701 fc = hdr->frame_control;
528f0c6b
S
1702
1703 INIT_LIST_HEAD(&bf_head);
1704 list_add_tail(&bf->list, &bf_head);
f078f209 1705
f078f209 1706 ds = bf->bf_desc;
87d5efbb 1707 ath9k_hw_set_desc_link(ah, ds, 0);
f078f209 1708
528f0c6b
S
1709 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1710 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1711
1712 ath9k_hw_filltxdesc(ah, ds,
8f93b8b3
S
1713 skb->len, /* segment length */
1714 true, /* first segment */
1715 true, /* last segment */
3f3a1c80 1716 ds, /* first descriptor */
cc610ac0
VT
1717 bf->bf_buf_addr,
1718 txctl->txq->axq_qnum);
f078f209 1719
9f42c2b6
FF
1720 if (bf->bf_state.bfs_paprd)
1721 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1722
528f0c6b 1723 spin_lock_bh(&txctl->txq->axq_lock);
f078f209 1724
f1617967
JL
1725 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1726 tx_info->control.sta) {
1727 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1728 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1729
c37452b0
S
1730 if (!ieee80211_is_data_qos(fc)) {
1731 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1732 goto tx_done;
1733 }
1734
4fdec031 1735 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
f078f209
LR
1736 /*
1737 * Try aggregation if it's a unicast data frame
1738 * and the destination is HT capable.
1739 */
528f0c6b 1740 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
f078f209
LR
1741 } else {
1742 /*
528f0c6b
S
1743 * Send this frame as regular when ADDBA
1744 * exchange is neither complete nor pending.
f078f209 1745 */
c37452b0
S
1746 ath_tx_send_ht_normal(sc, txctl->txq,
1747 tid, &bf_head);
f078f209
LR
1748 }
1749 } else {
c37452b0 1750 ath_tx_send_normal(sc, txctl->txq, &bf_head);
f078f209 1751 }
528f0c6b 1752
c37452b0 1753tx_done:
528f0c6b 1754 spin_unlock_bh(&txctl->txq->axq_lock);
f078f209
LR
1755}
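
/*
 * Editor's note (illustrative summary, not in the original source): for an
 * HT station with aggregation enabled the dispatch above has three outcomes:
 *   - non-QoS data          -> ath_tx_send_normal()    (no TID bookkeeping)
 *   - QoS data, AMPDU flag  -> ath_tx_send_ampdu()     (queued on the TID)
 *   - QoS data, no AMPDU    -> ath_tx_send_ht_normal() (ADDBA not complete)
 * Everything else (non-HT, or no station context) takes the plain
 * ath_tx_send_normal() path.
 */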

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                 struct ath_tx_control *txctl)
{
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq = txctl->txq;
        struct ath_buf *bf;
        int q, r;

        bf = ath_tx_get_buffer(sc);
        if (!bf) {
                ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
                return -1;
        }

        r = ath_tx_setup_buffer(hw, bf, skb, txctl);
        if (unlikely(r)) {
                ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

                /* upon ath_tx_processq() this TX queue will be resumed, we
                 * guarantee this will happen by knowing beforehand that
                 * we will at least have to run TX completion on one buffer
                 * on the queue */
                spin_lock_bh(&txq->axq_lock);
                if (!txq->stopped && txq->axq_depth > 1) {
                        ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
                        txq->stopped = 1;
                }
                spin_unlock_bh(&txq->axq_lock);

                ath_tx_return_buffer(sc, bf);

                return r;
        }

        q = skb_get_queue_mapping(skb);
        if (q >= 4)
                q = 0;

        spin_lock_bh(&txq->axq_lock);
        if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
                ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
                txq->stopped = 1;
        }
        spin_unlock_bh(&txq->axq_lock);
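
        /*
         * Editor's note (illustrative, not in the original source): mac80211
         * hands us one of four queue mappings (VO/VI/BE/BK); anything out of
         * range is clamped to 0 above. pending_frames[q] counts frames that
         * have been handed to the driver but not yet completed; once it
         * exceeds ATH_MAX_QDEPTH the mac80211 queue is stopped here, and
         * ath_wake_mac80211_queue() restarts it from the completion path
         * when the count drops back below the threshold.
         */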

        ath_tx_start_dma(sc, bf, txctl);

        return 0;
}

void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        int padpos, padsize;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ath_tx_control txctl;

        memset(&txctl, 0, sizeof(struct ath_tx_control));

        /*
         * As a temporary workaround, assign seq# here; this will likely need
         * to be cleaned up to work better with Beacon transmission and virtual
         * BSSes.
         */
        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
                if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
                        sc->tx.seq_no += 0x10;
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
        }
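
        /*
         * Editor's note (illustrative, not in the original source): in the
         * 16-bit Sequence Control field the low four bits carry the fragment
         * number and bits 4-15 the sequence number, so "seq_no += 0x10"
         * advances the sequence number by exactly one. The &= with
         * IEEE80211_SCTL_FRAG keeps the fragment bits and clears the old
         * sequence number before the new one is OR-ed in.
         */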

        /* Add the padding after the header if this is not already done */
        padpos = ath9k_cmn_padpos(hdr->frame_control);
        padsize = padpos & 3;
        if (padsize && skb->len > padpos) {
                if (skb_headroom(skb) < padsize) {
                        ath_print(common, ATH_DBG_XMIT,
                                  "TX CABQ padding failed\n");
                        dev_kfree_skb_any(skb);
                        return;
                }
                skb_push(skb, padsize);
                memmove(skb->data, skb->data + padsize, padpos);
        }

        txctl.txq = sc->beacon.cabq;

        ath_print(common, ATH_DBG_XMIT,
                  "transmitting CABQ packet, skb: %p\n", skb);

        if (ath_tx_start(hw, skb, &txctl) != 0) {
                ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
                goto exit;
        }

        return;
exit:
        dev_kfree_skb_any(skb);
}

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            struct ath_wiphy *aphy, int tx_flags)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        int q, padpos, padsize;

        ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

        if (aphy)
                hw = aphy->hw;

        if (tx_flags & ATH_TX_BAR)
                tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

        if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
                /* Frame was ACKed */
                tx_info->flags |= IEEE80211_TX_STAT_ACK;
        }

        padpos = ath9k_cmn_padpos(hdr->frame_control);
        padsize = padpos & 3;
        if (padsize && skb->len > padpos + padsize) {
                /*
                 * Remove MAC header padding before giving the frame back to
                 * mac80211.
                 */
                memmove(skb->data + padsize, skb->data, padpos);
                skb_pull(skb, padsize);
        }

        if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
                sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
                ath_print(common, ATH_DBG_PS,
                          "Going back to sleep after having "
                          "received TX status (0x%lx)\n",
                          sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                          PS_WAIT_FOR_CAB |
                                          PS_WAIT_FOR_PSPOLL_DATA |
                                          PS_WAIT_FOR_TX_ACK));
        }

        if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
                ath9k_tx_status(hw, skb);
        else {
                q = skb_get_queue_mapping(skb);
                if (q >= 4)
                        q = 0;

                if (--sc->tx.pending_frames[q] < 0)
                        sc->tx.pending_frames[q] = 0;

                ieee80211_tx_status(hw, skb);
        }
}

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar)
{
        struct sk_buff *skb = bf->bf_mpdu;
        unsigned long flags;
        int tx_flags = 0;

        if (sendbar)
                tx_flags = ATH_TX_BAR;

        if (!txok) {
                tx_flags |= ATH_TX_ERROR;

                if (bf_isxretried(bf))
                        tx_flags |= ATH_TX_XRETRY;
        }

        dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);

        if (bf->bf_state.bfs_paprd) {
                if (time_after(jiffies,
                               bf->bf_state.bfs_paprd_timestamp +
                               msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
                        dev_kfree_skb_any(skb);
                else
                        complete(&sc->paprd_complete);
        } else {
                ath_tx_complete(sc, skb, bf->aphy, tx_flags);
                ath_debug_stat_tx(sc, txq, bf, ts);
        }

        /*
         * Return the list of ath_buf of this mpdu to free queue
         */
        spin_lock_irqsave(&sc->tx.txbuflock, flags);
        list_splice_tail_init(bf_q, &sc->tx.txbuf);
        spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
                              struct ath_tx_status *ts, int txok)
{
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int nbad = 0;
        int isaggr = 0;

        if (bf->bf_lastbf->bf_tx_aborted)
                return 0;

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                seq_st = ts->ts_seqnum;
                memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
        }

        while (bf) {
                ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
                if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
                        nbad++;

                bf = bf->bf_next;
        }
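
        /*
         * Editor's note (illustrative, not in the original source): for an
         * aggregate, ts->ts_seqnum is the starting sequence number of the
         * block-ack window and ba[] holds the block-ack bitmap copied from
         * the status descriptor. ATH_BA_INDEX() gives each subframe's offset
         * from seq_st and ATH_BA_ISSET() tests the corresponding bit; a
         * subframe whose bit is clear was not acknowledged and is counted
         * as bad. E.g. with seq_st = 100, a subframe with bf_seqno = 103
         * tests bit 3 of the bitmap.
         */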

        return nbad;
}

static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
                             int nbad, int txok, bool update_rc)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hw *hw = bf->aphy->hw;
        u8 i, tx_rateindex;

        if (txok)
                tx_info->status.ack_signal = ts->ts_rssi;

        tx_rateindex = ts->ts_rateindex;
        WARN_ON(tx_rateindex >= hw->max_rates);

        if (ts->ts_status & ATH9K_TXERR_FILT)
                tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
        if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
                tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

        if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
            (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
                if (ieee80211_is_data(hdr->frame_control)) {
                        if (ts->ts_flags &
                            (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
                                tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
                        if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
                            (ts->ts_status & ATH9K_TXERR_FIFO))
                                tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
                        tx_info->status.ampdu_len = bf->bf_nframes;
                        tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
                }
        }

        for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
                tx_info->status.rates[i].count = 0;
                tx_info->status.rates[i].idx = -1;
        }

        tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
        int qnum;

        qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
        if (qnum == -1)
                return;

        spin_lock_bh(&txq->axq_lock);
        if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
                if (ath_mac80211_start_queue(sc, qnum))
                        txq->stopped = 0;
        }
        spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf, *lastbf, *bf_held = NULL;
        struct list_head bf_head;
        struct ath_desc *ds;
        struct ath_tx_status ts;
        int txok;
        int status;

        ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
                  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
                  txq->axq_link);

        for (;;) {
                spin_lock_bh(&txq->axq_lock);
                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }
                bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

                /*
                 * There is a race condition that a BH gets scheduled
                 * after sw writes TxE and before hw re-load the last
                 * descriptor to get the newly chained one.
                 * Software must keep the last DONE descriptor as a
                 * holding descriptor - software does so by marking
                 * it with the STALE flag.
                 */
                bf_held = NULL;
                if (bf->bf_stale) {
                        bf_held = bf;
                        if (list_is_last(&bf_held->list, &txq->axq_q)) {
                                spin_unlock_bh(&txq->axq_lock);
                                break;
                        } else {
                                bf = list_entry(bf_held->list.next,
                                                struct ath_buf, list);
                        }
                }
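
                /*
                 * Editor's note (illustrative, not in the original source):
                 * if the queue holds [A(stale), B, C] at this point, A is
                 * the holding descriptor left over from the previous pass:
                 * bf_held is set to A and processing continues with B. If A
                 * were the only entry, nothing new has completed and the
                 * loop exits above instead.
                 */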

                lastbf = bf->bf_lastbf;
                ds = lastbf->bf_desc;

                memset(&ts, 0, sizeof(ts));
                status = ath9k_hw_txprocdesc(ah, ds, &ts);
                if (status == -EINPROGRESS) {
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }

                /*
                 * We now know the nullfunc frame has been ACKed so we
                 * can disable RX.
                 */
                if (bf->bf_isnullfunc &&
                    (ts.ts_status & ATH9K_TX_ACKED)) {
                        if ((sc->ps_flags & PS_ENABLED))
                                ath9k_enable_ps(sc);
                        else
                                sc->ps_flags |= PS_NULLFUNC_COMPLETED;
                }

                /*
                 * Remove ath_buf's of the same transmit unit from txq,
                 * however leave the last descriptor back as the holding
                 * descriptor for hw.
                 */
                lastbf->bf_stale = true;
                INIT_LIST_HEAD(&bf_head);
                if (!list_is_singular(&lastbf->list))
                        list_cut_position(&bf_head,
                                          &txq->axq_q, lastbf->list.prev);

                txq->axq_depth--;
                txok = !(ts.ts_status & ATH9K_TXERR_MASK);
                txq->axq_tx_inprogress = false;
                if (bf_held)
                        list_del(&bf_held->list);
                spin_unlock_bh(&txq->axq_lock);

                if (bf_held)
                        ath_tx_return_buffer(sc, bf_held);

                if (!bf_isampdu(bf)) {
                        /*
                         * This frame is sent out as a single frame.
                         * Use hardware retry status for this frame.
                         */
                        if (ts.ts_status & ATH9K_TXERR_XRETRY)
                                bf->bf_state.bf_type |= BUF_XRETRY;
                        ath_tx_rc_status(bf, &ts, 0, txok, true);
                }

                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

                ath_wake_mac80211_queue(sc, txq);

                spin_lock_bh(&txq->axq_lock);
                if (sc->sc_flags & SC_OP_TXAGGR)
                        ath_txq_schedule(sc, txq);
                spin_unlock_bh(&txq->axq_lock);
        }
}

static void ath_tx_complete_poll_work(struct work_struct *work)
{
        struct ath_softc *sc = container_of(work, struct ath_softc,
                                            tx_complete_work.work);
        struct ath_txq *txq;
        int i;
        bool needreset = false;

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i)) {
                        txq = &sc->tx.txq[i];
                        spin_lock_bh(&txq->axq_lock);
                        if (txq->axq_depth) {
                                if (txq->axq_tx_inprogress) {
                                        needreset = true;
                                        spin_unlock_bh(&txq->axq_lock);
                                        break;
                                } else {
                                        txq->axq_tx_inprogress = true;
                                }
                        }
                        spin_unlock_bh(&txq->axq_lock);
                }

        if (needreset) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
                          "tx hung, resetting the chip\n");
                ath9k_ps_wakeup(sc);
                ath_reset(sc, false);
                ath9k_ps_restore(sc);
        }

        ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
                                     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}
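
/*
 * Editor's note (illustrative, not in the original source): this is a
 * two-phase watchdog. Each poll marks every busy queue with
 * axq_tx_inprogress = true, and the completion path clears the flag
 * whenever it reaps a frame. If a later poll finds the flag still set on a
 * non-empty queue, no completion has occurred for a full
 * ATH_TX_COMPLETE_POLL_INT interval and the hardware is assumed to be
 * hung, triggering a chip reset.
 */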

void ath_tx_tasklet(struct ath_softc *sc)
{
        int i;
        u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

        ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
                        ath_tx_processq(sc, &sc->tx.txq[i]);
        }
}

void ath_tx_edma_tasklet(struct ath_softc *sc)
{
        struct ath_tx_status txs;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
        struct ath_txq *txq;
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        int status;
        int txok;

        for (;;) {
                status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
                if (status == -EINPROGRESS)
                        break;
                if (status == -EIO) {
                        ath_print(common, ATH_DBG_XMIT,
                                  "Error processing tx status\n");
                        break;
                }

                /* Skip beacon completions */
                if (txs.qid == sc->beacon.beaconq)
                        continue;

                txq = &sc->tx.txq[txs.qid];

                spin_lock_bh(&txq->axq_lock);
                if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
                        spin_unlock_bh(&txq->axq_lock);
                        return;
                }

                bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
                                      struct ath_buf, list);
                lastbf = bf->bf_lastbf;

                INIT_LIST_HEAD(&bf_head);
                list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
                                  &lastbf->list);
                INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
                txq->axq_depth--;
                txq->axq_tx_inprogress = false;
                spin_unlock_bh(&txq->axq_lock);
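
                /*
                 * Editor's note (illustrative, not in the original source):
                 * on EDMA (AR9003-family) hardware, TX status arrives from a
                 * common status ring rather than from per-queue descriptors,
                 * so completions are matched to frames by consuming
                 * txq_fifo[] in order. INCR() advances txq_tailidx modulo
                 * ATH_TXFIFO_DEPTH, treating the per-queue FIFO as a
                 * circular buffer.
                 */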

                txok = !(txs.ts_status & ATH9K_TXERR_MASK);

                /*
                 * Make sure null func frame is acked before configuring
                 * hw into ps mode.
                 */
                if (bf->bf_isnullfunc && txok) {
                        if ((sc->ps_flags & PS_ENABLED))
                                ath9k_enable_ps(sc);
                        else
                                sc->ps_flags |= PS_NULLFUNC_COMPLETED;
                }

                if (!bf_isampdu(bf)) {
                        if (txs.ts_status & ATH9K_TXERR_XRETRY)
                                bf->bf_state.bf_type |= BUF_XRETRY;
                        ath_tx_rc_status(bf, &txs, 0, txok, true);
                }

                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head,
                                            &txs, txok, 0);

                ath_wake_mac80211_queue(sc, txq);

                spin_lock_bh(&txq->axq_lock);
                if (!list_empty(&txq->txq_fifo_pending)) {
                        INIT_LIST_HEAD(&bf_head);
                        bf = list_first_entry(&txq->txq_fifo_pending,
                                              struct ath_buf, list);
                        list_cut_position(&bf_head, &txq->txq_fifo_pending,
                                          &bf->bf_lastbf->list);
                        ath_tx_txqaddbuf(sc, txq, &bf_head);
                } else if (sc->sc_flags & SC_OP_TXAGGR)
                        ath_txq_schedule(sc, txq);
                spin_unlock_bh(&txq->axq_lock);
        }
}

/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
        struct ath_descdma *dd = &sc->txsdma;
        u8 txs_len = sc->sc_ah->caps.txs_len;

        dd->dd_desc_len = size * txs_len;
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (!dd->dd_desc)
                return -ENOMEM;

        return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
        int err;

        err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
        if (!err)
                ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
                                          sc->txsdma.dd_desc_paddr,
                                          ATH_TXSTATUS_RING_SIZE);

        return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
        struct ath_descdma *dd = &sc->txsdma;

        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int error = 0;

        spin_lock_init(&sc->tx.txbuflock);

        error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
                                  "tx", nbufs, 1, 1);
        if (error != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "Failed to allocate tx descriptors: %d\n", error);
                goto err;
        }

        error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
                                  "beacon", ATH_BCBUF, 1, 1);
        if (error != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "Failed to allocate beacon descriptors: %d\n", error);
                goto err;
        }

        INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                error = ath_tx_edma_init(sc);
                if (error)
                        goto err;
        }

err:
        if (error != 0)
                ath_tx_cleanup(sc);

        return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
        if (sc->beacon.bdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

        if (sc->tx.txdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        int tidno, acno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID;
             tidno++, tid++) {
                tid->an = an;
                tid->tidno = tidno;
                tid->seq_start = tid->seq_next = 0;
                tid->baw_size = WME_MAX_BA;
                tid->baw_head = tid->baw_tail = 0;
                tid->sched = false;
                tid->paused = false;
                tid->state &= ~AGGR_CLEANUP;
                INIT_LIST_HEAD(&tid->buf_q);
                acno = TID_TO_WME_AC(tidno);
                tid->ac = &an->ac[acno];
                tid->state &= ~AGGR_ADDBA_COMPLETE;
                tid->state &= ~AGGR_ADDBA_PROGRESS;
        }

        for (acno = 0, ac = &an->ac[acno];
             acno < WME_NUM_AC; acno++, ac++) {
                ac->sched = false;
                ac->qnum = sc->tx.hwq_map[acno];
                INIT_LIST_HEAD(&ac->tid_q);
        }
}

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_ac *ac;
        struct ath_atx_tid *tid;
        struct ath_txq *txq;
        int i, tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {
                i = tid->ac->qnum;

                if (!ATH_TXQ_SETUP(sc, i))
                        continue;

                txq = &sc->tx.txq[i];
                ac = tid->ac;

                spin_lock_bh(&txq->axq_lock);

                if (tid->sched) {
                        list_del(&tid->list);
                        tid->sched = false;
                }

                if (ac->sched) {
                        list_del(&ac->list);
                        tid->ac->sched = false;
                }

                ath_tid_drain(sc, txq, tid);
                tid->state &= ~AGGR_ADDBA_COMPLETE;
                tid->state &= ~AGGR_CLEANUP;

                spin_unlock_bh(&txq->axq_lock);
        }
}