drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
1/*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#include <net/mac80211.h>
17
18#include "rate.h"
19#include "scb.h"
20#include "phy/phy_hal.h"
21#include "antsel.h"
22#include "main.h"
23#include "ampdu.h"
 24#include "debug.h"
 25#include "brcms_trace_events.h"
26
27/* max number of mpdus in an ampdu */
28#define AMPDU_MAX_MPDU 32
 29/* max number of mpdus in an ampdu to a legacy device */
30#define AMPDU_NUM_MPDU_LEGACY 16
31/* max Tx ba window size (in pdu) */
32#define AMPDU_TX_BA_MAX_WSIZE 64
33/* default Tx ba window size (in pdu) */
34#define AMPDU_TX_BA_DEF_WSIZE 64
35/* default Rx ba window size (in pdu) */
36#define AMPDU_RX_BA_DEF_WSIZE 64
37/* max Rx ba window size (in pdu) */
38#define AMPDU_RX_BA_MAX_WSIZE 64
39/* max dur of tx ampdu (in msec) */
40#define AMPDU_MAX_DUR 5
41/* default tx retry limit */
42#define AMPDU_DEF_RETRY_LIMIT 5
43/* default tx retry limit at reg rate */
44#define AMPDU_DEF_RR_RETRY_LIMIT 2
45/* default ffpld reserved bytes */
46#define AMPDU_DEF_FFPLD_RSVD 2048
47/* # of inis to be freed on detach */
48#define AMPDU_INI_FREE 10
49/* max # of mpdus released at a time */
50#define AMPDU_SCB_MAX_RELEASE 20
51
52#define NUM_FFPLD_FIFO 4 /* number of fifo concerned by pre-loading */
53#define FFPLD_TX_MAX_UNFL 200 /* default value of the average number of ampdu
54 * without underflows
55 */
56#define FFPLD_MPDU_SIZE 1800 /* estimate of maximum mpdu size */
57#define FFPLD_MAX_MCS 23 /* we don't deal with mcs 32 */
58#define FFPLD_PLD_INCR 1000 /* increments in bytes */
59#define FFPLD_MAX_AMPDU_CNT 5000 /* maximum number of ampdu we
60 * accumulate between resets.
61 */
62
63#define AMPDU_DELIMITER_LEN 4
64
65/* max allowed number of mpdus in an ampdu (2 streams) */
66#define AMPDU_NUM_MPDU 16
67
68#define TX_SEQ_TO_INDEX(seq) ((seq) % AMPDU_TX_BA_MAX_WSIZE)
69
70/* max possible overhead per mpdu in the ampdu; 3 is for roundup if needed */
71#define AMPDU_MAX_MPDU_OVERHEAD (FCS_LEN + DOT11_ICV_AES_LEN +\
72 AMPDU_DELIMITER_LEN + 3\
73 + DOT11_A4_HDR_LEN + DOT11_QOS_LEN + DOT11_IV_MAX_LEN)
74
75/* modulo add/sub, bound = 2^k */
76#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
77#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
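/*
 * Worked example: these macros assume bound is a power of two, so masking
 * with (bound - 1) is the same as reducing modulo bound even when the
 * subtraction wraps below zero, e.g. MODSUB_POW2(5, 60, 64) == (5 - 60) & 63
 * == 9, which matches (5 - 60) mod 64.
 */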
78
79/* structure to hold tx fifo information and pre-loading state
80 * counters specific to tx underflows of ampdus
81 * some counters might be redundant with the ones in wlc or ampdu structures.
 82 * This allows a specific state to be maintained independently of
83 * how often and/or when the wlc counters are updated.
84 *
85 * ampdu_pld_size: number of bytes to be pre-loaded
86 * mcs2ampdu_table: per-mcs max # of mpdus in an ampdu
87 * prev_txfunfl: num of underflows last read from the HW macstats counter
88 * accum_txfunfl: num of underflows since we modified pld params
89 * accum_txampdu: num of tx ampdu since we modified pld params
90 * prev_txampdu: previous reading of tx ampdu
91 * dmaxferrate: estimated dma avg xfer rate in kbits/sec
92 */
93struct brcms_fifo_info {
94 u16 ampdu_pld_size;
95 u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1];
96 u16 prev_txfunfl;
97 u32 accum_txfunfl;
98 u32 accum_txampdu;
99 u32 prev_txampdu;
100 u32 dmaxferrate;
101};
102
103/* AMPDU module specific state
104 *
105 * wlc: pointer to main wlc structure
106 * scb_handle: scb cubby handle to retrieve data from scb
107 * ini_enable: per-tid initiator enable/disable of ampdu
108 * ba_tx_wsize: Tx ba window size (in pdu)
109 * ba_rx_wsize: Rx ba window size (in pdu)
110 * retry_limit: mpdu transmit retry limit
111 * rr_retry_limit: mpdu transmit retry limit at regular rate
112 * retry_limit_tid: per-tid mpdu transmit retry limit
113 * rr_retry_limit_tid: per-tid mpdu transmit retry limit at regular rate
114 * mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec
115 * max_pdu: max pdus allowed in ampdu
116 * dur: max duration of an ampdu (in msec)
117 * rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes
118 * ffpld_rsvd: number of bytes to reserve for preload
119 * max_txlen: max size of ampdu per mcs, bw and sgi
120 * mfbr: enable multiple fallback rate
121 * tx_max_funl: underflows should be kept such that
122 * (tx_max_funfl*underflows) < tx frames
123 * fifo_tb: table of fifo infos
124 */
125struct ampdu_info {
126 struct brcms_c_info *wlc;
127 int scb_handle;
128 u8 ini_enable[AMPDU_MAX_SCB_TID];
129 u8 ba_tx_wsize;
130 u8 ba_rx_wsize;
131 u8 retry_limit;
132 u8 rr_retry_limit;
133 u8 retry_limit_tid[AMPDU_MAX_SCB_TID];
134 u8 rr_retry_limit_tid[AMPDU_MAX_SCB_TID];
135 u8 mpdu_density;
136 s8 max_pdu;
137 u8 dur;
138 u8 rx_factor;
139 u32 ffpld_rsvd;
140 u32 max_txlen[MCS_TABLE_SIZE][2][2];
141 bool mfbr;
142 u32 tx_max_funl;
143 struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO];
144};
145
146/* used for flushing ampdu packets */
147struct cb_del_ampdu_pars {
148 struct ieee80211_sta *sta;
149 u16 tid;
150};
151
152static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
153{
154 u32 rate, mcs;
155
156 for (mcs = 0; mcs < MCS_TABLE_SIZE; mcs++) {
157 /* rate is in Kbps; dur is in msec ==> len = (rate * dur) / 8 */
158 /* 20MHz, No SGI */
159 rate = mcs_2_rate(mcs, false, false);
160 ampdu->max_txlen[mcs][0][0] = (rate * dur) >> 3;
161 /* 40 MHz, No SGI */
162 rate = mcs_2_rate(mcs, true, false);
163 ampdu->max_txlen[mcs][1][0] = (rate * dur) >> 3;
164 /* 20MHz, SGI */
165 rate = mcs_2_rate(mcs, false, true);
166 ampdu->max_txlen[mcs][0][1] = (rate * dur) >> 3;
167 /* 40 MHz, SGI */
168 rate = mcs_2_rate(mcs, true, true);
169 ampdu->max_txlen[mcs][1][1] = (rate * dur) >> 3;
170 }
171}
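/*
 * Rough worked example: with dur = AMPDU_MAX_DUR = 5 msec, and assuming
 * mcs_2_rate() reports 65000 kbps for MCS 7 at 20 MHz without SGI, the
 * corresponding entry becomes (65000 * 5) >> 3 = 40625 bytes, i.e. the
 * largest AMPDU that fits in the duration budget at that rate.
 */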
172
173static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu)
174{
175 if (BRCMS_PHY_11N_CAP(ampdu->wlc->band))
176 return true;
177 else
178 return false;
179}
180
181static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on)
182{
183 struct brcms_c_info *wlc = ampdu->wlc;
184 	struct bcma_device *core = wlc->hw->d11core;
185
186 wlc->pub->_ampdu = false;
187
188 if (on) {
189 if (!(wlc->pub->_n_enab & SUPPORT_11N)) {
190 brcms_err(core, "wl%d: driver not nmode enabled\n",
191 wlc->pub->unit);
192 return -ENOTSUPP;
193 }
194 if (!brcms_c_ampdu_cap(ampdu)) {
195 brcms_err(core, "wl%d: device not ampdu capable\n",
196 wlc->pub->unit);
197 return -ENOTSUPP;
198 }
199 wlc->pub->_ampdu = on;
200 }
201
202 return 0;
203}
204
205static void brcms_c_ffpld_init(struct ampdu_info *ampdu)
206{
207 int i, j;
208 struct brcms_fifo_info *fifo;
209
210 for (j = 0; j < NUM_FFPLD_FIFO; j++) {
211 fifo = (ampdu->fifo_tb + j);
212 fifo->ampdu_pld_size = 0;
213 for (i = 0; i <= FFPLD_MAX_MCS; i++)
214 fifo->mcs2ampdu_table[i] = 255;
215 fifo->dmaxferrate = 0;
216 fifo->accum_txampdu = 0;
217 fifo->prev_txfunfl = 0;
218 fifo->accum_txfunfl = 0;
219
220 }
221}
222
223struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
224{
225 struct ampdu_info *ampdu;
226 int i;
227
228 ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC);
229 if (!ampdu)
230 return NULL;
231
232 ampdu->wlc = wlc;
233
234 for (i = 0; i < AMPDU_MAX_SCB_TID; i++)
235 ampdu->ini_enable[i] = true;
236 /* Disable ampdu for VO by default */
237 ampdu->ini_enable[PRIO_8021D_VO] = false;
238 ampdu->ini_enable[PRIO_8021D_NC] = false;
239
240 /* Disable ampdu for BK by default since not enough fifo space */
241 ampdu->ini_enable[PRIO_8021D_NONE] = false;
242 ampdu->ini_enable[PRIO_8021D_BK] = false;
243
244 ampdu->ba_tx_wsize = AMPDU_TX_BA_DEF_WSIZE;
245 ampdu->ba_rx_wsize = AMPDU_RX_BA_DEF_WSIZE;
246 ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY;
247 ampdu->max_pdu = AUTO;
248 ampdu->dur = AMPDU_MAX_DUR;
249
250 ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
251 /*
252 * bump max ampdu rcv size to 64k for all 11n
253 * devices except 4321A0 and 4321A1
254 */
255 if (BRCMS_ISNPHY(wlc->band) && NREV_LT(wlc->band->phyrev, 2))
256 ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_32K;
257 else
258 ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_64K;
259 ampdu->retry_limit = AMPDU_DEF_RETRY_LIMIT;
260 ampdu->rr_retry_limit = AMPDU_DEF_RR_RETRY_LIMIT;
261
262 for (i = 0; i < AMPDU_MAX_SCB_TID; i++) {
263 ampdu->retry_limit_tid[i] = ampdu->retry_limit;
264 ampdu->rr_retry_limit_tid[i] = ampdu->rr_retry_limit;
265 }
266
267 brcms_c_scb_ampdu_update_max_txlen(ampdu, ampdu->dur);
268 ampdu->mfbr = false;
269 /* try to set ampdu to the default value */
270 brcms_c_ampdu_set(ampdu, wlc->pub->_ampdu);
271
272 ampdu->tx_max_funl = FFPLD_TX_MAX_UNFL;
273 brcms_c_ffpld_init(ampdu);
274
275 return ampdu;
276}
277
278void brcms_c_ampdu_detach(struct ampdu_info *ampdu)
279{
280 kfree(ampdu);
281}
282
283static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
284 struct scb *scb)
285{
286 struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
287 int i;
288
289 scb_ampdu->max_pdu = AMPDU_NUM_MPDU;
290
291 /* go back to legacy size if some preloading is occurring */
292 for (i = 0; i < NUM_FFPLD_FIFO; i++) {
293 if (ampdu->fifo_tb[i].ampdu_pld_size > FFPLD_PLD_INCR)
294 scb_ampdu->max_pdu = AMPDU_NUM_MPDU_LEGACY;
295 }
296
297 /* apply user override */
298 if (ampdu->max_pdu != AUTO)
299 scb_ampdu->max_pdu = (u8) ampdu->max_pdu;
300
301 scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu,
302 AMPDU_SCB_MAX_RELEASE);
303
304 if (scb_ampdu->max_rx_ampdu_bytes)
305 scb_ampdu->release = min_t(u8, scb_ampdu->release,
306 scb_ampdu->max_rx_ampdu_bytes / 1600);
307
308 scb_ampdu->release = min(scb_ampdu->release,
309 ampdu->fifo_tb[TX_AC_BE_FIFO].
310 mcs2ampdu_table[FFPLD_MAX_MCS]);
311}
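/*
 * Illustration of the clamping above: with the default max_pdu of
 * AMPDU_NUM_MPDU (16), AMPDU_SCB_MAX_RELEASE (20) and a peer advertising
 * a 64 KB rx AMPDU (65535 / 1600 = 40), release stays at 16 until the
 * preload logic shrinks mcs2ampdu_table[FFPLD_MAX_MCS] below that.
 */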
312
313static void brcms_c_scb_ampdu_update_config_all(struct ampdu_info *ampdu)
314{
315 brcms_c_scb_ampdu_update_config(ampdu, &ampdu->wlc->pri_scb);
316}
317
318static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
319{
320 int i;
321 u32 phy_rate, dma_rate, tmp;
322 u8 max_mpdu;
323 struct brcms_fifo_info *fifo = (ampdu->fifo_tb + f);
324
325 /* recompute the dma rate */
326 /* note : we divide/multiply by 100 to avoid integer overflows */
327 max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
328 AMPDU_NUM_MPDU_LEGACY);
329 phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
330 dma_rate =
331 (((phy_rate / 100) *
332 (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
333 / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
334 fifo->dmaxferrate = dma_rate;
335
336 /* fill up the mcs2ampdu table; do not recalc the last mcs */
337 dma_rate = dma_rate >> 7;
338 for (i = 0; i < FFPLD_MAX_MCS; i++) {
339 /* shifting to keep it within integer range */
340 phy_rate = mcs_2_rate(i, true, false) >> 7;
341 if (phy_rate > dma_rate) {
342 tmp = ((fifo->ampdu_pld_size * phy_rate) /
343 ((phy_rate - dma_rate) * FFPLD_MPDU_SIZE)) + 1;
344 tmp = min_t(u32, tmp, 255);
345 fifo->mcs2ampdu_table[i] = (u8) tmp;
346 }
347 }
348}
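/*
 * The dma_rate expression above works out to roughly
 *   dmaxferrate = phy_rate * (N * S - pld) / (N * S)
 * with N = max_mpdu, S = FFPLD_MPDU_SIZE and pld = ampdu_pld_size, i.e.
 * the phy rate scaled by the fraction of the AMPDU that is not already
 * pre-loaded; dividing and re-multiplying by 100 just keeps the
 * intermediate product within 32 bits.
 */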
349
350/* evaluate the dma transfer rate using the tx underflows as feedback.
351 * If necessary, increase tx fifo preloading. If not enough,
352 * decrease maximum ampdu size for each mcs till underflows stop
353 * Return 1 if pre-loading not active, -1 if not an underflow event,
354 * 0 if pre-loading module took care of the event.
355 */
356static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
357{
358 struct ampdu_info *ampdu = wlc->ampdu;
359 u32 phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
360 u32 txunfl_ratio;
361 u8 max_mpdu;
362 u32 current_ampdu_cnt = 0;
363 u16 max_pld_size;
364 u32 new_txunfl;
365 struct brcms_fifo_info *fifo = (ampdu->fifo_tb + fid);
366 uint xmtfifo_sz;
367 u16 cur_txunfl;
368
369 /* return if we got here for a different reason than underflows */
370 cur_txunfl = brcms_b_read_shm(wlc->hw,
371 M_UCODE_MACSTAT +
372 offsetof(struct macstat, txfunfl[fid]));
373 new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
374 if (new_txunfl == 0) {
375 brcms_dbg_ht(wlc->hw->d11core,
376 "TX status FRAG set but no tx underflows\n");
377 return -1;
378 }
379 fifo->prev_txfunfl = cur_txunfl;
380
381 if (!ampdu->tx_max_funl)
382 return 1;
383
384 /* check if fifo is big enough */
385 if (brcms_b_xmtfifo_sz_get(wlc->hw, fid, &xmtfifo_sz))
386 return -1;
387
388 if ((TXFIFO_SIZE_UNIT * (u32) xmtfifo_sz) <= ampdu->ffpld_rsvd)
389 return 1;
390
391 max_pld_size = TXFIFO_SIZE_UNIT * xmtfifo_sz - ampdu->ffpld_rsvd;
392 fifo->accum_txfunfl += new_txunfl;
393
394 /* we need to wait for at least 10 underflows */
395 if (fifo->accum_txfunfl < 10)
396 return 0;
397
398 brcms_dbg_ht(wlc->hw->d11core, "ampdu_count %d tx_underflows %d\n",
399 current_ampdu_cnt, fifo->accum_txfunfl);
400
401 /*
402 compute the current ratio of tx unfl per ampdu.
403 When the current ampdu count becomes too
404 big while the ratio remains small, we reset
 405 	   the current count so as not to
 406 	   introduce too much latency in detecting a
407 large amount of tx underflows later.
408 */
409
410 txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl;
411
412 if (txunfl_ratio > ampdu->tx_max_funl) {
413 if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT)
414 fifo->accum_txfunfl = 0;
415
416 return 0;
417 }
418 max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
419 AMPDU_NUM_MPDU_LEGACY);
420
 421 	/* If the max value of max_pdu is already lower than
422 the fifo depth, there is nothing more we can do.
423 */
424
425 if (fifo->ampdu_pld_size >= max_mpdu * FFPLD_MPDU_SIZE) {
426 fifo->accum_txfunfl = 0;
427 return 0;
428 }
429
430 if (fifo->ampdu_pld_size < max_pld_size) {
431
432 /* increment by TX_FIFO_PLD_INC bytes */
433 fifo->ampdu_pld_size += FFPLD_PLD_INCR;
434 if (fifo->ampdu_pld_size > max_pld_size)
435 fifo->ampdu_pld_size = max_pld_size;
436
437 /* update scb release size */
438 brcms_c_scb_ampdu_update_config_all(ampdu);
439
440 /*
441 * compute a new dma xfer rate for max_mpdu @ max mcs.
442 * This is the minimum dma rate that can achieve no
443 * underflow condition for the current mpdu size.
444 *
445 * note : we divide/multiply by 100 to avoid integer overflows
446 */
447 fifo->dmaxferrate =
448 (((phy_rate / 100) *
449 (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
450 / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
451
452 brcms_dbg_ht(wlc->hw->d11core,
453 "DMA estimated transfer rate %d; "
454 "pre-load size %d\n",
455 fifo->dmaxferrate, fifo->ampdu_pld_size);
456 } else {
457
458 /* decrease ampdu size */
459 if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] > 1) {
460 if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] == 255)
461 fifo->mcs2ampdu_table[FFPLD_MAX_MCS] =
462 AMPDU_NUM_MPDU_LEGACY - 1;
463 else
464 fifo->mcs2ampdu_table[FFPLD_MAX_MCS] -= 1;
465
466 /* recompute the table */
467 brcms_c_ffpld_calc_mcs2ampdu_table(ampdu, fid);
468
469 /* update scb release size */
470 brcms_c_scb_ampdu_update_config_all(ampdu);
471 }
472 }
473 fifo->accum_txfunfl = 0;
474 return 0;
475}
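/*
 * Summary of the feedback loop above: when underflows are frequent enough
 * (ampdu count per underflow at or below tx_max_funl), the pre-load size
 * first grows toward max_pld_size in FFPLD_PLD_INCR (1000 byte) steps;
 * once pre-loading is maxed out, the per-MCS limit in mcs2ampdu_table is
 * shrunk instead, and accum_txfunfl is cleared after each adjustment.
 */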
476
477void
478brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
479 uint max_rx_ampdu_bytes) /* from ht_cap in beacon */
480{
481 struct scb_ampdu *scb_ampdu;
482 struct ampdu_info *ampdu = wlc->ampdu;
483 struct scb *scb = &wlc->pri_scb;
484 scb_ampdu = &scb->scb_ampdu;
485
486 if (!ampdu->ini_enable[tid]) {
487 		brcms_err(wlc->hw->d11core, "%s: Rejecting tid %d\n",
488 __func__, tid);
489 return;
490 }
491
492 scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes;
493}
494
495void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
496 struct brcms_c_info *wlc)
497{
498 session->wlc = wlc;
499 skb_queue_head_init(&session->skb_list);
500 session->max_ampdu_len = 0; /* determined from first MPDU */
501 session->max_ampdu_frames = 0; /* determined from first MPDU */
502 session->ampdu_len = 0;
503 session->dma_len = 0;
504}
505
506/*
507 * Preps the given packet for AMPDU based on the session data. If the
 508 * frame cannot be accommodated in the current session, -ENOSPC is
509 * returned.
510 */
511int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
512 struct sk_buff *p)
513{
514 struct brcms_c_info *wlc = session->wlc;
515 struct ampdu_info *ampdu = wlc->ampdu;
516 struct scb *scb = &wlc->pri_scb;
517 struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
518 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
519 struct ieee80211_tx_rate *txrate = tx_info->status.rates;
520 struct d11txh *txh = (struct d11txh *)p->data;
521 unsigned ampdu_frames;
522 u8 ndelim, tid;
523 u8 *plcp;
524 uint len;
525 u16 mcl;
526 bool fbr_iscck;
527 bool rr;
528
529 ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
530 plcp = (u8 *)(txh + 1);
531 fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
532 len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
533 BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
534 len = roundup(len, 4) + (ndelim + 1) * AMPDU_DELIMITER_LEN;
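	/*
	 * Illustrative figure: a 1498-byte MPDU with ndelim == 0 accounts for
	 * roundup(1498, 4) = 1500 bytes plus (0 + 1) * AMPDU_DELIMITER_LEN = 4
	 * bytes, i.e. 1504 bytes of the session's ampdu_len budget.
	 */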
535
536 ampdu_frames = skb_queue_len(&session->skb_list);
537 if (ampdu_frames != 0) {
538 struct sk_buff *first;
539
540 if (ampdu_frames + 1 > session->max_ampdu_frames ||
541 session->ampdu_len + len > session->max_ampdu_len)
542 return -ENOSPC;
543
544 /*
545 * We aren't really out of space if the new frame is of
546 * a different priority, but we want the same behaviour
547 * so return -ENOSPC anyway.
548 *
549 * XXX: The old AMPDU code did this, but is it really
550 * necessary?
551 */
552 first = skb_peek(&session->skb_list);
553 if (p->priority != first->priority)
554 return -ENOSPC;
555 }
556
557 /*
 558 * Now that we're sure this frame can be accommodated, update the
559 * session information.
560 */
561 session->ampdu_len += len;
562 session->dma_len += p->len;
563
564 tid = (u8)p->priority;
565
566 /* Handle retry limits */
567 if (txrate[0].count <= ampdu->rr_retry_limit_tid[tid]) {
568 txrate[0].count++;
569 rr = true;
570 } else {
571 txrate[1].count++;
572 rr = false;
573 }
574
575 if (ampdu_frames == 0) {
576 u8 plcp0, plcp3, is40, sgi, mcs;
577 uint fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
578 struct brcms_fifo_info *f = &ampdu->fifo_tb[fifo];
579
580 if (rr) {
581 plcp0 = plcp[0];
582 plcp3 = plcp[3];
583 } else {
584 plcp0 = txh->FragPLCPFallback[0];
585 plcp3 = txh->FragPLCPFallback[3];
586
587 }
588
589 /* Limit AMPDU size based on MCS */
590 is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
591 sgi = plcp3_issgi(plcp3) ? 1 : 0;
592 mcs = plcp0 & ~MIMO_PLCP_40MHZ;
593 session->max_ampdu_len = min(scb_ampdu->max_rx_ampdu_bytes,
594 ampdu->max_txlen[mcs][is40][sgi]);
595
596 session->max_ampdu_frames = scb_ampdu->max_pdu;
597 if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
598 session->max_ampdu_frames =
599 min_t(u16, f->mcs2ampdu_table[mcs],
600 session->max_ampdu_frames);
601 }
602 }
603
604 /*
605 * Treat all frames as "middle" frames of AMPDU here. First and
606 * last frames must be fixed up after all MPDUs have been prepped.
607 */
608 mcl = le16_to_cpu(txh->MacTxControlLow);
609 mcl &= ~TXC_AMPDU_MASK;
610 mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
611 mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
612 txh->MacTxControlLow = cpu_to_le16(mcl);
613 txh->PreloadSize = 0; /* always default to 0 */
614
615 skb_queue_tail(&session->skb_list, p);
616
617 return 0;
618}
619
620void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
621{
622 struct brcms_c_info *wlc = session->wlc;
623 struct ampdu_info *ampdu = wlc->ampdu;
624 struct sk_buff *first, *last;
625 struct d11txh *txh;
626 struct ieee80211_tx_info *tx_info;
627 struct ieee80211_tx_rate *txrate;
628 u8 ndelim;
629 u8 *plcp;
630 uint len;
631 uint fifo;
632 struct brcms_fifo_info *f;
633 u16 mcl;
634 bool fbr;
635 bool fbr_iscck;
636 struct ieee80211_rts *rts;
637 bool use_rts = false, use_cts = false;
638 u16 dma_len = session->dma_len;
639 u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
640 u32 rspec = 0, rspec_fallback = 0;
641 u32 rts_rspec = 0, rts_rspec_fallback = 0;
642 	u8 plcp0, is40, mcs;
643 u16 mch;
644 u8 preamble_type = BRCMS_GF_PREAMBLE;
645 u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
646 u8 rts_preamble_type = BRCMS_LONG_PREAMBLE;
647 u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE;
648
649 if (skb_queue_empty(&session->skb_list))
650 return;
651
652 first = skb_peek(&session->skb_list);
653 last = skb_peek_tail(&session->skb_list);
654
655 /* Need to fix up last MPDU first to adjust AMPDU length */
656 txh = (struct d11txh *)last->data;
657 fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
658 f = &ampdu->fifo_tb[fifo];
659
660 mcl = le16_to_cpu(txh->MacTxControlLow);
661 mcl &= ~TXC_AMPDU_MASK;
662 mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
663 txh->MacTxControlLow = cpu_to_le16(mcl);
664
665 /* remove the null delimiter after last mpdu */
666 ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
667 txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
668 session->ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;
669
670 /* remove the pad len from last mpdu */
671 fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
672 len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
673 BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
674 session->ampdu_len -= roundup(len, 4) - len;
675
676 /* Now fix up the first MPDU */
677 tx_info = IEEE80211_SKB_CB(first);
678 txrate = tx_info->status.rates;
679 txh = (struct d11txh *)first->data;
680 plcp = (u8 *)(txh + 1);
681 rts = (struct ieee80211_rts *)&txh->rts_frame;
682
683 mcl = le16_to_cpu(txh->MacTxControlLow);
684 /* If only one MPDU leave it marked as last */
685 if (first != last) {
686 mcl &= ~TXC_AMPDU_MASK;
687 mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
688 }
689 mcl |= TXC_STARTMSDU;
690 if (ieee80211_is_rts(rts->frame_control)) {
691 mcl |= TXC_SENDRTS;
692 use_rts = true;
693 }
694 if (ieee80211_is_cts(rts->frame_control)) {
695 mcl |= TXC_SENDCTS;
696 use_cts = true;
697 }
698 txh->MacTxControlLow = cpu_to_le16(mcl);
699
700 fbr = txrate[1].count > 0;
701 	if (!fbr)
702 		plcp0 = plcp[0];
703 	else
704 		plcp0 = txh->FragPLCPFallback[0];
705
706 	is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
707 mcs = plcp0 & ~MIMO_PLCP_40MHZ;
708
709 if (is40) {
710 if (CHSPEC_SB_UPPER(wlc_phy_chanspec_get(wlc->band->pi)))
711 mimo_ctlchbw = PHY_TXC1_BW_20MHZ_UP;
712 else
713 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
714 }
715
716 /* rebuild the rspec and rspec_fallback */
717 rspec = RSPEC_MIMORATE;
718 rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
719 if (plcp[0] & MIMO_PLCP_40MHZ)
720 rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
721
722 fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
723 if (fbr_iscck) {
724 rspec_fallback =
725 cck_rspec(cck_phy2mac_rate(txh->FragPLCPFallback[0]));
726 } else {
727 rspec_fallback = RSPEC_MIMORATE;
728 rspec_fallback |= txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
729 if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
730 rspec_fallback |= PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT;
731 }
732
733 if (use_rts || use_cts) {
734 rts_rspec =
735 brcms_c_rspec_to_rts_rspec(wlc, rspec,
736 false, mimo_ctlchbw);
737 rts_rspec_fallback =
738 brcms_c_rspec_to_rts_rspec(wlc, rspec_fallback,
739 false, mimo_ctlchbw);
740 }
741
742 BRCMS_SET_MIMO_PLCP_LEN(plcp, session->ampdu_len);
743 /* mark plcp to indicate ampdu */
744 BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
745
746 /* reset the mixed mode header durations */
747 if (txh->MModeLen) {
748 u16 mmodelen = brcms_c_calc_lsig_len(wlc, rspec,
749 session->ampdu_len);
750 txh->MModeLen = cpu_to_le16(mmodelen);
751 preamble_type = BRCMS_MM_PREAMBLE;
752 }
753 if (txh->MModeFbrLen) {
754 u16 mmfbrlen = brcms_c_calc_lsig_len(wlc, rspec_fallback,
755 session->ampdu_len);
756 txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
757 fbr_preamble_type = BRCMS_MM_PREAMBLE;
758 }
759
760 /* set the preload length */
761 if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
762 dma_len = min(dma_len, f->ampdu_pld_size);
763 txh->PreloadSize = cpu_to_le16(dma_len);
764 } else {
765 txh->PreloadSize = 0;
766 }
767
768 mch = le16_to_cpu(txh->MacTxControlHigh);
769
770 /* update RTS dur fields */
771 if (use_rts || use_cts) {
772 u16 durid;
773 if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
774 TXC_PREAMBLE_RTS_MAIN_SHORT)
775 rts_preamble_type = BRCMS_SHORT_PREAMBLE;
776
777 if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
778 TXC_PREAMBLE_RTS_FB_SHORT)
779 rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;
780
781 durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
782 rspec, rts_preamble_type,
783 preamble_type,
784 session->ampdu_len, true);
785 rts->duration = cpu_to_le16(durid);
786 durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
787 rts_rspec_fallback,
788 rspec_fallback,
789 rts_fbr_preamble_type,
790 fbr_preamble_type,
791 session->ampdu_len, true);
792 txh->RTSDurFallback = cpu_to_le16(durid);
793 /* set TxFesTimeNormal */
794 txh->TxFesTimeNormal = rts->duration;
795 /* set fallback rate version of TxFesTimeNormal */
796 txh->TxFesTimeFallback = txh->RTSDurFallback;
797 }
798
799 /* set flag and plcp for fallback rate */
800 if (fbr) {
801 mch |= TXC_AMPDU_FBR;
802 txh->MacTxControlHigh = cpu_to_le16(mch);
803 BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
804 BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
805 }
806
807 brcms_dbg_ht(wlc->hw->d11core, "wl%d: count %d ampdu_len %d\n",
808 wlc->pub->unit, skb_queue_len(&session->skb_list),
809 session->ampdu_len);
810}
811
812static void
813brcms_c_ampdu_rate_status(struct brcms_c_info *wlc,
814 struct ieee80211_tx_info *tx_info,
815 struct tx_status *txs, u8 mcs)
816{
817 struct ieee80211_tx_rate *txrate = tx_info->status.rates;
818 int i;
819
820 /* clear the rest of the rates */
821 for (i = 2; i < IEEE80211_TX_MAX_RATES; i++) {
822 txrate[i].idx = -1;
823 txrate[i].count = 0;
824 }
825}
826
827static void
828brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
829 struct sk_buff *p, struct tx_status *txs,
830 u32 s1, u32 s2)
831{
832 struct scb_ampdu *scb_ampdu;
833 struct brcms_c_info *wlc = ampdu->wlc;
834 struct scb_ampdu_tid_ini *ini;
835 u8 bitmap[8], queue, tid;
836 struct d11txh *txh;
837 u8 *plcp;
838 struct ieee80211_hdr *h;
839 u16 seq, start_seq = 0, bindex, index, mcl;
840 u8 mcs = 0;
841 bool ba_recd = false, ack_recd = false;
842 	u8 tot_mpdu = 0;
843 	uint supr_status;
844 	bool retry = true;
845 	u16 mimoantsel = 0;
846 	u8 retry_limit;
847 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
848
849#ifdef DEBUG
850 u8 hole[AMPDU_MAX_MPDU];
851 memset(hole, 0, sizeof(hole));
852#endif
853
854 scb_ampdu = &scb->scb_ampdu;
855 tid = (u8) (p->priority);
856
857 ini = &scb_ampdu->ini[tid];
858 retry_limit = ampdu->retry_limit_tid[tid];
859 memset(bitmap, 0, sizeof(bitmap));
860 queue = txs->frameid & TXFID_QUEUE_MASK;
861 supr_status = txs->status & TX_STATUS_SUPR_MASK;
862
863 if (txs->status & TX_STATUS_ACK_RCV) {
864 WARN_ON(!(txs->status & TX_STATUS_INTERMEDIATE));
865 start_seq = txs->sequence >> SEQNUM_SHIFT;
866 bitmap[0] = (txs->status & TX_STATUS_BA_BMAP03_MASK) >>
867 TX_STATUS_BA_BMAP03_SHIFT;
868
869 WARN_ON(s1 & TX_STATUS_INTERMEDIATE);
870 WARN_ON(!(s1 & TX_STATUS_AMPDU));
871
872 bitmap[0] |=
873 (s1 & TX_STATUS_BA_BMAP47_MASK) <<
874 TX_STATUS_BA_BMAP47_SHIFT;
875 bitmap[1] = (s1 >> 8) & 0xff;
876 bitmap[2] = (s1 >> 16) & 0xff;
877 bitmap[3] = (s1 >> 24) & 0xff;
878
879 bitmap[4] = s2 & 0xff;
880 bitmap[5] = (s2 >> 8) & 0xff;
881 bitmap[6] = (s2 >> 16) & 0xff;
882 bitmap[7] = (s2 >> 24) & 0xff;
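		/*
		 * The eight bytes above assemble the 64-bit block-ack bitmap
		 * covering the AMPDU_TX_BA_MAX_WSIZE (64) frame window: the low
		 * nibble of bitmap[0] comes from the first tx status word, the
		 * high nibble and bytes 1-3 from s1, and bytes 4-7 from s2.
		 */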
883
884 ba_recd = true;
885 } else {
886 if (supr_status) {
887 			if (supr_status == TX_STATUS_SUPR_BADCH) {
888 				brcms_dbg_ht(wlc->hw->d11core,
889 					"%s: Pkt tx suppressed, illegal channel possibly %d\n",
890 __func__, CHSPEC_CHANNEL(
891 wlc->default_bss->chanspec));
892 } else {
893 if (supr_status != TX_STATUS_SUPR_FRAG)
894 brcms_err(wlc->hw->d11core,
895 "%s: supr_status 0x%x\n",
896 __func__, supr_status);
897 }
898 /* no need to retry for badch; will fail again */
899 if (supr_status == TX_STATUS_SUPR_BADCH ||
900 supr_status == TX_STATUS_SUPR_EXPTIME) {
901 retry = false;
902 } else if (supr_status == TX_STATUS_SUPR_EXPTIME) {
903 /* TX underflow:
904 * try tuning pre-loading or ampdu size
905 */
906 } else if (supr_status == TX_STATUS_SUPR_FRAG) {
907 /*
908 * if there were underflows, but pre-loading
909 * is not active, notify rate adaptation.
910 */
911 				brcms_c_ffpld_check_txfunfl(wlc, queue);
912 }
913 } else if (txs->phyerr) {
914 brcms_dbg_ht(wlc->hw->d11core,
915 "%s: ampdu tx phy error (0x%x)\n",
916 __func__, txs->phyerr);
917 }
918 }
919
920 /* loop through all pkts and retry if not acked */
921 while (p) {
922 tx_info = IEEE80211_SKB_CB(p);
923 txh = (struct d11txh *) p->data;
924 mcl = le16_to_cpu(txh->MacTxControlLow);
925 plcp = (u8 *) (txh + 1);
926 h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
927 seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
928
929 trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh));
930
931 if (tot_mpdu == 0) {
932 mcs = plcp[0] & MIMO_PLCP_MCS_MASK;
933 mimoantsel = le16_to_cpu(txh->ABI_MimoAntSel);
934 }
935
936 index = TX_SEQ_TO_INDEX(seq);
937 ack_recd = false;
938 if (ba_recd) {
939 int block_acked;
940
941 			bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
942 if (bindex < AMPDU_TX_BA_MAX_WSIZE)
943 block_acked = isset(bitmap, bindex);
944 else
945 block_acked = 0;
946 brcms_dbg_ht(wlc->hw->d11core,
947 "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
948 tid, seq, start_seq, bindex,
949 				block_acked, index);
950 		/* if acked then clear bit and free packet */
951 			if (block_acked) {
952 ini->txretry[index] = 0;
953
954 /*
955 * ampdu_ack_len:
956 * number of acked aggregated frames
957 */
958 /* ampdu_len: number of aggregated frames */
959 brcms_c_ampdu_rate_status(wlc, tx_info, txs,
960 mcs);
961 tx_info->flags |= IEEE80211_TX_STAT_ACK;
962 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
963 tx_info->status.ampdu_ack_len =
964 tx_info->status.ampdu_len = 1;
965
966 skb_pull(p, D11_PHY_HDR_LEN);
967 skb_pull(p, D11_TXH_LEN);
968
969 ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
970 p);
971 ack_recd = true;
972 }
973 }
974 /* either retransmit or send bar if ack not recd */
975 if (!ack_recd) {
976 			if (retry && (ini->txretry[index] < (int)retry_limit)) {
977 				int ret;
978 				ini->txretry[index]++;
979 ret = brcms_c_txfifo(wlc, queue, p);
980 /*
981 * We shouldn't be out of space in the DMA
982 * ring here since we're reinserting a frame
983 * that was just pulled out.
984 */
985 WARN_ONCE(ret, "queue %d out of txds\n", queue);
986 } else {
987 /* Retry timeout */
988 ieee80211_tx_info_clear_status(tx_info);
989 tx_info->status.ampdu_ack_len = 0;
990 tx_info->status.ampdu_len = 1;
991 tx_info->flags |=
992 IEEE80211_TX_STAT_AMPDU_NO_BACK;
993 skb_pull(p, D11_PHY_HDR_LEN);
994 skb_pull(p, D11_TXH_LEN);
995 				brcms_dbg_ht(wlc->hw->d11core,
996 "BA Timeout, seq %d\n",
997 seq);
998 ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
999 p);
1000 }
1001 }
1002 tot_mpdu++;
1003
1004 /* break out if last packet of ampdu */
1005 if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
1006 TXC_AMPDU_LAST)
1007 break;
1008
1009 p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
1010 }
1011
1012 /* update rate state */
1013 	brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
1014}
1015
1016void
1017brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
1018 struct sk_buff *p, struct tx_status *txs)
1019{
1020 	struct brcms_c_info *wlc = ampdu->wlc;
1021 	u32 s1 = 0, s2 = 0;
1022
1023 /* BMAC_NOTE: For the split driver, second level txstatus comes later
1024 * So if the ACK was received then wait for the second level else just
1025 * call the first one
1026 */
1027 if (txs->status & TX_STATUS_ACK_RCV) {
1028 u8 status_delay = 0;
1029
1030 /* wait till the next 8 bytes of txstatus is available */
1031 s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
1032 while ((s1 & TXS_V) == 0) {
1033 udelay(1);
1034 status_delay++;
1035 if (status_delay > 10)
1036 return; /* error condition */
1037 s1 = bcma_read32(wlc->hw->d11core,
1038 D11REGOFFS(frmtxstatus));
1039 }
1040
1041 	s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
1042 }
1043
1044 if (scb) {
1045 brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
1046 } else {
1047 /* loop through all pkts and free */
1048 u8 queue = txs->frameid & TXFID_QUEUE_MASK;
1049 struct d11txh *txh;
1050 u16 mcl;
1051 while (p) {
1052 			txh = (struct d11txh *) p->data;
1053 trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
1054 sizeof(*txh));
1055 mcl = le16_to_cpu(txh->MacTxControlLow);
1056 brcmu_pkt_buf_free_skb(p);
1057 /* break out if last packet of ampdu */
1058 if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
1059 TXC_AMPDU_LAST)
1060 break;
1061 p = dma_getnexttxp(wlc->hw->di[queue],
1062 DMA_RANGE_TRANSMITTED);
1063 }
1064 }
1065}
1066
1067void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc)
1068{
1069 char template[T_RAM_ACCESS_SZ * 2];
1070
1071 /* driver needs to write the ta in the template; ta is at offset 16 */
1072 memset(template, 0, sizeof(template));
1073 memcpy(template, wlc->pub->cur_etheraddr, ETH_ALEN);
1074 brcms_b_write_template_ram(wlc->hw, (T_BA_TPL_BASE + 16),
1075 (T_RAM_ACCESS_SZ * 2),
1076 template);
1077}
1078
1079bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid)
1080{
1081 return wlc->ampdu->ini_enable[tid];
1082}
1083
1084void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu)
1085{
1086 struct brcms_c_info *wlc = ampdu->wlc;
1087
1088 /*
1089 * Extend ucode internal watchdog timer to
1090 * match larger received frames
1091 */
1092 if ((ampdu->rx_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) ==
1093 IEEE80211_HT_MAX_AMPDU_64K) {
1094 brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
1095 brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX);
1096 } else {
1097 brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF);
1098 brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF);
1099 }
1100}
1101
1102/*
1103 * callback function that helps invalidating ampdu packets in a DMA queue
1104 */
1105static void dma_cb_fn_ampdu(void *txi, void *arg_a)
1106{
1107 struct ieee80211_sta *sta = arg_a;
1108 struct ieee80211_tx_info *tx_info = (struct ieee80211_tx_info *)txi;
1109
1110 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
1111 (tx_info->rate_driver_data[0] == sta || sta == NULL))
1112 tx_info->rate_driver_data[0] = NULL;
1113}
1114
1115/*
1116 * When a remote party is no longer available for ampdu communication, any
1117 * pending tx ampdu packets in the driver have to be flushed.
1118 */
1119void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
1120 struct ieee80211_sta *sta, u16 tid)
1121{
1122 brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
1123}