mac80211: make TX aggregation start/stop request async
[linux-2.6-block.git] / net / mac80211 / agg-tx.c
/*
 * HT handling
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2007-2009, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "wme.h"

/**
 * DOC: TX aggregation
 *
 * Aggregation on the TX side requires setting the hardware flag
 * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues
 * hardware parameter to the number of hardware AMPDU queues. If there are no
 * hardware queues then the driver will (currently) have to do all frame
 * buffering.
 *
 * When TX aggregation is started by some subsystem (usually the rate control
 * algorithm) calling the ieee80211_start_tx_ba_session() function, the driver
 * will be notified via its @ampdu_action function, with the
 * %IEEE80211_AMPDU_TX_START action.
 *
 * In response to that, the driver is later required to call the
 * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe())
 * function, which will start the aggregation session.
 *
 * Similarly, when the aggregation session is stopped by
 * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will
 * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the
 * call must not fail, and the driver must later call
 * ieee80211_stop_tx_ba_cb() (or ieee80211_stop_tx_ba_cb_irqsafe()).
 */

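/*
 * Minimal driver-side sketch of the flow described above (an illustration,
 * not code from this tree; the mydrv_* helpers are hypothetical). A driver
 * handles the two TX actions in its @ampdu_action callback and reports
 * completion through the irqsafe callbacks:
 *
 *	static int mydrv_ampdu_action(struct ieee80211_hw *hw,
 *				      struct ieee80211_vif *vif,
 *				      enum ieee80211_ampdu_mlme_action action,
 *				      struct ieee80211_sta *sta, u16 tid,
 *				      u16 *ssn)
 *	{
 *		switch (action) {
 *		case IEEE80211_AMPDU_TX_START:
 *			mydrv_prepare_tx_agg(hw, sta, tid, ssn);
 *			ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 *			return 0;
 *		case IEEE80211_AMPDU_TX_STOP:
 *			mydrv_tear_down_tx_agg(hw, sta, tid);
 *			ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
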
static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

	if (!skb) {
		printk(KERN_ERR "%s: failed to allocate buffer "
		       "for addba request frame\n", sdata->name);
		return;
	}
	skb_reserve(skb, local->hw.extra_tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
	memset(mgmt, 0, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
		memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	capab = (u16)(1 << 1);		/* bit 1 aggregation policy */
	capab |= (u16)(tid << 2);	/* bits 5:2 TID number */
	capab |= (u16)(agg_size << 6);	/* bits 15:6 max size of aggregation */

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
	mgmt->u.action.u.addba_req.start_seq_num =
					cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb(sdata, skb);
}

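/*
 * Worked example of the capability encoding above (illustrative numbers):
 * for tid = 5 and agg_size = 0x40,
 *
 *	capab = (1 << 1) | (5 << 2) | (0x40 << 6)
 *	      = 0x0002   | 0x0014   | 0x1000      = 0x1016
 *
 * i.e. the policy bit set, TID 5 and a requested maximum of 64 frames.
 */
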
void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_bar *bar;
	u16 bar_control = 0;

	skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
	if (!skb) {
		printk(KERN_ERR "%s: failed to allocate buffer for "
		       "bar frame\n", sdata->name);
		return;
	}
	skb_reserve(skb, local->hw.extra_tx_headroom);
	bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
	memset(bar, 0, sizeof(*bar));
	bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					 IEEE80211_STYPE_BACK_REQ);
	memcpy(bar->ra, ra, ETH_ALEN);
	memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
	bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
	bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
	bar_control |= (u16)(tid << 12);
	bar->control = cpu_to_le16(bar_control);
	bar->start_seq_num = cpu_to_le16(ssn);

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
	ieee80211_tx_skb(sdata, skb);
}

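/*
 * Note on ieee80211_send_bar() above: it builds an 802.11 Block Ack Request
 * control frame with RA/TA filled in, normal ack policy, the compressed BA
 * bitmap bit, the TID in bits 15:12 of the BAR control field and the start
 * sequence number the recipient should advance its window to. The frame is
 * marked IEEE80211_TX_INTFL_DONT_ENCRYPT since, as a control frame, it must
 * not be encrypted.
 */
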
static void kfree_tid_tx(struct rcu_head *rcu_head)
{
	struct tid_ampdu_tx *tid_tx =
	    container_of(rcu_head, struct tid_ampdu_tx, rcu_head);

	kfree(tid_tx);
}

static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
					    enum ieee80211_back_parties initiator)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
	int ret;

	lockdep_assert_held(&sta->lock);

	if (WARN_ON(!tid_tx))
		return -ENOENT;

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
		call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
		return 0;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	/*
	 * After this, packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	tid_tx->stop_initiator = initiator;

	ret = drv_ampdu_action(local, sta->sdata,
			       IEEE80211_AMPDU_TX_STOP,
			       &sta->sta, tid, NULL);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets that get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	return ret;
}

/*
 * After sending an ADDBA request, a timer runs while we wait for the
 * recipient's ADDBA response. If the timer expires,
 * sta_addba_resp_timer_expired() is executed.
 */
static void sta_addba_resp_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and both sta_info and TID are needed, so the
	 * station init flow gives the TID as data, while the timer_to_tid
	 * array gives the sta through container_of */
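	/*
	 * Illustrative sketch of that init flow (it lives in the station
	 * allocation code, not here; shown only to make the detour concrete):
	 * each array slot is presumably filled with its own index, roughly
	 *
	 *	for (i = 0; i < STA_TID_NUM; i++)
	 *		sta->timer_to_tid[i] = i;
	 *
	 * so *(u8 *)data recovers the TID and container_of() the sta_info.
	 */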
	u16 tid = *(u8 *)data;
	struct sta_info *sta = container_of((void *)data,
		struct sta_info, timer_to_tid[tid]);
	struct tid_ampdu_tx *tid_tx;

	/* check if the TID waits for addBA response */
	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];
	if (!tid_tx ||
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "timer expired on tid %d but we are not "
		       "(or no longer) expecting addBA response there\n",
		       tid);
#endif
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
#endif

	___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
	spin_unlock_bh(&sta->lock);
}

static inline int ieee80211_ac_from_tid(int tid)
{
	return ieee802_1d_to_ac[tid & 7];
}

/*
 * When multiple aggregation sessions on multiple stations
 * are being created/destroyed simultaneously, we need to
 * refcount the global queue stop caused by that in order
 * to not get into a situation where one of the aggregation
 * setup or teardown re-enables queues before the other is
 * ready to handle that.
 *
 * These two functions take care of this issue by keeping
 * a global "agg_queue_stop" refcount.
 */
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
{
	int queue = ieee80211_ac_from_tid(tid);

	if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
	__acquire(agg_queue);
}

static void __releases(agg_queue)
ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
{
	int queue = ieee80211_ac_from_tid(tid);

	if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
		ieee80211_wake_queue_by_reason(
			&local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
	__release(agg_queue);
}

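/*
 * Worked example of the refcounting above: if sessions on two stations start
 * on the same AC at (nearly) the same time, the first ieee80211_stop_queue_agg()
 * call raises the counter 0 -> 1 and actually stops the queue, the second only
 * raises it 1 -> 2; the queue is woken again only by the
 * ieee80211_wake_queue_agg() call that brings the counter back to 0.
 */
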
static void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	u16 start_seq_num;
	int ret;

	/*
	 * While we're asking the driver about the aggregation,
	 * stop the AC queue so that we don't have to worry
	 * about frames that came in while we were doing that,
	 * which would require us to put them to the AC pending
	 * afterwards which just makes the code more complex.
	 */
	ieee80211_stop_queue_agg(local, tid);

	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/*
	 * This might be off by one due to a race that we can't
	 * really prevent here without synchronize_net() which
	 * can't be called now.
	 */
	start_seq_num = sta->tid_seq[tid] >> 4;

	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
			       &sta->sta, tid, &start_seq_num);
	if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - HW unavailable for"
		       " tid %d\n", tid);
#endif
		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
		ieee80211_wake_queue_agg(local, tid);
		call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
		return;
	}

	/* we can take packets again now */
	ieee80211_wake_queue_agg(local, tid);

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
#endif

	sta->ampdu_mlme.addba_req_num[tid]++;

	/* send AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, start_seq_num,
				     0x40, 5000);
}

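/*
 * Note on the literal arguments above: 0x40 asks the recipient for a
 * 64-frame aggregation buffer and 5000 is the value placed into the ADDBA
 * request's block-ack timeout field.
 */
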
void ieee80211_tx_ba_session_work(struct work_struct *work)
{
	struct sta_info *sta =
		container_of(work, struct sta_info, ampdu_mlme.work);
	struct tid_ampdu_tx *tid_tx;
	int tid;

	/*
	 * When this flag is set, new sessions should be
	 * blocked, and existing sessions will be torn
	 * down by the code that set the flag, so this
	 * need not run.
	 */
	if (test_sta_flags(sta, WLAN_STA_BLOCK_BA))
		return;

	spin_lock_bh(&sta->lock);
	for (tid = 0; tid < STA_TID_NUM; tid++) {
		tid_tx = sta->ampdu_mlme.tid_tx[tid];
		if (!tid_tx)
			continue;

		if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state))
			ieee80211_tx_ba_session_handle_start(sta, tid);
		else if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
					    &tid_tx->state))
			___ieee80211_stop_tx_ba_session(sta, tid,
							WLAN_BACK_INITIATOR);
	}
	spin_unlock_bh(&sta->lock);
}

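/*
 * Note: this work item is where the asynchronous part of both session setup
 * and teardown runs; ieee80211_start_tx_ba_session() and
 * ieee80211_stop_tx_ba_session() below only mark the per-TID state
 * (WANT_START / WANT_STOP) and queue this work on &sta->ampdu_mlme.work.
 */
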
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN_ON(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= STA_TID_NUM) ||
	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION))
		return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	/*
	 * The aggregation code is not prepared to handle
	 * anything but STA/AP due to the BSSID handling.
	 * IBSS could work in the code but isn't supported
	 * by drivers or the standard.
	 */
	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP)
		return -EINVAL;

	if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA sessions blocked. "
		       "Denying BA session request\n");
#endif
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = sta->ampdu_mlme.tid_tx[tid];
	/* check if the TID is not in aggregation flow already */
	if (tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - session is not "
		       "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
			       tid);
#endif
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/* Tx timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/* finally, assign it to the array */
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);

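/*
 * Illustrative caller sketch (an assumption; no such caller lives in this
 * file): a rate control algorithm that decides a station would benefit from
 * aggregation on the TID of an outgoing frame might simply do
 *
 *	ieee80211_start_tx_ba_session(pubsta, tid);
 *
 * The return value only says whether the request was accepted and queued;
 * the actual negotiation continues asynchronously from
 * ieee80211_tx_ba_session_work() above.
 */
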
/*
 * splice packets from the STA's pending to the local pending,
 * requires a call to ieee80211_agg_splice_finish later
 */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_local *local,
			     struct tid_ampdu_tx *tid_tx, u16 tid)
{
	int queue = ieee80211_ac_from_tid(tid);
	unsigned long flags;

	ieee80211_stop_queue_agg(local, tid);

	if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
			  " from the pending queue\n", tid))
		return;

	if (!skb_queue_empty(&tid_tx->pending)) {
		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		/* copy over remaining packets */
		skb_queue_splice_tail_init(&tid_tx->pending,
					   &local->pending[queue]);
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	}
}

static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
{
	ieee80211_wake_queue_agg(local, tid);
}

/* caller must hold sta->lock */
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	lockdep_assert_held(&sta->lock);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
#endif

	ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state);
	ieee80211_agg_splice_finish(local, tid);

	drv_ampdu_action(local, sta->sdata,
			 IEEE80211_AMPDU_TX_OPERATIONAL,
			 &sta->sta, tid, NULL);
}

void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
#endif
		return;
	}

	rcu_read_lock();
	sta = sta_info_get(sdata, ra);
	if (!sta) {
		rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
		return;
	}

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (WARN_ON(!tid_tx)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "addBA was not requested!\n");
#endif
		spin_unlock_bh(&sta->lock);
		rcu_read_unlock();
		return;
	}

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		goto out;

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);

 out:
	spin_unlock_bh(&sta->lock);
	rcu_read_unlock();
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

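	/*
	 * The zero-size skb is used purely as a message container: its cb
	 * area carries the RA/TID and its pkt_type tells the sdata work
	 * which kind of request this is, so the real callback can run from
	 * process context later.
	 */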
	if (unlikely(!skb)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping start BA session", sdata->name);
#endif
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_back_parties initiator)
{
	struct tid_ampdu_tx *tid_tx;
	int ret;

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}

int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= STA_TID_NUM)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* already in progress stopping it */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);

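/*
 * Illustrative caller sketch (an assumption, mirroring the start case): a
 * driver that can no longer service the aggregate, or mac80211's own HT
 * code, requests teardown with
 *
 *	ieee80211_stop_tx_ba_session(pubsta, tid);
 *
 * and the actual IEEE80211_AMPDU_TX_STOP request to the driver is issued
 * later from the aggregation work.
 */
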
void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_stop_tx_ba_cb(sdata, ra, tid);

	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
#endif
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n",
	       ra, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();
	sta = sta_info_get(sdata, ra);
	if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
#endif
		spin_unlock_bh(&sta->lock);
		rcu_read_unlock();
		return;
	}

	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR)
		ieee80211_send_delba(sta->sdata, ra, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	/*
	 * When we get here, the TX path will no longer be lockless wrt.
	 * aggregation, since the OPERATIONAL bit has long been cleared.
	 * Thus it will block on taking the lock if it runs at all. So if we
	 * stop the queue now, we will not get any more packets, and any
	 * that might be being processed will wait for us here, thereby
	 * guaranteeing that no packets go to the tid_tx pending queue any
	 * more.
	 */

	ieee80211_agg_splice_packets(local, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);

	ieee80211_agg_splice_finish(local, tid);

	call_rcu(&tid_tx->rcu_head, kfree_tid_tx);

	spin_unlock_bh(&sta->lock);
	rcu_read_unlock();
}

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping stop BA session", sdata->name);
#endif
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);

void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	u16 capab, tid;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;

	spin_lock_bh(&sta->lock);

	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (!tid_tx)
		goto out;

	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
#endif
		goto out;
	}

	del_timer(&tid_tx->addba_resp_timer);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif

	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		sta->ampdu_mlme.addba_req_num[tid] = 0;
	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
	}

 out:
	spin_unlock_bh(&sta->lock);
}