ath9k: fix initial sequence number after starting an ampdu session
[linux-2.6-block.git] / drivers / net / wireless / ath / ath9k / virtual.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>

#include "ath9k.h"

struct ath9k_vif_iter_data {
	const u8 *hw_macaddr;
	u8 mask[ETH_ALEN];
};

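/*
 * Interface iterator: clear every BSSID mask bit in which the interface
 * address differs from the reference hardware MAC address.
 */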
static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath9k_vif_iter_data *iter_data = data;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
}

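/*
 * Recompute the BSSID mask so that it covers the addresses of all active
 * interfaces on the primary and secondary wiphys, and write the result to
 * the hardware.
 */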
void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_vif_iter_data iter_data;
	int i;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);

	if (vif)
		ath9k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	spin_lock_bh(&sc->wiphy_lock);
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
						   &iter_data);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			continue;
		ieee80211_iterate_active_interfaces_atomic(
			sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
	}
	spin_unlock_bh(&sc->wiphy_lock);

	memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
	ath_hw_setbssidmask(common);
}

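/*
 * Allocate and register a new secondary virtual wiphy.  A free slot in
 * sc->sec_wiphy[] is reused (or the array is grown), and the permanent MAC
 * address is derived from the hardware address by setting the locally
 * administered bit and XORing in the slot index.
 */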
int ath9k_wiphy_add(struct ath_softc *sc)
{
	int i, error;
	struct ath_wiphy *aphy;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hw *hw;
	u8 addr[ETH_ALEN];

	hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
	if (hw == NULL)
		return -ENOMEM;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			break;
	}

	if (i == sc->num_sec_wiphy) {
		/* No empty slot available; increase array length */
		struct ath_wiphy **n;
		n = krealloc(sc->sec_wiphy,
			     (sc->num_sec_wiphy + 1) *
			     sizeof(struct ath_wiphy *),
			     GFP_ATOMIC);
		if (n == NULL) {
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_free_hw(hw);
			return -ENOMEM;
		}
		n[i] = NULL;
		sc->sec_wiphy = n;
		sc->num_sec_wiphy++;
	}

	SET_IEEE80211_DEV(hw, sc->dev);

	aphy = hw->priv;
	aphy->sc = sc;
	aphy->hw = hw;
	sc->sec_wiphy[i] = aphy;
	aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
	spin_unlock_bh(&sc->wiphy_lock);

	memcpy(addr, common->macaddr, ETH_ALEN);
	addr[0] |= 0x02; /* Locally administered address */
	/*
	 * XOR virtual wiphy index into the least significant bits to generate
	 * a different MAC address for each virtual wiphy.
	 */
	addr[5] ^= i & 0xff;
	addr[4] ^= (i & 0xff00) >> 8;
	addr[3] ^= (i & 0xff0000) >> 16;

	SET_IEEE80211_PERM_ADDR(hw, addr);

	ath9k_set_hw_capab(sc, hw);

	error = ieee80211_register_hw(hw);

	if (error == 0) {
		/* Make sure wiphy scheduler is started (if enabled) */
		ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
	}

	return error;
}

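/*
 * Unregister and free a secondary virtual wiphy and release its slot in
 * sc->sec_wiphy[].  Returns -ENOENT if the wiphy is not found.
 */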
int ath9k_wiphy_del(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (aphy == sc->sec_wiphy[i]) {
			sc->sec_wiphy[i] = NULL;
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_unregister_hw(aphy->hw);
			ieee80211_free_hw(aphy->hw);
			return 0;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return -ENOENT;
}

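/*
 * Send a driver-generated nullfunc data frame to the BSSID of the given
 * interface.  With ps set, the PM bit is included to announce entry into
 * power save (ATH9K_IFT_PAUSE); otherwise ATH9K_IFT_UNPAUSE announces the
 * exit.  TX status for these frames is delivered to ath9k_tx_status().
 */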
static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
			       struct ieee80211_vif *vif, const u8 *bssid,
			       int ps)
{
	struct ath_softc *sc = aphy->sc;
	struct ath_tx_control txctl;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	__le16 fc;
	struct ieee80211_tx_info *info;

	skb = dev_alloc_skb(24);
	if (skb == NULL)
		return -ENOMEM;
	hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
	memset(hdr, 0, 24);
	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
			 IEEE80211_FCTL_TODS);
	if (ps)
		fc |= cpu_to_le16(IEEE80211_FCTL_PM);
	hdr->frame_control = fc;
	memcpy(hdr->addr1, bssid, ETH_ALEN);
	memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
	memcpy(hdr->addr3, bssid, ETH_ALEN);

	info = IEEE80211_SKB_CB(skb);
	memset(info, 0, sizeof(*info));
	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
	info->control.vif = vif;
	info->control.rates[0].idx = 0;
	info->control.rates[0].count = 4;
	info->control.rates[1].idx = -1;

	memset(&txctl, 0, sizeof(struct ath_tx_control));
	txctl.txq = sc->tx.txq_map[WME_AC_VO];
	txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;

	if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
		goto exit;

	return 0;
exit:
	dev_kfree_skb_any(skb);
	return -1;
}

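/*
 * Return true if any wiphy (primary or secondary) is still in the
 * ATH_WIPHY_PAUSING state; caller must hold wiphy_lock.
 */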
static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
		return true;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
			return true;
	}
	return false;
}

static bool ath9k_wiphy_pausing(struct ath_softc *sc)
{
	bool ret;
	spin_lock_bh(&sc->wiphy_lock);
	ret = __ath9k_wiphy_pausing(sc);
	spin_unlock_bh(&sc->wiphy_lock);
	return ret;
}

static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
		return true;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
			return true;
	}
	return false;
}

bool ath9k_wiphy_scanning(struct ath_softc *sc)
{
	bool ret;
	spin_lock_bh(&sc->wiphy_lock);
	ret = __ath9k_wiphy_scanning(sc);
	spin_unlock_bh(&sc->wiphy_lock);
	return ret;
}

static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
{
	if (aphy == NULL)
		return;
	if (aphy->chan_idx != aphy->sc->chan_idx)
		return; /* wiphy not on the selected channel */
	__ath9k_wiphy_unpause(aphy);
}

static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	__ath9k_wiphy_unpause_ch(sc->pri_wiphy);
	for (i = 0; i < sc->num_sec_wiphy; i++)
		__ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
	spin_unlock_bh(&sc->wiphy_lock);
}

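/*
 * Deferred channel change: once the wiphys have been paused, switch the
 * hardware to the channel selected for sc->next_wiphy and then unpause the
 * wiphys that operate on that channel.
 */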
void ath9k_wiphy_chan_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_wiphy *aphy = sc->next_wiphy;

	if (aphy == NULL)
		return;

	/*
	 * All pending interfaces paused; ready to change
	 * channels.
	 */

	/* Change channels */
	mutex_lock(&sc->mutex);
	/* XXX: remove me eventually */
	ath9k_update_ichannel(sc, aphy->hw,
			      &sc->sc_ah->channels[sc->chan_idx]);

	/* sync hw configuration for hw code */
	common->hw = aphy->hw;

	if (ath_set_channel(sc, aphy->hw,
			    &sc->sc_ah->channels[sc->chan_idx]) < 0) {
		printk(KERN_DEBUG "ath9k: Failed to set channel for new "
		       "virtual wiphy\n");
		mutex_unlock(&sc->mutex);
		return;
	}
	mutex_unlock(&sc->mutex);

	ath9k_wiphy_unpause_channel(sc);
}

/*
 * ath9k version of ieee80211_tx_status() for TX frames that are generated
 * internally in the driver.
 */
void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) {
		if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
			printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
			       "frame\n", wiphy_name(hw->wiphy));
			/*
			 * The AP did not reply; ignore this to allow us to
			 * continue.
			 */
		}
		aphy->state = ATH_WIPHY_PAUSED;
		if (!ath9k_wiphy_pausing(aphy->sc)) {
			/*
			 * Drop from tasklet to work to allow mutex for channel
			 * change.
			 */
			ieee80211_queue_work(aphy->sc->hw,
					     &aphy->sc->chan_work);
		}
	}

	dev_kfree_skb(skb);
}

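/*
 * Mark a wiphy as paused and, if no other wiphy is still pausing, queue the
 * deferred channel-change work.
 */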
static void ath9k_mark_paused(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	aphy->state = ATH_WIPHY_PAUSED;
	if (!__ath9k_wiphy_pausing(sc))
		ieee80211_queue_work(sc->hw, &sc->chan_work);
}

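/*
 * Per-interface pause handler: associated station interfaces announce power
 * save to their AP with a nullfunc frame before the pause completes;
 * unassociated stations and AP interfaces are marked paused immediately.
 */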
static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc) {
			ath9k_mark_paused(aphy);
			break;
		}
		/* TODO: could avoid this if already in PS mode */
		if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
			printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
			       __func__);
			ath9k_mark_paused(aphy);
		}
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is paused by aphy->state change */
		ath9k_mark_paused(aphy);
		break;
	default:
		break;
	}
}

/* caller must hold wiphy_lock */
static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	ieee80211_stop_queues(aphy->hw);
	aphy->state = ATH_WIPHY_PAUSING;
	/*
	 * TODO: handle PAUSING->PAUSED for the case where there are multiple
	 * active vifs (now we do it on the first vif getting ready; should be
	 * on the last)
	 */
	ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
						   aphy);
	return 0;
}

int ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	int ret;
	spin_lock_bh(&aphy->sc->wiphy_lock);
	ret = __ath9k_wiphy_pause(aphy);
	spin_unlock_bh(&aphy->sc->wiphy_lock);
	return ret;
}

static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc)
			break;
		ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is re-enabled by aphy->state change */
		break;
	default:
		break;
	}
}

/* caller must hold wiphy_lock */
static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
	ieee80211_iterate_active_interfaces_atomic(aphy->hw,
						   ath9k_unpause_iter, aphy);
	aphy->state = ATH_WIPHY_ACTIVE;
	ieee80211_wake_queues(aphy->hw);
	return 0;
}

int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
	int ret;
	spin_lock_bh(&aphy->sc->wiphy_lock);
	ret = __ath9k_wiphy_unpause(aphy);
	spin_unlock_bh(&aphy->sc->wiphy_lock);
	return ret;
}

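/* caller must hold wiphy_lock */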
static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
		sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
			sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
	}
}

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
		__ath9k_wiphy_pause(sc->pri_wiphy);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
			__ath9k_wiphy_pause(sc->sec_wiphy[i]);
	}
}

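/*
 * Request a switch to the channel of the given wiphy.  All active wiphys are
 * paused first; the actual channel change happens either immediately or from
 * ath9k_tx_status() once the last wiphy has finished pausing.  Returns
 * -EBUSY while a scan or a previous select is still in progress.
 */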
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	bool now;

	spin_lock_bh(&sc->wiphy_lock);
	if (__ath9k_wiphy_scanning(sc)) {
		/*
		 * For now, we are using mac80211 sw scan and it expects to
		 * have full control over channel changes, so avoid wiphy
		 * scheduling during a scan. This could be optimized if the
		 * scanning control were moved into the driver.
		 */
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY;
	}
	if (__ath9k_wiphy_pausing(sc)) {
		if (sc->wiphy_select_failures == 0)
			sc->wiphy_select_first_fail = jiffies;
		sc->wiphy_select_failures++;
		if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
		{
			printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
			       "out; disable/enable hw to recover\n");
			__ath9k_wiphy_mark_all_paused(sc);
			/*
			 * TODO: this workaround to fix hardware is unlikely to
			 * be specific to virtual wiphy changes. It can happen
			 * on normal channel change, too, and as such, this
			 * should really be made more generic. For example,
			 * trigger radio disable/enable on GTT interrupt burst
			 * (say, 10 GTT interrupts received without any TX
			 * frame being completed)
			 */
			spin_unlock_bh(&sc->wiphy_lock);
			ath_radio_disable(sc, aphy->hw);
			ath_radio_enable(sc, aphy->hw);
			/* Only the primary wiphy hw is used for queuing work */
			ieee80211_queue_work(aphy->sc->hw,
					     &aphy->sc->chan_work);
			return -EBUSY; /* previous select still in progress */
		}
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY; /* previous select still in progress */
	}
	sc->wiphy_select_failures = 0;

	/* Store the new channel */
	sc->chan_idx = aphy->chan_idx;
	sc->chan_is_ht = aphy->chan_is_ht;
	sc->next_wiphy = aphy;

	__ath9k_wiphy_pause_all(sc);
	now = !__ath9k_wiphy_pausing(aphy->sc);
	spin_unlock_bh(&sc->wiphy_lock);

	if (now) {
		/* Ready to request channel change immediately */
		ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
	}

	/*
	 * wiphys will be unpaused in ath9k_tx_status() once channel has been
	 * changed if any wiphy needs time to become paused.
	 */

	return 0;
}

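/*
 * Return true if any wiphy (primary or secondary) has been started, i.e.,
 * is no longer in the ATH_WIPHY_INACTIVE state.
 */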
bool ath9k_wiphy_started(struct ath_softc *sc)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
		spin_unlock_bh(&sc->wiphy_lock);
		return true;
	}
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
			spin_unlock_bh(&sc->wiphy_lock);
			return true;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return false;
}

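/*
 * Force-pause a wiphy unless it is the selected wiphy of a scan or is
 * already operating on the selected wiphy's channel.
 */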
static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
				   struct ath_wiphy *selected)
{
	if (selected->state == ATH_WIPHY_SCAN) {
		if (aphy == selected)
			return;
		/*
		 * Pause all other wiphys for the duration of the scan even if
		 * they are on the current channel now.
		 */
	} else if (aphy->chan_idx == selected->chan_idx)
		return;
	aphy->state = ATH_WIPHY_PAUSED;
	ieee80211_stop_queues(aphy->hw);
}

void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
				  struct ath_wiphy *selected)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
		ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
			ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
	}
	spin_unlock_bh(&sc->wiphy_lock);
}

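/*
 * Wiphy scheduler work: round-robin over the secondary wiphys (falling back
 * to the primary) to pick the next wiphy that is ready to be scheduled,
 * select it, and re-arm the delayed work for the next interval.
 */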
void ath9k_wiphy_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    wiphy_work.work);
	struct ath_wiphy *aphy = NULL;
	bool first = true;

	spin_lock_bh(&sc->wiphy_lock);

	if (sc->wiphy_scheduler_int == 0) {
		/* wiphy scheduler is disabled */
		spin_unlock_bh(&sc->wiphy_lock);
		return;
	}

try_again:
	sc->wiphy_scheduler_index++;
	while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
		aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
		if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
			break;

		sc->wiphy_scheduler_index++;
		aphy = NULL;
	}
	if (aphy == NULL) {
		sc->wiphy_scheduler_index = 0;
		if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
			if (first) {
				first = false;
				goto try_again;
			}
			/* No wiphy is ready to be scheduled */
		} else
			aphy = sc->pri_wiphy;
	}

	spin_unlock_bh(&sc->wiphy_lock);

	if (aphy &&
	    aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
	    ath9k_wiphy_select(aphy)) {
		printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
		       "change\n");
	}

	ieee80211_queue_delayed_work(sc->hw,
				     &sc->wiphy_work,
				     sc->wiphy_scheduler_int);
}

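/*
 * Set the wiphy scheduler interval (in milliseconds); an interval of 0
 * disables the scheduler.
 */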
void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
{
	cancel_delayed_work_sync(&sc->wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
	if (sc->wiphy_scheduler_int)
		ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
					     sc->wiphy_scheduler_int);
}

/* caller must hold wiphy_lock */
bool ath9k_all_wiphys_idle(struct ath_softc *sc)
{
	unsigned int i;
	if (!sc->pri_wiphy->idle)
		return false;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		if (!aphy->idle)
			return false;
	}
	return true;
}

/* caller must hold wiphy_lock */
void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
{
	struct ath_softc *sc = aphy->sc;

	aphy->idle = idle;
	ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
		"Marking %s as %sidle\n",
		wiphy_name(aphy->hw->wiphy), idle ? "" : "not-");
}

/* Only bother starting a queue on an active virtual wiphy */
bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	unsigned int i;
	bool txq_started = false;

	spin_lock_bh(&sc->wiphy_lock);

	/* Start the primary wiphy */
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
		ieee80211_wake_queue(hw, skb_queue);
		txq_started = true;
		goto unlock;
	}

	/* Now start the secondary wiphy queues */
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		if (aphy->state != ATH_WIPHY_ACTIVE)
			continue;

		hw = aphy->hw;
		ieee80211_wake_queue(hw, skb_queue);
		txq_started = true;
		break;
	}

unlock:
	spin_unlock_bh(&sc->wiphy_lock);
	return txq_started;
}

/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	unsigned int i;

	spin_lock_bh(&sc->wiphy_lock);

	/* Stop the primary wiphy */
	ieee80211_stop_queue(hw, skb_queue);

	/* Now stop the secondary wiphy queues */
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		hw = aphy->hw;
		ieee80211_stop_queue(hw, skb_queue);
	}
	spin_unlock_bh(&sc->wiphy_lock);
}