/*
 * Copyright (c) 2008, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Implementation of the main "ATH" layer. */

#include "core.h"
#include "regd.h"

static int ath_outdoor;		/* enable outdoor use */

static u32 ath_chainmask_sel_up_rssi_thres =
	ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
	ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
	ATH_CHAINMASK_SEL_TIMEOUT;

/* return bus cachesize in 4B word units */

static void bus_read_cachesize(struct ath_softc *sc, int *csz)
{
	u8 u8tmp;

	pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
	*csz = (int)u8tmp;

	/*
	 * This check was put in to avoid "unpleasant" consequences if
	 * the bootrom has not fully initialized all PCI devices.
	 * Sometimes the cache line size register is not set.
	 */

	if (*csz == 0)
		*csz = DEFAULT_CACHELINE >> 2;	/* use the default size */
}
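
/*
 * Worked example (a sketch, not part of the driver logic): the
 * PCI_CACHE_LINE_SIZE config register reports the cache line size in
 * 4-byte words, so a 64-byte cache line reads back as 16 and is stored
 * in *csz unchanged.  Assuming DEFAULT_CACHELINE is expressed in bytes
 * (e.g. 32), the fallback DEFAULT_CACHELINE >> 2 yields the same
 * 4-byte-word unit (8).  Callers such as ath_init() convert back to
 * bytes with sc->sc_cachelsz = csz << 2.
 */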

/*
 * Set current operating mode
 *
 * This function initializes and fills the rate table in the ATH object based
 * on the operating mode.
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
	const struct ath9k_rate_table *rt;
	int i;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = ath9k_hw_getratetable(sc->sc_ah, mode);
	BUG_ON(!rt);

	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;

	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < 256; i++) {
		u8 ix = rt->rateCodeToIndex[i];

		if (ix == 0xff)
			continue;

		sc->sc_hwmap[i].ieeerate =
			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
		sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;

		if (rt->info[ix].shortPreamble ||
		    rt->info[ix].phy == PHY_OFDM) {
			/* XXX: Handle this */
		}

		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
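
/*
 * Illustrative note (numbers are hypothetical, not from any actual rate
 * table): sc_rixmap inverts the rate table, mapping a hardware rate code
 * back to its table index, while sc_hwmap caches per-rate-code data for
 * the rx path.  If rt->info[3].rateCode were 0x0b at 11000 kbps, then
 * after this function sc_rixmap[0x0b] == 3 and
 * sc_hwmap[0x0b].rateKbps == 11000.
 */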

/*
 * Set up rate table (legacy rates)
 */
static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
{
	struct ath_hal *ah = sc->sc_ah;
	const struct ath9k_rate_table *rt = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	int i, maxrates;

	switch (band) {
	case IEEE80211_BAND_2GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
		break;
	case IEEE80211_BAND_5GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
		break;
	default:
		break;
	}

	if (rt == NULL)
		return;

	sband = &sc->sbands[band];
	rate = sc->rates[band];

	if (rt->rateCount > ATH_RATE_MAX)
		maxrates = ATH_RATE_MAX;
	else
		maxrates = rt->rateCount;

	for (i = 0; i < maxrates; i++) {
		rate[i].bitrate = rt->info[i].rateKbps / 100;
		rate[i].hw_value = rt->info[i].rateCode;
		sband->n_bitrates++;
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Rate: %2dMbps, ratecode: %2d\n",
			__func__,
			rate[i].bitrate / 10,
			rate[i].hw_value);
	}
}

/*
 * Set up channel list
 */
static int ath_setup_channels(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int nchan, i, a = 0, b = 0;
	u8 regclassids[ATH_REGCLASSIDS_MAX];
	u32 nregclass = 0;
	struct ieee80211_supported_band *band_2ghz;
	struct ieee80211_supported_band *band_5ghz;
	struct ieee80211_channel *chan_2ghz;
	struct ieee80211_channel *chan_5ghz;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah,
				      ATH_CHAN_MAX,
				      (u32 *)&nchan,
				      regclassids,
				      ATH_REGCLASSIDS_MAX,
				      &nregclass,
				      CTRY_DEFAULT,
				      false,
				      1)) {
		u32 rd = ah->ah_currentRD;

		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to collect channel list; "
			"regdomain likely %u country code %u\n",
			__func__, rd, CTRY_DEFAULT);
		return -EINVAL;
	}

	band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
	band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
	chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
	chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];

	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		if (IS_CHAN_2GHZ(c)) {
			chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
			chan_2ghz[a].center_freq = c->channel;
			chan_2ghz[a].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_2ghz->n_channels = ++a;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 2GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__,
				c->channel,
				c->channelFlags);
		} else if (IS_CHAN_5GHZ(c)) {
			chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
			chan_5ghz[b].center_freq = c->channel;
			chan_5ghz[b].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_5ghz->n_channels = ++b;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 5GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__,
				c->channel,
				c->channelFlags);
		}
	}

	return 0;
}

/*
 * Determine mode from channel flags
 *
 * This routine will provide the enumerated WIRELESS_MODE value based
 * on the settings of the channel flags.  If no valid set of flags
 * exists, the lowest mode (11b) is selected.
 */

static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
{
	if (chan->chanmode == CHANNEL_A)
		return ATH9K_MODE_11A;
	else if (chan->chanmode == CHANNEL_G)
		return ATH9K_MODE_11G;
	else if (chan->chanmode == CHANNEL_B)
		return ATH9K_MODE_11B;
	else if (chan->chanmode == CHANNEL_A_HT20)
		return ATH9K_MODE_11NA_HT20;
	else if (chan->chanmode == CHANNEL_G_HT20)
		return ATH9K_MODE_11NG_HT20;
	else if (chan->chanmode == CHANNEL_A_HT40PLUS)
		return ATH9K_MODE_11NA_HT40PLUS;
	else if (chan->chanmode == CHANNEL_A_HT40MINUS)
		return ATH9K_MODE_11NA_HT40MINUS;
	else if (chan->chanmode == CHANNEL_G_HT40PLUS)
		return ATH9K_MODE_11NG_HT40PLUS;
	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
		return ATH9K_MODE_11NG_HT40MINUS;

	WARN_ON(1); /* should not get here */

	return ATH9K_MODE_11B;
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */

static int ath_stop(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %ld\n",
		__func__, sc->sc_flags & SC_OP_INVALID);

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    turn off timers
	 *    disable interrupts
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    turn off the radio
	 *    reclaim beacon resources
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */

	ath_draintxq(sc, false);
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		ath_stoprecv(sc);
		ath9k_hw_phy_disable(ah);
	} else
		sc->sc_rxlink = NULL;

	return 0;
}

/*
 * Set the current channel
 *
 * Set/change channels.  If the channel is really being changed, it's done
 * by resetting the chip.  To accomplish this we must first clean up any
 * pending DMA, then restart things, as in ath_init.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	bool fastcc = true, stopped;

	if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
		return -EIO;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
		__func__,
		ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
				  sc->sc_ah->ah_curchan->channelFlags),
		sc->sc_ah->ah_curchan->channel,
		ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
		hchan->channel, hchan->channelFlags);

	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
	    (sc->sc_flags & SC_OP_FULL_RESET)) {
		int status;
		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
		ath_draintxq(sc, false);	/* clear pending tx frames */
		stopped = ath_stoprecv(sc);	/* turn off frame recv */

		/* XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel. */

		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
			fastcc = false;

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah, hchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask,
				    sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing,
				    fastcc, &status)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset channel %u (%uMhz) "
				"flags 0x%x hal status %u\n", __func__,
				ath9k_hw_mhz2ieee(ah, hchan->channel,
						  hchan->channelFlags),
				hchan->channel, hchan->channelFlags, status);
			spin_unlock_bh(&sc->sc_resetlock);
			return -EIO;
		}
		spin_unlock_bh(&sc->sc_resetlock);

		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
		sc->sc_flags &= ~SC_OP_FULL_RESET;

		/* Re-enable rx framework */
		if (ath_startrecv(sc) != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to restart recv logic\n", __func__);
			return -EIO;
		}
		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_setcurmode(sc, ath_chan2mode(hchan));

		ath_update_txpow(sc);	/* update tx power state */
		/*
		 * Re-enable interrupts.
		 */
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}
	return 0;
}

/**********************/
/* Chainmask Handling */
/**********************/

static void ath_chainmask_sel_timertimeout(unsigned long data)
{
	struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
	cm->switch_allowed = 1;
}

/* Start chainmask select timer */
static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	mod_timer(&cm->timer, ath_chainmask_sel_period);
}

/* Stop chainmask select timer */
static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	del_timer_sync(&cm->timer);
}

static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	memset(cm, 0, sizeof(struct ath_chainmask_sel));

	cm->cur_tx_mask = sc->sc_tx_chainmask;
	cm->cur_rx_mask = sc->sc_rx_chainmask;
	cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
	setup_timer(&cm->timer,
		    ath_chainmask_sel_timertimeout, (unsigned long) cm);
}

int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	/*
	 * Disable auto-switching in one of the following if conditions.
	 * sc_chainmask_auto_sel is used for internal global auto-switching
	 * enabled/disabled setting
	 */
	if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
		cm->cur_tx_mask = sc->sc_tx_chainmask;
		return cm->cur_tx_mask;
	}

	if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
		return cm->cur_tx_mask;

	if (cm->switch_allowed) {
		/* Switch down from tx 3 to tx 2. */
		if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
		    ATH_RSSI_OUT(cm->tx_avgrssi) >=
		    ath_chainmask_sel_down_rssi_thres) {
			cm->cur_tx_mask = sc->sc_tx_chainmask;

			/* Don't let another switch happen until
			 * this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
		/* Switch up from tx 2 to 3. */
		else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
			 ATH_RSSI_OUT(cm->tx_avgrssi) <=
			 ath_chainmask_sel_up_rssi_thres) {
			cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;

			/* Don't let another switch happen
			 * until this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
	}

	return cm->cur_tx_mask;
}
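
/*
 * Reading note (a sketch of the intended behaviour, not additional
 * driver logic): the two RSSI thresholds implement hysteresis.  With a
 * strong link (average tx RSSI at or above the "down" threshold) the
 * code drops from the 3x3 chainmask to the configured, smaller mask;
 * with a weak link (at or below the "up" threshold) it returns to 3x3
 * for the extra chain gain.  The timer started on each switch keeps
 * switch_allowed clear until ath_chainmask_sel_timertimeout() runs,
 * so the mask cannot flap on every frame.
 */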

/*
 * Update tx/rx chainmask.  For legacy association,
 * hard code chainmask to 1x1, for 11n association, use
 * the chainmask configuration.
 */

void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
	if (is_ht) {
		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
	} else {
		sc->sc_tx_chainmask = 1;
		sc->sc_rx_chainmask = 1;
	}

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
		__func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
}

/*******/
/* ANI */
/*******/

/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance.  This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself depending on the
 * appropriate interval that was calculated.
 */

static void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc;
	struct ath_hal *ah;
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval;

	sc = (struct ath_softc *)data;
	ah = sc->sc_ah;

	/*
	 * don't calibrate when we're scanning.
	 * we are most likely not on our home channel.
	 */
	if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
		return;

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
		longcal = true;
		DPRINTF(sc, ATH_DBG_ANI, "%s: longcal @%lu\n",
			__func__, jiffies);
		sc->sc_ani.sc_longcal_timer = timestamp;
	}

	/* Short calibration applies only while sc_caldone is false */
	if (!sc->sc_ani.sc_caldone) {
		if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
		    ATH_SHORT_CALINTERVAL) {
			shortcal = true;
			DPRINTF(sc, ATH_DBG_ANI, "%s: shortcal @%lu\n",
				__func__, jiffies);
			sc->sc_ani.sc_shortcal_timer = timestamp;
			sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	} else {
		if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
						&sc->sc_ani.sc_caldone);
			if (sc->sc_ani.sc_caldone)
				sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
	    ATH_ANI_POLLINTERVAL) {
		aniflag = true;
		sc->sc_ani.sc_checkani_timer = timestamp;
	}

	/* Skip all processing if there's nothing to do. */
	if (longcal || shortcal || aniflag) {
		/* Call ANI routine if necessary */
		if (aniflag)
			ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
					     ah->ah_curchan);

		/* Perform calibration if necessary */
		if (longcal || shortcal) {
			bool iscaldone = false;

			if (ath9k_hw_calibrate(ah, ah->ah_curchan,
					       sc->sc_rx_chainmask, longcal,
					       &iscaldone)) {
				if (longcal)
					sc->sc_ani.sc_noise_floor =
						ath9k_hw_getchan_noise(ah,
							       ah->ah_curchan);

				DPRINTF(sc, ATH_DBG_ANI,
					"%s: calibrate chan %u/%x nf: %d\n",
					__func__,
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags,
					sc->sc_ani.sc_noise_floor);
			} else {
				DPRINTF(sc, ATH_DBG_ANY,
					"%s: calibrate chan %u/%x failed\n",
					__func__,
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags);
			}
			sc->sc_ani.sc_caldone = iscaldone;
		}
	}

	/*
	 * Set timer interval based on previous results.
	 * The interval must be the shortest necessary to satisfy ANI,
	 * short calibration and long calibration.
	 */

	cal_interval = ATH_ANI_POLLINTERVAL;
	if (!sc->sc_ani.sc_caldone)
		cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);

	mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
}
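
/*
 * Re-arm example (a sketch; the interval macros are defined elsewhere
 * in the driver): while calibration is still incomplete, the timer
 * fires at min(ATH_ANI_POLLINTERVAL, ATH_SHORT_CALINTERVAL) so short
 * calibration keeps its cadence.  Once sc_caldone is set, the period
 * relaxes to ATH_ANI_POLLINTERVAL and each tick performs only ANI
 * monitoring plus the long/reset calibration interval checks.
 */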

/******************/
/* VAP management */
/******************/

int ath_vap_attach(struct ath_softc *sc,
		   int if_id,
		   struct ieee80211_vif *if_data,
		   enum ath9k_opmode opmode)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	switch (opmode) {
	case ATH9K_M_STA:
	case ATH9K_M_IBSS:
	case ATH9K_M_MONITOR:
		break;
	case ATH9K_M_HOSTAP:
		/* XXX not right, beacon buffer is allocated on RUN trans */
		if (list_empty(&sc->sc_bbuf))
			return -ENOMEM;
		break;
	default:
		return -EINVAL;
	}

	/* create ath_vap */
	avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
	if (avp == NULL)
		return -ENOMEM;

	memset(avp, 0, sizeof(struct ath_vap));
	avp->av_if_data = if_data;
	/* Set the VAP opmode */
	avp->av_opmode = opmode;
	avp->av_bslot = -1;

	if (opmode == ATH9K_M_HOSTAP)
		ath9k_hw_set_tsfadjust(sc->sc_ah, 1);

	sc->sc_vaps[if_id] = avp;
	sc->sc_nvaps++;
	/* Set the device opmode */
	sc->sc_ah->ah_opmode = opmode;

	/* default VAP configuration */
	avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
	avp->av_config.av_fixed_retryset = 0x03030303;

	return 0;
}

int ath_vap_detach(struct ath_softc *sc, int if_id)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp;

	avp = sc->sc_vaps[if_id];
	if (avp == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
			__func__, if_id);
		return -EINVAL;
	}

	/*
	 * Quiesce the hardware while we remove the vap.  In
	 * particular we need to reclaim all references to the
	 * vap state by any frames pending on the tx queues.
	 *
	 * XXX can we do this w/o affecting other vap's?
	 */
	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, false);	/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	ath_flushrecv(sc);		/* flush recv queue */

	kfree(avp);
	sc->sc_vaps[if_id] = NULL;
	sc->sc_nvaps--;

	return 0;
}

int ath_vap_config(struct ath_softc *sc,
		   int if_id, struct ath_vap_config *if_config)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	if (avp)
		memcpy(&avp->av_config, if_config, sizeof(avp->av_config));

	return 0;
}

/********/
/* Core */
/********/

int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
		__func__, sc->sc_ah->ah_opmode);

	/*
	 * Stop anything previously set up.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop(sc);

	/* Initialize chainmask selection */
	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */

	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, initial_chan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u "
			"(freq %u flags 0x%x)\n", __func__, status,
			initial_chan->channel, initial_chan->channelFlags);
		error = -EIO;
		spin_unlock_bh(&sc->sc_resetlock);
		goto done;
	}
	spin_unlock_bh(&sc->sc_resetlock);
	/*
	 * This is needed only to set up initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except set up the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);
		error = -EIO;
		goto done;
	}
	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (ath9k_hw_phycounters(ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	     (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set.  For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;
	/*
	 * Don't enable interrupts here as we've not yet built our
	 * vap and node data structures, which will be needed as soon
	 * as we start receiving.
	 */
	ath_setcurmode(sc, ath_chan2mode(initial_chan));

	/* XXX: we must make sure h/w is ready and clear invalid flag
	 * before turning on interrupt. */
	sc->sc_flags &= ~SC_OP_INVALID;
done:
	return error;
}

int ath_reset(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, retry_tx);	/* stop xmit */
	ath_stoprecv(sc);		/* stop recv */
	ath_flushrecv(sc);		/* flush recv queue */

	/* Reset chip */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u\n",
			__func__, status);
		error = -EIO;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));

	ath_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */

	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Restart the txq */
	if (retry_tx) {
		int i;
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->sc_txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->sc_txq[i]);
				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
			}
		}
	}

	return error;
}

int ath_suspend(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	/* No I/O if device has been surprise removed */
	if (sc->sc_flags & SC_OP_INVALID)
		return -EIO;

	/* Shut off the interrupt before setting the SC_OP_INVALID flag */
	ath9k_hw_set_interrupts(ah, 0);

	/* XXX: we must make sure h/w will not generate any interrupt
	 * before setting the invalid flag. */
	sc->sc_flags |= SC_OP_INVALID;

	/* disable HAL and put h/w to sleep */
	ath9k_hw_disable(sc->sc_ah);

	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	return 0;
}

/* Interrupt handler.  Most of the actual processing is deferred.
 * It's the caller's responsibility to ensure the chip is awake. */

irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	do {
		if (sc->sc_flags & SC_OP_INVALID) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt.  Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to ensure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */

		status &= sc->sc_imask;	/* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */
		if (!status)
			return IRQ_NONE;

		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work
				 *     at least on older hardware revs.
				 */
				sched = true;
			}

			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event.  We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	if (sched) {
		/* turn off every interrupt except SWBA */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}

/* Deferred interrupt processing */

static void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	u32 status = sc->sc_intrstatus;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ath_reset(sc, false);
		return;
	} else {

		if (status &
		    (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
			/* XXX: fill me in */
			/*
			if (status & ATH9K_INT_RXORN) {
			}
			if (status & ATH9K_INT_RXEOL) {
			}
			*/
			spin_lock_bh(&sc->sc_rxflushlock);
			ath_rx_tasklet(sc, 0);
			spin_unlock_bh(&sc->sc_rxflushlock);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_TX)
			ath_tx_tasklet(sc);
		/* XXX: fill me in */
		/*
		if (status & ATH9K_INT_BMISS) {
		}
		if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
			if (status & ATH9K_INT_TIM) {
			}
			if (status & ATH9K_INT_DTIMSYNC) {
			}
		}
		*/
	}

	/* re-enable hardware interrupt */
	ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
}

int ath_init(u16 devid, struct ath_softc *sc)
{
	struct ath_hal *ah = NULL;
	int status;
	int error = 0, i;
	int csz = 0;

	/* XXX: hardware will not be ready until ath_open() being called */
	sc->sc_flags |= SC_OP_INVALID;

	sc->sc_debug = DBG_DEFAULT;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);

	/* Initialize tasklet */
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	bus_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->sc_cachelsz = csz << 2;	/* convert to bytes */

	spin_lock_init(&sc->sc_resetlock);

	ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
	if (ah == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to attach hardware; HAL status %u\n",
			__func__, status);
		error = -ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;

	/* Initializes the noise floor to a reasonable default value.
	 * Later on this will be updated during ANI processing. */
	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;

	/* Get the hardware key cache size. */
	sc->sc_keymax = ah->ah_caps.keycache_size;
	if (sc->sc_keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"%s: Warning, using only %u entries in %u key cache\n",
			__func__, ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		set_bit(i, sc->sc_keymap);
		set_bit(i + 32, sc->sc_keymap);
		set_bit(i + 64, sc->sc_keymap);
		set_bit(i + 32 + 64, sc->sc_keymap);
	}
	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels.  The 802.11 layer
	 * is responsible for filtering this list based on settings
	 * like the phy mode.
	 */
	error = ath_setup_channels(sc);
	if (error)
		goto bad;

	/* default to MONITOR mode */
	sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;

	/* Setup rate tables */

	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, ATH9K_MODE_11A);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup a beacon xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup CAB xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
		sc->sc_haltype2q[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BK traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BE traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VI traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VO traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->sc_rc = ath_rate_attach(ah);
	if (sc->sc_rc == NULL) {
		error = -EIO;
		goto bad2;
	}

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		sc->sc_splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
	sc->sc_config.txpowlimit_override = 0;

	/* 11n Capabilities */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->sc_defant = ath9k_hw_getdefantenna(ah);

	ath9k_hw_getmac(ah, sc->sc_myaddr);
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
	}
	sc->sc_slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
		sc->sc_bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
	/* range is 40 - 255, we use something in the middle */
	ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
#endif

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
	if (ah)
		ath9k_hw_detach(ah);
	return error;
}

void ath_deinit(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int i;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);
	ath_stop(sc);
	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
	ath_rate_detach(sc->sc_rc);
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
	ath9k_hw_detach(ah);
}

/*******************/
/* Node Management */
/*******************/

struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
{
	struct ath_vap *avp;
	struct ath_node *an;

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	/* mac80211 sta_notify callback is from an IRQ context, so no sleep */
	an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
	if (an == NULL)
		return NULL;
	memset(an, 0, sizeof(*an));

	an->an_sc = sc;
	memcpy(an->an_addr, addr, ETH_ALEN);
	atomic_set(&an->an_refcnt, 1);

	/* set up per-node tx/rx state */
	ath_tx_node_init(sc, an);
	ath_rx_node_init(sc, an);

	ath_chainmask_sel_init(sc, an);
	ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
	list_add(&an->list, &sc->node_list);

	return an;
}

void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	unsigned long flags;

	ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
	an->an_flags |= ATH_NODE_CLEAN;
	ath_tx_node_cleanup(sc, an, bh_flag);
	ath_rx_node_cleanup(sc, an);

	ath_tx_node_free(sc, an);
	ath_rx_node_free(sc, an);

	spin_lock_irqsave(&sc->node_lock, flags);

	list_del(&an->list);

	spin_unlock_irqrestore(&sc->node_lock, flags);

	kfree(an);
}

/* Finds a node and increases the refcnt if found */

struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list))	/* FIXME */
		goto out;
	list_for_each_entry(an, &sc->node_list, list) {
		if (!compare_ether_addr(an->an_addr, addr)) {
			atomic_inc(&an->an_refcnt);
			an_found = an;
			break;
		}
	}
out:
	return an_found;
}

/* Decrements the refcnt and if it drops to zero, detach the node */

void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	if (atomic_dec_and_test(&an->an_refcnt))
		ath_node_detach(sc, an, bh_flag);
}
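
/*
 * Usage sketch (hypothetical caller, not part of this file): lookups
 * and releases are expected to pair up, with the initial reference
 * created by ath_node_attach():
 *
 *	struct ath_node *an = ath_node_get(sc, hdr->addr2);
 *	if (an) {
 *		... use the node ...
 *		ath_node_put(sc, an, true);   (bh_flag: bottom-half context)
 *	}
 *
 * When the final ath_node_put() drops an_refcnt to zero, the node is
 * detached from node_list and freed.
 */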

/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list))
		return NULL;

	list_for_each_entry(an, &sc->node_list, list)
		if (!compare_ether_addr(an->an_addr, addr)) {
			an_found = an;
			break;
		}

	return an_found;
}

/*
 * Set up New Node
 *
 * Setup driver-specific state for a newly associated node.  This routine
 * really only applies if compression or XR are enabled; there is no code
 * covering any other case.
 */

void ath_newassoc(struct ath_softc *sc,
		  struct ath_node *an, int isnew, int isuapsd)
{
	int tidno;

	/* if station reassociates, tear down the aggregation state. */
	if (!isnew) {
		for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_tx_aggr_teardown(sc, an, tidno);
			if (sc->sc_flags & SC_OP_RXAGGR)
				ath_rx_aggr_teardown(sc, an, tidno);
		}
	}
	an->an_flags = 0;
}

/**************/
/* Encryption */
/**************/

void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
{
	ath9k_hw_keyreset(sc->sc_ah, keyix);
	if (freeslot)
		clear_bit(keyix, sc->sc_keymap);
}

int ath_keyset(struct ath_softc *sc,
	       u16 keyix,
	       struct ath9k_keyval *hk,
	       const u8 mac[ETH_ALEN])
{
	bool status;

	status = ath9k_hw_set_keycache_entry(sc->sc_ah,
					     keyix, hk, mac, false);

	return status != false;
}

/***********************/
/* TX Power/Regulatory */
/***********************/

/*
 * Set Transmit power in HAL
 *
 * This routine makes the actual HAL calls to set the new transmit power
 * limit.
 */

void ath_update_txpow(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 txpow;

	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
		ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
		/* read back in case value is clamped */
		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
		sc->sc_curtxpow = txpow;
	}
}

/* Return the current country and domain information */
void ath_get_currentCountry(struct ath_softc *sc,
			    struct ath9k_country_entry *ctry)
{
	ath9k_regd_get_current_country(sc->sc_ah, ctry);

	/* If the HAL is not specific yet (it is band dependent),
	 * use the country we passed in. */
	if (ctry->countryCode == CTRY_DEFAULT) {
		ctry->iso[0] = 0;
		ctry->iso[1] = 0;
	} else if (ctry->iso[0] && ctry->iso[1]) {
		if (!ctry->iso[2]) {
			if (ath_outdoor)
				ctry->iso[2] = 'O';
			else
				ctry->iso[2] = 'I';
		}
	}
}

/**************************/
/* Slow Antenna Diversity */
/**************************/

void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
			   struct ath_softc *sc,
			   int32_t rssitrig)
{
	int trig;

	/* antdivf_rssitrig can range from 40 - 0xff */
	trig = (rssitrig > 0xff) ? 0xff : rssitrig;
	trig = (trig < 40) ? 40 : trig;	/* clamp the already-capped value */

	antdiv->antdiv_sc = sc;
	antdiv->antdivf_rssitrig = trig;
}

void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
			    u8 num_antcfg,
			    const u8 *bssid)
{
	antdiv->antdiv_num_antcfg =
		num_antcfg < ATH_ANT_DIV_MAX_CFG ?
		num_antcfg : ATH_ANT_DIV_MAX_CFG;
	antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
	antdiv->antdiv_curcfg = 0;
	antdiv->antdiv_bestcfg = 0;
	antdiv->antdiv_laststatetsf = 0;

	memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));

	antdiv->antdiv_start = 1;
}

void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
	antdiv->antdiv_start = 0;
}

static int32_t ath_find_max_val(int32_t *val,
				u8 num_val, u8 *max_index)
{
	u32 MaxVal = *val++;
	u32 cur_index = 0;

	*max_index = 0;
	while (++cur_index < num_val) {
		if (*val > MaxVal) {
			MaxVal = *val;
			*max_index = cur_index;
		}

		val++;
	}

	return MaxVal;
}

void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {

			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		if (curcfg == antdiv->antdiv_bestcfg) {
			ath_find_max_val(antdiv->antdiv_lastbrssi,
					 antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}

		break;
	}
}

/***********************/
/* Descriptor Handling */
/***********************/

/*
 * Set up DMA descriptors
 *
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains.  These are used to contain the descriptors used
 * by the system.
 */

int ath_descdma_setup(struct ath_softc *sc,
		      struct ath_descdma *dd,
		      struct list_head *head,
		      const char *name,
		      int nbuf,
		      int ndesc)
{
#define DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)

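	/*
	 * Worked example for the boundary check (a sketch; the 32-dword
	 * fetch size comes from the comment further below): a descriptor
	 * fetch may read up to 32 dwords (128 bytes, 0x80).  If a
	 * descriptor's physical address has (daddr & 0xFFF) > 0xF7F,
	 * e.g. an offset of 0xF80 within the page, then daddr + 0x80
	 * reaches the next 4KB page, so that address must be skipped on
	 * parts without ATH9K_HW_CAP_4KB_SPLITTRANS.
	 */
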
	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
		__func__, name, nbuf, ndesc);

	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
			__func__);
		ASSERT((sizeof(struct ath_desc) % 4) == 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_name = name;
	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = pci_alloc_consistent(sc->pdev,
					   dd->dd_desc_len,
					   &dd->dd_desc_paddr);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
		__func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	memset(bf, 0, bsize);
	dd->dd_bufptr = bf;

	INIT_LIST_HEAD(head);
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->ah_caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				ASSERT((caddr_t) bf->bf_desc <
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

/*
 * Cleanup DMA descriptors
 *
 * This function will free the DMA block that was allocated for the descriptor
 * pool.  Since this was allocated as one "chunk", it is freed in the same
 * manner.
 */

void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	/* Free memory associated with descriptors */
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/*************/
/* Utilities */
/*************/

int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case 0:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
		break;
	case 1:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
		break;
	case 2:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	case 3:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
		break;
	default:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	}

	return qnum;
}

int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case ATH9K_WME_AC_VO:
		qnum = 0;
		break;
	case ATH9K_WME_AC_VI:
		qnum = 1;
		break;
	case ATH9K_WME_AC_BE:
		qnum = 2;
		break;
	case ATH9K_WME_AC_BK:
		qnum = 3;
		break;
	default:
		qnum = -1;
		break;
	}

	return qnum;
}

/*
 * Expand time stamp to TSF
 *
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */

u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return (tsf & ~0x7fff) | rstamp;
}
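
/*
 * Worked example (numbers are made up for illustration): suppose the
 * current hardware TSF is 0x10008002 and the rx descriptor reports
 * rstamp = 0x7ff0.  The low 15 bits of the TSF (0x0002) are smaller
 * than rstamp, so the 15-bit counter has wrapped since the frame was
 * stamped; subtracting 0x8000 steps back one 15-bit epoch to
 * 0x10000002, and (tsf & ~0x7fff) | rstamp yields 0x10007ff0, the
 * full 64-bit receive time.
 */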

/*
 * Set Default Antenna
 *
 * Call into the HAL to set the default antenna to use.  Not really valid for
 * MIMO technology.
 */

void ath_setdefantenna(void *context, u32 antenna)
{
	struct ath_softc *sc = (struct ath_softc *)context;
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(ah, antenna);
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}

/*
 * Set Slot Time
 *
 * This will wake up the chip if required, and set the slot time for the
 * frame (maximum transmit time).  Slot time is assumed to be already set
 * in the ATH object member sc_slottime.
 */

void ath_setslottime(struct ath_softc *sc)
{
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	sc->sc_updateslot = OK;
}