/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h" /* FIXME: remove */
#include "iwl-debug.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-power.h"
#include "iwl-sta.h"
#include "iwl-helpers.h"


MODULE_DESCRIPTION("iwl core");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

/*
 * When bt_coex_active is true, uCode will do kill/defer every time
 * the priority line is asserted (BT is sending signals on the
 * priority line in the PCIx).
 * When bt_coex_active is false, uCode will ignore the BT activity and
 * perform the normal operation.
 *
 * Users might experience transmit issues on some platforms due to
 * WiFi/BT co-existence problems. The possible behaviors are:
 *   able to scan and find all the available APs,
 *   not able to associate with any AP.
 * On those platforms, WiFi communication can be restored by setting
 * the "bt_coex_active" module parameter to "false".
 *
 * default: bt_coex_active = true (BT_COEX_ENABLE)
 */
bool bt_coex_active = true;
EXPORT_SYMBOL_GPL(bt_coex_active);
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");

u32 iwl_debug_level;
EXPORT_SYMBOL(iwl_debug_level);

const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(iwl_bcast_addr);


/* This function both allocates and initializes hw and priv. */
struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
{
	struct iwl_priv *priv;
	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */
	struct ieee80211_hw *hw;

	hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
				cfg->ops->ieee80211_ops);
	if (hw == NULL) {
		pr_err("%s: Can not allocate network device\n",
		       cfg->name);
		goto out;
	}

	priv = hw->priv;
	priv->hw = hw;

out:
	return hw;
}
EXPORT_SYMBOL(iwl_alloc_all);

#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
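/*
 * iwlcore_init_ht_hw_capab - fill the HT capabilities advertised to mac80211
 *
 * Populates the HT cap flags (SGI, 20/40 MHz width, greenfield, A-MSDU),
 * the MCS rx_mask and highest Rx rate, the A-MPDU factor/density and the
 * Tx MCS parameters for the given band, based on the device's rx/tx chain
 * counts and configuration.
 */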
static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
				     struct ieee80211_sta_ht_cap *ht_info,
				     enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = priv->hw_params.rx_chains_num;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	if (priv->cfg->ht_params &&
	    priv->cfg->ht_params->ht_greenfield_support)
		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (priv->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor)
		ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
	if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density)
		ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density;

	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}

/**
 * iwlcore_init_geos - Initialize mac80211's geo/channel info based on EEPROM
 */
int iwlcore_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;

	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	channels = kzalloc(sizeof(struct ieee80211_channel) *
			   priv->channel_count, GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (priv->cfg->sku & IWL_SKU_N)
		iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (priv->cfg->sku & IWL_SKU_N)
		iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		/* FIXME: might be removed if scan is OK */
		if (!is_channel_valid(ch))
			continue;

		if (is_channel_a_band(ch))
			sband = &priv->bands[IEEE80211_BAND_5GHZ];
		else
			sband = &priv->bands[IEEE80211_BAND_2GHZ];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel,
						       sband->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			if (ch->max_power_avg > priv->tx_power_device_lmt)
				priv->tx_power_device_lmt = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
				ch->channel, geo_ch->center_freq,
				is_channel_a_band(ch) ?  "5.2" : "2.4",
				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
				"restricted" : "valid",
				geo_ch->flags);
	}

	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     priv->cfg->sku & IWL_SKU_A) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			priv->pci_dev->device,
			priv->pci_dev->subsystem_device);
		priv->cfg->sku &= ~IWL_SKU_A;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		 priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwlcore_init_geos);

/*
 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
 */
void iwlcore_free_geos(struct iwl_priv *priv)
{
	kfree(priv->ieee_channels);
	kfree(priv->ieee_rates);
	clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
}
EXPORT_SYMBOL(iwlcore_free_geos);

static bool iwl_is_channel_extension(struct iwl_priv *priv,
				     enum ieee80211_band band,
				     u16 channel, u8 extension_chan_offset)
{
	const struct iwl_channel_info *ch_info;

	ch_info = iwl_get_channel_info(priv, band, channel);
	if (!is_channel_valid(ch_info))
		return false;

	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40PLUS);
	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40MINUS);

	return false;
}

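/*
 * iwl_is_ht40_tx_allowed - check whether HT40 transmission may be used
 *
 * Returns true only if HT and 40 MHz operation are enabled in the context,
 * the peer's HT capabilities (if given) allow it, and the current channel
 * has a usable extension channel in the configured direction.
 */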
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
		return false;

	/*
	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
	 * the bit will not be set in the pure 40 MHz case.
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (priv->disable_ht40)
		return false;
#endif

	return iwl_is_channel_extension(priv, priv->band,
			le16_to_cpu(ctx->staging.channel),
			ctx->ht.extension_chan_offset);
}
EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);

static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	u16 new_val;
	u16 beacon_factor;

	/*
	 * If mac80211 hasn't given us a beacon interval, program
	 * the default into the device (not checking this here
	 * would cause the adjustment below to return the maximum
	 * value, which may break PAN.)
	 */
	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

	/*
	 * If the beacon interval we obtained from the peer
	 * is too large, we'll have to wake up more often
	 * (and in IBSS case, we'll beacon too much)
	 *
	 * For example, if max_beacon_val is 4096, and the
	 * requested beacon interval is 7000, we'll have to
	 * use 3500 to be able to wake up on the beacons.
	 *
	 * This could badly influence beacon detection stats.
	 */

	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}

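/*
 * iwl_send_rxon_timing - program beacon timing into the device
 *
 * Fills the context's RXON timing command (timestamp, listen interval,
 * beacon interval, beacon_init_val and DTIM period), keeping the BSS and
 * PAN contexts' beacon intervals consistent, and sends the command to the
 * device.
 */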
int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = ieee80211_get_hw_conf(priv->hw);

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 *	 for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
		    !ctx->vif->bss_conf.beacon_int)) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else {
		beacon_int = iwl_adjust_beacon_interval(beacon_int,
			priv->hw_params.max_beacon_itrvl * TIME_UNIT);
		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
	}

	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				sizeof(ctx->timing), &ctx->timing);
}
EXPORT_SYMBOL(iwl_send_rxon_timing);

void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			   int hw_decrypt)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (hw_decrypt)
		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
	else
		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;

}
EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);

/* validate RXON structure is valid */
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;
	bool error = false;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
			error = true;
		}
		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong radar\n");
			error = true;
		}
	} else {
		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
			IWL_WARN(priv, "check 5.2G: not short slot!\n");
			error = true;
		}
		if (rxon->flags & RXON_FLG_CCK_MSK) {
			IWL_WARN(priv, "check 5.2G: CCK!\n");
			error = true;
		}
	}
	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
		IWL_WARN(priv, "mac/bssid mcast!\n");
		error = true;
	}

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
		error = true;
	}

	if (le16_to_cpu(rxon->assoc_id) > 2007) {
		IWL_WARN(priv, "aid > 2007\n");
		error = true;
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
		IWL_WARN(priv, "CCK and short slot\n");
		error = true;
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
		IWL_WARN(priv, "CCK and auto detect");
		error = true;
	}

	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			    RXON_FLG_TGG_PROTECT_MSK)) ==
			    RXON_FLG_TGG_PROTECT_MSK) {
		IWL_WARN(priv, "TGg but no auto-detect\n");
		error = true;
	}

	if (error)
		IWL_WARN(priv, "Tuning to channel %d\n",
			 le16_to_cpu(rxon->channel));

	if (error) {
		IWL_ERR(priv, "Invalid RXON\n");
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_check_rxon_cmd);

/**
 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
int iwl_full_rxon_required(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx)
{
	const struct iwl_rxon_cmd *staging = &ctx->staging;
	const struct iwl_rxon_cmd *active = &ctx->active;

#define CHK(cond)							\
	if ((cond)) {							\
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
		return 1;						\
	}

#define CHK_NEQ(c1, c2)						\
	if ((c1) != (c2)) {					\
		IWL_DEBUG_INFO(priv, "need full RXON - "	\
			       #c1 " != " #c2 " - %d != %d\n",	\
			       (c1), (c2));			\
		return 1;					\
	}

	/* These items are only settable from the full RXON command */
	CHK(!iwl_is_associated_ctx(ctx));
	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
	CHK(compare_ether_addr(staging->wlap_bssid_addr,
			       active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
		active->ofdm_ht_triple_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
EXPORT_SYMBOL(iwl_full_rxon_required);

u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx)
{
	/*
	 * Assign the lowest rate -- should really get this from
	 * the beacon skb from mac80211.
	 */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
		return IWL_RATE_1M_PLCP;
	else
		return IWL_RATE_6M_PLCP;
}
EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);

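/*
 * _iwl_set_rxon_ht - translate the HT configuration into staging RXON flags
 *
 * Sets the HT protection mode and the 20/40 MHz channel-mode and control-
 * channel-location bits for one context; iwl_set_rxon_ht() below applies
 * this to every active context.
 */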
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
			     struct iwl_ht_config *ht_conf,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	/* FIXME: if the definition of ht.protection changes, a "translation"
	 * will be needed for rxon->flags
	 */
	rxon->flags |= cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before setting the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IWL_ERR(priv, "invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}

void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
	struct iwl_rxon_context *ctx;

	for_each_context(priv, ctx)
		_iwl_set_rxon_ht(priv, ht_conf, ctx);
}
EXPORT_SYMBOL(iwl_set_rxon_ht);

/* Return valid, unused, channel for a passive scan to reset the RF */
u8 iwl_get_single_channel_number(struct iwl_priv *priv,
				 enum ieee80211_band band)
{
	const struct iwl_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;
	struct iwl_rxon_context *ctx;

	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = priv->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		bool busy = false;

		for_each_context(priv, ctx) {
			busy = priv->channel_info[i].channel ==
				le16_to_cpu(ctx->staging.channel);
			if (busy)
				break;
		}

		if (busy)
			continue;

		channel = priv->channel_info[i].channel;
		ch_info = iwl_get_channel_info(priv, band, channel);
		if (is_channel_valid(ch_info))
			break;
	}

	return channel;
}
EXPORT_SYMBOL(iwl_get_single_channel_number);

/**
 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
 * @ch: requested channel as a pointer to struct ieee80211_channel
 *
 * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the ch->band
 */
int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
			 struct iwl_rxon_context *ctx)
{
	enum ieee80211_band band = ch->band;
	u16 channel = ch->hw_value;

	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
	    (priv->band == band))
		return 0;

	ctx->staging.channel = cpu_to_le16(channel);
	if (band == IEEE80211_BAND_5GHZ)
		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);

	return 0;
}
EXPORT_SYMBOL(iwl_set_rxon_channel);

void iwl_set_flags_for_band(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    enum ieee80211_band band,
			    struct ieee80211_vif *vif)
{
	if (band == IEEE80211_BAND_5GHZ) {
		ctx->staging.flags &=
		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
		      | RXON_FLG_CCK_MSK);
		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
	} else {
		/* Copied from iwl_post_associate() */
		if (vif && vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
	}
}
EXPORT_SYMBOL(iwl_set_flags_for_band);

/*
 * initialize rxon structure with default values from eeprom
 */
void iwl_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	const struct iwl_channel_info *ch_info;

	memset(&ctx->staging, 0, sizeof(ctx->staging));

	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else switch (ctx->vif->type) {
	case NL80211_IFTYPE_AP:
		ctx->staging.dev_type = ctx->ap_devtype;
		break;

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						  RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	/* TODO:  Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	ch_info = iwl_get_channel_info(priv, priv->band,
				       le16_to_cpu(ctx->active.channel));

	if (!ch_info)
		ch_info = &priv->channel_info[0];

	ctx->staging.channel = cpu_to_le16(ch_info->channel);
	priv->band = ch_info->band;

	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	ctx->staging.ofdm_basic_rates =
	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	ctx->staging.cck_basic_rates =
	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
					RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(iwl_connection_init_rx_config);

void iwl_set_rate(struct iwl_priv *priv)
{
	const struct ieee80211_supported_band *hw = NULL;
	struct ieee80211_rate *rate;
	struct iwl_rxon_context *ctx;
	int i;

	hw = iwl_get_hw_mode(priv, priv->band);
	if (!hw) {
		IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
		return;
	}

	priv->active_rate = 0;

	for (i = 0; i < hw->n_bitrates; i++) {
		rate = &(hw->bitrates[i]);
		if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
			priv->active_rate |= (1 << rate->hw_value);
	}

	IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);

	for_each_context(priv, ctx) {
		ctx->staging.cck_basic_rates =
		    (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

		ctx->staging.ofdm_basic_rates =
		   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	}
}
EXPORT_SYMBOL(iwl_set_rate);

void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
{
	/*
	 * MULTI-FIXME
	 * See iwl_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->switch_rxon.switch_in_progress) {
		ieee80211_chswitch_done(ctx->vif, is_success);
		mutex_lock(&priv->mutex);
		priv->switch_rxon.switch_in_progress = false;
		mutex_unlock(&priv->mutex);
	}
}
EXPORT_SYMBOL(iwl_chswitch_done);

void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
	/*
	 * MULTI-FIXME
	 * See iwl_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;

	if (priv->switch_rxon.switch_in_progress) {
		if (!le32_to_cpu(csa->status) &&
		    (csa->channel == priv->switch_rxon.channel)) {
			rxon->channel = csa->channel;
			ctx->staging.channel = csa->channel;
			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
				      le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, true);
		} else {
			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
				le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, false);
		}
	}
}
EXPORT_SYMBOL(iwl_rx_csa);

#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(iwl_print_rx_config_cmd);
#endif
/**
 * iwl_irq_handle_error - called for HW or SW error interrupt from card
 */
void iwl_irq_handle_error(struct iwl_priv *priv)
{
	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);

	/* W/A for WiFi/WiMAX coex when WiMAX owns the RF */
	if (priv->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(priv, APMG_CLK_CTRL_REG) &
			APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(priv, APMG_PS_CTRL_REG) &
			APMG_PS_CTRL_VAL_RESET_REQ))) {
		wake_up_interruptible(&priv->wait_command_queue);
		/*
		 * Keep the restart process from trying to send host
		 * commands by clearing the INIT status bit
		 */
		clear_bit(STATUS_READY, &priv->status);
		IWL_ERR(priv, "RF is used by WiMAX\n");
		return;
	}

	IWL_ERR(priv, "Loaded firmware version: %s\n",
		priv->hw->wiphy->fw_version);

	priv->cfg->ops->lib->dump_nic_error_log(priv);
	if (priv->cfg->ops->lib->dump_csr)
		priv->cfg->ops->lib->dump_csr(priv);
	if (priv->cfg->ops->lib->dump_fh)
		priv->cfg->ops->lib->dump_fh(priv, NULL, false);
	priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
		iwl_print_rx_config_cmd(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);
#endif

	wake_up_interruptible(&priv->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(STATUS_READY, &priv->status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
			  "Restarting adapter due to uCode error.\n");

		if (priv->cfg->mod_params->restart_fw)
			queue_work(priv->workqueue, &priv->restart);
	}
}
EXPORT_SYMBOL(iwl_irq_handle_error);

static int iwl_apm_stop_master(struct iwl_priv *priv)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(priv, "stop master\n");

	return ret;
}

void iwl_apm_stop(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	iwl_apm_stop_master(priv);

	/* Reset the entire device */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(iwl_apm_stop);


/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
int iwl_apm_init(struct iwl_priv *priv)
{
	int ret = 0;
	u16 lctl;

	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE:  This is no-op for 3945 (non-existent bit)
	 */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	if (priv->cfg->base_params->set_l0s) {
		lctl = iwl_pcie_link_ctl(priv);
		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
			/* L1-ASPM enabled; disable(!) L0S */
			iwl_set_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			iwl_clear_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (priv->cfg->base_params->pll_cfg_val)
		iwl_set_bit(priv, CSR_ANA_PLL_CFG,
			    priv->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Bootstrap State Machine) is only in 3945 and 4965;
	 * later devices (i.e. 5000 and later) have non-volatile SRAM,
	 * and don't need BSM to restore data after power-saving sleep.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (priv->cfg->base_params->use_bsm)
		iwl_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		iwl_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(iwl_apm_init);

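/*
 * iwl_set_tx_power - set the user tx power limit
 *
 * Validates the requested power against the device limits, defers the
 * change while a scan is in progress (via tx_power_next), and otherwise
 * sends the new limit through the lib->send_tx_power operation, restoring
 * the previous value if that fails.
 */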
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!priv->cfg->ops->lib->send_tx_power)
		return -EOPNOTSUPP;

	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below lower limit %d.\n",
			 tx_power,
			 IWLAGN_TX_POWER_TARGET_POWER_MIN);
		return -EINVAL;
	}

	if (tx_power > priv->tx_power_device_lmt) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	/* scan complete uses tx_power_next, so it needs to be updated */
	priv->tx_power_next = tx_power;
	if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = priv->cfg->ops->lib->send_tx_power(priv);

	/* if we fail to set tx_power, restore the original tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_set_tx_power);

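/*
 * iwl_send_bt_config - send the Bluetooth coexistence configuration
 *
 * Builds a REPLY_BT_CONFIG command with the default lead time and max kill
 * values, enabling or disabling BT coexistence according to the
 * bt_coex_active module parameter.
 */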
void iwl_send_bt_config(struct iwl_priv *priv)
{
	struct iwl_bt_cmd bt_cmd = {
		.lead_time = BT_LEAD_TIME_DEF,
		.max_kill = BT_MAX_KILL_DEF,
		.kill_ack_mask = 0,
		.kill_cts_mask = 0,
	};

	if (!bt_coex_active)
		bt_cmd.flags = BT_COEX_DISABLE;
	else
		bt_cmd.flags = BT_COEX_ENABLE;

	priv->bt_enable_flag = bt_cmd.flags;
	IWL_DEBUG_INFO(priv, "BT coex %s\n",
		(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");

	if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			     sizeof(struct iwl_bt_cmd), &bt_cmd))
		IWL_ERR(priv, "failed to send BT Coex Config\n");
}
EXPORT_SYMBOL(iwl_send_bt_config);

int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
{
	struct iwl_statistics_cmd statistics_cmd = {
		.configuration_flags =
			clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
	};

	if (flags & CMD_ASYNC)
		return iwl_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
					      sizeof(struct iwl_statistics_cmd),
					      &statistics_cmd, NULL);
	else
		return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
					sizeof(struct iwl_statistics_cmd),
					&statistics_cmd);
}
EXPORT_SYMBOL(iwl_send_statistics_request);

void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
			   struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);

void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
			"notification for %s:\n", len,
			get_cmd_string(pkt->hdr.cmd));
	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
}
EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);

void iwl_rx_reply_error(struct iwl_priv *priv,
			struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(iwl_rx_reply_error);

void iwl_clear_isr_stats(struct iwl_priv *priv)
{
	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}

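/*
 * iwl_mac_conf_tx - mac80211 conf_tx callback
 *
 * Stores the EDCA parameters (cw_min, cw_max, AIFSN, TXOP) for the given
 * queue in every context's default QoS parameters, under priv->lock.
 */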
int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
		    const struct ieee80211_tx_queue_params *params)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx;
	unsigned long flags;
	int q;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * MULTI-FIXME
	 * This may need to be done per interface in nl80211/cfg80211/mac80211.
	 */
	for_each_context(priv, ctx) {
		ctx->qos_data.def_qos_parm.ac[q].cw_min =
			cpu_to_le16(params->cw_min);
		ctx->qos_data.def_qos_parm.ac[q].cw_max =
			cpu_to_le16(params->cw_max);
		ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
		ctx->qos_data.def_qos_parm.ac[q].edca_txop =
			cpu_to_le16((params->txop * 32));

		ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
EXPORT_SYMBOL(iwl_mac_conf_tx);

int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	return priv->ibss_manager == IWL_IBSS_MANAGER;
}
EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);

static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	iwl_connection_init_rx_config(priv, ctx);

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	return iwlcore_commit_rxon(priv, ctx);
}

static int iwl_setup_interface(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct ieee80211_vif *vif = ctx->vif;
	int err;

	lockdep_assert_held(&priv->mutex);

	/*
	 * This variable will be correct only when there's just
	 * a single context, but all code using it is for hardware
	 * that supports only one context.
	 */
	priv->iw_mode = vif->type;

	ctx->is_active = true;

	err = iwl_set_mode(priv, ctx);
	if (err) {
		if (!ctx->always_active)
			ctx->is_active = false;
		return err;
	}

	if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
	    vif->type == NL80211_IFTYPE_ADHOC) {
		/*
		 * pretend to have high BT traffic as long as we
		 * are operating in IBSS mode, as this will cause
		 * the rate scaling etc. to behave as intended.
		 */
		priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
	}

	return 0;
}

int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *tmp, *ctx = NULL;
	int err;
	enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);

	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
			   viftype, vif->addr);

	mutex_lock(&priv->mutex);

	if (!iwl_is_ready_rf(priv)) {
		IWL_WARN(priv, "Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	for_each_context(priv, tmp) {
		u32 possible_modes =
			tmp->interface_modes | tmp->exclusive_interface_modes;

		if (tmp->vif) {
			/* check if this busy context is exclusive */
			if (tmp->exclusive_interface_modes &
						BIT(tmp->vif->type)) {
				err = -EINVAL;
				goto out;
			}
			continue;
		}

		if (!(possible_modes & BIT(viftype)))
			continue;

		/* have maybe usable context w/o interface */
		ctx = tmp;
		break;
	}

	if (!ctx) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = ctx;
	ctx->vif = vif;

	err = iwl_setup_interface(priv, ctx);
	if (!err)
		goto out;

	ctx->vif = NULL;
	priv->iw_mode = NL80211_IFTYPE_STATION;
 out:
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return err;
}
EXPORT_SYMBOL(iwl_mac_add_interface);

static void iwl_teardown_interface(struct iwl_priv *priv,
				   struct ieee80211_vif *vif,
				   bool mode_change)
{
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&priv->mutex);

	if (priv->scan_vif == vif) {
		iwl_scan_cancel_timeout(priv, 200);
		iwl_force_scan_end(priv);
	}

	if (!mode_change) {
		iwl_set_mode(priv, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}

	/*
	 * When removing the IBSS interface, overwrite the
	 * BT traffic load with the stored one from the last
	 * notification, if any. If this is a device that
	 * doesn't implement this, this has no effect since
	 * both values are the same and zero.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		priv->bt_traffic_load = priv->last_bt_traffic_load;
}

void iwl_mac_remove_interface(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	WARN_ON(ctx->vif != vif);
	ctx->vif = NULL;

	iwl_teardown_interface(priv, vif, false);

	memset(priv->bssid, 0, ETH_ALEN);
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");

}
EXPORT_SYMBOL(iwl_mac_remove_interface);

int iwl_alloc_txq_mem(struct iwl_priv *priv)
{
	if (!priv->txq)
		priv->txq = kzalloc(
			sizeof(struct iwl_tx_queue) *
				priv->cfg->base_params->num_of_queues,
			GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(priv, "Not enough memory for txq\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_alloc_txq_mem);

void iwl_free_txq_mem(struct iwl_priv *priv)
{
	kfree(priv->txq);
	priv->txq = NULL;
}
EXPORT_SYMBOL(iwl_free_txq_mem);

#ifdef CONFIG_IWLWIFI_DEBUGFS

#define IWL_TRAFFIC_DUMP_SIZE	(IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)

void iwl_reset_traffic_log(struct iwl_priv *priv)
{
	priv->tx_traffic_idx = 0;
	priv->rx_traffic_idx = 0;
	if (priv->tx_traffic)
		memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
	if (priv->rx_traffic)
		memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
}

int iwl_alloc_traffic_mem(struct iwl_priv *priv)
{
	u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;

	if (iwl_debug_level & IWL_DL_TX) {
		if (!priv->tx_traffic) {
			priv->tx_traffic =
				kzalloc(traffic_size, GFP_KERNEL);
			if (!priv->tx_traffic)
				return -ENOMEM;
		}
	}
	if (iwl_debug_level & IWL_DL_RX) {
		if (!priv->rx_traffic) {
			priv->rx_traffic =
				kzalloc(traffic_size, GFP_KERNEL);
			if (!priv->rx_traffic)
				return -ENOMEM;
		}
	}
	iwl_reset_traffic_log(priv);
	return 0;
}
EXPORT_SYMBOL(iwl_alloc_traffic_mem);

void iwl_free_traffic_mem(struct iwl_priv *priv)
{
	kfree(priv->tx_traffic);
	priv->tx_traffic = NULL;

	kfree(priv->rx_traffic);
	priv->rx_traffic = NULL;
}
EXPORT_SYMBOL(iwl_free_traffic_mem);

void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
			       u16 length, struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(iwl_debug_level & IWL_DL_TX)))
		return;

	if (!priv->tx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((priv->tx_traffic +
		       (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
		       header, len);
		priv->tx_traffic_idx =
			(priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);

void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
			       u16 length, struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(iwl_debug_level & IWL_DL_RX)))
		return;

	if (!priv->rx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((priv->rx_traffic +
		       (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
		       header, len);
		priv->rx_traffic_idx =
			(priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
22fdf3c9
WYG
1637
1638const char *get_mgmt_string(int cmd)
1639{
1640 switch (cmd) {
1641 IWL_CMD(MANAGEMENT_ASSOC_REQ);
1642 IWL_CMD(MANAGEMENT_ASSOC_RESP);
1643 IWL_CMD(MANAGEMENT_REASSOC_REQ);
1644 IWL_CMD(MANAGEMENT_REASSOC_RESP);
1645 IWL_CMD(MANAGEMENT_PROBE_REQ);
1646 IWL_CMD(MANAGEMENT_PROBE_RESP);
1647 IWL_CMD(MANAGEMENT_BEACON);
1648 IWL_CMD(MANAGEMENT_ATIM);
1649 IWL_CMD(MANAGEMENT_DISASSOC);
1650 IWL_CMD(MANAGEMENT_AUTH);
1651 IWL_CMD(MANAGEMENT_DEAUTH);
1652 IWL_CMD(MANAGEMENT_ACTION);
1653 default:
1654 return "UNKNOWN";
1655
1656 }
1657}
1658
1659const char *get_ctrl_string(int cmd)
1660{
1661 switch (cmd) {
1662 IWL_CMD(CONTROL_BACK_REQ);
1663 IWL_CMD(CONTROL_BACK);
1664 IWL_CMD(CONTROL_PSPOLL);
1665 IWL_CMD(CONTROL_RTS);
1666 IWL_CMD(CONTROL_CTS);
1667 IWL_CMD(CONTROL_ACK);
1668 IWL_CMD(CONTROL_CFEND);
1669 IWL_CMD(CONTROL_CFENDACK);
1670 default:
1671 return "UNKNOWN";
1672
1673 }
1674}
1675
7163b8a4 1676void iwl_clear_traffic_stats(struct iwl_priv *priv)
1677{
1678 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1679 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1680}
1681
1682 /*
1683 * If CONFIG_IWLWIFI_DEBUGFS is defined, iwl_update_stats() records
1684 * all MGMT, CTRL and DATA packets on both the TX and RX paths, and
1685 * the counters can be displayed through the tx/rx statistics debugfs files.
1686 * If CONFIG_IWLWIFI_DEBUGFS is not defined, no MGMT or CTRL
1687 * information is recorded, but DATA packets are still counted
1688 * because iwl_led.c needs to control the LED blinking based on the
1689 * number of tx and rx data frames.
1690 *
1691 */
1692void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1693{
1694 struct traffic_stats *stats;
1695
1696 if (is_tx)
1697 stats = &priv->tx_stats;
1698 else
1699 stats = &priv->rx_stats;
1700
1701 if (ieee80211_is_mgmt(fc)) {
1702 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1703 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
1704 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
1705 break;
1706 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
1707 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
1708 break;
1709 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
1710 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
1711 break;
1712 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
1713 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
1714 break;
1715 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
1716 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
1717 break;
1718 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
1719 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
1720 break;
1721 case cpu_to_le16(IEEE80211_STYPE_BEACON):
1722 stats->mgmt[MANAGEMENT_BEACON]++;
1723 break;
1724 case cpu_to_le16(IEEE80211_STYPE_ATIM):
1725 stats->mgmt[MANAGEMENT_ATIM]++;
1726 break;
1727 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
1728 stats->mgmt[MANAGEMENT_DISASSOC]++;
1729 break;
1730 case cpu_to_le16(IEEE80211_STYPE_AUTH):
1731 stats->mgmt[MANAGEMENT_AUTH]++;
1732 break;
1733 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
1734 stats->mgmt[MANAGEMENT_DEAUTH]++;
1735 break;
1736 case cpu_to_le16(IEEE80211_STYPE_ACTION):
1737 stats->mgmt[MANAGEMENT_ACTION]++;
1738 break;
1739 }
1740 } else if (ieee80211_is_ctl(fc)) {
1741 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1742 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
1743 stats->ctrl[CONTROL_BACK_REQ]++;
1744 break;
1745 case cpu_to_le16(IEEE80211_STYPE_BACK):
1746 stats->ctrl[CONTROL_BACK]++;
1747 break;
1748 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
1749 stats->ctrl[CONTROL_PSPOLL]++;
1750 break;
1751 case cpu_to_le16(IEEE80211_STYPE_RTS):
1752 stats->ctrl[CONTROL_RTS]++;
1753 break;
1754 case cpu_to_le16(IEEE80211_STYPE_CTS):
1755 stats->ctrl[CONTROL_CTS]++;
1756 break;
1757 case cpu_to_le16(IEEE80211_STYPE_ACK):
1758 stats->ctrl[CONTROL_ACK]++;
1759 break;
1760 case cpu_to_le16(IEEE80211_STYPE_CFEND):
1761 stats->ctrl[CONTROL_CFEND]++;
1762 break;
1763 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
1764 stats->ctrl[CONTROL_CFENDACK]++;
1765 break;
1766 }
1767 } else {
1768 /* data */
1769 stats->data_cnt++;
1770 stats->data_bytes += len;
1771 }
1772}
1773EXPORT_SYMBOL(iwl_update_stats);
1774#endif
1775
a93e7973 1776static void iwl_force_rf_reset(struct iwl_priv *priv)
1777{
1778 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1779 return;
1780
246ed355 1781 if (!iwl_is_any_associated(priv)) {
1782 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
1783 return;
1784 }
1785 /*
1786 * There is no easy, better way to force a radio reset; the only known
1787 * method is switching channels, which forces the radio to be reset
1788 * and retuned.
1789 * An internal short scan (single channel) operation is used to
1790 * achieve this.
1791 * The driver should reset the radio when it detects a number of
1792 * consecutive missed beacons or any other uCode error condition.
1793 */
1794 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
1795 iwl_internal_short_hw_scan(priv);
afbdd69a 1796}
a93e7973 1797
a93e7973 1798
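/*
 * Common entry point for forced recovery. IWL_RF_RESET retunes the
 * radio via an internal short scan; IWL_FW_RESET flags a firmware
 * error and schedules the restart worker to reload the firmware.
 * Internal requests (external == false) are rate-limited by
 * reset_duration and honor the fw_restart module parameter, while
 * external requests (e.g. from debugfs) bypass both checks.
 */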
c04f9f22 1799int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
a93e7973 1800{
1801 struct iwl_force_reset *force_reset;
1802
1803 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1804 return -EINVAL;
1805
1806 if (mode >= IWL_MAX_FORCE_RESET) {
1807 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
1808 return -EINVAL;
1809 }
1810 force_reset = &priv->force_reset[mode];
1811 force_reset->reset_request_count++;
1812 if (!external) {
1813 if (force_reset->last_force_reset_jiffies &&
1814 time_after(force_reset->last_force_reset_jiffies +
1815 force_reset->reset_duration, jiffies)) {
1816 IWL_DEBUG_INFO(priv, "force reset rejected\n");
1817 force_reset->reset_reject_count++;
1818 return -EAGAIN;
1819 }
a93e7973 1820 }
1821 force_reset->reset_success_count++;
1822 force_reset->last_force_reset_jiffies = jiffies;
a93e7973 1823 IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
1824 switch (mode) {
1825 case IWL_RF_RESET:
1826 iwl_force_rf_reset(priv);
1827 break;
1828 case IWL_FW_RESET:
1829 /*
1830 * If the request is external (e.g. from debugfs), always
1831 * perform it, regardless of the module parameter setting.
1832 * If the request is internal (a uCode error or a failure
1833 * detected by the driver), the fw_restart module parameter
1834 * must be checked before the firmware reload is
1835 * performed.
1836 */
1837 if (!external && !priv->cfg->mod_params->restart_fw) {
1838 IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
1839 "module parameter setting\n");
1840 break;
1841 }
1842 IWL_ERR(priv, "On demand firmware reload\n");
1843 /* Set the FW error flag -- cleared on iwl_down */
1844 set_bit(STATUS_FW_ERROR, &priv->status);
1845 wake_up_interruptible(&priv->wait_command_queue);
1846 /*
1847 * Keep the restart process from trying to send host
1848 * commands by clearing the INIT status bit
1849 */
1850 clear_bit(STATUS_READY, &priv->status);
1851 queue_work(priv->workqueue, &priv->restart);
1852 break;
a93e7973 1853 }
1854 return 0;
1855}
b74e31a9 1856
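/*
 * Handle a request to change the type of a running virtual interface
 * (including switching the P2P flag) without removing it. The switch
 * is refused if the owning context does not support the new type, or
 * if the new type is exclusive while another context is active; on
 * success the interface is torn down and set up again on the same
 * context with the new type.
 */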
1857int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1858 enum nl80211_iftype newtype, bool newp2p)
1859{
1860 struct iwl_priv *priv = hw->priv;
1861 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1862 struct iwl_rxon_context *tmp;
1863 u32 interface_modes;
1864 int err;
1865
1866 newtype = ieee80211_iftype_p2p(newtype, newp2p);
1867
1868 mutex_lock(&priv->mutex);
1869
1870 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1871
1872 if (!(interface_modes & BIT(newtype))) {
1873 err = -EBUSY;
1874 goto out;
1875 }
1876
1877 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1878 for_each_context(priv, tmp) {
1879 if (ctx == tmp)
1880 continue;
1881
1882 if (!tmp->vif)
1883 continue;
1884
1885 /*
1886 * The current mode switch would be exclusive, but
1887 * another context is active ... refuse the switch.
1888 */
1889 err = -EBUSY;
1890 goto out;
1891 }
1892 }
1893
1894 /* success */
1895 iwl_teardown_interface(priv, vif, true);
1896 vif->type = newtype;
1897 err = iwl_setup_interface(priv, ctx);
1898 WARN_ON(err);
1899 /*
1900 * We've switched internally, but submitting to the
1901 * device may have failed for some reason. Mask this
1902 * error, because otherwise mac80211 will not switch
1903 * (and set the interface type back) and we'll be
1904 * out of sync with it.
1905 */
1906 err = 0;
1907
1908 out:
1909 mutex_unlock(&priv->mutex);
1910 return err;
1911}
1912EXPORT_SYMBOL(iwl_mac_change_interface);
1913
b74e31a9 1914 /*
1915 * On every watchdog tick we check the queue's latest time stamp; if it has not
1916 * changed within the timeout period and the queue is not empty, we reset the firmware.
b74e31a9 1917 */
1918static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
1919{
1920 struct iwl_tx_queue *txq = &priv->txq[cnt];
1921 struct iwl_queue *q = &txq->q;
1922 unsigned long timeout;
1923 int ret;
b74e31a9 1924
1925 if (q->read_ptr == q->write_ptr) {
1926 txq->time_stamp = jiffies;
7cb1b088 1927 return 0;
22de94de 1928 }
7cb1b088 1929
1930 timeout = txq->time_stamp +
1931 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1932
1933 if (time_after(jiffies, timeout)) {
1934 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1935 q->id, priv->cfg->base_params->wd_timeout);
1936 ret = iwl_force_reset(priv, IWL_FW_RESET, false);
1937 return (ret == -EAGAIN) ? 0 : 1;
b74e31a9 1938 }
22de94de 1939
1940 return 0;
1941}
1942
1943 /*
1944 * Making the watchdog tick a quarter of the timeout ensures we will
1945 * discover a hung queue between timeout and 1.25 * timeout.
1946 */
1947#define IWL_WD_TICK(timeout) ((timeout) / 4)
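/*
 * Example with illustrative numbers: for wd_timeout = 2000 ms the
 * watchdog fires every IWL_WD_TICK(2000) = 500 ms, so a queue that
 * stops making progress is declared stuck between 2000 ms and 2500 ms
 * after its last time stamp update.
 */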
1948
1949 /*
1950 * Watchdog timer callback: we check each tx queue and, if it is hung,
1951 * we reset the firmware. If everything is fine we just rearm the timer.
1952 */
1953void iwl_bg_watchdog(unsigned long data)
1954{
1955 struct iwl_priv *priv = (struct iwl_priv *)data;
1956 int cnt;
22de94de 1957 unsigned long timeout;
1958
1959 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1960 return;
1961
1962 timeout = priv->cfg->base_params->wd_timeout;
1963 if (timeout == 0)
1964 return;
1965
b74e31a9 1966 /* monitor and check for stuck cmd queue */
13bb9483 1967 if (iwl_check_stuck_queue(priv, priv->cmd_queue))
1968 return;
1969
1970 /* monitor and check for other stuck queues */
246ed355 1971 if (iwl_is_any_associated(priv)) {
1972 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1973 /* skip as we already checked the command queue */
13bb9483 1974 if (cnt == priv->cmd_queue)
1975 continue;
1976 if (iwl_check_stuck_queue(priv, cnt))
1977 return;
1978 }
1979 }
1980
1981 mod_timer(&priv->watchdog, jiffies +
1982 msecs_to_jiffies(IWL_WD_TICK(timeout)));
b74e31a9 1983}
1984EXPORT_SYMBOL(iwl_bg_watchdog);
1985
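/*
 * Arm the watchdog timer to fire one tick from now, or delete it
 * entirely when the per-device wd_timeout parameter is zero
 * (watchdog disabled).
 */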
1986void iwl_setup_watchdog(struct iwl_priv *priv)
1987{
1988 unsigned int timeout = priv->cfg->base_params->wd_timeout;
afbdd69a 1989
1990 if (timeout)
1991 mod_timer(&priv->watchdog,
1992 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1993 else
1994 del_timer(&priv->watchdog);
1995}
1996EXPORT_SYMBOL(iwl_setup_watchdog);
1997
1998/*
1999 * extended beacon time format:
2000 * a time in usec is converted into a 32-bit value in extended:internal format;
2001 * the extended part is the beacon count and
2002 * the internal part is the time in usec within one beacon interval
2003 */
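/*
 * Worked example with assumed values (TIME_UNIT = 1024 usec and a
 * 22-bit internal part are typical, but the real numbers come from
 * hw_params): beacon_interval = 100 TU gives interval = 102400 usec,
 * so usec = 250000 packs as quot = 250000 / 102400 = 2 beacons in the
 * extended part and rem = 250000 % 102400 = 45200 usec in the internal
 * part, i.e. (2 << beacon_time_tsf_bits) + 45200.
 */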
2004u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
2005{
2006 u32 quot;
2007 u32 rem;
2008 u32 interval = beacon_interval * TIME_UNIT;
2009
2010 if (!interval || !usec)
2011 return 0;
2012
2013 quot = (usec / interval) &
2014 (iwl_beacon_time_mask_high(priv,
2015 priv->hw_params.beacon_time_tsf_bits) >>
2016 priv->hw_params.beacon_time_tsf_bits);
2017 rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
2018 priv->hw_params.beacon_time_tsf_bits);
2019
2020 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
2021}
2022EXPORT_SYMBOL(iwl_usecs_to_beacons);
2023
2024 /* base is usually the value we get from the uCode with each received frame;
2025 * it is the HW timer counter, which counts down
2026 */
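/*
 * The extended (beacon count) parts of base and addon are summed
 * directly; the internal parts are combined relative to the beacon
 * interval, and when base_low does not exceed addon_low one extra
 * beacon is carried into the extended part.
 */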
2027__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
2028 u32 addon, u32 beacon_interval)
2029{
2030 u32 base_low = base & iwl_beacon_time_mask_low(priv,
2031 priv->hw_params.beacon_time_tsf_bits);
2032 u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
2033 priv->hw_params.beacon_time_tsf_bits);
2034 u32 interval = beacon_interval * TIME_UNIT;
2035 u32 res = (base & iwl_beacon_time_mask_high(priv,
2036 priv->hw_params.beacon_time_tsf_bits)) +
2037 (addon & iwl_beacon_time_mask_high(priv,
2038 priv->hw_params.beacon_time_tsf_bits));
2039
2040 if (base_low > addon_low)
2041 res += base_low - addon_low;
2042 else if (base_low < addon_low) {
2043 res += interval + base_low - addon_low;
2044 res += (1 << priv->hw_params.beacon_time_tsf_bits);
2045 } else
2046 res += (1 << priv->hw_params.beacon_time_tsf_bits);
2047
2048 return cpu_to_le32(res);
2049}
2050EXPORT_SYMBOL(iwl_add_beacon_time);
2051
2052#ifdef CONFIG_PM
2053
f60dc013 2054int iwl_pci_suspend(struct device *device)
6da3a13e 2055{
f60dc013 2056 struct pci_dev *pdev = to_pci_dev(device);
2057 struct iwl_priv *priv = pci_get_drvdata(pdev);
2058
2059 /*
2060 * This function is called when the system goes into suspend.
2061 * mac80211 calls iwl_mac_stop() from its suspend path first, but since
2062 * iwl_mac_stop() has no knowledge of who the caller is,
2063 * it does not call apm_ops.stop() to stop the DMA operation.
2064 * Call apm_ops.stop() here to make sure the DMA is stopped.
2065 */
14e8e4af 2066 iwl_apm_stop(priv);
6da3a13e 2067
2068 return 0;
2069}
2070EXPORT_SYMBOL(iwl_pci_suspend);
2071
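/*
 * On resume: re-apply the RETRY_TIMEOUT workaround, re-enable
 * interrupts, and re-read the hardware RF-kill switch so mac80211
 * is told the current rfkill state.
 */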
f60dc013 2072int iwl_pci_resume(struct device *device)
6da3a13e 2073{
f60dc013 2074 struct pci_dev *pdev = to_pci_dev(device);
6da3a13e 2075 struct iwl_priv *priv = pci_get_drvdata(pdev);
0ab84cff 2076 bool hw_rfkill = false;
6da3a13e 2077
2078 /*
2079 * We disable the RETRY_TIMEOUT register (0x41) to keep
2080 * PCI Tx retries from interfering with C3 CPU state.
2081 */
2082 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2083
2084 iwl_enable_interrupts(priv);
2085
2086 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2087 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2088 hw_rfkill = true;
2089
2090 if (hw_rfkill)
2091 set_bit(STATUS_RF_KILL_HW, &priv->status);
2092 else
2093 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2094
2095 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2096
2097 return 0;
2098}
2099EXPORT_SYMBOL(iwl_pci_resume);
2100
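/*
 * The same suspend/resume pair also services the hibernation
 * transitions: freeze/thaw and poweroff/restore map onto
 * iwl_pci_suspend()/iwl_pci_resume().
 */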
2101const struct dev_pm_ops iwl_pm_ops = {
2102 .suspend = iwl_pci_suspend,
2103 .resume = iwl_pci_resume,
2104 .freeze = iwl_pci_suspend,
2105 .thaw = iwl_pci_resume,
2106 .poweroff = iwl_pci_suspend,
2107 .restore = iwl_pci_resume,
2108};
2109EXPORT_SYMBOL(iwl_pm_ops);
2110
6da3a13e 2111#endif /* CONFIG_PM */