drivers/net/wireless/intel/iwlwifi/mvm/ops.c
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of version 2 of the GNU General Public License as
14  * published by the Free Software Foundation.
15  *
16  * This program is distributed in the hope that it will be useful, but
17  * WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24  * USA
25  *
26  * The full GNU General Public License is included in this distribution
27  * in the file called COPYING.
28  *
29  * Contact Information:
30  *  Intel Linux Wireless <linuxwifi@intel.com>
31  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32  *
33  * BSD LICENSE
34  *
35  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  *
44  *  * Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  *  * Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in
48  *    the documentation and/or other materials provided with the
49  *    distribution.
50  *  * Neither the name Intel Corporation nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  *
66  *****************************************************************************/
67 #include <linux/module.h>
68 #include <linux/vmalloc.h>
69 #include <net/mac80211.h>
70
71 #include "fw/notif-wait.h"
72 #include "iwl-trans.h"
73 #include "iwl-op-mode.h"
74 #include "fw/img.h"
75 #include "iwl-debug.h"
76 #include "iwl-drv.h"
77 #include "iwl-modparams.h"
78 #include "mvm.h"
79 #include "iwl-phy-db.h"
80 #include "iwl-eeprom-parse.h"
81 #include "iwl-csr.h"
82 #include "iwl-io.h"
83 #include "iwl-prph.h"
84 #include "rs.h"
85 #include "fw/api/scan.h"
86 #include "time-event.h"
87 #include "fw-api.h"
89 #include "fw/acpi.h"
90
91 #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
92 MODULE_DESCRIPTION(DRV_DESCRIPTION);
93 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
94 MODULE_LICENSE("GPL");
95
96 static const struct iwl_op_mode_ops iwl_mvm_ops;
97 static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
98
99 struct iwl_mvm_mod_params iwlmvm_mod_params = {
100         .power_scheme = IWL_POWER_SCHEME_BPS,
101         .tfd_q_hang_detect = true
102         /* rest of fields are 0 by default */
103 };
104
105 module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
106 MODULE_PARM_DESC(init_dbg,
107                  "set to true to debug an ASSERT in INIT fw (default: false)");
108 module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
109 MODULE_PARM_DESC(power_scheme,
110                  "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
111 module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
112                    bool, S_IRUGO);
113 MODULE_PARM_DESC(tfd_q_hang_detect,
114                  "TFD queues hang detection (default: true)");
115
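/*
 * Usage sketch (hypothetical values, for illustration only): loading the
 * module with "modprobe iwlmvm power_scheme=3 tfd_q_hang_detect=0" would
 * select the low-power scheme and disable TFD queue hang detection. All
 * three parameters are read-only at runtime (S_IRUGO).
 */
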
116 /*
117  * module init and exit functions
118  */
119 static int __init iwl_mvm_init(void)
120 {
121         int ret;
122
123         ret = iwl_mvm_rate_control_register();
124         if (ret) {
125                 pr_err("Unable to register rate control algorithm: %d\n", ret);
126                 return ret;
127         }
128
129         ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
130
131         if (ret) {
132                 pr_err("Unable to register MVM op_mode: %d\n", ret);
133                 iwl_mvm_rate_control_unregister();
134         }
135
136         return ret;
137 }
138 module_init(iwl_mvm_init);
139
140 static void __exit iwl_mvm_exit(void)
141 {
142         iwl_opmode_deregister("iwlmvm");
143         iwl_mvm_rate_control_unregister();
144 }
145 module_exit(iwl_mvm_exit);
146
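/*
 * Compose CSR_HW_IF_CONFIG_REG from the MAC step/dash (taken from the HW
 * revision) and the radio type/step/dash (taken from the FW PHY config),
 * then write it with a masked update so unrelated bits are preserved.
 */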
147 static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
148 {
149         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
150         u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
151         u32 reg_val = 0;
152         u32 phy_config = iwl_mvm_get_phy_config(mvm);
153
154         radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
155                          FW_PHY_CFG_RADIO_TYPE_POS;
156         radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
157                          FW_PHY_CFG_RADIO_STEP_POS;
158         radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
159                          FW_PHY_CFG_RADIO_DASH_POS;
160
161         /* SKU control */
162         reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
163                                 CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
164         reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
165                                 CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
166
167         /* radio configuration */
168         reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
169         reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
170         reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
171
172         WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
173                  ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
174
175         /*
176          * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
177          * sampling, and shouldn't be set to any non-zero value.
178          * The same is supposed to hold for earlier HW as well, but on some
179          * devices (such as the 7260) leaving these bits unset makes automatic
180          * tests fail with seemingly unrelated errors. This needs further
181          * investigation, so for now the two cases are kept separate.
182          */
183         if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
184                 reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
185
186         iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
187                                 CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
188                                 CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
189                                 CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
190                                 CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
191                                 CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
192                                 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
193                                 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
194                                 reg_val);
195
196         IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
197                        radio_cfg_step, radio_cfg_dash);
198
199         /*
200          * W/A: the NIC gets stuck in a reset state after an early PCIe power
201          * off (PCIe power is lost before PERST# is asserted), which causes the
202          * ME FW to lose ownership and be unable to obtain it back.
203          */
204         if (!mvm->trans->cfg->apmg_not_supported)
205                 iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
206                                        APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
207                                        ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
208 }
209
210 /**
211  * enum iwl_rx_handler_context - context for Rx handler
212  * @RX_HANDLER_SYNC: the handler is called directly from the Rx path,
213  *      which can't acquire mvm->mutex.
214  * @RX_HANDLER_ASYNC_LOCKED: if the handler needs to hold mvm->mutex
215  *      (and only in this case!), it should be set as ASYNC. It will then
216  *      be called from a worker with mvm->mutex held.
217  * @RX_HANDLER_ASYNC_UNLOCKED: if the handler needs to take the mutex
218  *      itself, it will be called from a worker without mvm->mutex held.
219  */
220 enum iwl_rx_handler_context {
221         RX_HANDLER_SYNC,
222         RX_HANDLER_ASYNC_LOCKED,
223         RX_HANDLER_ASYNC_UNLOCKED,
224 };
225
226 /**
227  * struct iwl_rx_handlers - handler for FW notification
228  * @cmd_id: command id
229  * @context: see &iwl_rx_handler_context
230  * @fn: the function called when the notification is received
231  */
232 struct iwl_rx_handlers {
233         u16 cmd_id;
234         enum iwl_rx_handler_context context;
235         void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
236 };
237
238 #define RX_HANDLER(_cmd_id, _fn, _context)      \
239         { .cmd_id = _cmd_id, .fn = _fn, .context = _context }
240 #define RX_HANDLER_GRP(_grp, _cmd, _fn, _context)       \
241         { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
242
243 /*
244  * Handlers for fw notifications
245  * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, CONTEXT).
246  * This list should be in order of frequency for performance purposes.
247  *
248  * The handler can run in one of three contexts, see &iwl_rx_handler_context.
249  */
250 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
251         RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
252         RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
253
254         RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
255                    RX_HANDLER_ASYNC_LOCKED),
256         RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
257                    RX_HANDLER_ASYNC_LOCKED),
258         RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
259                    RX_HANDLER_ASYNC_LOCKED),
260
261         RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
262                    iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
263
264         RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
265                    RX_HANDLER_SYNC),
266         RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
267                    RX_HANDLER_ASYNC_LOCKED),
268
269         RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
270
271         RX_HANDLER(SCAN_ITERATION_COMPLETE,
272                    iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
273         RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
274                    iwl_mvm_rx_lmac_scan_complete_notif,
275                    RX_HANDLER_ASYNC_LOCKED),
276         RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
277                    RX_HANDLER_SYNC),
278         RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
279                    RX_HANDLER_ASYNC_LOCKED),
280         RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
281                    iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
282
283         RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
284                    RX_HANDLER_SYNC),
285
286         RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
287                    RX_HANDLER_SYNC),
288
289         RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
290         RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
291                    iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
292         RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
293                    RX_HANDLER_ASYNC_LOCKED),
294         RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
295                        iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
296         RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
297                        iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
298
299         RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
300                    RX_HANDLER_ASYNC_LOCKED),
301         RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
302                    RX_HANDLER_SYNC),
303         RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
304                    RX_HANDLER_ASYNC_LOCKED),
305         RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
306                        iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
307         RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
308                        iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
309         RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
310                        iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
311         RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
312                        iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
313 };
314 #undef RX_HANDLER
315 #undef RX_HANDLER_GRP
316
317 /* Please keep this array *SORTED* by hex value.
318  * Access is done through binary search
319  */
320 static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
321         HCMD_NAME(MVM_ALIVE),
322         HCMD_NAME(REPLY_ERROR),
323         HCMD_NAME(ECHO_CMD),
324         HCMD_NAME(INIT_COMPLETE_NOTIF),
325         HCMD_NAME(PHY_CONTEXT_CMD),
326         HCMD_NAME(DBG_CFG),
327         HCMD_NAME(SCAN_CFG_CMD),
328         HCMD_NAME(SCAN_REQ_UMAC),
329         HCMD_NAME(SCAN_ABORT_UMAC),
330         HCMD_NAME(SCAN_COMPLETE_UMAC),
331         HCMD_NAME(TOF_CMD),
332         HCMD_NAME(TOF_NOTIFICATION),
333         HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
334         HCMD_NAME(ADD_STA_KEY),
335         HCMD_NAME(ADD_STA),
336         HCMD_NAME(REMOVE_STA),
337         HCMD_NAME(FW_GET_ITEM_CMD),
338         HCMD_NAME(TX_CMD),
339         HCMD_NAME(SCD_QUEUE_CFG),
340         HCMD_NAME(TXPATH_FLUSH),
341         HCMD_NAME(MGMT_MCAST_KEY),
342         HCMD_NAME(WEP_KEY),
343         HCMD_NAME(SHARED_MEM_CFG),
344         HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
345         HCMD_NAME(MAC_CONTEXT_CMD),
346         HCMD_NAME(TIME_EVENT_CMD),
347         HCMD_NAME(TIME_EVENT_NOTIFICATION),
348         HCMD_NAME(BINDING_CONTEXT_CMD),
349         HCMD_NAME(TIME_QUOTA_CMD),
350         HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
351         HCMD_NAME(LEDS_CMD),
352         HCMD_NAME(LQ_CMD),
353         HCMD_NAME(FW_PAGING_BLOCK_CMD),
354         HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
355         HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
356         HCMD_NAME(HOT_SPOT_CMD),
357         HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
358         HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
359         HCMD_NAME(BT_COEX_CI),
360         HCMD_NAME(PHY_CONFIGURATION_CMD),
361         HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
362         HCMD_NAME(PHY_DB_CMD),
363         HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
364         HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
365         HCMD_NAME(POWER_TABLE_CMD),
366         HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
367         HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
368         HCMD_NAME(DC2DC_CONFIG_CMD),
369         HCMD_NAME(NVM_ACCESS_CMD),
370         HCMD_NAME(BEACON_NOTIFICATION),
371         HCMD_NAME(BEACON_TEMPLATE_CMD),
372         HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
373         HCMD_NAME(BT_CONFIG),
374         HCMD_NAME(STATISTICS_CMD),
375         HCMD_NAME(STATISTICS_NOTIFICATION),
376         HCMD_NAME(EOSP_NOTIFICATION),
377         HCMD_NAME(REDUCE_TX_POWER_CMD),
378         HCMD_NAME(CARD_STATE_NOTIFICATION),
379         HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
380         HCMD_NAME(TDLS_CONFIG_CMD),
381         HCMD_NAME(MAC_PM_POWER_TABLE),
382         HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
383         HCMD_NAME(MFUART_LOAD_NOTIFICATION),
384         HCMD_NAME(RSS_CONFIG_CMD),
385         HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
386         HCMD_NAME(REPLY_RX_PHY_CMD),
387         HCMD_NAME(REPLY_RX_MPDU_CMD),
388         HCMD_NAME(FRAME_RELEASE),
389         HCMD_NAME(BA_NOTIF),
390         HCMD_NAME(MCC_UPDATE_CMD),
391         HCMD_NAME(MCC_CHUB_UPDATE_CMD),
392         HCMD_NAME(MARKER_CMD),
393         HCMD_NAME(BT_PROFILE_NOTIFICATION),
394         HCMD_NAME(BCAST_FILTER_CMD),
395         HCMD_NAME(MCAST_FILTER_CMD),
396         HCMD_NAME(REPLY_SF_CFG_CMD),
397         HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
398         HCMD_NAME(D3_CONFIG_CMD),
399         HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
400         HCMD_NAME(OFFLOADS_QUERY_CMD),
401         HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
402         HCMD_NAME(MATCH_FOUND_NOTIFICATION),
403         HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
404         HCMD_NAME(WOWLAN_PATTERNS),
405         HCMD_NAME(WOWLAN_CONFIGURATION),
406         HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
407         HCMD_NAME(WOWLAN_TKIP_PARAM),
408         HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
409         HCMD_NAME(WOWLAN_GET_STATUSES),
410         HCMD_NAME(SCAN_ITERATION_COMPLETE),
411         HCMD_NAME(D0I3_END_CMD),
412         HCMD_NAME(LTR_CONFIG),
413 };
414
415 /* Please keep this array *SORTED* by hex value.
416  * Access is done through binary search
417  */
418 static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
419         HCMD_NAME(SHARED_MEM_CFG_CMD),
420         HCMD_NAME(INIT_EXTENDED_CFG_CMD),
421 };
422
423 /* Please keep this array *SORTED* by hex value.
424  * Access is done through binary search
425  */
426 static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
427         HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
428 };
429
430 /* Please keep this array *SORTED* by hex value.
431  * Access is done through binary search
432  */
433 static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
434         HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
435         HCMD_NAME(CTDP_CONFIG_CMD),
436         HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
437         HCMD_NAME(GEO_TX_POWER_LIMIT),
438         HCMD_NAME(CT_KILL_NOTIFICATION),
439         HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
440 };
441
442 /* Please keep this array *SORTED* by hex value.
443  * Access is done through binary search
444  */
445 static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
446         HCMD_NAME(DQA_ENABLE_CMD),
447         HCMD_NAME(UPDATE_MU_GROUPS_CMD),
448         HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
449         HCMD_NAME(STA_PM_NOTIF),
450         HCMD_NAME(MU_GROUP_MGMT_NOTIF),
451         HCMD_NAME(RX_QUEUES_NOTIFICATION),
452 };
453
454 /* Please keep this array *SORTED* by hex value.
455  * Access is done through binary search
456  */
457 static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
458         HCMD_NAME(MFU_ASSERT_DUMP_NTF),
459 };
460
461 /* Please keep this array *SORTED* by hex value.
462  * Access is done through binary search
463  */
464 static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
465         HCMD_NAME(STORED_BEACON_NTF),
466 };
467
468 /* Please keep this array *SORTED* by hex value.
469  * Access is done through binary search
470  */
471 static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
472         HCMD_NAME(NVM_ACCESS_COMPLETE),
473         HCMD_NAME(NVM_GET_INFO),
474 };
475
476 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
477         [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
478         [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
479         [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
480         [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
481         [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
482         [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
483         [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
484         [REGULATORY_AND_NVM_GROUP] =
485                 HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
486 };
487
488 /* these forward declarations avoid having to export the functions */
489 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
490 static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
491
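/*
 * Walk the per-device Tx power backoff table (if the config provides one)
 * and return the backoff entry matching the platform power limit reported
 * by ACPI; returns 0 when no backoff applies.
 */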
492 static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
493 {
494         const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
495         u64 dflt_pwr_limit;
496
497         if (!backoff)
498                 return 0;
499
500         dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
501
502         while (backoff->pwr) {
503                 if (dflt_pwr_limit >= backoff->pwr)
504                         return backoff->backoff;
505
506                 backoff++;
507         }
508
509         return 0;
510 }
511
512 static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
513 {
514         struct iwl_mvm *mvm =
515                 container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
516         struct ieee80211_vif *tx_blocked_vif;
517         struct iwl_mvm_vif *mvmvif;
518
519         mutex_lock(&mvm->mutex);
520
521         tx_blocked_vif =
522                 rcu_dereference_protected(mvm->csa_tx_blocked_vif,
523                                           lockdep_is_held(&mvm->mutex));
524
525         if (!tx_blocked_vif)
526                 goto unlock;
527
528         mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
529         iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
530         RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
531 unlock:
532         mutex_unlock(&mvm->mutex);
533 }
534
535 static int iwl_mvm_fwrt_dump_start(void *ctx)
536 {
537         struct iwl_mvm *mvm = ctx;
538         int ret;
539
540         ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
541         if (ret)
542                 return ret;
543
544         mutex_lock(&mvm->mutex);
545
546         return 0;
547 }
548
549 static void iwl_mvm_fwrt_dump_end(void *ctx)
550 {
551         struct iwl_mvm *mvm = ctx;
552
553         mutex_unlock(&mvm->mutex);
554
555         iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
556 }
557
558 static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
559         .dump_start = iwl_mvm_fwrt_dump_start,
560         .dump_end = iwl_mvm_fwrt_dump_end,
561 };
562
563 static struct iwl_op_mode *
564 iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
565                       const struct iwl_fw *fw, struct dentry *dbgfs_dir)
566 {
567         struct ieee80211_hw *hw;
568         struct iwl_op_mode *op_mode;
569         struct iwl_mvm *mvm;
570         struct iwl_trans_config trans_cfg = {};
571         static const u8 no_reclaim_cmds[] = {
572                 TX_CMD,
573         };
574         int err, scan_size;
575         u32 min_backoff;
576
577         /*
578          * We use IWL_MVM_STATION_COUNT to check the validity of the station
579          * index all over the driver - check that its value corresponds to the
580          * array size.
581          */
582         BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
583
584         /********************************
585          * 1. Allocating and configuring HW data
586          ********************************/
587         hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
588                                 sizeof(struct iwl_mvm),
589                                 &iwl_mvm_hw_ops);
590         if (!hw)
591                 return NULL;
592
593         if (cfg->max_rx_agg_size)
594                 hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
595
596         if (cfg->max_tx_agg_size)
597                 hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
598
599         op_mode = hw->priv;
600
601         mvm = IWL_OP_MODE_GET_MVM(op_mode);
602         mvm->dev = trans->dev;
603         mvm->trans = trans;
604         mvm->cfg = cfg;
605         mvm->fw = fw;
606         mvm->hw = hw;
607
608         iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm);
609
610         mvm->init_status = 0;
611
612         if (iwl_mvm_has_new_rx_api(mvm)) {
613                 op_mode->ops = &iwl_mvm_ops_mq;
614                 trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
615         } else {
616                 op_mode->ops = &iwl_mvm_ops;
617                 trans->rx_mpdu_cmd_hdr_size =
618                         sizeof(struct iwl_rx_mpdu_res_start);
619
620                 if (WARN_ON(trans->num_rx_queues > 1))
621                         goto out_free;
622         }
623
624         mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
625
626         mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
627         mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
628         mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
629
630         mvm->sf_state = SF_UNINIT;
631         if (iwl_mvm_has_unified_ucode(mvm))
632                 iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
633         else
634                 iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
635         mvm->drop_bcn_ap_mode = true;
636
637         mutex_init(&mvm->mutex);
638         mutex_init(&mvm->d0i3_suspend_mutex);
639         spin_lock_init(&mvm->async_handlers_lock);
640         INIT_LIST_HEAD(&mvm->time_event_list);
641         INIT_LIST_HEAD(&mvm->aux_roc_te_list);
642         INIT_LIST_HEAD(&mvm->async_handlers_list);
643         spin_lock_init(&mvm->time_event_lock);
644         spin_lock_init(&mvm->queue_info_lock);
645
646         INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
647         INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
648         INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
649         INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
650         INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
651         INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
652
653         spin_lock_init(&mvm->d0i3_tx_lock);
654         spin_lock_init(&mvm->refs_lock);
655         skb_queue_head_init(&mvm->d0i3_tx);
656         init_waitqueue_head(&mvm->d0i3_exit_waitq);
657         init_waitqueue_head(&mvm->rx_sync_waitq);
658
659         atomic_set(&mvm->queue_sync_counter, 0);
660
661         SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
662
663         INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
664
665         /*
666          * Populate the state variables that the transport layer needs
667          * to know about.
668          */
669         trans_cfg.op_mode = op_mode;
670         trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
671         trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
672         switch (iwlwifi_mod_params.amsdu_size) {
673         case IWL_AMSDU_DEF:
674         case IWL_AMSDU_4K:
675                 trans_cfg.rx_buf_size = IWL_AMSDU_4K;
676                 break;
677         case IWL_AMSDU_8K:
678                 trans_cfg.rx_buf_size = IWL_AMSDU_8K;
679                 break;
680         case IWL_AMSDU_12K:
681                 trans_cfg.rx_buf_size = IWL_AMSDU_12K;
682                 break;
683         default:
684                 pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
685                        iwlwifi_mod_params.amsdu_size);
686                 trans_cfg.rx_buf_size = IWL_AMSDU_4K;
687         }
688
689         /* the hardware splits the A-MSDU */
690         if (mvm->cfg->mq_rx_supported)
691                 trans_cfg.rx_buf_size = IWL_AMSDU_4K;
692
693         trans->wide_cmd_header = true;
694         trans_cfg.bc_table_dword = true;
695
696         trans_cfg.command_groups = iwl_mvm_groups;
697         trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
698
699         trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
700         trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
701         trans_cfg.scd_set_active = true;
702
703         trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
704                                           driver_data[2]);
705
706         trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
707
708         /* Set a short watchdog for the command queue */
709         trans_cfg.cmd_q_wdg_timeout =
710                 iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
711
712         snprintf(mvm->hw->wiphy->fw_version,
713                  sizeof(mvm->hw->wiphy->fw_version),
714                  "%s", fw->fw_version);
715
716         /* Configure transport layer */
717         iwl_trans_configure(mvm->trans, &trans_cfg);
718
719         trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
720         trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
721         trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
722         memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
723                sizeof(trans->dbg_conf_tlv));
724         trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
725
726         /* set up notification wait support */
727         iwl_notification_wait_init(&mvm->notif_wait);
728
729         /* Init phy db */
730         mvm->phy_db = iwl_phy_db_init(trans);
731         if (!mvm->phy_db) {
732                 IWL_ERR(mvm, "Cannot init phy_db\n");
733                 goto out_free;
734         }
735
736         IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
737                  mvm->cfg->name, mvm->trans->hw_rev);
738
739         if (iwlwifi_mod_params.nvm_file)
740                 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
741         else
742                 IWL_DEBUG_EEPROM(mvm->trans->dev,
743                                  "working without external nvm file\n");
744
745         err = iwl_trans_start_hw(mvm->trans);
746         if (err)
747                 goto out_free;
748
749         mutex_lock(&mvm->mutex);
750         iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
751         err = iwl_run_init_mvm_ucode(mvm, true);
752         if (!iwlmvm_mod_params.init_dbg)
753                 iwl_mvm_stop_device(mvm);
754         iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
755         mutex_unlock(&mvm->mutex);
756         if (err < 0) {
757                 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
758                 goto out_free;
759         }
760
761         scan_size = iwl_mvm_scan_size(mvm);
762
763         mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
764         if (!mvm->scan_cmd)
765                 goto out_free;
766
767         /* Treat EBS as successful unless the FW states otherwise. */
768         mvm->last_ebs_successful = true;
769
770         err = iwl_mvm_mac_setup_register(mvm);
771         if (err)
772                 goto out_free;
773         mvm->hw_registered = true;
774
775         min_backoff = iwl_mvm_min_backoff(mvm);
776         iwl_mvm_thermal_initialize(mvm, min_backoff);
777
778         err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
779         if (err)
780                 goto out_unregister;
781
782         if (!iwl_mvm_has_new_rx_stats_api(mvm))
783                 memset(&mvm->rx_stats_v3, 0,
784                        sizeof(struct mvm_statistics_rx_v3));
785         else
786                 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
787
788         /* The transport always starts with a reference taken; we can
789          * release it now if d0i3 is supported */
790         if (iwl_mvm_is_d0i3_supported(mvm))
791                 iwl_trans_unref(mvm->trans);
792
793         iwl_mvm_tof_init(mvm);
794
795         return op_mode;
796
797  out_unregister:
798         if (iwlmvm_mod_params.init_dbg)
799                 return op_mode;
800
801         ieee80211_unregister_hw(mvm->hw);
802         mvm->hw_registered = false;
803         iwl_mvm_leds_exit(mvm);
804         iwl_mvm_thermal_exit(mvm);
805  out_free:
806         iwl_fw_flush_dump(&mvm->fwrt);
807
808         if (iwlmvm_mod_params.init_dbg)
809                 return op_mode;
810         iwl_phy_db_free(mvm->phy_db);
811         kfree(mvm->scan_cmd);
812         iwl_trans_op_mode_leave(trans);
813
814         ieee80211_free_hw(mvm->hw);
815         return NULL;
816 }
817
818 static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
819 {
820         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
821         int i;
822
823         /* If d0i3 is supported, we have released the reference that
824          * the transport started with, so we should take it back now
825          * that we are leaving.
826          */
827         if (iwl_mvm_is_d0i3_supported(mvm))
828                 iwl_trans_ref(mvm->trans);
829
830         iwl_mvm_leds_exit(mvm);
831
832         iwl_mvm_thermal_exit(mvm);
833
834         if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
835                 ieee80211_unregister_hw(mvm->hw);
836                 mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
837         }
838
839         kfree(mvm->scan_cmd);
840         kfree(mvm->mcast_filter_cmd);
841         mvm->mcast_filter_cmd = NULL;
842
843 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
844         kfree(mvm->d3_resume_sram);
845 #endif
846
847         iwl_trans_op_mode_leave(mvm->trans);
848
849         iwl_phy_db_free(mvm->phy_db);
850         mvm->phy_db = NULL;
851
852         kfree(mvm->nvm_data);
853         for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
854                 kfree(mvm->nvm_sections[i].data);
855
856         iwl_mvm_tof_clean(mvm);
857
858         mutex_destroy(&mvm->mutex);
859         mutex_destroy(&mvm->d0i3_suspend_mutex);
860
861         ieee80211_free_hw(mvm->hw);
862 }
863
864 struct iwl_async_handler_entry {
865         struct list_head list;
866         struct iwl_rx_cmd_buffer rxb;
867         enum iwl_rx_handler_context context;
868         void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
869 };
870
871 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
872 {
873         struct iwl_async_handler_entry *entry, *tmp;
874
875         spin_lock_bh(&mvm->async_handlers_lock);
876         list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
877                 iwl_free_rxb(&entry->rxb);
878                 list_del(&entry->list);
879                 kfree(entry);
880         }
881         spin_unlock_bh(&mvm->async_handlers_lock);
882 }
883
884 static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
885 {
886         struct iwl_mvm *mvm =
887                 container_of(wk, struct iwl_mvm, async_handlers_wk);
888         struct iwl_async_handler_entry *entry, *tmp;
889         LIST_HEAD(local_list);
890
891         /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
892
893         /*
894          * Sync with Rx path with a lock. Remove all the entries from this list,
895          * add them to a local one (lock free), and then handle them.
896          */
897         spin_lock_bh(&mvm->async_handlers_lock);
898         list_splice_init(&mvm->async_handlers_list, &local_list);
899         spin_unlock_bh(&mvm->async_handlers_lock);
900
901         list_for_each_entry_safe(entry, tmp, &local_list, list) {
902                 if (entry->context == RX_HANDLER_ASYNC_LOCKED)
903                         mutex_lock(&mvm->mutex);
904                 entry->fn(mvm, &entry->rxb);
905                 iwl_free_rxb(&entry->rxb);
906                 list_del(&entry->list);
907                 if (entry->context == RX_HANDLER_ASYNC_LOCKED)
908                         mutex_unlock(&mvm->mutex);
909                 kfree(entry);
910         }
911 }
912
913 static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
914                                             struct iwl_rx_packet *pkt)
915 {
916         struct iwl_fw_dbg_trigger_tlv *trig;
917         struct iwl_fw_dbg_trigger_cmd *cmds_trig;
918         int i;
919
920         if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
921                 return;
922
923         trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
924         cmds_trig = (void *)trig->data;
925
926         if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
927                 return;
928
929         for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
930                 /* don't collect on CMD 0 */
931                 if (!cmds_trig->cmds[i].cmd_id)
932                         break;
933
934                 if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
935                     cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
936                         continue;
937
938                 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
939                                         "CMD 0x%02x.%02x received",
940                                         pkt->hdr.group_id, pkt->hdr.cmd);
941                 break;
942         }
943 }
944
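/*
 * Common notification dispatch: fire debug triggers and notification
 * waiters first, then look the command up in iwl_mvm_rx_handlers.
 * RX_HANDLER_SYNC handlers run right here in the Rx path; ASYNC handlers
 * get the RXB page stolen, are queued on async_handlers_list and run
 * later from iwl_mvm_async_handlers_wk. Anything unhandled is passed on
 * to the FW runtime.
 */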
945 static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
946                               struct iwl_rx_cmd_buffer *rxb,
947                               struct iwl_rx_packet *pkt)
948 {
949         int i;
950
951         iwl_mvm_rx_check_trigger(mvm, pkt);
952
953         /*
954          * Do the notification wait before RX handlers so
955          * even if the RX handler consumes the RXB we have
956          * access to it in the notification wait entry.
957          */
958         iwl_notification_wait_notify(&mvm->notif_wait, pkt);
959
960         for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
961                 const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
962                 struct iwl_async_handler_entry *entry;
963
964                 if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
965                         continue;
966
967                 if (rx_h->context == RX_HANDLER_SYNC) {
968                         rx_h->fn(mvm, rxb);
969                         return;
970                 }
971
972                 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
973                 /* we can't do much... */
974                 if (!entry)
975                         return;
976
977                 entry->rxb._page = rxb_steal_page(rxb);
978                 entry->rxb._offset = rxb->_offset;
979                 entry->rxb._rx_page_order = rxb->_rx_page_order;
980                 entry->fn = rx_h->fn;
981                 entry->context = rx_h->context;
982                 spin_lock(&mvm->async_handlers_lock);
983                 list_add_tail(&entry->list, &mvm->async_handlers_list);
984                 spin_unlock(&mvm->async_handlers_lock);
985                 schedule_work(&mvm->async_handlers_wk);
986                 return;
987         }
988
989         iwl_fwrt_handle_notification(&mvm->fwrt, rxb);
990 }
991
992 static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
993                        struct napi_struct *napi,
994                        struct iwl_rx_cmd_buffer *rxb)
995 {
996         struct iwl_rx_packet *pkt = rxb_addr(rxb);
997         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
998         u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
999
1000         if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1001                 iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
1002         else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
1003                 iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
1004         else
1005                 iwl_mvm_rx_common(mvm, rxb, pkt);
1006 }
1007
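/*
 * Multi-queue Rx path (new Rx API): same dispatch as iwl_mvm_rx(), but
 * this entry point serves the default queue (index 0), routes MPDUs
 * through iwl_mvm_rx_mpdu_mq() and additionally handles
 * RX_QUEUES_NOTIFICATION and FRAME_RELEASE.
 */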
1008 static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
1009                           struct napi_struct *napi,
1010                           struct iwl_rx_cmd_buffer *rxb)
1011 {
1012         struct iwl_rx_packet *pkt = rxb_addr(rxb);
1013         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1014         u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1015
1016         if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1017                 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
1018         else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
1019                                          RX_QUEUES_NOTIFICATION)))
1020                 iwl_mvm_rx_queue_notif(mvm, rxb, 0);
1021         else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
1022                 iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
1023         else
1024                 iwl_mvm_rx_common(mvm, rxb, pkt);
1025 }
1026
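/*
 * mac80211 queue stop/wake is reference counted per queue via
 * mac80211_queue_stop_count: a queue is actually stopped only on the
 * first stop request, and woken again only once the count drops back to
 * zero (see iwl_mvm_start_mac_queues() below).
 */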
1027 void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
1028 {
1029         int q;
1030
1031         if (WARN_ON_ONCE(!mq))
1032                 return;
1033
1034         for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
1035                 if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
1036                         IWL_DEBUG_TX_QUEUES(mvm,
1037                                             "mac80211 %d already stopped\n", q);
1038                         continue;
1039                 }
1040
1041                 ieee80211_stop_queue(mvm->hw, q);
1042         }
1043 }
1044
1045 static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
1046                              const struct iwl_device_cmd *cmd)
1047 {
1048         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1049
1050         /*
1051          * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
1052          * commands that need to block the Tx queues.
1053          */
1054         iwl_trans_block_txq_ptrs(mvm->trans, false);
1055 }
1056
1057 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
1058 {
1059         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1060         unsigned long mq;
1061
1062         spin_lock_bh(&mvm->queue_info_lock);
1063         mq = mvm->hw_queue_to_mac80211[hw_queue];
1064         spin_unlock_bh(&mvm->queue_info_lock);
1065
1066         iwl_mvm_stop_mac_queues(mvm, mq);
1067 }
1068
1069 void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
1070 {
1071         int q;
1072
1073         if (WARN_ON_ONCE(!mq))
1074                 return;
1075
1076         for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
1077                 if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
1078                         IWL_DEBUG_TX_QUEUES(mvm,
1079                                             "mac80211 %d still stopped\n", q);
1080                         continue;
1081                 }
1082
1083                 ieee80211_wake_queue(mvm->hw, q);
1084         }
1085 }
1086
1087 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
1088 {
1089         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1090         unsigned long mq;
1091
1092         spin_lock_bh(&mvm->queue_info_lock);
1093         mq = mvm->hw_queue_to_mac80211[hw_queue];
1094         spin_unlock_bh(&mvm->queue_info_lock);
1095
1096         iwl_mvm_start_mac_queues(mvm, mq);
1097 }
1098
1099 static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
1100 {
1101         bool state = iwl_mvm_is_radio_killed(mvm);
1102
1103         if (state)
1104                 wake_up(&mvm->rx_sync_waitq);
1105
1106         wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
1107 }
1108
1109 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
1110 {
1111         if (state)
1112                 set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1113         else
1114                 clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1115
1116         iwl_mvm_set_rfkill_state(mvm);
1117 }
1118
1119 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
1120 {
1121         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1122         bool calibrating = READ_ONCE(mvm->calibrating);
1123
1124         if (state)
1125                 set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1126         else
1127                 clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1128
1129         iwl_mvm_set_rfkill_state(mvm);
1130
1131         /* iwl_run_init_mvm_ucode is waiting for results, abort it */
1132         if (calibrating)
1133                 iwl_abort_notification_waits(&mvm->notif_wait);
1134
1135         /*
1136          * Stop the device if we run OPERATIONAL firmware or if we are in the
1137          * middle of the calibrations.
1138          */
1139         return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
1140 }
1141
1142 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
1143 {
1144         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1145         struct ieee80211_tx_info *info;
1146
1147         info = IEEE80211_SKB_CB(skb);
1148         iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1149         ieee80211_free_txskb(mvm->hw, skb);
1150 }
1151
1152 struct iwl_mvm_reprobe {
1153         struct device *dev;
1154         struct work_struct work;
1155 };
1156
1157 static void iwl_mvm_reprobe_wk(struct work_struct *wk)
1158 {
1159         struct iwl_mvm_reprobe *reprobe;
1160
1161         reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
1162         if (device_reprobe(reprobe->dev))
1163                 dev_err(reprobe->dev, "reprobe failed!\n");
1164         kfree(reprobe);
1165         module_put(THIS_MODULE);
1166 }
1167
1168 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
1169 {
1170         iwl_abort_notification_waits(&mvm->notif_wait);
1171
1172         /*
1173          * This is a bit racy, but in the worst case we tell mac80211 about
1174          * a stopped/aborted scan when that was already done, which is
1175          * not a problem. It is necessary to abort any OS scan here
1176          * because mac80211 requires having the scan cleared before
1177          * restarting.
1178          * We'll reset the scan_status to NONE in restart cleanup in
1179          * the next start() call from mac80211. If restart isn't called
1180          * (no fw restart), the scan status will stay busy.
1181          */
1182         iwl_mvm_report_scan_aborted(mvm);
1183
1184         /*
1185          * If we're restarting already, don't cycle restarts.
1186          * If INIT fw asserted, it will likely fail again.
1187          * If WoWLAN fw asserted, don't restart either, mac80211
1188          * can't recover this since we're already half suspended.
1189          */
1190         if (!mvm->fw_restart && fw_error) {
1191                 iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
1192                                         NULL);
1193         } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1194                 struct iwl_mvm_reprobe *reprobe;
1195
1196                 IWL_ERR(mvm,
1197                         "Firmware error during reconfiguration - reprobe!\n");
1198
1199                 /*
1200                  * get a module reference to avoid doing this while the module
1201                  * is being unloaded, and to avoid scheduling a work item with
1202                  * code that's being removed.
1203                  */
1204                 if (!try_module_get(THIS_MODULE)) {
1205                         IWL_ERR(mvm, "Module is being unloaded - abort\n");
1206                         return;
1207                 }
1208
1209                 reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
1210                 if (!reprobe) {
1211                         module_put(THIS_MODULE);
1212                         return;
1213                 }
1214                 reprobe->dev = mvm->trans->dev;
1215                 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
1216                 schedule_work(&reprobe->work);
1217         } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
1218                    mvm->hw_registered) {
1219                 /* don't let the transport/FW power down */
1220                 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1221
1222                 if (fw_error && mvm->fw_restart > 0)
1223                         mvm->fw_restart--;
1224                 set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
1225                 ieee80211_restart_hw(mvm->hw);
1226         }
1227 }
1228
1229 static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
1230 {
1231         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1232
1233         iwl_mvm_dump_nic_error_log(mvm);
1234
1235         iwl_mvm_nic_restart(mvm, true);
1236 }
1237
1238 static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
1239 {
1240         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1241
1242         WARN_ON(1);
1243         iwl_mvm_nic_restart(mvm, true);
1244 }
1245
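/*
 * struct iwl_d0i3_iter_data - state gathered while iterating the active
 * interfaces on D0i3 entry: the connected station vif and its AP station
 * id, how many station vifs were seen, which TID (if any) may be used for
 * protocol offloading and whether offloading has to be disabled.
 */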
1246 struct iwl_d0i3_iter_data {
1247         struct iwl_mvm *mvm;
1248         struct ieee80211_vif *connected_vif;
1249         u8 ap_sta_id;
1250         u8 vif_count;
1251         u8 offloading_tid;
1252         bool disable_offloading;
1253 };
1254
1255 static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
1256                                         struct ieee80211_vif *vif,
1257                                         struct iwl_d0i3_iter_data *iter_data)
1258 {
1259         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1260         struct iwl_mvm_sta *mvmsta;
1261         u32 available_tids = 0;
1262         u8 tid;
1263
1264         if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
1265                     mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
1266                 return false;
1267
1268         mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
1269         if (!mvmsta)
1270                 return false;
1271
1272         spin_lock_bh(&mvmsta->lock);
1273         for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
1274                 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1275
1276                 /*
1277                  * in case of pending tx packets, don't use this tid
1278                  * for offloading in order to prevent reuse of the same
1279                  * qos seq counters.
1280                  */
1281                 if (iwl_mvm_tid_queued(mvm, tid_data))
1282                         continue;
1283
1284                 if (tid_data->state != IWL_AGG_OFF)
1285                         continue;
1286
1287                 available_tids |= BIT(tid);
1288         }
1289         spin_unlock_bh(&mvmsta->lock);
1290
1291         /*
1292          * disallow protocol offloading if we have no available tid
1293          * (with no pending frames and no active aggregation,
1294          * as we don't handle "holes" properly - the scheduler needs the
1295          * frame's seq number and TFD index to match)
1296          */
1297         if (!available_tids)
1298                 return true;
1299
1300         /* for simplicity, just use the first available tid */
1301         iter_data->offloading_tid = ffs(available_tids) - 1;
1302         return false;
1303 }
1304
1305 static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
1306                                         struct ieee80211_vif *vif)
1307 {
1308         struct iwl_d0i3_iter_data *data = _data;
1309         struct iwl_mvm *mvm = data->mvm;
1310         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1311         u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
1312
1313         IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
1314         if (vif->type != NL80211_IFTYPE_STATION ||
1315             !vif->bss_conf.assoc)
1316                 return;
1317
1318         /*
1319          * in case of pending tx packets or active aggregations,
1320          * avoid offloading features in order to prevent reuse of
1321          * the same qos seq counters.
1322          */
1323         if (iwl_mvm_disallow_offloading(mvm, vif, data))
1324                 data->disable_offloading = true;
1325
1326         iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
1327         iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
1328                                    false, flags);
1329
1330         /*
1331          * on init/association, mvm already configures POWER_TABLE_CMD
1332          * and REPLY_MCAST_FILTER_CMD, so currently don't
1333          * reconfigure them (we might want to use different
1334          * params later on, though).
1335          */
1336         data->ap_sta_id = mvmvif->ap_sta_id;
1337         data->vif_count++;
1338
1339         /*
1340          * no new commands can be sent at this stage, so it's safe
1341          * to save the vif pointer during d0i3 entrance.
1342          */
1343         data->connected_vif = vif;
1344 }
1345
1346 static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
1347                                     struct iwl_wowlan_config_cmd *cmd,
1348                                     struct iwl_d0i3_iter_data *iter_data)
1349 {
1350         struct ieee80211_sta *ap_sta;
1351         struct iwl_mvm_sta *mvm_ap_sta;
1352
1353         if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
1354                 return;
1355
1356         rcu_read_lock();
1357
1358         ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
1359         if (IS_ERR_OR_NULL(ap_sta))
1360                 goto out;
1361
1362         mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
1363         cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
1364         cmd->offloading_tid = iter_data->offloading_tid;
1365         cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
1366                 ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
1367         /*
1368          * The d0i3 uCode takes care of the nonqos counters,
1369          * so configure only the qos seq ones.
1370          */
1371         iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
1372 out:
1373         rcu_read_unlock();
1374 }
1375
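/*
 * D0i3 entry outline: mark IWL_MVM_STATUS_IN_D0I3, abort if any reference
 * is still held, gather per-vif state with the enter iterator, flush the
 * Tx queues, optionally send WOWLAN_CONFIGURATION for the connected
 * station, and finally send D3_CONFIG_CMD with CMD_MAKE_TRANS_IDLE.
 */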
1376 int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
1377 {
1378         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1379         u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
1380         int ret;
1381         struct iwl_d0i3_iter_data d0i3_iter_data = {
1382                 .mvm = mvm,
1383         };
1384         struct iwl_wowlan_config_cmd wowlan_config_cmd = {
1385                 .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
1386                                              IWL_WOWLAN_WAKEUP_BEACON_MISS |
1387                                              IWL_WOWLAN_WAKEUP_LINK_CHANGE),
1388         };
1389         struct iwl_d3_manager_config d3_cfg_cmd = {
1390                 .min_sleep_time = cpu_to_le32(1000),
1391                 .wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
1392         };
1393
1394         IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
1395
1396         if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
1397                 return -EINVAL;
1398
1399         set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1400
1401         /*
1402          * iwl_mvm_ref_sync takes a reference before checking the flag,
1403          * so by checking that no reference is held here we prevent a
1404          * state in which iwl_mvm_ref_sync succeeds while we configure
1405          * the firmware to enter d0i3.
1406          */
1407         if (iwl_mvm_ref_taken(mvm)) {
1408                 IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
1409                 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1410                 wake_up(&mvm->d0i3_exit_waitq);
1411                 return 1;
1412         }
1413
1414         ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1415                                                    IEEE80211_IFACE_ITER_NORMAL,
1416                                                    iwl_mvm_enter_d0i3_iterator,
1417                                                    &d0i3_iter_data);
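        /* offloading and the wowlan config below are only used when exactly
         * one associated station vif was found by the iterator
         */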
1418         if (d0i3_iter_data.vif_count == 1) {
1419                 mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
1420                 mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
1421         } else {
1422                 WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
1423                 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1424                 mvm->d0i3_offloading = false;
1425         }
1426
1427         /* make sure we have no running tx while configuring the seqno */
1428         synchronize_net();
1429
1430         /* Flush the hw queues, in case something got queued during entry */
1431         /* TODO new tx api */
1432         if (iwl_mvm_has_new_tx_api(mvm)) {
1433                 WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
1434         } else {
1435                 ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
1436                                             flags);
1437                 if (ret)
1438                         return ret;
1439         }
1440
1441         /* send the wowlan configuration only if needed */
1442         if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
1443                 /* wake on beacons only if beacon storing isn't supported */
1444                 if (!fw_has_capa(&mvm->fw->ucode_capa,
1445                                  IWL_UCODE_TLV_CAPA_BEACON_STORING))
1446                         wowlan_config_cmd.wakeup_filter |=
1447                                 cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
1448
1449                 iwl_mvm_wowlan_config_key_params(mvm,
1450                                                  d0i3_iter_data.connected_vif,
1451                                                  true, flags);
1452
1453                 iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
1454                                         &d0i3_iter_data);
1455
1456                 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
1457                                            sizeof(wowlan_config_cmd),
1458                                            &wowlan_config_cmd);
1459                 if (ret)
1460                         return ret;
1461         }
1462
1463         return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
1464                                     flags | CMD_MAKE_TRANS_IDLE,
1465                                     sizeof(d3_cfg_cmd), &d3_cfg_cmd);
1466 }
1467
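/* Restore the normal power mode on the associated station vif on d0i3 exit. */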
1468 static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
1469                                        struct ieee80211_vif *vif)
1470 {
1471         struct iwl_mvm *mvm = _data;
1472         u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
1473
1474         IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
1475         if (vif->type != NL80211_IFTYPE_STATION ||
1476             !vif->bss_conf.assoc)
1477                 return;
1478
1479         iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
1480 }
1481
1482 struct iwl_mvm_d0i3_exit_work_iter_data {
1483         struct iwl_mvm *mvm;
1484         struct iwl_wowlan_status *status;
1485         u32 wakeup_reasons;
1486 };
1487
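/*
 * Handle the firmware wakeup reasons for the station vif we entered d0i3
 * with: report connection loss or beacon loss, or otherwise update the keys
 * from the WoWLAN status.
 */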
1488 static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
1489                                         struct ieee80211_vif *vif)
1490 {
1491         struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
1492         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1493         u32 reasons = data->wakeup_reasons;
1494
1495         /* consider only the relevant station interface */
1496         if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1497             data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
1498                 return;
1499
1500         if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
1501                 iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
1502         else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
1503                 ieee80211_beacon_loss(vif);
1504         else
1505                 iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
1506 }
1507
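/*
 * Re-enable TX after d0i3 exit: update the QoS sequence numbers reported by
 * the firmware (if offloading was used), re-enqueue or drop the frames that
 * were held in d0i3_tx, and wake the mac80211 queues if needed.
 */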
1508 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
1509 {
1510         struct ieee80211_sta *sta = NULL;
1511         struct iwl_mvm_sta *mvm_ap_sta;
1512         int i;
1513         bool wake_queues = false;
1514
1515         lockdep_assert_held(&mvm->mutex);
1516
1517         spin_lock_bh(&mvm->d0i3_tx_lock);
1518
1519         if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
1520                 goto out;
1521
1522         IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
1523
1524         /* get the sta in order to update seq numbers and re-enqueue skbs */
1525         sta = rcu_dereference_protected(
1526                         mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
1527                         lockdep_is_held(&mvm->mutex));
1528
1529         if (IS_ERR_OR_NULL(sta)) {
1530                 sta = NULL;
1531                 goto out;
1532         }
1533
1534         if (mvm->d0i3_offloading && qos_seq) {
1535                 /* update qos seq numbers if offloading was enabled */
1536                 mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
1537                 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1538                         u16 seq = le16_to_cpu(qos_seq[i]);
1539                         /* fw stores the last-used seqno, we store the next one (+0x10, since bits 0..3 are the frag number) */
1540                         seq += 0x10;
1541                         mvm_ap_sta->tid_data[i].seq_number = seq;
1542                 }
1543         }
1544 out:
1545         /* re-enqueue (or drop) all packets */
1546         while (!skb_queue_empty(&mvm->d0i3_tx)) {
1547                 struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
1548
1549                 if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
1550                         ieee80211_free_txskb(mvm->hw, skb);
1551
1552                 /* if the skb_queue is not empty, we need to wake queues */
1553                 wake_queues = true;
1554         }
1555         clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1556         wake_up(&mvm->d0i3_exit_waitq);
1557         mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1558         if (wake_queues)
1559                 ieee80211_wake_queues(mvm->hw);
1560
1561         spin_unlock_bh(&mvm->d0i3_tx_lock);
1562 }
1563
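/*
 * Deferred d0i3 exit work: query WOWLAN_GET_STATUSES, let each vif handle
 * the wakeup reasons, re-enable TX, update the regdomain if it changed and
 * release the EXIT_WORK reference.
 */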
1564 static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
1565 {
1566         struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
1567         struct iwl_host_cmd get_status_cmd = {
1568                 .id = WOWLAN_GET_STATUSES,
1569                 .flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
1570         };
1571         struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
1572                 .mvm = mvm,
1573         };
1574
1575         struct iwl_wowlan_status *status;
1576         int ret;
1577         u32 wakeup_reasons = 0;
1578         __le16 *qos_seq = NULL;
1579
1580         mutex_lock(&mvm->mutex);
1581         ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
1582         if (ret)
1583                 goto out;
1584
1585         status = (void *)get_status_cmd.resp_pkt->data;
1586         wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
1587         qos_seq = status->qos_seq_ctr;
1588
1589         IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
1590
1591         iter_data.wakeup_reasons = wakeup_reasons;
1592         iter_data.status = status;
1593         ieee80211_iterate_active_interfaces(mvm->hw,
1594                                             IEEE80211_IFACE_ITER_NORMAL,
1595                                             iwl_mvm_d0i3_exit_work_iter,
1596                                             &iter_data);
1597 out:
1598         iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
1599
1600         IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
1601                        wakeup_reasons);
1602
1603         /* qos_seq might point inside resp_pkt, so free it only now */
1604         if (get_status_cmd.resp_pkt)
1605                 iwl_free_resp(&get_status_cmd);
1606
1607         /* the FW might have updated the regdomain */
1608         iwl_mvm_update_changed_regdom(mvm);
1609
1610         iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
1611         mutex_unlock(&mvm->mutex);
1612 }
1613
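/*
 * Exit d0i3: unless the exit is deferred until resume, send D0I3_END_CMD,
 * restore the per-vif power mode and schedule the exit work to complete the
 * transition.
 */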
1614 int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
1615 {
1616         u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
1617                     CMD_WAKE_UP_TRANS;
1618         int ret;
1619
1620         IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
1621
1622         if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
1623                 return -EINVAL;
1624
1625         mutex_lock(&mvm->d0i3_suspend_mutex);
1626         if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
1627                 IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
1628                 __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
1629                 mutex_unlock(&mvm->d0i3_suspend_mutex);
1630                 return 0;
1631         }
1632         mutex_unlock(&mvm->d0i3_suspend_mutex);
1633
1634         ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
1635         if (ret)
1636                 goto out;
1637
1638         ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1639                                                    IEEE80211_IFACE_ITER_NORMAL,
1640                                                    iwl_mvm_exit_d0i3_iterator,
1641                                                    mvm);
1642 out:
1643         schedule_work(&mvm->d0i3_exit_work);
1644         return ret;
1645 }
1646
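/* op_mode callback: take the EXIT_WORK reference (released by the exit work)
 * and start the d0i3 exit.
 */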
1647 int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
1648 {
1649         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1650
1651         iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
1652         return _iwl_mvm_exit_d0i3(mvm);
1653 }
1654
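/*
 * Ops shared between the single-queue (iwl_mvm_ops) and multi-queue
 * (iwl_mvm_ops_mq) op modes; only the RX handlers differ.
 */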
1655 #define IWL_MVM_COMMON_OPS                                      \
1656         /* these could be differentiated */                     \
1657         .async_cb = iwl_mvm_async_cb,                           \
1658         .queue_full = iwl_mvm_stop_sw_queue,                    \
1659         .queue_not_full = iwl_mvm_wake_sw_queue,                \
1660         .hw_rf_kill = iwl_mvm_set_hw_rfkill_state,              \
1661         .free_skb = iwl_mvm_free_skb,                           \
1662         .nic_error = iwl_mvm_nic_error,                         \
1663         .cmd_queue_full = iwl_mvm_cmd_queue_full,               \
1664         .nic_config = iwl_mvm_nic_config,                       \
1665         .enter_d0i3 = iwl_mvm_enter_d0i3,                       \
1666         .exit_d0i3 = iwl_mvm_exit_d0i3,                         \
1667         /* as we only register one, these MUST be common! */    \
1668         .start = iwl_op_mode_mvm_start,                         \
1669         .stop = iwl_op_mode_mvm_stop
1670
1671 static const struct iwl_op_mode_ops iwl_mvm_ops = {
1672         IWL_MVM_COMMON_OPS,
1673         .rx = iwl_mvm_rx,
1674 };
1675
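/*
 * RSS RX handler for the multi-queue op mode: dispatch frame release and RX
 * queue notifications, and pass MPDUs to the per-queue RX path.
 */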
1676 static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
1677                               struct napi_struct *napi,
1678                               struct iwl_rx_cmd_buffer *rxb,
1679                               unsigned int queue)
1680 {
1681         struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1682         struct iwl_rx_packet *pkt = rxb_addr(rxb);
1683         u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1684
1685         if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
1686                 iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
1687         else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
1688                                          RX_QUEUES_NOTIFICATION)))
1689                 iwl_mvm_rx_queue_notif(mvm, rxb, queue);
1690         else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1691                 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
1692 }
1693
1694 static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
1695         IWL_MVM_COMMON_OPS,
1696         .rx = iwl_mvm_rx_mq,
1697         .rx_rss = iwl_mvm_rx_mq_rss,
1698 };