| 1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause |
| 2 | /* |
| 3 | * Copyright (C) 2012-2014, 2018-2024 Intel Corporation |
| 4 | * Copyright (C) 2013-2015 Intel Mobile Communications GmbH |
| 5 | * Copyright (C) 2016-2017 Intel Deutschland GmbH |
| 6 | */ |
| 7 | #include <linux/module.h> |
| 8 | #include <linux/rtnetlink.h> |
| 9 | #include <linux/vmalloc.h> |
| 10 | #include <net/mac80211.h> |
| 11 | |
| 12 | #include "fw/notif-wait.h" |
| 13 | #include "iwl-trans.h" |
| 14 | #include "iwl-op-mode.h" |
| 15 | #include "fw/img.h" |
| 16 | #include "iwl-debug.h" |
| 17 | #include "iwl-drv.h" |
| 18 | #include "iwl-modparams.h" |
| 19 | #include "mvm.h" |
| 20 | #include "iwl-phy-db.h" |
| 21 | #include "iwl-nvm-utils.h" |
| 22 | #include "iwl-csr.h" |
| 23 | #include "iwl-io.h" |
| 24 | #include "iwl-prph.h" |
| 25 | #include "rs.h" |
| 26 | #include "fw/api/scan.h" |
| 27 | #include "fw/api/rfi.h" |
| 28 | #include "time-event.h" |
| 29 | #include "fw-api.h" |
| 30 | #include "fw/acpi.h" |
| 31 | #include "fw/uefi.h" |
| 32 | #include "time-sync.h" |
| 33 | |
| 34 | #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" |
| 35 | MODULE_DESCRIPTION(DRV_DESCRIPTION); |
| 36 | MODULE_LICENSE("GPL"); |
| 37 | MODULE_IMPORT_NS(IWLWIFI); |
| 38 | |
| 39 | static const struct iwl_op_mode_ops iwl_mvm_ops; |
| 40 | static const struct iwl_op_mode_ops iwl_mvm_ops_mq; |
| 41 | |
| 42 | struct iwl_mvm_mod_params iwlmvm_mod_params = { |
| 43 | .power_scheme = IWL_POWER_SCHEME_BPS, |
| 44 | }; |
| 45 | |
| 46 | module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444); |
| 47 | MODULE_PARM_DESC(power_scheme, |
| 48 | "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); |
| 49 | |
| 50 | /* |
| 51 | * module init and exit functions |
| 52 | */ |
| 53 | static int __init iwl_mvm_init(void) |
| 54 | { |
| 55 | int ret; |
| 56 | |
| 57 | ret = iwl_mvm_rate_control_register(); |
| 58 | if (ret) { |
| 59 | pr_err("Unable to register rate control algorithm: %d\n", ret); |
| 60 | return ret; |
| 61 | } |
| 62 | |
| 63 | ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops); |
| 64 | if (ret) |
| 65 | pr_err("Unable to register MVM op_mode: %d\n", ret); |
| 66 | |
| 67 | return ret; |
| 68 | } |
| 69 | module_init(iwl_mvm_init); |
| 70 | |
| 71 | static void __exit iwl_mvm_exit(void) |
| 72 | { |
| 73 | iwl_opmode_deregister("iwlmvm"); |
| 74 | iwl_mvm_rate_control_unregister(); |
| 75 | } |
| 76 | module_exit(iwl_mvm_exit); |
| 77 | |
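/*
 * Program the radio type/step/dash from the FW PHY configuration into the
 * HW interface config CSR. On AX210 and later device families this register
 * setup isn't needed, so only the radio configuration is printed.
 */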
| 78 | static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) |
| 79 | { |
| 80 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 81 | u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; |
| 82 | u32 reg_val; |
| 83 | u32 phy_config = iwl_mvm_get_phy_config(mvm); |
| 84 | |
| 85 | radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >> |
| 86 | FW_PHY_CFG_RADIO_TYPE_POS; |
| 87 | radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >> |
| 88 | FW_PHY_CFG_RADIO_STEP_POS; |
| 89 | radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >> |
| 90 | FW_PHY_CFG_RADIO_DASH_POS; |
| 91 | |
| 92 | IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, |
| 93 | radio_cfg_step, radio_cfg_dash); |
| 94 | |
| 95 | if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) |
| 96 | return; |
| 97 | |
| 98 | /* SKU control */ |
| 99 | reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev); |
| 100 | |
| 101 | /* radio configuration */ |
| 102 | reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; |
| 103 | reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP; |
| 104 | reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; |
| 105 | |
| 106 | WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) & |
| 107 | ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE); |
| 108 | |
	/*
	 * TODO: Bits 7-8 of the CSR in the 8000 HW family and higher set the
	 * ADC sampling, and shouldn't be set to any non-zero value.
	 * The same is supposed to be true of older HW, but clearing these
	 * bits there (such as on the 7260) causes automatic tests to fail on
	 * seemingly unrelated errors. This needs further investigation, but
	 * for now we separate the cases.
	 */
| 117 | if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) |
| 118 | reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; |
| 119 | |
| 120 | if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) |
| 121 | reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG; |
| 122 | |
| 123 | iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, |
| 124 | CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH | |
| 125 | CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | |
| 126 | CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | |
| 127 | CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | |
| 128 | CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | |
| 129 | CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | |
| 130 | CSR_HW_IF_CONFIG_REG_D3_DEBUG, |
| 131 | reg_val); |
| 132 | |
	/*
	 * W/A: the NIC is stuck in a reset state after an early PCIe power
	 * off (PCIe power is lost before PERST# is asserted), causing the
	 * ME FW to lose ownership and not be able to obtain it back.
	 */
| 138 | if (!mvm->trans->cfg->apmg_not_supported) |
| 139 | iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, |
| 140 | APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, |
| 141 | ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); |
| 142 | } |
| 143 | |
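/*
 * Handle the FW's EMLSR recommendation: unblock EMLSR if the FW recommends
 * entering it, block it otherwise. Only relevant while EMLSR isn't active.
 */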
| 144 | static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm, |
| 145 | struct iwl_rx_cmd_buffer *rxb) |
| 146 | { |
| 147 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
| 148 | struct iwl_mvm_esr_mode_notif *notif = (void *)pkt->data; |
| 149 | struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); |
| 150 | |
	/* the FW recommendation applies only to entering EMLSR */
| 152 | if (IS_ERR_OR_NULL(vif) || iwl_mvm_vif_from_mac80211(vif)->esr_active) |
| 153 | return; |
| 154 | |
| 155 | if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER) |
| 156 | iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW); |
| 157 | else |
| 158 | iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, |
| 159 | iwl_mvm_get_primary_link(vif)); |
| 160 | } |
| 161 | |
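/*
 * The FW reported a failed EMLSR transition: a failed exit (back to a
 * single link) forces a disconnect, while a failed entry makes us go
 * back to a single link.
 */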
| 162 | static void iwl_mvm_rx_esr_trans_fail_notif(struct iwl_mvm *mvm, |
| 163 | struct iwl_rx_cmd_buffer *rxb) |
| 164 | { |
| 165 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
| 166 | struct iwl_esr_trans_fail_notif *notif = (void *)pkt->data; |
| 167 | struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); |
| 168 | u8 fw_link_id = le32_to_cpu(notif->link_id); |
| 169 | struct ieee80211_bss_conf *bss_conf; |
| 170 | |
| 171 | if (IS_ERR_OR_NULL(vif)) |
| 172 | return; |
| 173 | |
| 174 | IWL_DEBUG_INFO(mvm, "Failed to %s eSR on link %d, reason %d\n", |
| 175 | le32_to_cpu(notif->activation) ? "enter" : "exit", |
| 176 | le32_to_cpu(notif->link_id), |
| 177 | le32_to_cpu(notif->err_code)); |
| 178 | |
| 179 | /* we couldn't go back to single link, disconnect */ |
| 180 | if (!le32_to_cpu(notif->activation)) { |
| 181 | iwl_mvm_connection_loss(mvm, vif, "emlsr exit failed"); |
| 182 | return; |
| 183 | } |
| 184 | |
| 185 | bss_conf = iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, fw_link_id, false); |
| 186 | if (IWL_FW_CHECK(mvm, !bss_conf, |
| 187 | "FW reported failure to activate EMLSR on a non-existing link: %d\n", |
| 188 | fw_link_id)) |
| 189 | return; |
| 190 | |
| 191 | /* |
| 192 | * We failed to activate the second link and enter EMLSR, we need to go |
| 193 | * back to single link. |
| 194 | */ |
| 195 | iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_FAIL_ENTRY, |
| 196 | bss_conf->link_id); |
| 197 | } |
| 198 | |
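/*
 * Extended CCA workaround: if the FW reports that the extension channel
 * of our 2.4 GHz 40 MHz connection is too busy, drop the band's 40 MHz
 * HT/HE capabilities and reconnect so the new association uses 20 MHz.
 */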
| 199 | static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, |
| 200 | struct iwl_rx_cmd_buffer *rxb) |
| 201 | { |
| 202 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
| 203 | struct iwl_datapath_monitor_notif *notif = (void *)pkt->data; |
| 204 | struct ieee80211_supported_band *sband; |
| 205 | const struct ieee80211_sta_he_cap *he_cap; |
| 206 | struct ieee80211_vif *vif; |
| 207 | |
| 208 | if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA)) |
| 209 | return; |
| 210 | |
| 211 | vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id); |
| 212 | if (!vif || vif->type != NL80211_IFTYPE_STATION) |
| 213 | return; |
| 214 | |
| 215 | if (!vif->bss_conf.chanreq.oper.chan || |
| 216 | vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ || |
| 217 | vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40) |
| 218 | return; |
| 219 | |
| 220 | if (!vif->cfg.assoc) |
| 221 | return; |
| 222 | |
| 223 | /* this shouldn't happen *again*, ignore it */ |
| 224 | if (mvm->cca_40mhz_workaround) |
| 225 | return; |
| 226 | |
| 227 | /* |
| 228 | * We'll decrement this on disconnect - so set to 2 since we'll |
| 229 | * still have to disconnect from the current AP first. |
| 230 | */ |
| 231 | mvm->cca_40mhz_workaround = 2; |
| 232 | |
| 233 | /* |
| 234 | * This capability manipulation isn't really ideal, but it's the |
| 235 | * easiest choice - otherwise we'd have to do some major changes |
| 236 | * in mac80211 to support this, which isn't worth it. This does |
| 237 | * mean that userspace may have outdated information, but that's |
| 238 | * actually not an issue at all. |
| 239 | */ |
| 240 | sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; |
| 241 | |
| 242 | WARN_ON(!sband->ht_cap.ht_supported); |
| 243 | WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)); |
| 244 | sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; |
| 245 | |
| 246 | he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); |
| 247 | |
| 248 | if (he_cap) { |
| 249 | /* we know that ours is writable */ |
| 250 | struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; |
| 251 | |
| 252 | WARN_ON(!he->has_he); |
| 253 | WARN_ON(!(he->he_cap_elem.phy_cap_info[0] & |
| 254 | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)); |
| 255 | he->he_cap_elem.phy_cap_info[0] &= |
| 256 | ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; |
| 257 | } |
| 258 | |
| 259 | ieee80211_disconnect(vif, true); |
| 260 | } |
| 261 | |
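/*
 * Request static SMPS on a link when the FW asked to disable a chain for
 * thermal reasons and the link is using HE at 160 MHz; otherwise return
 * the link to automatic SMPS.
 */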
| 262 | void iwl_mvm_update_link_smps(struct ieee80211_vif *vif, |
| 263 | struct ieee80211_bss_conf *link_conf) |
| 264 | { |
| 265 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); |
| 266 | struct iwl_mvm *mvm = mvmvif->mvm; |
| 267 | enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC; |
| 268 | |
| 269 | if (!link_conf) |
| 270 | return; |
| 271 | |
| 272 | if (mvm->fw_static_smps_request && |
| 273 | link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 && |
| 274 | link_conf->he_support) |
| 275 | mode = IEEE80211_SMPS_STATIC; |
| 276 | |
| 277 | iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode, |
| 278 | link_conf->link_id); |
| 279 | } |
| 280 | |
| 281 | static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac, |
| 282 | struct ieee80211_vif *vif) |
| 283 | { |
| 284 | struct ieee80211_bss_conf *link_conf; |
| 285 | unsigned int link_id; |
| 286 | |
| 287 | rcu_read_lock(); |
| 288 | |
| 289 | for_each_vif_active_link(vif, link_conf, link_id) |
| 290 | iwl_mvm_update_link_smps(vif, link_conf); |
| 291 | |
| 292 | rcu_read_unlock(); |
| 293 | } |
| 294 | |
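/*
 * The FW requests enabling or disabling the second RX chain for thermal
 * reasons; remember the request and re-evaluate SMPS on all interfaces.
 */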
| 295 | static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm, |
| 296 | struct iwl_rx_cmd_buffer *rxb) |
| 297 | { |
| 298 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
| 299 | struct iwl_thermal_dual_chain_request *req = (void *)pkt->data; |
| 300 | |
| 301 | /* firmware is expected to handle that in RLC offload mode */ |
| 302 | if (IWL_FW_CHECK(mvm, iwl_mvm_has_rlc_offload(mvm), |
| 303 | "Got THERMAL_DUAL_CHAIN_REQUEST (0x%x) in RLC offload mode\n", |
| 304 | req->event)) |
| 305 | return; |
| 306 | |
	/*
	 * We could pass it as the iterator data, but we also need to
	 * remember it for new interfaces that are added while in this state.
	 */
| 311 | mvm->fw_static_smps_request = |
| 312 | req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE); |
| 313 | ieee80211_iterate_interfaces(mvm->hw, |
| 314 | IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER, |
| 315 | iwl_mvm_intf_dual_chain_req, NULL); |
| 316 | } |
| 317 | |
/**
 * enum iwl_rx_handler_context: context for Rx handler
 * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
 *	which can't acquire mvm->mutex.
 * @RX_HANDLER_ASYNC_LOCKED: if the handler needs to hold mvm->mutex
 *	(and only in this case!), it should be set as ASYNC. In that case,
 *	it will be called from a worker with mvm->mutex held.
 * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the
 *	mutex itself, it will be called from a worker without mvm->mutex held.
 * @RX_HANDLER_ASYNC_LOCKED_WIPHY: if the handler needs to hold the wiphy lock
 *	and mvm->mutex. It will be handled with the wiphy_work queue infra
 *	instead of the regular work queue.
 */
| 331 | enum iwl_rx_handler_context { |
| 332 | RX_HANDLER_SYNC, |
| 333 | RX_HANDLER_ASYNC_LOCKED, |
| 334 | RX_HANDLER_ASYNC_UNLOCKED, |
| 335 | RX_HANDLER_ASYNC_LOCKED_WIPHY, |
| 336 | }; |
| 337 | |
| 338 | /** |
| 339 | * struct iwl_rx_handlers: handler for FW notification |
| 340 | * @cmd_id: command id |
| 341 | * @min_size: minimum size to expect for the notification |
| 342 | * @context: see &iwl_rx_handler_context |
 * @fn: the function called when the notification is received
| 344 | */ |
| 345 | struct iwl_rx_handlers { |
| 346 | u16 cmd_id, min_size; |
| 347 | enum iwl_rx_handler_context context; |
| 348 | void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); |
| 349 | }; |
| 350 | |
| 351 | #define RX_HANDLER_NO_SIZE(_cmd_id, _fn, _context) \ |
| 352 | { .cmd_id = _cmd_id, .fn = _fn, .context = _context, } |
| 353 | #define RX_HANDLER_GRP_NO_SIZE(_grp, _cmd, _fn, _context) \ |
| 354 | { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context, } |
| 355 | #define RX_HANDLER(_cmd_id, _fn, _context, _struct) \ |
| 356 | { .cmd_id = _cmd_id, .fn = _fn, \ |
| 357 | .context = _context, .min_size = sizeof(_struct), } |
| 358 | #define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct) \ |
| 359 | { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, \ |
| 360 | .context = _context, .min_size = sizeof(_struct), } |
| 361 | |
/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, ...)
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can run in any of the contexts described in
 * &iwl_rx_handler_context.
 */
| 369 | static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { |
| 370 | RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC, |
| 371 | struct iwl_tx_resp), |
| 372 | RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC, |
| 373 | struct iwl_mvm_ba_notif), |
| 374 | |
| 375 | RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, |
| 376 | iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC, |
| 377 | struct iwl_tlc_update_notif), |
| 378 | |
| 379 | RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_old_notif, |
| 380 | RX_HANDLER_ASYNC_LOCKED_WIPHY, |
| 381 | struct iwl_bt_coex_prof_old_notif), |
| 382 | RX_HANDLER_GRP(BT_COEX_GROUP, PROFILE_NOTIF, iwl_mvm_rx_bt_coex_notif, |
| 383 | RX_HANDLER_ASYNC_LOCKED_WIPHY, |
| 384 | struct iwl_bt_coex_profile_notif), |
| 385 | RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, |
| 386 | RX_HANDLER_ASYNC_LOCKED), |
| 387 | RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, |
| 388 | RX_HANDLER_ASYNC_LOCKED), |
| 389 | |
| 390 | RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF, |
| 391 | iwl_mvm_handle_rx_system_oper_stats, |
| 392 | RX_HANDLER_ASYNC_LOCKED_WIPHY, |
| 393 | struct iwl_system_statistics_notif_oper), |
| 394 | RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF, |
| 395 | iwl_mvm_handle_rx_system_oper_part1_stats, |
| 396 | RX_HANDLER_ASYNC_LOCKED, |
| 397 | struct iwl_system_statistics_part1_notif_oper), |
| 398 | RX_HANDLER_GRP(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF, |
| 399 | iwl_mvm_handle_rx_system_end_stats_notif, |
| 400 | RX_HANDLER_ASYNC_LOCKED, |
| 401 | struct iwl_system_statistics_end_notif), |
| 402 | |
| 403 | RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID, |
| 404 | iwl_mvm_window_status_notif, RX_HANDLER_SYNC, |
| 405 | struct iwl_ba_window_status_notif), |
| 406 | |
| 407 | RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, |
| 408 | RX_HANDLER_SYNC, struct iwl_time_event_notif), |
| 409 | RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF, |
| 410 | iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC, |
| 411 | struct iwl_mvm_session_prot_notif), |
| 412 | RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, |
| 413 | RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif), |
| 414 | |
| 415 | RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC, |
| 416 | struct iwl_mvm_eosp_notification), |
| 417 | |
| 418 | RX_HANDLER(SCAN_ITERATION_COMPLETE, |
| 419 | iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC, |
| 420 | struct iwl_lmac_scan_complete_notif), |
| 421 | RX_HANDLER(SCAN_OFFLOAD_COMPLETE, |
| 422 | iwl_mvm_rx_lmac_scan_complete_notif, |
| 423 | RX_HANDLER_ASYNC_LOCKED, struct iwl_periodic_scan_complete), |
| 424 | RX_HANDLER_NO_SIZE(MATCH_FOUND_NOTIFICATION, |
| 425 | iwl_mvm_rx_scan_match_found, |
| 426 | RX_HANDLER_SYNC), |
| 427 | RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, |
| 428 | RX_HANDLER_ASYNC_LOCKED, |
| 429 | struct iwl_umac_scan_complete), |
| 430 | RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, |
| 431 | iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC, |
| 432 | struct iwl_umac_scan_iter_complete_notif), |
| 433 | |
| 434 | RX_HANDLER(MISSED_BEACONS_NOTIFICATION, |
| 435 | iwl_mvm_rx_missed_beacons_notif_legacy, |
| 436 | RX_HANDLER_ASYNC_LOCKED_WIPHY, |
| 437 | struct iwl_missed_beacons_notif_v4), |
| 438 | |
| 439 | RX_HANDLER_GRP(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF, |
| 440 | iwl_mvm_rx_missed_beacons_notif, |
| 441 | RX_HANDLER_ASYNC_LOCKED_WIPHY, |
| 442 | struct iwl_missed_beacons_notif), |
| 443 | RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC, |
| 444 | struct iwl_error_resp), |
| 445 | RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, |
| 446 | iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC, |
| 447 | struct iwl_uapsd_misbehaving_ap_notif), |
| 448 | RX_HANDLER_NO_SIZE(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, |
| 449 | RX_HANDLER_ASYNC_LOCKED), |
| 450 | RX_HANDLER_GRP_NO_SIZE(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, |
| 451 | iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED), |
| 452 | RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, |
| 453 | iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC, |
| 454 | struct ct_kill_notif), |
| 455 | |
| 456 | RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, |
| 457 | RX_HANDLER_ASYNC_LOCKED, |
| 458 | struct iwl_tdls_channel_switch_notif), |
| 459 | RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, |
| 460 | RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1), |
| 461 | RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS, |
| 462 | iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED, |
| 463 | struct iwl_ftm_responder_stats), |
| 464 | |
| 465 | RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, |
| 466 | iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED), |
| 467 | RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_LC_NOTIF, |
| 468 | iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED), |
| 469 | |
| 470 | RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF, |
| 471 | iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC, |
| 472 | struct iwl_mfu_assert_dump_notif), |
| 473 | RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, |
| 474 | iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC, |
| 475 | struct iwl_stored_beacon_notif_v2), |
| 476 | RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, |
| 477 | iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC, |
| 478 | struct iwl_mu_group_mgmt_notif), |
| 479 | RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF, |
| 480 | iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC, |
| 481 | struct iwl_mvm_pm_state_notification), |
| 482 | RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF, |
| 483 | iwl_mvm_probe_resp_data_notif, |
| 484 | RX_HANDLER_ASYNC_LOCKED, |
| 485 | struct iwl_probe_resp_data_notif), |
| 486 | RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF, |
| 487 | iwl_mvm_channel_switch_start_notif, |
| 488 | RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif), |
| 489 | RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, |
| 490 | iwl_mvm_channel_switch_error_notif, |
| 491 | RX_HANDLER_ASYNC_UNLOCKED, |
| 492 | struct iwl_channel_switch_error_notif), |
| 493 | |
| 494 | RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF, |
| 495 | iwl_mvm_rx_esr_mode_notif, |
| 496 | RX_HANDLER_ASYNC_LOCKED_WIPHY, |
| 497 | struct iwl_mvm_esr_mode_notif), |
| 498 | |
| 499 | RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF, |
| 500 | iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED, |
| 501 | struct iwl_datapath_monitor_notif), |
| 502 | |
| 503 | RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST, |
| 504 | iwl_mvm_rx_thermal_dual_chain_req, |
| 505 | RX_HANDLER_ASYNC_LOCKED, |
| 506 | struct iwl_thermal_dual_chain_request), |
| 507 | |
| 508 | RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF, |
| 509 | iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED, |
| 510 | struct iwl_rfi_deactivate_notif), |
| 511 | |
| 512 | RX_HANDLER_GRP(LEGACY_GROUP, |
| 513 | WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION, |
| 514 | iwl_mvm_time_sync_msmt_event, RX_HANDLER_SYNC, |
| 515 | struct iwl_time_msmt_notify), |
| 516 | RX_HANDLER_GRP(LEGACY_GROUP, |
| 517 | WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION, |
| 518 | iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC, |
| 519 | struct iwl_time_msmt_cfm_notify), |
| 520 | RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF, |
| 521 | iwl_mvm_rx_roc_notif, RX_HANDLER_ASYNC_LOCKED, |
| 522 | struct iwl_roc_notif), |
| 523 | RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF, |
| 524 | iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED, |
| 525 | struct iwl_umac_scan_channel_survey_notif), |
| 526 | RX_HANDLER_GRP(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF, |
| 527 | iwl_mvm_rx_esr_trans_fail_notif, |
| 528 | RX_HANDLER_ASYNC_LOCKED_WIPHY, |
| 529 | struct iwl_esr_trans_fail_notif), |
| 530 | }; |
#undef RX_HANDLER
#undef RX_HANDLER_GRP
#undef RX_HANDLER_NO_SIZE
#undef RX_HANDLER_GRP_NO_SIZE
| 533 | |
| 534 | /* Please keep this array *SORTED* by hex value. |
| 535 | * Access is done through binary search |
| 536 | */ |
| 537 | static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { |
| 538 | HCMD_NAME(UCODE_ALIVE_NTFY), |
| 539 | HCMD_NAME(REPLY_ERROR), |
| 540 | HCMD_NAME(ECHO_CMD), |
| 541 | HCMD_NAME(INIT_COMPLETE_NOTIF), |
| 542 | HCMD_NAME(PHY_CONTEXT_CMD), |
| 543 | HCMD_NAME(DBG_CFG), |
| 544 | HCMD_NAME(SCAN_CFG_CMD), |
| 545 | HCMD_NAME(SCAN_REQ_UMAC), |
| 546 | HCMD_NAME(SCAN_ABORT_UMAC), |
| 547 | HCMD_NAME(SCAN_COMPLETE_UMAC), |
| 548 | HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID), |
| 549 | HCMD_NAME(ADD_STA_KEY), |
| 550 | HCMD_NAME(ADD_STA), |
| 551 | HCMD_NAME(REMOVE_STA), |
| 552 | HCMD_NAME(TX_CMD), |
| 553 | HCMD_NAME(SCD_QUEUE_CFG), |
| 554 | HCMD_NAME(TXPATH_FLUSH), |
| 555 | HCMD_NAME(MGMT_MCAST_KEY), |
| 556 | HCMD_NAME(WEP_KEY), |
| 557 | HCMD_NAME(SHARED_MEM_CFG), |
| 558 | HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD), |
| 559 | HCMD_NAME(MAC_CONTEXT_CMD), |
| 560 | HCMD_NAME(TIME_EVENT_CMD), |
| 561 | HCMD_NAME(TIME_EVENT_NOTIFICATION), |
| 562 | HCMD_NAME(BINDING_CONTEXT_CMD), |
| 563 | HCMD_NAME(TIME_QUOTA_CMD), |
| 564 | HCMD_NAME(NON_QOS_TX_COUNTER_CMD), |
| 565 | HCMD_NAME(LEDS_CMD), |
| 566 | HCMD_NAME(LQ_CMD), |
| 567 | HCMD_NAME(FW_PAGING_BLOCK_CMD), |
| 568 | HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD), |
| 569 | HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD), |
| 570 | HCMD_NAME(HOT_SPOT_CMD), |
| 571 | HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD), |
| 572 | HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP), |
| 573 | HCMD_NAME(BT_COEX_CI), |
| 574 | HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION), |
| 575 | HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION), |
| 576 | HCMD_NAME(PHY_CONFIGURATION_CMD), |
| 577 | HCMD_NAME(CALIB_RES_NOTIF_PHY_DB), |
| 578 | HCMD_NAME(PHY_DB_CMD), |
| 579 | HCMD_NAME(SCAN_OFFLOAD_COMPLETE), |
| 580 | HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), |
| 581 | HCMD_NAME(POWER_TABLE_CMD), |
| 582 | HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), |
| 583 | HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF), |
| 584 | HCMD_NAME(NVM_ACCESS_CMD), |
| 585 | HCMD_NAME(BEACON_NOTIFICATION), |
| 586 | HCMD_NAME(BEACON_TEMPLATE_CMD), |
| 587 | HCMD_NAME(TX_ANT_CONFIGURATION_CMD), |
| 588 | HCMD_NAME(BT_CONFIG), |
| 589 | HCMD_NAME(STATISTICS_CMD), |
| 590 | HCMD_NAME(STATISTICS_NOTIFICATION), |
| 591 | HCMD_NAME(EOSP_NOTIFICATION), |
| 592 | HCMD_NAME(REDUCE_TX_POWER_CMD), |
| 593 | HCMD_NAME(MISSED_BEACONS_NOTIFICATION), |
| 594 | HCMD_NAME(TDLS_CONFIG_CMD), |
| 595 | HCMD_NAME(MAC_PM_POWER_TABLE), |
| 596 | HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION), |
| 597 | HCMD_NAME(MFUART_LOAD_NOTIFICATION), |
| 598 | HCMD_NAME(RSS_CONFIG_CMD), |
| 599 | HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), |
| 600 | HCMD_NAME(REPLY_RX_PHY_CMD), |
| 601 | HCMD_NAME(REPLY_RX_MPDU_CMD), |
| 602 | HCMD_NAME(BAR_FRAME_RELEASE), |
| 603 | HCMD_NAME(FRAME_RELEASE), |
| 604 | HCMD_NAME(BA_NOTIF), |
| 605 | HCMD_NAME(MCC_UPDATE_CMD), |
| 606 | HCMD_NAME(MCC_CHUB_UPDATE_CMD), |
| 607 | HCMD_NAME(MARKER_CMD), |
| 608 | HCMD_NAME(BT_PROFILE_NOTIFICATION), |
| 609 | HCMD_NAME(MCAST_FILTER_CMD), |
| 610 | HCMD_NAME(REPLY_SF_CFG_CMD), |
| 611 | HCMD_NAME(REPLY_BEACON_FILTERING_CMD), |
| 612 | HCMD_NAME(D3_CONFIG_CMD), |
| 613 | HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD), |
| 614 | HCMD_NAME(MATCH_FOUND_NOTIFICATION), |
| 615 | HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION), |
| 616 | HCMD_NAME(WOWLAN_PATTERNS), |
| 617 | HCMD_NAME(WOWLAN_CONFIGURATION), |
| 618 | HCMD_NAME(WOWLAN_TSC_RSC_PARAM), |
| 619 | HCMD_NAME(WOWLAN_TKIP_PARAM), |
| 620 | HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL), |
| 621 | HCMD_NAME(WOWLAN_GET_STATUSES), |
| 622 | HCMD_NAME(SCAN_ITERATION_COMPLETE), |
| 623 | HCMD_NAME(D0I3_END_CMD), |
| 624 | HCMD_NAME(LTR_CONFIG), |
| 625 | HCMD_NAME(LDBG_CONFIG_CMD), |
| 626 | HCMD_NAME(DEBUG_LOG_MSG), |
| 627 | }; |
| 628 | |
| 629 | /* Please keep this array *SORTED* by hex value. |
| 630 | * Access is done through binary search |
| 631 | */ |
| 632 | static const struct iwl_hcmd_names iwl_mvm_system_names[] = { |
| 633 | HCMD_NAME(SHARED_MEM_CFG_CMD), |
| 634 | HCMD_NAME(SOC_CONFIGURATION_CMD), |
| 635 | HCMD_NAME(INIT_EXTENDED_CFG_CMD), |
| 636 | HCMD_NAME(FW_ERROR_RECOVERY_CMD), |
| 637 | HCMD_NAME(RFI_CONFIG_CMD), |
| 638 | HCMD_NAME(RFI_GET_FREQ_TABLE_CMD), |
| 639 | HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD), |
| 640 | HCMD_NAME(SYSTEM_STATISTICS_CMD), |
| 641 | HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF), |
| 642 | HCMD_NAME(RFI_DEACTIVATE_NOTIF), |
| 643 | }; |
| 644 | |
| 645 | /* Please keep this array *SORTED* by hex value. |
| 646 | * Access is done through binary search |
| 647 | */ |
| 648 | static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { |
| 649 | HCMD_NAME(LOW_LATENCY_CMD), |
| 650 | HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD), |
| 651 | HCMD_NAME(SESSION_PROTECTION_CMD), |
| 652 | HCMD_NAME(CANCEL_CHANNEL_SWITCH_CMD), |
| 653 | HCMD_NAME(MAC_CONFIG_CMD), |
| 654 | HCMD_NAME(LINK_CONFIG_CMD), |
| 655 | HCMD_NAME(STA_CONFIG_CMD), |
| 656 | HCMD_NAME(AUX_STA_CMD), |
| 657 | HCMD_NAME(STA_REMOVE_CMD), |
| 658 | HCMD_NAME(STA_DISABLE_TX_CMD), |
| 659 | HCMD_NAME(ROC_CMD), |
| 660 | HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF), |
| 661 | HCMD_NAME(ROC_NOTIF), |
| 662 | HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF), |
| 663 | HCMD_NAME(MISSED_VAP_NOTIF), |
| 664 | HCMD_NAME(SESSION_PROTECTION_NOTIF), |
| 665 | HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF), |
| 666 | HCMD_NAME(CHANNEL_SWITCH_START_NOTIF), |
| 667 | }; |
| 668 | |
| 669 | /* Please keep this array *SORTED* by hex value. |
| 670 | * Access is done through binary search |
| 671 | */ |
| 672 | static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { |
| 673 | HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), |
| 674 | HCMD_NAME(CTDP_CONFIG_CMD), |
| 675 | HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), |
| 676 | HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD), |
| 677 | HCMD_NAME(AP_TX_POWER_CONSTRAINTS_CMD), |
| 678 | HCMD_NAME(CT_KILL_NOTIFICATION), |
| 679 | HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), |
| 680 | }; |
| 681 | |
| 682 | /* Please keep this array *SORTED* by hex value. |
| 683 | * Access is done through binary search |
| 684 | */ |
| 685 | static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { |
| 686 | HCMD_NAME(DQA_ENABLE_CMD), |
| 687 | HCMD_NAME(UPDATE_MU_GROUPS_CMD), |
| 688 | HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), |
| 689 | HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD), |
| 690 | HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD), |
| 691 | HCMD_NAME(STA_HE_CTXT_CMD), |
| 692 | HCMD_NAME(RLC_CONFIG_CMD), |
| 693 | HCMD_NAME(RFH_QUEUE_CONFIG_CMD), |
| 694 | HCMD_NAME(TLC_MNG_CONFIG_CMD), |
| 695 | HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD), |
| 696 | HCMD_NAME(SCD_QUEUE_CONFIG_CMD), |
| 697 | HCMD_NAME(SEC_KEY_CMD), |
| 698 | HCMD_NAME(ESR_MODE_NOTIF), |
| 699 | HCMD_NAME(MONITOR_NOTIF), |
| 700 | HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST), |
| 701 | HCMD_NAME(STA_PM_NOTIF), |
| 702 | HCMD_NAME(MU_GROUP_MGMT_NOTIF), |
| 703 | HCMD_NAME(RX_QUEUES_NOTIFICATION), |
| 704 | }; |
| 705 | |
| 706 | /* Please keep this array *SORTED* by hex value. |
| 707 | * Access is done through binary search |
| 708 | */ |
| 709 | static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = { |
| 710 | HCMD_NAME(STATISTICS_OPER_NOTIF), |
| 711 | HCMD_NAME(STATISTICS_OPER_PART1_NOTIF), |
| 712 | }; |
| 713 | |
| 714 | /* Please keep this array *SORTED* by hex value. |
| 715 | * Access is done through binary search |
| 716 | */ |
| 717 | static const struct iwl_hcmd_names iwl_mvm_debug_names[] = { |
| 718 | HCMD_NAME(LMAC_RD_WR), |
| 719 | HCMD_NAME(UMAC_RD_WR), |
| 720 | HCMD_NAME(HOST_EVENT_CFG), |
| 721 | HCMD_NAME(DBGC_SUSPEND_RESUME), |
| 722 | HCMD_NAME(BUFFER_ALLOCATION), |
| 723 | HCMD_NAME(GET_TAS_STATUS), |
| 724 | HCMD_NAME(FW_DUMP_COMPLETE_CMD), |
| 725 | HCMD_NAME(FW_CLEAR_BUFFER), |
| 726 | HCMD_NAME(MFU_ASSERT_DUMP_NTF), |
| 727 | }; |
| 728 | |
| 729 | /* Please keep this array *SORTED* by hex value. |
| 730 | * Access is done through binary search |
| 731 | */ |
| 732 | static const struct iwl_hcmd_names iwl_mvm_scan_names[] = { |
| 733 | HCMD_NAME(CHANNEL_SURVEY_NOTIF), |
| 734 | HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF), |
| 735 | }; |
| 736 | |
| 737 | /* Please keep this array *SORTED* by hex value. |
| 738 | * Access is done through binary search |
| 739 | */ |
| 740 | static const struct iwl_hcmd_names iwl_mvm_location_names[] = { |
| 741 | HCMD_NAME(TOF_RANGE_REQ_CMD), |
| 742 | HCMD_NAME(TOF_CONFIG_CMD), |
| 743 | HCMD_NAME(TOF_RANGE_ABORT_CMD), |
| 744 | HCMD_NAME(TOF_RANGE_REQ_EXT_CMD), |
| 745 | HCMD_NAME(TOF_RESPONDER_CONFIG_CMD), |
| 746 | HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD), |
| 747 | HCMD_NAME(TOF_LC_NOTIF), |
| 748 | HCMD_NAME(TOF_RESPONDER_STATS), |
| 749 | HCMD_NAME(TOF_MCSI_DEBUG_NOTIF), |
| 750 | HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF), |
| 751 | }; |
| 752 | |
| 753 | /* Please keep this array *SORTED* by hex value. |
| 754 | * Access is done through binary search |
| 755 | */ |
| 756 | static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { |
| 757 | HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION), |
| 758 | HCMD_NAME(WOWLAN_INFO_NOTIFICATION), |
| 759 | HCMD_NAME(D3_END_NOTIFICATION), |
| 760 | HCMD_NAME(STORED_BEACON_NTF), |
| 761 | }; |
| 762 | |
| 763 | /* Please keep this array *SORTED* by hex value. |
| 764 | * Access is done through binary search |
| 765 | */ |
| 766 | static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = { |
| 767 | HCMD_NAME(NVM_ACCESS_COMPLETE), |
| 768 | HCMD_NAME(NVM_GET_INFO), |
| 769 | HCMD_NAME(TAS_CONFIG), |
| 770 | }; |
| 771 | |
| 772 | /* Please keep this array *SORTED* by hex value. |
| 773 | * Access is done through binary search |
| 774 | */ |
| 775 | static const struct iwl_hcmd_names iwl_mvm_bt_coex_names[] = { |
| 776 | HCMD_NAME(PROFILE_NOTIF), |
| 777 | }; |
| 778 | |
| 779 | static const struct iwl_hcmd_arr iwl_mvm_groups[] = { |
| 780 | [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), |
| 781 | [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), |
| 782 | [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names), |
| 783 | [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names), |
| 784 | [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), |
| 785 | [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), |
| 786 | [SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names), |
| 787 | [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names), |
| 788 | [BT_COEX_GROUP] = HCMD_ARR(iwl_mvm_bt_coex_names), |
| 789 | [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), |
| 790 | [REGULATORY_AND_NVM_GROUP] = |
| 791 | HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), |
| 792 | [DEBUG_GROUP] = HCMD_ARR(iwl_mvm_debug_names), |
| 793 | [STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names), |
| 794 | }; |
| 795 | |
/* this forward declaration avoids having to export the function */
| 797 | static void iwl_mvm_async_handlers_wk(struct work_struct *wk); |
| 798 | static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy, |
| 799 | struct wiphy_work *work); |
| 800 | |
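/*
 * Look up the TX power backoff matching the platform power limit from
 * the BIOS; returns 0 if there is no backoff table or no matching entry.
 */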
| 801 | static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm) |
| 802 | { |
| 803 | const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs; |
| 804 | u64 dflt_pwr_limit; |
| 805 | |
| 806 | if (!backoff) |
| 807 | return 0; |
| 808 | |
| 809 | iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit); |
| 810 | |
| 811 | while (backoff->pwr) { |
| 812 | if (dflt_pwr_limit >= backoff->pwr) |
| 813 | return backoff->backoff; |
| 814 | |
| 815 | backoff++; |
| 816 | } |
| 817 | |
| 818 | return 0; |
| 819 | } |
| 820 | |
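/*
 * Channel-switch TX unblock: re-enable TX for all stations of the vif
 * that was blocked around a CSA, then clear the blocked-vif pointer.
 */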
| 821 | static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) |
| 822 | { |
| 823 | struct iwl_mvm *mvm = |
| 824 | container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work); |
| 825 | struct ieee80211_vif *tx_blocked_vif; |
| 826 | struct iwl_mvm_vif *mvmvif; |
| 827 | |
| 828 | guard(mvm)(mvm); |
| 829 | |
| 830 | tx_blocked_vif = |
| 831 | rcu_dereference_protected(mvm->csa_tx_blocked_vif, |
| 832 | lockdep_is_held(&mvm->mutex)); |
| 833 | |
| 834 | if (!tx_blocked_vif) |
| 835 | return; |
| 836 | |
| 837 | mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); |
| 838 | iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); |
| 839 | RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); |
| 840 | } |
| 841 | |
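/*
 * fw runtime callbacks: debug dump collection and host commands issued by
 * the fw runtime code are serialized with the rest of the driver through
 * mvm->mutex.
 */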
| 842 | static void iwl_mvm_fwrt_dump_start(void *ctx) |
| 843 | { |
| 844 | struct iwl_mvm *mvm = ctx; |
| 845 | |
| 846 | mutex_lock(&mvm->mutex); |
| 847 | } |
| 848 | |
| 849 | static void iwl_mvm_fwrt_dump_end(void *ctx) |
| 850 | { |
| 851 | struct iwl_mvm *mvm = ctx; |
| 852 | |
| 853 | mutex_unlock(&mvm->mutex); |
| 854 | } |
| 855 | |
| 856 | static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd) |
| 857 | { |
| 858 | struct iwl_mvm *mvm = (struct iwl_mvm *)ctx; |
| 859 | |
| 860 | guard(mvm)(mvm); |
| 861 | return iwl_mvm_send_cmd(mvm, host_cmd); |
| 862 | } |
| 863 | |
| 864 | static bool iwl_mvm_d3_debug_enable(void *ctx) |
| 865 | { |
| 866 | return IWL_MVM_D3_DEBUG; |
| 867 | } |
| 868 | |
| 869 | static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { |
| 870 | .dump_start = iwl_mvm_fwrt_dump_start, |
| 871 | .dump_end = iwl_mvm_fwrt_dump_end, |
| 872 | .send_hcmd = iwl_mvm_fwrt_send_hcmd, |
| 873 | .d3_debug_enable = iwl_mvm_d3_debug_enable, |
| 874 | }; |
| 875 | |
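/*
 * Obtain the NVM data, either from CSME (when it owns the device) or by
 * running the INIT firmware and reading it from the device.
 */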
| 876 | static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) |
| 877 | { |
| 878 | struct iwl_trans *trans = mvm->trans; |
| 879 | int ret; |
| 880 | |
| 881 | if (trans->csme_own) { |
| 882 | if (WARN(!mvm->mei_registered, |
| 883 | "csme is owner, but we aren't registered to iwlmei\n")) |
| 884 | goto get_nvm_from_fw; |
| 885 | |
| 886 | mvm->mei_nvm_data = iwl_mei_get_nvm(); |
| 887 | if (mvm->mei_nvm_data) { |
			/*
			 * mvm->mei_nvm_data is set and because of that,
			 * we'll load the NVM from the FW when we get
			 * ownership.
			 */
| 893 | mvm->nvm_data = |
| 894 | iwl_parse_mei_nvm_data(trans, trans->cfg, |
| 895 | mvm->mei_nvm_data, |
| 896 | mvm->fw, |
| 897 | mvm->set_tx_ant, |
| 898 | mvm->set_rx_ant); |
| 899 | return 0; |
| 900 | } |
| 901 | |
| 902 | IWL_ERR(mvm, |
| 903 | "Got a NULL NVM from CSME, trying to get it from the device\n"); |
| 904 | } |
| 905 | |
| 906 | get_nvm_from_fw: |
| 907 | rtnl_lock(); |
| 908 | wiphy_lock(mvm->hw->wiphy); |
| 909 | mutex_lock(&mvm->mutex); |
| 910 | |
| 911 | ret = iwl_trans_start_hw(mvm->trans); |
| 912 | if (ret) { |
| 913 | mutex_unlock(&mvm->mutex); |
| 914 | wiphy_unlock(mvm->hw->wiphy); |
| 915 | rtnl_unlock(); |
| 916 | return ret; |
| 917 | } |
| 918 | |
| 919 | ret = iwl_run_init_mvm_ucode(mvm); |
| 920 | if (ret && ret != -ERFKILL) |
| 921 | iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); |
| 922 | if (!ret && iwl_mvm_is_lar_supported(mvm)) { |
| 923 | mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; |
| 924 | ret = iwl_mvm_init_mcc(mvm); |
| 925 | } |
| 926 | |
| 927 | iwl_mvm_stop_device(mvm); |
| 928 | |
| 929 | mutex_unlock(&mvm->mutex); |
| 930 | wiphy_unlock(mvm->hw->wiphy); |
| 931 | rtnl_unlock(); |
| 932 | |
| 933 | if (ret) |
| 934 | IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); |
| 935 | |
	/* no longer needed, regardless of success or failure */
| 937 | mvm->fw_product_reset = false; |
| 938 | |
| 939 | return ret; |
| 940 | } |
| 941 | |
| 942 | static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm) |
| 943 | { |
| 944 | struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; |
| 945 | int ret; |
| 946 | |
| 947 | iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); |
| 948 | |
| 949 | ret = iwl_mvm_mac_setup_register(mvm); |
| 950 | if (ret) |
| 951 | return ret; |
| 952 | |
| 953 | mvm->hw_registered = true; |
| 954 | |
| 955 | iwl_mvm_dbgfs_register(mvm); |
| 956 | |
| 957 | wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, |
| 958 | mvm->mei_rfkill_blocked, |
| 959 | RFKILL_HARD_BLOCK_NOT_OWNER); |
| 960 | |
| 961 | iwl_mvm_mei_set_sw_rfkill_state(mvm); |
| 962 | |
| 963 | return 0; |
| 964 | } |
| 965 | |
| 966 | struct iwl_mvm_frob_txf_data { |
| 967 | u8 *buf; |
| 968 | size_t buflen; |
| 969 | }; |
| 970 | |
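/*
 * Sanitize a TX FIFO dump: scan it for the material of each configured key
 * and overwrite any match, so that dumps can be shared without leaking keys.
 */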
| 971 | static void iwl_mvm_frob_txf_key_iter(struct ieee80211_hw *hw, |
| 972 | struct ieee80211_vif *vif, |
| 973 | struct ieee80211_sta *sta, |
| 974 | struct ieee80211_key_conf *key, |
| 975 | void *data) |
| 976 | { |
| 977 | struct iwl_mvm_frob_txf_data *txf = data; |
| 978 | u8 keylen, match, matchend; |
| 979 | u8 *keydata; |
| 980 | size_t i; |
| 981 | |
| 982 | switch (key->cipher) { |
| 983 | case WLAN_CIPHER_SUITE_CCMP: |
| 984 | keydata = key->key; |
| 985 | keylen = key->keylen; |
| 986 | break; |
| 987 | case WLAN_CIPHER_SUITE_WEP40: |
| 988 | case WLAN_CIPHER_SUITE_WEP104: |
| 989 | case WLAN_CIPHER_SUITE_TKIP: |
| 990 | /* |
| 991 | * WEP has short keys which might show up in the payload, |
| 992 | * and then you can deduce the key, so in this case just |
| 993 | * remove all FIFO data. |
| 994 | * For TKIP, we don't know the phase 2 keys here, so same. |
| 995 | */ |
| 996 | memset(txf->buf, 0xBB, txf->buflen); |
| 997 | return; |
| 998 | default: |
| 999 | return; |
| 1000 | } |
| 1001 | |
| 1002 | /* scan for key material and clear it out */ |
| 1003 | match = 0; |
| 1004 | for (i = 0; i < txf->buflen; i++) { |
| 1005 | if (txf->buf[i] != keydata[match]) { |
| 1006 | match = 0; |
| 1007 | continue; |
| 1008 | } |
| 1009 | match++; |
| 1010 | if (match == keylen) { |
| 1011 | memset(txf->buf + i - keylen, 0xAA, keylen); |
| 1012 | match = 0; |
| 1013 | } |
| 1014 | } |
| 1015 | |
| 1016 | /* we're dealing with a FIFO, so check wrapped around data */ |
| 1017 | matchend = match; |
| 1018 | for (i = 0; match && i < keylen - match; i++) { |
| 1019 | if (txf->buf[i] != keydata[match]) |
| 1020 | break; |
| 1021 | match++; |
| 1022 | if (match == keylen) { |
| 1023 | memset(txf->buf, 0xAA, i + 1); |
| 1024 | memset(txf->buf + txf->buflen - matchend, 0xAA, |
| 1025 | matchend); |
| 1026 | break; |
| 1027 | } |
| 1028 | } |
| 1029 | } |
| 1030 | |
| 1031 | static void iwl_mvm_frob_txf(void *ctx, void *buf, size_t buflen) |
| 1032 | { |
| 1033 | struct iwl_mvm_frob_txf_data txf = { |
| 1034 | .buf = buf, |
| 1035 | .buflen = buflen, |
| 1036 | }; |
| 1037 | struct iwl_mvm *mvm = ctx; |
| 1038 | |
| 1039 | /* embedded key material exists only on old API */ |
| 1040 | if (iwl_mvm_has_new_tx_api(mvm)) |
| 1041 | return; |
| 1042 | |
| 1043 | rcu_read_lock(); |
| 1044 | ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf); |
| 1045 | rcu_read_unlock(); |
| 1046 | } |
| 1047 | |
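/*
 * Sanitize a host command in a dump: blank out the key material carried
 * by the few key-related commands.
 */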
| 1048 | static void iwl_mvm_frob_hcmd(void *ctx, void *hcmd, size_t len) |
| 1049 | { |
| 1050 | /* we only use wide headers for commands */ |
| 1051 | struct iwl_cmd_header_wide *hdr = hcmd; |
| 1052 | unsigned int frob_start = sizeof(*hdr), frob_end = 0; |
| 1053 | |
	if (len < sizeof(*hdr))
| 1055 | return; |
| 1056 | |
| 1057 | /* all the commands we care about are in LONG_GROUP */ |
| 1058 | if (hdr->group_id != LONG_GROUP) |
| 1059 | return; |
| 1060 | |
| 1061 | switch (hdr->cmd) { |
| 1062 | case WEP_KEY: |
| 1063 | case WOWLAN_TKIP_PARAM: |
| 1064 | case WOWLAN_KEK_KCK_MATERIAL: |
| 1065 | case ADD_STA_KEY: |
| 1066 | /* |
| 1067 | * blank out everything here, easier than dealing |
| 1068 | * with the various versions of the command |
| 1069 | */ |
| 1070 | frob_end = INT_MAX; |
| 1071 | break; |
| 1072 | case MGMT_MCAST_KEY: |
| 1073 | frob_start = offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk); |
| 1074 | BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) != |
| 1075 | offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk)); |
| 1076 | |
| 1077 | frob_end = offsetofend(struct iwl_mvm_mgmt_mcast_key_cmd, igtk); |
| 1078 | BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) < |
| 1079 | offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk)); |
| 1080 | break; |
| 1081 | } |
| 1082 | |
| 1083 | if (frob_start >= frob_end) |
| 1084 | return; |
| 1085 | |
| 1086 | if (frob_end > len) |
| 1087 | frob_end = len; |
| 1088 | |
| 1089 | memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start); |
| 1090 | } |
| 1091 | |
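/*
 * Sanitize dumped device memory: overwrite the ranges that the firmware
 * image marked as excluded from dumps (e.g. because they may hold key
 * material).
 */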
| 1092 | static void iwl_mvm_frob_mem(void *ctx, u32 mem_addr, void *mem, size_t buflen) |
| 1093 | { |
| 1094 | const struct iwl_dump_exclude *excl; |
| 1095 | struct iwl_mvm *mvm = ctx; |
| 1096 | int i; |
| 1097 | |
| 1098 | switch (mvm->fwrt.cur_fw_img) { |
| 1099 | case IWL_UCODE_INIT: |
| 1100 | default: |
| 1101 | /* not relevant */ |
| 1102 | return; |
| 1103 | case IWL_UCODE_REGULAR: |
| 1104 | case IWL_UCODE_REGULAR_USNIFFER: |
| 1105 | excl = mvm->fw->dump_excl; |
| 1106 | break; |
| 1107 | case IWL_UCODE_WOWLAN: |
| 1108 | excl = mvm->fw->dump_excl_wowlan; |
| 1109 | break; |
| 1110 | } |
| 1111 | |
| 1112 | BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) != |
| 1113 | sizeof(mvm->fw->dump_excl_wowlan)); |
| 1114 | |
| 1115 | for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) { |
| 1116 | u32 start, end; |
| 1117 | |
| 1118 | if (!excl[i].addr || !excl[i].size) |
| 1119 | continue; |
| 1120 | |
| 1121 | start = excl[i].addr; |
| 1122 | end = start + excl[i].size; |
| 1123 | |
| 1124 | if (end <= mem_addr || start >= mem_addr + buflen) |
| 1125 | continue; |
| 1126 | |
| 1127 | if (start < mem_addr) |
| 1128 | start = mem_addr; |
| 1129 | |
| 1130 | if (end > mem_addr + buflen) |
| 1131 | end = mem_addr + buflen; |
| 1132 | |
| 1133 | memset((u8 *)mem + start - mem_addr, 0xAA, end - start); |
| 1134 | } |
| 1135 | } |
| 1136 | |
| 1137 | static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = { |
| 1138 | .frob_txf = iwl_mvm_frob_txf, |
| 1139 | .frob_hcmd = iwl_mvm_frob_hcmd, |
| 1140 | .frob_mem = iwl_mvm_frob_mem, |
| 1141 | }; |
| 1142 | |
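/*
 * Store the CSME connection info reported by iwlmei; readers access it
 * under RCU, so publish the new pointer first and free the old data only
 * after a grace period.
 */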
| 1143 | static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info) |
| 1144 | { |
| 1145 | struct iwl_mvm *mvm = priv; |
| 1146 | struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info; |
| 1147 | |
	/*
	 * This is protected by the guarantee that this function will not be
	 * called concurrently on two different threads.
	 */
| 1152 | prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true); |
| 1153 | |
| 1154 | curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL); |
| 1155 | if (!curr_conn_info) |
| 1156 | return; |
| 1157 | |
| 1158 | curr_conn_info->conn_info = *conn_info; |
| 1159 | |
| 1160 | rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info); |
| 1161 | |
| 1162 | if (prev_conn_info) |
| 1163 | kfree_rcu(prev_conn_info, rcu_head); |
| 1164 | } |
| 1165 | |
| 1166 | static void iwl_mvm_mei_rfkill(void *priv, bool blocked, |
| 1167 | bool csme_taking_ownership) |
| 1168 | { |
| 1169 | struct iwl_mvm *mvm = priv; |
| 1170 | |
| 1171 | if (blocked && !csme_taking_ownership) |
| 1172 | return; |
| 1173 | |
| 1174 | mvm->mei_rfkill_blocked = blocked; |
| 1175 | if (!mvm->hw_registered) |
| 1176 | return; |
| 1177 | |
| 1178 | wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, |
| 1179 | mvm->mei_rfkill_blocked, |
| 1180 | RFKILL_HARD_BLOCK_NOT_OWNER); |
| 1181 | } |
| 1182 | |
| 1183 | static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden) |
| 1184 | { |
| 1185 | struct iwl_mvm *mvm = priv; |
| 1186 | |
| 1187 | if (!mvm->hw_registered || !mvm->csme_vif) |
| 1188 | return; |
| 1189 | |
| 1190 | iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden); |
| 1191 | } |
| 1192 | |
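/*
 * Once the SAP connection to CSME is up, retry the startup that was
 * deferred while CSME owned the device; on failure, tear everything down.
 */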
| 1193 | static void iwl_mvm_sap_connected_wk(struct work_struct *wk) |
| 1194 | { |
| 1195 | struct iwl_mvm *mvm = |
| 1196 | container_of(wk, struct iwl_mvm, sap_connected_wk); |
| 1197 | int ret; |
| 1198 | |
| 1199 | ret = iwl_mvm_start_get_nvm(mvm); |
| 1200 | if (ret) |
| 1201 | goto out_free; |
| 1202 | |
| 1203 | ret = iwl_mvm_start_post_nvm(mvm); |
| 1204 | if (ret) |
| 1205 | goto out_free; |
| 1206 | |
| 1207 | return; |
| 1208 | |
| 1209 | out_free: |
| 1210 | IWL_ERR(mvm, "Couldn't get started...\n"); |
| 1211 | iwl_mei_start_unregister(); |
| 1212 | iwl_mei_unregister_complete(); |
| 1213 | iwl_fw_flush_dumps(&mvm->fwrt); |
| 1214 | iwl_mvm_thermal_exit(mvm); |
| 1215 | iwl_fw_runtime_free(&mvm->fwrt); |
| 1216 | iwl_phy_db_free(mvm->phy_db); |
| 1217 | kfree(mvm->scan_cmd); |
| 1218 | iwl_trans_op_mode_leave(mvm->trans); |
| 1219 | kfree(mvm->nvm_data); |
| 1220 | kfree(mvm->mei_nvm_data); |
| 1221 | |
| 1222 | ieee80211_free_hw(mvm->hw); |
| 1223 | } |
| 1224 | |
| 1225 | static void iwl_mvm_mei_sap_connected(void *priv) |
| 1226 | { |
| 1227 | struct iwl_mvm *mvm = priv; |
| 1228 | |
| 1229 | if (!mvm->hw_registered) |
| 1230 | schedule_work(&mvm->sap_connected_wk); |
| 1231 | } |
| 1232 | |
| 1233 | static void iwl_mvm_mei_nic_stolen(void *priv) |
| 1234 | { |
| 1235 | struct iwl_mvm *mvm = priv; |
| 1236 | |
| 1237 | rtnl_lock(); |
| 1238 | cfg80211_shutdown_all_interfaces(mvm->hw->wiphy); |
| 1239 | rtnl_unlock(); |
| 1240 | } |
| 1241 | |
| 1242 | static const struct iwl_mei_ops mei_ops = { |
| 1243 | .me_conn_status = iwl_mvm_me_conn_status, |
| 1244 | .rfkill = iwl_mvm_mei_rfkill, |
| 1245 | .roaming_forbidden = iwl_mvm_mei_roaming_forbidden, |
| 1246 | .sap_connected = iwl_mvm_mei_sap_connected, |
| 1247 | .nic_stolen = iwl_mvm_mei_nic_stolen, |
| 1248 | }; |
| 1249 | |
| 1250 | static void iwl_mvm_find_link_selection_vif(void *_data, u8 *mac, |
| 1251 | struct ieee80211_vif *vif) |
| 1252 | { |
| 1253 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); |
| 1254 | |
| 1255 | if (ieee80211_vif_is_mld(vif) && mvmvif->authorized) |
| 1256 | iwl_mvm_select_links(mvmvif->mvm, vif); |
| 1257 | } |
| 1258 | |
| 1259 | static void iwl_mvm_trig_link_selection(struct wiphy *wiphy, |
| 1260 | struct wiphy_work *wk) |
| 1261 | { |
| 1262 | struct iwl_mvm *mvm = |
| 1263 | container_of(wk, struct iwl_mvm, trig_link_selection_wk); |
| 1264 | |
| 1265 | mutex_lock(&mvm->mutex); |
| 1266 | ieee80211_iterate_active_interfaces(mvm->hw, |
| 1267 | IEEE80211_IFACE_ITER_NORMAL, |
| 1268 | iwl_mvm_find_link_selection_vif, |
| 1269 | NULL); |
| 1270 | mutex_unlock(&mvm->mutex); |
| 1271 | } |
| 1272 | |
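/*
 * Allocate and initialize the op mode: set up the mac80211 hw and the
 * transport configuration, initialize all locks, lists and works, then
 * load the NVM and register with mac80211.
 */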
| 1273 | static struct iwl_op_mode * |
| 1274 | iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, |
| 1275 | const struct iwl_fw *fw, struct dentry *dbgfs_dir) |
| 1276 | { |
| 1277 | struct ieee80211_hw *hw; |
| 1278 | struct iwl_op_mode *op_mode; |
| 1279 | struct iwl_mvm *mvm; |
| 1280 | struct iwl_trans_config trans_cfg = {}; |
| 1281 | static const u8 no_reclaim_cmds[] = { |
| 1282 | TX_CMD, |
| 1283 | }; |
| 1284 | u32 max_agg; |
| 1285 | size_t scan_size; |
| 1286 | u32 min_backoff; |
| 1287 | struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; |
| 1288 | |
| 1289 | /* |
| 1290 | * We use IWL_STATION_COUNT_MAX to check the validity of the station |
| 1291 | * index all over the driver - check that its value corresponds to the |
| 1292 | * array size. |
| 1293 | */ |
| 1294 | BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != |
| 1295 | IWL_STATION_COUNT_MAX); |
| 1296 | |
| 1297 | /******************************** |
| 1298 | * 1. Allocating and configuring HW data |
| 1299 | ********************************/ |
| 1300 | hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) + |
| 1301 | sizeof(struct iwl_mvm), |
| 1302 | iwl_mvm_has_mld_api(fw) ? &iwl_mvm_mld_hw_ops : |
| 1303 | &iwl_mvm_hw_ops); |
| 1304 | if (!hw) |
| 1305 | return NULL; |
| 1306 | |
| 1307 | if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) |
| 1308 | max_agg = 512; |
| 1309 | else |
| 1310 | max_agg = IEEE80211_MAX_AMPDU_BUF_HE; |
| 1311 | |
| 1312 | hw->max_rx_aggregation_subframes = max_agg; |
| 1313 | |
| 1314 | if (cfg->max_tx_agg_size) |
| 1315 | hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; |
| 1316 | else |
| 1317 | hw->max_tx_aggregation_subframes = max_agg; |
| 1318 | |
| 1319 | op_mode = hw->priv; |
| 1320 | |
| 1321 | mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 1322 | mvm->dev = trans->dev; |
| 1323 | mvm->trans = trans; |
| 1324 | mvm->cfg = cfg; |
| 1325 | mvm->fw = fw; |
| 1326 | mvm->hw = hw; |
| 1327 | |
| 1328 | iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, |
| 1329 | &iwl_mvm_sanitize_ops, mvm, dbgfs_dir); |
| 1330 | |
| 1331 | iwl_mvm_get_bios_tables(mvm); |
| 1332 | iwl_uefi_get_sgom_table(trans, &mvm->fwrt); |
| 1333 | iwl_uefi_get_step_table(trans); |
| 1334 | |
| 1335 | mvm->init_status = 0; |
| 1336 | |
| 1337 | if (iwl_mvm_has_new_rx_api(mvm)) { |
| 1338 | op_mode->ops = &iwl_mvm_ops_mq; |
| 1339 | trans->rx_mpdu_cmd_hdr_size = |
| 1340 | (trans->trans_cfg->device_family >= |
| 1341 | IWL_DEVICE_FAMILY_AX210) ? |
| 1342 | sizeof(struct iwl_rx_mpdu_desc) : |
| 1343 | IWL_RX_DESC_SIZE_V1; |
| 1344 | } else { |
| 1345 | op_mode->ops = &iwl_mvm_ops; |
| 1346 | trans->rx_mpdu_cmd_hdr_size = |
| 1347 | sizeof(struct iwl_rx_mpdu_res_start); |
| 1348 | |
| 1349 | if (WARN_ON(trans->num_rx_queues > 1)) |
| 1350 | goto out_free; |
| 1351 | } |
| 1352 | |
| 1353 | mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; |
| 1354 | mvm->bios_enable_puncturing = iwl_uefi_get_puncturing(&mvm->fwrt); |
| 1355 | |
| 1356 | if (iwl_mvm_has_new_tx_api(mvm)) { |
| 1357 | /* |
| 1358 | * If we have the new TX/queue allocation API initialize them |
| 1359 | * all to invalid numbers. We'll rewrite the ones that we need |
| 1360 | * later, but that doesn't happen for all of them all of the |
| 1361 | * time (e.g. P2P Device is optional), and if a dynamic queue |
| 1362 | * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then |
| 1363 | * iwl_mvm_is_static_queue() erroneously returns true, and we |
| 1364 | * might have things getting stuck. |
| 1365 | */ |
| 1366 | mvm->aux_queue = IWL_MVM_INVALID_QUEUE; |
| 1367 | mvm->snif_queue = IWL_MVM_INVALID_QUEUE; |
| 1368 | mvm->probe_queue = IWL_MVM_INVALID_QUEUE; |
| 1369 | mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE; |
| 1370 | } else { |
| 1371 | mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; |
| 1372 | mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; |
| 1373 | mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; |
| 1374 | mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; |
| 1375 | } |
| 1376 | |
| 1377 | mvm->sf_state = SF_UNINIT; |
| 1378 | if (iwl_mvm_has_unified_ucode(mvm)) |
| 1379 | iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); |
| 1380 | else |
| 1381 | iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT); |
| 1382 | mvm->drop_bcn_ap_mode = true; |
| 1383 | |
| 1384 | mutex_init(&mvm->mutex); |
| 1385 | spin_lock_init(&mvm->async_handlers_lock); |
| 1386 | INIT_LIST_HEAD(&mvm->time_event_list); |
| 1387 | INIT_LIST_HEAD(&mvm->aux_roc_te_list); |
| 1388 | INIT_LIST_HEAD(&mvm->async_handlers_list); |
| 1389 | spin_lock_init(&mvm->time_event_lock); |
| 1390 | INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list); |
| 1391 | INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list); |
| 1392 | INIT_LIST_HEAD(&mvm->resp_pasn_list); |
| 1393 | |
| 1394 | INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); |
| 1395 | INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); |
| 1396 | INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk); |
| 1397 | INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); |
| 1398 | INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); |
| 1399 | INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); |
| 1400 | INIT_LIST_HEAD(&mvm->add_stream_txqs); |
| 1401 | spin_lock_init(&mvm->add_stream_lock); |
| 1402 | |
| 1403 | wiphy_work_init(&mvm->async_handlers_wiphy_wk, |
| 1404 | iwl_mvm_async_handlers_wiphy_wk); |
| 1405 | |
| 1406 | wiphy_work_init(&mvm->trig_link_selection_wk, |
| 1407 | iwl_mvm_trig_link_selection); |
| 1408 | |
| 1409 | init_waitqueue_head(&mvm->rx_sync_waitq); |
| 1410 | |
| 1411 | mvm->queue_sync_state = 0; |
| 1412 | |
| 1413 | SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); |
| 1414 | |
| 1415 | spin_lock_init(&mvm->tcm.lock); |
| 1416 | INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work); |
| 1417 | mvm->tcm.ts = jiffies; |
| 1418 | mvm->tcm.ll_ts = jiffies; |
| 1419 | mvm->tcm.uapsd_nonagg_ts = jiffies; |
| 1420 | |
| 1421 | INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork); |
| 1422 | |
| 1423 | mvm->cmd_ver.range_resp = |
| 1424 | iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP, |
| 1425 | TOF_RANGE_RESPONSE_NOTIF, 5); |
| 1426 | /* we only support up to version 9 */ |
| 1427 | if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9)) |
| 1428 | goto out_free; |
| 1429 | |
| 1430 | /* |
| 1431 | * Populate the state variables that the transport layer needs |
| 1432 | * to know about. |
| 1433 | */ |
| 1434 | trans_cfg.op_mode = op_mode; |
| 1435 | trans_cfg.no_reclaim_cmds = no_reclaim_cmds; |
| 1436 | trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); |
| 1437 | |
| 1438 | trans_cfg.rx_buf_size = iwl_amsdu_size_to_rxb_size(); |
| 1439 | |
| 1440 | trans->wide_cmd_header = true; |
| 1441 | trans_cfg.bc_table_dword = |
| 1442 | mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210; |
| 1443 | |
| 1444 | trans_cfg.command_groups = iwl_mvm_groups; |
| 1445 | trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); |
| 1446 | |
| 1447 | trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; |
| 1448 | trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; |
| 1449 | trans_cfg.scd_set_active = true; |
| 1450 | |
| 1451 | trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, |
| 1452 | driver_data[2]); |
| 1453 | |
| 1454 | snprintf(mvm->hw->wiphy->fw_version, |
| 1455 | sizeof(mvm->hw->wiphy->fw_version), |
| 1456 | "%.31s", fw->fw_version); |
| 1457 | |
| 1458 | trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa, |
| 1459 | IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE); |
| 1460 | |
| 1461 | trans_cfg.queue_alloc_cmd_ver = |
| 1462 | iwl_fw_lookup_cmd_ver(mvm->fw, |
| 1463 | WIDE_ID(DATA_PATH_GROUP, |
| 1464 | SCD_QUEUE_CONFIG_CMD), |
| 1465 | 0); |
| 1466 | mvm->sta_remove_requires_queue_remove = |
| 1467 | trans_cfg.queue_alloc_cmd_ver > 0; |
| 1468 | |
| 1469 | mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw); |
| 1470 | |
| 1471 | /* Configure transport layer */ |
| 1472 | iwl_trans_configure(mvm->trans, &trans_cfg); |
| 1473 | |
| 1474 | trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; |
| 1475 | trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv; |
| 1476 | trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg; |
| 1477 | memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv, |
| 1478 | sizeof(trans->dbg.conf_tlv)); |
| 1479 | trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv; |
| 1480 | |
| 1481 | trans->iml = mvm->fw->iml; |
| 1482 | trans->iml_len = mvm->fw->iml_len; |
| 1483 | |
| 1484 | /* set up notification wait support */ |
| 1485 | iwl_notification_wait_init(&mvm->notif_wait); |
| 1486 | |
| 1487 | /* Init phy db */ |
| 1488 | mvm->phy_db = iwl_phy_db_init(trans); |
| 1489 | if (!mvm->phy_db) { |
| 1490 | IWL_ERR(mvm, "Cannot init phy_db\n"); |
| 1491 | goto out_free; |
| 1492 | } |
| 1493 | |
| 1494 | if (iwlwifi_mod_params.nvm_file) |
| 1495 | mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; |
| 1496 | else |
| 1497 | IWL_DEBUG_EEPROM(mvm->trans->dev, |
| 1498 | "working without external nvm file\n"); |
| 1499 | |
| 1500 | scan_size = iwl_mvm_scan_size(mvm); |
| 1501 | |
| 1502 | mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); |
| 1503 | if (!mvm->scan_cmd) |
| 1504 | goto out_free; |
| 1505 | mvm->scan_cmd_size = scan_size; |
| 1506 | |
| 1507 | /* invalidate ids to prevent accidental removal of sta_id 0 */ |
| 1508 | mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; |
| 1509 | mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; |
| 1510 | |
/* Set EBS as successful as long as the FW doesn't state otherwise. */
| 1512 | mvm->last_ebs_successful = true; |
| 1513 | |
| 1514 | min_backoff = iwl_mvm_min_backoff(mvm); |
| 1515 | iwl_mvm_thermal_initialize(mvm, min_backoff); |
| 1516 | |
| 1517 | if (!iwl_mvm_has_new_rx_stats_api(mvm)) |
| 1518 | memset(&mvm->rx_stats_v3, 0, |
| 1519 | sizeof(struct mvm_statistics_rx_v3)); |
| 1520 | else |
| 1521 | memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); |
| 1522 | |
| 1523 | iwl_mvm_ftm_initiator_smooth_config(mvm); |
| 1524 | |
| 1525 | iwl_mvm_init_time_sync(&mvm->time_sync); |
| 1526 | |
| 1527 | mvm->debugfs_dir = dbgfs_dir; |
| 1528 | |
| 1529 | mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops); |
| 1530 | |
| 1531 | iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter); |
| 1532 | |
| 1533 | if (iwl_mvm_start_get_nvm(mvm)) { |
| 1534 | /* |
| 1535 | * Getting NVM failed while CSME is the owner, but we are |
| 1536 | * registered to MEI, we'll get the NVM later when it'll be |
| 1537 | * possible to get it from CSME. |
| 1538 | */ |
| 1539 | if (trans->csme_own && mvm->mei_registered) |
| 1540 | return op_mode; |
| 1541 | |
| 1542 | goto out_thermal_exit; |
| 1543 | } |
| 1544 | |
| 1545 | |
| 1546 | if (iwl_mvm_start_post_nvm(mvm)) |
| 1547 | goto out_thermal_exit; |
| 1548 | |
| 1549 | return op_mode; |
| 1550 | |
| 1551 | out_thermal_exit: |
| 1552 | iwl_mvm_thermal_exit(mvm); |
| 1553 | if (mvm->mei_registered) { |
| 1554 | iwl_mei_start_unregister(); |
| 1555 | iwl_mei_unregister_complete(); |
| 1556 | } |
| 1557 | out_free: |
| 1558 | iwl_fw_flush_dumps(&mvm->fwrt); |
| 1559 | iwl_fw_runtime_free(&mvm->fwrt); |
| 1560 | |
| 1561 | iwl_phy_db_free(mvm->phy_db); |
| 1562 | kfree(mvm->scan_cmd); |
| 1563 | iwl_trans_op_mode_leave(trans); |
| 1564 | |
| 1565 | ieee80211_free_hw(mvm->hw); |
| 1566 | return NULL; |
| 1567 | } |
| 1568 | |
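/*
 * Stop the device and the firmware; the caller must hold mvm->mutex.
 * Clears the FIRMWARE_RUNNING status bit, stops debug data collection
 * and releases the firmware paging memory.
 */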
| 1569 | void iwl_mvm_stop_device(struct iwl_mvm *mvm) |
| 1570 | { |
| 1571 | lockdep_assert_held(&mvm->mutex); |
| 1572 | |
| 1573 | iwl_fw_cancel_timestamp(&mvm->fwrt); |
| 1574 | |
| 1575 | clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); |
| 1576 | |
| 1577 | iwl_mvm_pause_tcm(mvm, false); |
| 1578 | |
| 1579 | iwl_fw_dbg_stop_sync(&mvm->fwrt); |
| 1580 | iwl_trans_stop_device(mvm->trans); |
| 1581 | iwl_free_fw_paging(&mvm->fwrt); |
| 1582 | iwl_fw_dump_conf_clear(&mvm->fwrt); |
| 1583 | iwl_mvm_mei_device_state(mvm, false); |
| 1584 | } |
| 1585 | |
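/*
 * Full op_mode teardown: unregister from MEI and mac80211 (if we got
 * that far), cancel pending work, release the transport and free
 * everything allocated in op_mode start.
 */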
| 1586 | static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) |
| 1587 | { |
| 1588 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 1589 | int i; |
| 1590 | |
| 1591 | if (mvm->mei_registered) { |
| 1592 | rtnl_lock(); |
| 1593 | iwl_mei_set_netdev(NULL); |
| 1594 | rtnl_unlock(); |
| 1595 | iwl_mei_start_unregister(); |
| 1596 | } |
| 1597 | |
| 1598 | /* |
| 1599 | * After we unregister from mei, the worker can't be scheduled |
| 1600 | * anymore. |
| 1601 | */ |
| 1602 | cancel_work_sync(&mvm->sap_connected_wk); |
| 1603 | |
| 1604 | iwl_mvm_leds_exit(mvm); |
| 1605 | |
| 1606 | iwl_mvm_thermal_exit(mvm); |
| 1607 | |
| 1608 | /* |
| 1609 | * If we couldn't get ownership on the device and we couldn't |
| 1610 | * get the NVM from CSME, we haven't registered to mac80211. |
| 1611 | * In that case, we didn't fail op_mode_start, because we are |
| 1612 | * waiting for CSME to allow us to get the NVM to register to |
| 1613 | * mac80211. If that didn't happen, we haven't registered to |
| 1614 | * mac80211, hence the if below. |
| 1615 | */ |
| 1616 | if (mvm->hw_registered) |
| 1617 | ieee80211_unregister_hw(mvm->hw); |
| 1618 | |
| 1619 | kfree(mvm->scan_cmd); |
| 1620 | kfree(mvm->mcast_filter_cmd); |
| 1621 | mvm->mcast_filter_cmd = NULL; |
| 1622 | |
| 1623 | kfree(mvm->error_recovery_buf); |
| 1624 | mvm->error_recovery_buf = NULL; |
| 1625 | |
| 1626 | iwl_mvm_ptp_remove(mvm); |
| 1627 | |
| 1628 | iwl_trans_op_mode_leave(mvm->trans); |
| 1629 | |
| 1630 | iwl_phy_db_free(mvm->phy_db); |
| 1631 | mvm->phy_db = NULL; |
| 1632 | |
| 1633 | kfree(mvm->nvm_data); |
| 1634 | kfree(mvm->mei_nvm_data); |
| 1635 | kfree(rcu_access_pointer(mvm->csme_conn_info)); |
| 1636 | kfree(mvm->temp_nvm_data); |
| 1637 | for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) |
| 1638 | kfree(mvm->nvm_sections[i].data); |
| 1639 | kfree(mvm->acs_survey); |
| 1640 | |
| 1641 | cancel_delayed_work_sync(&mvm->tcm.work); |
| 1642 | |
| 1643 | iwl_fw_runtime_free(&mvm->fwrt); |
| 1644 | mutex_destroy(&mvm->mutex); |
| 1645 | |
| 1646 | if (mvm->mei_registered) |
| 1647 | iwl_mei_unregister_complete(); |
| 1648 | |
| 1649 | ieee80211_free_hw(mvm->hw); |
| 1650 | } |
| 1651 | |
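/*
 * Bookkeeping for a deferred RX notification: the stolen RX buffer,
 * the context the handler must run in, and the handler itself.
 * Entries are queued on mvm->async_handlers_list.
 */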
| 1652 | struct iwl_async_handler_entry { |
| 1653 | struct list_head list; |
| 1654 | struct iwl_rx_cmd_buffer rxb; |
| 1655 | enum iwl_rx_handler_context context; |
| 1656 | void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); |
| 1657 | }; |
| 1658 | |
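/*
 * Drop all pending async handler entries without running them and free
 * their RX buffers, for when the queued notifications are no longer
 * relevant (e.g. across a restart).
 */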
| 1659 | void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm) |
| 1660 | { |
| 1661 | struct iwl_async_handler_entry *entry, *tmp; |
| 1662 | |
| 1663 | spin_lock_bh(&mvm->async_handlers_lock); |
| 1664 | list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { |
| 1665 | iwl_free_rxb(&entry->rxb); |
| 1666 | list_del(&entry->list); |
| 1667 | kfree(entry); |
| 1668 | } |
| 1669 | spin_unlock_bh(&mvm->async_handlers_lock); |
| 1670 | } |
| 1671 | |
| 1672 | /* |
| 1673 | * This function receives a bitmap of rx async handler contexts |
| 1674 | * (&iwl_rx_handler_context) to handle, and runs only them |
| 1675 | */ |
| 1676 | static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm, |
| 1677 | u8 contexts) |
| 1678 | { |
| 1679 | struct iwl_async_handler_entry *entry, *tmp; |
| 1680 | LIST_HEAD(local_list); |
| 1681 | |
| 1682 | /* |
| 1683 | * Sync with Rx path with a lock. Remove all the entries of the |
| 1684 | * wanted contexts from this list, add them to a local one (lock free), |
| 1685 | * and then handle them. |
| 1686 | */ |
| 1687 | spin_lock_bh(&mvm->async_handlers_lock); |
| 1688 | list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { |
| 1689 | if (!(BIT(entry->context) & contexts)) |
| 1690 | continue; |
| 1691 | list_del(&entry->list); |
| 1692 | list_add_tail(&entry->list, &local_list); |
| 1693 | } |
| 1694 | spin_unlock_bh(&mvm->async_handlers_lock); |
| 1695 | |
| 1696 | list_for_each_entry_safe(entry, tmp, &local_list, list) { |
| 1697 | if (entry->context != RX_HANDLER_ASYNC_UNLOCKED) |
| 1698 | mutex_lock(&mvm->mutex); |
| 1699 | entry->fn(mvm, &entry->rxb); |
| 1700 | iwl_free_rxb(&entry->rxb); |
| 1701 | list_del(&entry->list); |
| 1702 | if (entry->context != RX_HANDLER_ASYNC_UNLOCKED) |
| 1703 | mutex_unlock(&mvm->mutex); |
| 1704 | kfree(entry); |
| 1705 | } |
| 1706 | } |
| 1707 | |
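/*
 * wiphy_work handler: run the async handlers that must execute with
 * the wiphy lock held (RX_HANDLER_ASYNC_LOCKED_WIPHY context).
 */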
| 1708 | static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy, |
| 1709 | struct wiphy_work *wk) |
| 1710 | { |
| 1711 | struct iwl_mvm *mvm = |
| 1712 | container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk); |
| 1713 | u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY); |
| 1714 | |
| 1715 | iwl_mvm_async_handlers_by_context(mvm, contexts); |
| 1716 | } |
| 1717 | |
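/*
 * Workqueue handler: run the async handlers for the remaining (locked
 * and unlocked, non-wiphy) contexts.
 */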
| 1718 | static void iwl_mvm_async_handlers_wk(struct work_struct *wk) |
| 1719 | { |
| 1720 | struct iwl_mvm *mvm = |
| 1721 | container_of(wk, struct iwl_mvm, async_handlers_wk); |
| 1722 | u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) | |
| 1723 | BIT(RX_HANDLER_ASYNC_UNLOCKED); |
| 1724 | |
| 1725 | iwl_mvm_async_handlers_by_context(mvm, contexts); |
| 1726 | } |
| 1727 | |
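/*
 * Collect firmware debug data if an FW_DBG_TRIGGER_FW_NOTIF trigger
 * is configured for the command/notification that was just received.
 */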
| 1728 | static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, |
| 1729 | struct iwl_rx_packet *pkt) |
| 1730 | { |
| 1731 | struct iwl_fw_dbg_trigger_tlv *trig; |
| 1732 | struct iwl_fw_dbg_trigger_cmd *cmds_trig; |
| 1733 | int i; |
| 1734 | |
| 1735 | trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, |
| 1736 | FW_DBG_TRIGGER_FW_NOTIF); |
| 1737 | if (!trig) |
| 1738 | return; |
| 1739 | |
| 1740 | cmds_trig = (void *)trig->data; |
| 1741 | |
| 1742 | for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { |
| 1743 | /* don't collect on CMD 0 */ |
| 1744 | if (!cmds_trig->cmds[i].cmd_id) |
| 1745 | break; |
| 1746 | |
| 1747 | if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd || |
| 1748 | cmds_trig->cmds[i].group_id != pkt->hdr.group_id) |
| 1749 | continue; |
| 1750 | |
| 1751 | iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, |
| 1752 | "CMD 0x%02x.%02x received", |
| 1753 | pkt->hdr.group_id, pkt->hdr.cmd); |
| 1754 | break; |
| 1755 | } |
| 1756 | } |
| 1757 | |
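/*
 * Common RX dispatch for notifications that have no fast path: feed
 * the debug time points and the notification wait machinery, then
 * either run the matching handler synchronously or queue it to the
 * async context it requires.
 */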
| 1758 | static void iwl_mvm_rx_common(struct iwl_mvm *mvm, |
| 1759 | struct iwl_rx_cmd_buffer *rxb, |
| 1760 | struct iwl_rx_packet *pkt) |
| 1761 | { |
| 1762 | unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); |
| 1763 | int i; |
| 1764 | union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; |
| 1765 | |
| 1766 | iwl_dbg_tlv_time_point(&mvm->fwrt, |
| 1767 | IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data); |
| 1768 | iwl_mvm_rx_check_trigger(mvm, pkt); |
| 1769 | |
| 1770 | /* |
| 1771 | * Do the notification wait before RX handlers so |
| 1772 | * even if the RX handler consumes the RXB we have |
| 1773 | * access to it in the notification wait entry. |
| 1774 | */ |
| 1775 | iwl_notification_wait_notify(&mvm->notif_wait, pkt); |
| 1776 | |
| 1777 | for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) { |
| 1778 | const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i]; |
| 1779 | struct iwl_async_handler_entry *entry; |
| 1780 | |
| 1781 | if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) |
| 1782 | continue; |
| 1783 | |
| 1784 | if (IWL_FW_CHECK(mvm, pkt_len < rx_h->min_size, |
| 1785 | "unexpected notification 0x%04x size %d, need %d\n", |
| 1786 | rx_h->cmd_id, pkt_len, rx_h->min_size)) |
| 1787 | return; |
| 1788 | |
| 1789 | if (rx_h->context == RX_HANDLER_SYNC) { |
| 1790 | rx_h->fn(mvm, rxb); |
| 1791 | return; |
| 1792 | } |
| 1793 | |
| 1794 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
| 1795 | /* we can't do much... */ |
| 1796 | if (!entry) |
| 1797 | return; |
| 1798 | |
| 1799 | entry->rxb._page = rxb_steal_page(rxb); |
| 1800 | entry->rxb._offset = rxb->_offset; |
| 1801 | entry->rxb._rx_page_order = rxb->_rx_page_order; |
| 1802 | entry->fn = rx_h->fn; |
| 1803 | entry->context = rx_h->context; |
| 1804 | spin_lock(&mvm->async_handlers_lock); |
| 1805 | list_add_tail(&entry->list, &mvm->async_handlers_list); |
| 1806 | spin_unlock(&mvm->async_handlers_lock); |
| 1807 | if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY) |
| 1808 | wiphy_work_queue(mvm->hw->wiphy, |
| 1809 | &mvm->async_handlers_wiphy_wk); |
| 1810 | else |
| 1811 | schedule_work(&mvm->async_handlers_wk); |
| 1812 | break; |
| 1813 | } |
| 1814 | } |
| 1815 | |
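/*
 * RX dispatcher for devices using a single RX queue: fast-path the
 * RX MPDU and RX PHY notifications, send everything else through
 * iwl_mvm_rx_common().
 */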
| 1816 | static void iwl_mvm_rx(struct iwl_op_mode *op_mode, |
| 1817 | struct napi_struct *napi, |
| 1818 | struct iwl_rx_cmd_buffer *rxb) |
| 1819 | { |
| 1820 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
| 1821 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 1822 | u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); |
| 1823 | |
| 1824 | if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) |
| 1825 | iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); |
| 1826 | else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD)) |
| 1827 | iwl_mvm_rx_rx_phy_cmd(mvm, rxb); |
| 1828 | else |
| 1829 | iwl_mvm_rx_common(mvm, rxb, pkt); |
| 1830 | } |
| 1831 | |
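/* RX dispatcher for the default RX queue (queue 0) on multi-queue devices */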
| 1832 | void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, |
| 1833 | struct napi_struct *napi, |
| 1834 | struct iwl_rx_cmd_buffer *rxb) |
| 1835 | { |
| 1836 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
| 1837 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 1838 | u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); |
| 1839 | |
| 1840 | if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) |
| 1841 | iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); |
| 1842 | else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, |
| 1843 | RX_QUEUES_NOTIFICATION))) |
| 1844 | iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0); |
| 1845 | else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) |
| 1846 | iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); |
| 1847 | else if (cmd == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE)) |
| 1848 | iwl_mvm_rx_bar_frame_release(mvm, napi, rxb, 0); |
| 1849 | else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) |
| 1850 | iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0); |
| 1851 | else |
| 1852 | iwl_mvm_rx_common(mvm, rxb, pkt); |
| 1853 | } |
| 1854 | |
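/*
 * The static queues are the shared, non-station queues: aux, probe,
 * P2P device and sniffer.
 */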
| 1855 | static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue) |
| 1856 | { |
| 1857 | return queue == mvm->aux_queue || queue == mvm->probe_queue || |
| 1858 | queue == mvm->p2p_dev_queue || queue == mvm->snif_queue; |
| 1859 | } |
| 1860 | |
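/*
 * The transport stopped (full) or restarted a HW queue: propagate the
 * state to the mac80211 TXQs mapped to it. For the shared static
 * queues, stop/wake all mac80211 queues instead.
 */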
| 1861 | static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode, |
| 1862 | int hw_queue, bool start) |
| 1863 | { |
| 1864 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 1865 | struct ieee80211_sta *sta; |
| 1866 | struct ieee80211_txq *txq; |
| 1867 | struct iwl_mvm_txq *mvmtxq; |
| 1868 | int i; |
| 1869 | unsigned long tid_bitmap; |
| 1870 | struct iwl_mvm_sta *mvmsta; |
| 1871 | u8 sta_id; |
| 1872 | |
| 1873 | sta_id = iwl_mvm_has_new_tx_api(mvm) ? |
| 1874 | mvm->tvqm_info[hw_queue].sta_id : |
| 1875 | mvm->queue_info[hw_queue].ra_sta_id; |
| 1876 | |
| 1877 | if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) |
| 1878 | return; |
| 1879 | |
| 1880 | rcu_read_lock(); |
| 1881 | |
| 1882 | sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); |
| 1883 | if (IS_ERR_OR_NULL(sta)) |
| 1884 | goto out; |
| 1885 | mvmsta = iwl_mvm_sta_from_mac80211(sta); |
| 1886 | |
| 1887 | if (iwl_mvm_is_static_queue(mvm, hw_queue)) { |
| 1888 | if (!start) |
| 1889 | ieee80211_stop_queues(mvm->hw); |
| 1890 | else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST) |
| 1891 | ieee80211_wake_queues(mvm->hw); |
| 1892 | |
| 1893 | goto out; |
| 1894 | } |
| 1895 | |
| 1896 | if (iwl_mvm_has_new_tx_api(mvm)) { |
| 1897 | int tid = mvm->tvqm_info[hw_queue].txq_tid; |
| 1898 | |
| 1899 | tid_bitmap = BIT(tid); |
| 1900 | } else { |
| 1901 | tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap; |
| 1902 | } |
| 1903 | |
| 1904 | for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { |
| 1905 | int tid = i; |
| 1906 | |
| 1907 | if (tid == IWL_MAX_TID_COUNT) |
| 1908 | tid = IEEE80211_NUM_TIDS; |
| 1909 | |
| 1910 | txq = sta->txq[tid]; |
| 1911 | mvmtxq = iwl_mvm_txq_from_mac80211(txq); |
| 1912 | if (start) |
| 1913 | clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); |
| 1914 | else |
| 1915 | set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); |
| 1916 | |
| 1917 | if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) { |
| 1918 | local_bh_disable(); |
| 1919 | iwl_mvm_mac_itxq_xmit(mvm->hw, txq); |
| 1920 | local_bh_enable(); |
| 1921 | } |
| 1922 | } |
| 1923 | |
| 1924 | out: |
| 1925 | rcu_read_unlock(); |
| 1926 | } |
| 1927 | |
| 1928 | static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) |
| 1929 | { |
| 1930 | iwl_mvm_queue_state_change(op_mode, hw_queue, false); |
| 1931 | } |
| 1932 | |
| 1933 | static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) |
| 1934 | { |
| 1935 | iwl_mvm_queue_state_change(op_mode, hw_queue, true); |
| 1936 | } |
| 1937 | |
| 1938 | static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm) |
| 1939 | { |
| 1940 | wiphy_rfkill_set_hw_state(mvm->hw->wiphy, |
| 1941 | iwl_mvm_is_radio_killed(mvm)); |
| 1942 | } |
| 1943 | |
| 1944 | void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) |
| 1945 | { |
| 1946 | if (state) |
| 1947 | set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); |
| 1948 | else |
| 1949 | clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); |
| 1950 | |
| 1951 | iwl_mvm_set_rfkill_state(mvm); |
| 1952 | } |
| 1953 | |
| 1954 | struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm) |
| 1955 | { |
| 1956 | return rcu_dereference_protected(mvm->csme_conn_info, |
| 1957 | lockdep_is_held(&mvm->mutex)); |
| 1958 | } |
| 1959 | |
| 1960 | static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) |
| 1961 | { |
| 1962 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 1963 | bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done); |
| 1964 | bool unified = iwl_mvm_has_unified_ucode(mvm); |
| 1965 | |
| 1966 | if (state) |
| 1967 | set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); |
| 1968 | else |
| 1969 | clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); |
| 1970 | |
| 1971 | iwl_mvm_set_rfkill_state(mvm); |
| 1972 | |
| 1973 | /* iwl_run_init_mvm_ucode is waiting for results, abort it. */ |
| 1974 | if (rfkill_safe_init_done) |
| 1975 | iwl_abort_notification_waits(&mvm->notif_wait); |
| 1976 | |
| 1977 | /* |
| 1978 | * Don't ask the transport to stop the firmware. We'll do it |
| 1979 | * after cfg80211 takes us down. |
| 1980 | */ |
| 1981 | if (unified) |
| 1982 | return false; |
| 1983 | |
| 1984 | /* |
| 1985 | * Stop the device if we run OPERATIONAL firmware or if we are in the |
| 1986 | * middle of the calibrations. |
| 1987 | */ |
| 1988 | return state && rfkill_safe_init_done; |
| 1989 | } |
| 1990 | |
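/*
 * op_mode hook called when the transport releases an skb without
 * transmitting it: free the attached device TX command and hand the
 * skb back to mac80211 for freeing.
 */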
| 1991 | static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) |
| 1992 | { |
| 1993 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 1994 | struct ieee80211_tx_info *info; |
| 1995 | |
| 1996 | info = IEEE80211_SKB_CB(skb); |
| 1997 | iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); |
| 1998 | ieee80211_free_txskb(mvm->hw, skb); |
| 1999 | } |
| 2000 | |
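/*
 * Deferred driver reprobe, used when a firmware error occurs while
 * reconfiguring the device after a restart.
 */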
| 2001 | struct iwl_mvm_reprobe { |
| 2002 | struct device *dev; |
| 2003 | struct work_struct work; |
| 2004 | }; |
| 2005 | |
| 2006 | static void iwl_mvm_reprobe_wk(struct work_struct *wk) |
| 2007 | { |
| 2008 | struct iwl_mvm_reprobe *reprobe; |
| 2009 | |
| 2010 | reprobe = container_of(wk, struct iwl_mvm_reprobe, work); |
| 2011 | if (device_reprobe(reprobe->dev)) |
| 2012 | dev_err(reprobe->dev, "reprobe failed!\n"); |
| 2013 | put_device(reprobe->dev); |
| 2014 | kfree(reprobe); |
| 2015 | module_put(THIS_MODULE); |
| 2016 | } |
| 2017 | |
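/*
 * Handle a firmware error or a forced restart: abort anything waiting
 * on firmware notifications, report aborted scans to mac80211, and
 * decide whether to collect debug data, reprobe the device, or ask
 * mac80211 to restart the hardware.
 */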
| 2018 | void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) |
| 2019 | { |
| 2020 | iwl_abort_notification_waits(&mvm->notif_wait); |
| 2021 | iwl_dbg_tlv_del_timers(mvm->trans); |
| 2022 | |
| 2023 | /* |
| 2024 | * This is a bit racy, but worst case we tell mac80211 about |
| 2025 | * a stopped/aborted scan when that was already done which |
| 2026 | * is not a problem. It is necessary to abort any os scan |
| 2027 | * here because mac80211 requires having the scan cleared |
| 2028 | * before restarting. |
| 2029 | * We'll reset the scan_status to NONE in restart cleanup in |
| 2030 | * the next start() call from mac80211. If restart isn't called |
| 2031 | * (no fw restart) scan status will stay busy. |
| 2032 | */ |
| 2033 | iwl_mvm_report_scan_aborted(mvm); |
| 2034 | |
| 2035 | /* |
| 2036 | * If we're restarting already, don't cycle restarts. |
| 2037 | * If INIT fw asserted, it will likely fail again. |
| 2038 | * If WoWLAN fw asserted, don't restart either, mac80211 |
| 2039 | * can't recover this since we're already half suspended. |
| 2040 | */ |
| 2041 | if (!mvm->fw_restart && fw_error) { |
| 2042 | iwl_fw_error_collect(&mvm->fwrt, false); |
| 2043 | } else if (test_bit(IWL_MVM_STATUS_STARTING, |
| 2044 | &mvm->status)) { |
| 2045 | IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n"); |
| 2046 | } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { |
| 2047 | struct iwl_mvm_reprobe *reprobe; |
| 2048 | |
| 2049 | IWL_ERR(mvm, |
| 2050 | "Firmware error during reconfiguration - reprobe!\n"); |
| 2051 | |
| 2052 | /* |
| 2053 | * get a module reference to avoid doing this while unloading |
| 2054 | * anyway and to avoid scheduling a work with code that's |
| 2055 | * being removed. |
| 2056 | */ |
| 2057 | if (!try_module_get(THIS_MODULE)) { |
| 2058 | IWL_ERR(mvm, "Module is being unloaded - abort\n"); |
| 2059 | return; |
| 2060 | } |
| 2061 | |
| 2062 | reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC); |
| 2063 | if (!reprobe) { |
| 2064 | module_put(THIS_MODULE); |
| 2065 | return; |
| 2066 | } |
| 2067 | reprobe->dev = get_device(mvm->trans->dev); |
| 2068 | INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); |
| 2069 | schedule_work(&reprobe->work); |
| 2070 | } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, |
| 2071 | &mvm->status)) { |
| 2072 | IWL_ERR(mvm, "HW restart already requested, but not started\n"); |
| 2073 | } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && |
| 2074 | mvm->hw_registered && |
| 2075 | !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { |
/* This should be the first thing done before trying to
 * collect any data, to avoid endless loops if a HW error
 * happens while collecting debug data.
 */
| 2080 | set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); |
| 2081 | |
| 2082 | if (mvm->fw->ucode_capa.error_log_size) { |
| 2083 | u32 src_size = mvm->fw->ucode_capa.error_log_size; |
| 2084 | u32 src_addr = mvm->fw->ucode_capa.error_log_addr; |
| 2085 | u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC); |
| 2086 | |
| 2087 | if (recover_buf) { |
| 2088 | mvm->error_recovery_buf = recover_buf; |
| 2089 | iwl_trans_read_mem_bytes(mvm->trans, |
| 2090 | src_addr, |
| 2091 | recover_buf, |
| 2092 | src_size); |
| 2093 | } |
| 2094 | } |
| 2095 | |
| 2096 | iwl_fw_error_collect(&mvm->fwrt, false); |
| 2097 | |
| 2098 | if (fw_error && mvm->fw_restart > 0) { |
| 2099 | mvm->fw_restart--; |
| 2100 | ieee80211_restart_hw(mvm->hw); |
| 2101 | } else if (mvm->fwrt.trans->dbg.restart_required) { |
| 2102 | IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n"); |
| 2103 | mvm->fwrt.trans->dbg.restart_required = false; |
| 2104 | ieee80211_restart_hw(mvm->hw); |
| 2105 | } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) { |
| 2106 | ieee80211_restart_hw(mvm->hw); |
| 2107 | } |
| 2108 | } |
| 2109 | } |
| 2110 | |
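/*
 * op_mode hook: the transport detected a NIC/firmware error. Dump the
 * error log and, unless this is a synchronous error during shutdown,
 * try to restart the firmware.
 */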
| 2111 | static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) |
| 2112 | { |
| 2113 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 2114 | |
| 2115 | if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && |
| 2116 | !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, |
| 2117 | &mvm->status)) |
| 2118 | iwl_mvm_dump_nic_error_log(mvm); |
| 2119 | |
| 2120 | if (sync) { |
| 2121 | iwl_fw_error_collect(&mvm->fwrt, true); |
| 2122 | /* |
| 2123 | * Currently, the only case for sync=true is during |
| 2124 | * shutdown, so just stop in this case. If/when that |
| 2125 | * changes, we need to be a bit smarter here. |
| 2126 | */ |
| 2127 | return; |
| 2128 | } |
| 2129 | |
| 2130 | /* |
| 2131 | * If the firmware crashes while we're already considering it |
| 2132 | * to be dead then don't ask for a restart, that cannot do |
| 2133 | * anything useful anyway. |
| 2134 | */ |
| 2135 | if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status)) |
| 2136 | return; |
| 2137 | |
| 2138 | iwl_mvm_nic_restart(mvm, false); |
| 2139 | } |
| 2140 | |
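/*
 * op_mode hook: the command queue is full. This should never happen,
 * so warn and restart the firmware.
 */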
| 2141 | static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) |
| 2142 | { |
| 2143 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 2144 | |
| 2145 | WARN_ON(1); |
| 2146 | iwl_mvm_nic_restart(mvm, true); |
| 2147 | } |
| 2148 | |
| 2149 | static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode, |
| 2150 | enum iwl_fw_ini_time_point tp_id, |
| 2151 | union iwl_dbg_tlv_tp_data *tp_data) |
| 2152 | { |
| 2153 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 2154 | |
| 2155 | iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data); |
| 2156 | } |
| 2157 | |
| 2158 | #ifdef CONFIG_PM_SLEEP |
| 2159 | static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode) |
| 2160 | { |
| 2161 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 2162 | |
| 2163 | mutex_lock(&mvm->mutex); |
| 2164 | clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); |
| 2165 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; |
| 2166 | iwl_mvm_stop_device(mvm); |
| 2167 | mvm->fast_resume = false; |
| 2168 | mutex_unlock(&mvm->mutex); |
| 2169 | } |
| 2170 | #else |
| 2171 | static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode) |
| 2172 | {} |
| 2173 | #endif |
| 2174 | |
| 2175 | #define IWL_MVM_COMMON_OPS \ |
| 2176 | /* these could be differentiated */ \ |
| 2177 | .queue_full = iwl_mvm_stop_sw_queue, \ |
| 2178 | .queue_not_full = iwl_mvm_wake_sw_queue, \ |
| 2179 | .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \ |
| 2180 | .free_skb = iwl_mvm_free_skb, \ |
| 2181 | .nic_error = iwl_mvm_nic_error, \ |
| 2182 | .cmd_queue_full = iwl_mvm_cmd_queue_full, \ |
| 2183 | .nic_config = iwl_mvm_nic_config, \ |
| 2184 | /* as we only register one, these MUST be common! */ \ |
| 2185 | .start = iwl_op_mode_mvm_start, \ |
| 2186 | .stop = iwl_op_mode_mvm_stop, \ |
| 2187 | .time_point = iwl_op_mode_mvm_time_point, \ |
| 2188 | .device_powered_off = iwl_op_mode_mvm_device_powered_off |
| 2189 | |
| 2190 | static const struct iwl_op_mode_ops iwl_mvm_ops = { |
| 2191 | IWL_MVM_COMMON_OPS, |
| 2192 | .rx = iwl_mvm_rx, |
| 2193 | }; |
| 2194 | |
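/*
 * RX dispatcher for notifications delivered on RSS queues (queue != 0):
 * only frame release, RX queue sync notifications and RX MPDUs are
 * expected here.
 */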
| 2195 | static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, |
| 2196 | struct napi_struct *napi, |
| 2197 | struct iwl_rx_cmd_buffer *rxb, |
| 2198 | unsigned int queue) |
| 2199 | { |
| 2200 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
| 2201 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
| 2202 | u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); |
| 2203 | |
| 2204 | if (unlikely(queue >= mvm->trans->num_rx_queues)) |
| 2205 | return; |
| 2206 | |
| 2207 | if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) |
| 2208 | iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); |
| 2209 | else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, |
| 2210 | RX_QUEUES_NOTIFICATION))) |
| 2211 | iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue); |
| 2212 | else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) |
| 2213 | iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); |
| 2214 | } |
| 2215 | |
| 2216 | static const struct iwl_op_mode_ops iwl_mvm_ops_mq = { |
| 2217 | IWL_MVM_COMMON_OPS, |
| 2218 | .rx = iwl_mvm_rx_mq, |
| 2219 | .rx_rss = iwl_mvm_rx_mq_rss, |
| 2220 | }; |