iwlwifi: acpi: rename geo structs to contain versioning
[linux-2.6-block.git] / drivers / net / wireless / intel / iwlwifi / mvm / fw.c
CommitLineData
8ca151b5
JB
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8d193ca2 8 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
bdccdb85 9 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
cceb4507 10 * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
8ca151b5
JB
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
8ca151b5 21 * The full GNU General Public License is included in this distribution
410dc5aa 22 * in the file called COPYING.
8ca151b5
JB
23 *
24 * Contact Information:
cb2f8277 25 * Intel Linux Wireless <linuxwifi@intel.com>
8ca151b5
JB
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 * BSD LICENSE
29 *
8d193ca2 30 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
bdccdb85 31 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
cceb4507 32 * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
8ca151b5
JB
33 * All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 *
39 * * Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * * Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in
43 * the documentation and/or other materials provided with the
44 * distribution.
45 * * Neither the name Intel Corporation nor the names of its
46 * contributors may be used to endorse or promote products derived
47 * from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
50 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
51 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
52 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
53 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
54 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
55 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
59 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 *
61 *****************************************************************************/
62#include <net/mac80211.h>
854d773e 63#include <linux/netdevice.h>
8ca151b5
JB
64
65#include "iwl-trans.h"
66#include "iwl-op-mode.h"
d962f9b1 67#include "fw/img.h"
8ca151b5
JB
68#include "iwl-debug.h"
69#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
70#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
8c23f95c 71#include "iwl-prph.h"
813df5ce 72#include "fw/acpi.h"
8ca151b5
JB
73
74#include "mvm.h"
7174beb6 75#include "fw/dbg.h"
8ca151b5 76#include "iwl-phy-db.h"
9c4f7d51
ST
77#include "iwl-modparams.h"
78#include "iwl-nvm-parse.h"
8ca151b5
JB
79
80#define MVM_UCODE_ALIVE_TIMEOUT HZ
81#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
82
83#define UCODE_VALID_OK cpu_to_le32(0x1)
84
8ca151b5
JB
85struct iwl_mvm_alive_data {
86 bool valid;
87 u32 scd_base_addr;
88};
89
8ca151b5
JB
90static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
91{
92 struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
93 .valid = cpu_to_le32(valid_tx_ant),
94 };
95
33223542 96 IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
a1022927 97 return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
8ca151b5
JB
98 sizeof(tx_ant_cmd), &tx_ant_cmd);
99}
100
43413a97
SS
101static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
102{
103 int i;
104 struct iwl_rss_config_cmd cmd = {
105 .flags = cpu_to_le32(IWL_RSS_ENABLE),
608dce95
SS
106 .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
107 BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
108 BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
109 BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
110 BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
111 BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
43413a97
SS
112 };
113
f43495fd
SS
114 if (mvm->trans->num_rx_queues == 1)
115 return 0;
116
854d773e 117 /* Do not direct RSS traffic to Q 0 which is our fallback queue */
43413a97 118 for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
854d773e
SS
119 cmd.indirection_table[i] =
120 1 + (i % (mvm->trans->num_rx_queues - 1));
121 netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
43413a97
SS
122
123 return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
124}
125
8edbfaa1
SS
126static int iwl_configure_rxq(struct iwl_mvm *mvm)
127{
dbf592f3 128 int i, num_queues, size, ret;
8edbfaa1 129 struct iwl_rfh_queue_config *cmd;
dbf592f3
JB
130 struct iwl_host_cmd hcmd = {
131 .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
132 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
133 };
8edbfaa1
SS
134
135 /* Do not configure default queue, it is configured via context info */
136 num_queues = mvm->trans->num_rx_queues - 1;
137
dbf592f3 138 size = struct_size(cmd, data, num_queues);
8edbfaa1
SS
139
140 cmd = kzalloc(size, GFP_KERNEL);
141 if (!cmd)
142 return -ENOMEM;
143
144 cmd->num_queues = num_queues;
145
146 for (i = 0; i < num_queues; i++) {
147 struct iwl_trans_rxq_dma_data data;
148
149 cmd->data[i].q_num = i + 1;
150 iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
151
152 cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
153 cmd->data[i].urbd_stts_wrptr =
154 cpu_to_le64(data.urbd_stts_wrptr);
155 cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
156 cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
157 }
158
dbf592f3
JB
159 hcmd.data[0] = cmd;
160 hcmd.len[0] = size;
161
162 ret = iwl_mvm_send_cmd(mvm, &hcmd);
163
164 kfree(cmd);
165
166 return ret;
8edbfaa1
SS
167}
168
97d5be7e
LK
169static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
170{
171 struct iwl_dqa_enable_cmd dqa_cmd = {
172 .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
173 };
174 u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
175 int ret;
176
177 ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
178 if (ret)
179 IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
180 else
181 IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
182
183 return ret;
184}
185
bdccdb85
GBA
186void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
187 struct iwl_rx_cmd_buffer *rxb)
188{
189 struct iwl_rx_packet *pkt = rxb_addr(rxb);
190 struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
191 __le32 *dump_data = mfu_dump_notif->data;
192 int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
193 int i;
194
195 if (mfu_dump_notif->index_num == 0)
196 IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
197 le32_to_cpu(mfu_dump_notif->assert_id));
198
199 for (i = 0; i < n_words; i++)
200 IWL_DEBUG_INFO(mvm,
201 "MFUART assert dump, dword %u: 0x%08x\n",
202 le16_to_cpu(mfu_dump_notif->index_num) *
203 n_words + i,
204 le32_to_cpu(dump_data[i]));
205}
206
8ca151b5
JB
207static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
208 struct iwl_rx_packet *pkt, void *data)
209{
210 struct iwl_mvm *mvm =
211 container_of(notif_wait, struct iwl_mvm, notif_wait);
212 struct iwl_mvm_alive_data *alive_data = data;
5c228d63 213 struct mvm_alive_resp_v3 *palive3;
7e1223b5 214 struct mvm_alive_resp *palive;
5c228d63
SS
215 struct iwl_umac_alive *umac;
216 struct iwl_lmac_alive *lmac1;
217 struct iwl_lmac_alive *lmac2 = NULL;
218 u16 status;
cfa5d0ca 219 u32 lmac_error_event_table, umac_error_table;
01a9ca51 220
5c228d63
SS
221 if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
222 palive = (void *)pkt->data;
223 umac = &palive->umac_data;
224 lmac1 = &palive->lmac_data[0];
225 lmac2 = &palive->lmac_data[1];
226 status = le16_to_cpu(palive->status);
227 } else {
228 palive3 = (void *)pkt->data;
229 umac = &palive3->umac_data;
230 lmac1 = &palive3->lmac_data;
231 status = le16_to_cpu(palive3->status);
232 }
01a9ca51 233
22463857
SM
234 lmac_error_event_table =
235 le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
236 iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);
237
5c228d63 238 if (lmac2)
91c28b83 239 mvm->trans->dbg.lmac_error_event_table[1] =
22463857 240 le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
ffa70264 241
cfa5d0ca
MG
242 umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);
243
244 if (umac_error_table) {
245 if (umac_error_table >=
246 mvm->trans->cfg->min_umac_error_event_table) {
247 iwl_fw_umac_set_alive_err_table(mvm->trans,
248 umac_error_table);
249 } else {
250 IWL_ERR(mvm,
251 "Not valid error log pointer 0x%08X for %s uCode\n",
252 umac_error_table,
253 (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
254 "Init" : "RT");
255 }
3485e76e 256 }
fb5b2846 257
22463857 258 alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
5c228d63 259 alive_data->valid = status == IWL_ALIVE_STATUS_OK;
7e1223b5 260
5c228d63
SS
261 IWL_DEBUG_FW(mvm,
262 "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
263 status, lmac1->ver_type, lmac1->ver_subtype);
7e1223b5 264
5c228d63
SS
265 if (lmac2)
266 IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
7e1223b5 267
5c228d63
SS
268 IWL_DEBUG_FW(mvm,
269 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
270 le32_to_cpu(umac->umac_major),
271 le32_to_cpu(umac->umac_minor));
8ca151b5 272
0a3a3e9e
SM
273 iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac);
274
8ca151b5
JB
275 return true;
276}
277
1f370650
SS
278static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
279 struct iwl_rx_packet *pkt, void *data)
280{
281 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
282
283 return true;
284}
285
8ca151b5
JB
286static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
287 struct iwl_rx_packet *pkt, void *data)
288{
289 struct iwl_phy_db *phy_db = data;
290
291 if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
292 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
293 return true;
294 }
295
ce1f2778 296 WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
8ca151b5
JB
297
298 return false;
299}
300
301static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
302 enum iwl_ucode_type ucode_type)
303{
304 struct iwl_notification_wait alive_wait;
94a8d87c 305 struct iwl_mvm_alive_data alive_data = {};
8ca151b5 306 const struct fw_img *fw;
cfbc6c4c 307 int ret;
702e975d 308 enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
6eb031d2 309 static const u16 alive_cmd[] = { MVM_ALIVE };
b3500b47
EG
310 bool run_in_rfkill =
311 ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);
8ca151b5 312
61df750c 313 if (ucode_type == IWL_UCODE_REGULAR &&
3d2d4422
GBA
314 iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
315 !(fw_has_capa(&mvm->fw->ucode_capa,
316 IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
612da1ef 317 fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
61df750c 318 else
612da1ef 319 fw = iwl_get_ucode_image(mvm->fw, ucode_type);
befe9b6f 320 if (WARN_ON(!fw))
8ca151b5 321 return -EINVAL;
702e975d 322 iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
65b280fe 323 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
8ca151b5
JB
324
325 iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
326 alive_cmd, ARRAY_SIZE(alive_cmd),
327 iwl_alive_fn, &alive_data);
328
b3500b47
EG
329 /*
330 * We want to load the INIT firmware even in RFKILL
331 * For the unified firmware case, the ucode_type is not
332 * INIT, but we still need to run it.
333 */
334 ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
8ca151b5 335 if (ret) {
702e975d 336 iwl_fw_set_current_image(&mvm->fwrt, old_type);
8ca151b5
JB
337 iwl_remove_notification(&mvm->notif_wait, &alive_wait);
338 return ret;
339 }
340
341 /*
342 * Some things may run in the background now, but we
343 * just wait for the ALIVE notification here.
344 */
345 ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
346 MVM_UCODE_ALIVE_TIMEOUT);
347 if (ret) {
d6be9c1d
SS
348 struct iwl_trans *trans = mvm->trans;
349
20f5aef5
JB
350 if (trans->trans_cfg->device_family >=
351 IWL_DEVICE_FAMILY_22000) {
d6be9c1d
SS
352 IWL_ERR(mvm,
353 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
ea695b7c
ST
354 iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
355 iwl_read_umac_prph(trans,
356 UMAG_SB_CPU_2_STATUS));
20f5aef5
JB
357 IWL_ERR(mvm, "UMAC PC: 0x%x\n",
358 iwl_read_umac_prph(trans,
359 UREG_UMAC_CURRENT_PC));
360 IWL_ERR(mvm, "LMAC PC: 0x%x\n",
361 iwl_read_umac_prph(trans,
362 UREG_LMAC1_CURRENT_PC));
363 if (iwl_mvm_is_cdb_supported(mvm))
364 IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
365 iwl_read_umac_prph(trans,
366 UREG_LMAC2_CURRENT_PC));
367 } else if (trans->trans_cfg->device_family >=
368 IWL_DEVICE_FAMILY_8000) {
192de2b4
DS
369 IWL_ERR(mvm,
370 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
d6be9c1d
SS
371 iwl_read_prph(trans, SB_CPU_1_STATUS),
372 iwl_read_prph(trans, SB_CPU_2_STATUS));
20f5aef5
JB
373 }
374
375 if (ret == -ETIMEDOUT)
376 iwl_fw_dbg_error_collect(&mvm->fwrt,
377 FW_DBG_TRIGGER_ALIVE_TIMEOUT);
378
702e975d 379 iwl_fw_set_current_image(&mvm->fwrt, old_type);
8ca151b5
JB
380 return ret;
381 }
382
383 if (!alive_data.valid) {
384 IWL_ERR(mvm, "Loaded ucode is not valid!\n");
702e975d 385 iwl_fw_set_current_image(&mvm->fwrt, old_type);
8ca151b5
JB
386 return -EIO;
387 }
388
389 iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
390
391 /*
392 * Note: all the queues are enabled as part of the interface
393 * initialization, but in firmware restart scenarios they
394 * could be stopped, so wake them up. In firmware restart,
395 * mac80211 will have the queues stopped as well until the
396 * reconfiguration completes. During normal startup, they
397 * will be empty.
398 */
399
4ecafae9 400 memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
1c14089e
JB
401 /*
402 * Set a 'fake' TID for the command queue, since we use the
403 * hweight() of the tid_bitmap as a refcount now. Not that
404 * we ever even consider the command queue as one we might
405 * want to reuse, but be safe nevertheless.
406 */
407 mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
408 BIT(IWL_MAX_TID_COUNT + 2);
8ca151b5 409
65b280fe 410 set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
f7805b33
LC
411#ifdef CONFIG_IWLWIFI_DEBUGFS
412 iwl_fw_set_dbg_rec_on(&mvm->fwrt);
413#endif
8ca151b5
JB
414
415 return 0;
416}
8ca151b5 417
8c5f47b1
JB
418static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
419{
420 struct iwl_notification_wait init_wait;
421 struct iwl_nvm_access_complete_cmd nvm_complete = {};
422 struct iwl_init_extended_cfg_cmd init_cfg = {
423 .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
424 };
425 static const u16 init_complete[] = {
426 INIT_COMPLETE_NOTIF,
427 };
428 int ret;
429
a4584729
HD
430 if (mvm->trans->cfg->tx_with_siso_diversity)
431 init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
432
8c5f47b1
JB
433 lockdep_assert_held(&mvm->mutex);
434
94022562
EG
435 mvm->rfkill_safe_init_done = false;
436
8c5f47b1
JB
437 iwl_init_notification_wait(&mvm->notif_wait,
438 &init_wait,
439 init_complete,
440 ARRAY_SIZE(init_complete),
441 iwl_wait_init_complete,
442 NULL);
443
b108d8c7 444 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
86ce5c74 445
8c5f47b1
JB
446 /* Will also start the device */
447 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
448 if (ret) {
449 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
450 goto error;
451 }
b108d8c7
SM
452 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
453 NULL);
8c5f47b1
JB
454
455 /* Send init config command to mark that we are sending NVM access
456 * commands
457 */
458 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
b3500b47
EG
459 INIT_EXTENDED_CFG_CMD),
460 CMD_SEND_IN_RFKILL,
8c5f47b1
JB
461 sizeof(init_cfg), &init_cfg);
462 if (ret) {
463 IWL_ERR(mvm, "Failed to run init config command: %d\n",
464 ret);
465 goto error;
466 }
467
e9e1ba3d
SS
468 /* Load NVM to NIC if needed */
469 if (mvm->nvm_file_name) {
9c4f7d51
ST
470 iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
471 mvm->nvm_sections);
8c5f47b1 472 iwl_mvm_load_nvm_to_nic(mvm);
e9e1ba3d 473 }
8c5f47b1 474
d4f3695e 475 if (IWL_MVM_PARSE_NVM && read_nvm) {
5bd1d2c1 476 ret = iwl_nvm_init(mvm);
d4f3695e
SS
477 if (ret) {
478 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
479 goto error;
480 }
481 }
482
8c5f47b1 483 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
b3500b47
EG
484 NVM_ACCESS_COMPLETE),
485 CMD_SEND_IN_RFKILL,
8c5f47b1
JB
486 sizeof(nvm_complete), &nvm_complete);
487 if (ret) {
488 IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
489 ret);
490 goto error;
491 }
492
493 /* We wait for the INIT complete notification */
e9e1ba3d
SS
494 ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
495 MVM_UCODE_ALIVE_TIMEOUT);
496 if (ret)
497 return ret;
498
499 /* Read the NVM only at driver load time, no need to do this twice */
d4f3695e 500 if (!IWL_MVM_PARSE_NVM && read_nvm) {
4c625c56 501 mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
c135cb56
ST
502 if (IS_ERR(mvm->nvm_data)) {
503 ret = PTR_ERR(mvm->nvm_data);
504 mvm->nvm_data = NULL;
e9e1ba3d
SS
505 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
506 return ret;
507 }
508 }
509
b3500b47
EG
510 mvm->rfkill_safe_init_done = true;
511
e9e1ba3d 512 return 0;
8c5f47b1
JB
513
514error:
515 iwl_remove_notification(&mvm->notif_wait, &init_wait);
516 return ret;
517}
518
c4ace426
GA
#ifdef CONFIG_ACPI
/*
 * Fill in per-chain PHY filter configuration.
 * Values currently come from build-time constants; a BIOS/ACPI source
 * for them has not been defined yet (see TODO below).
 */
static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
				    struct iwl_phy_specific_cfg *phy_filters)
{
	/*
	 * TODO: read specific phy config from BIOS
	 * ACPI table for this feature has not been defined yet,
	 * so for now we use hardcoded values.
	 */

	if (IWL_MVM_PHY_FILTER_CHAIN_A)
		phy_filters->filter_cfg_chain_a =
			cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_A);

	if (IWL_MVM_PHY_FILTER_CHAIN_B)
		phy_filters->filter_cfg_chain_b =
			cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_B);

	if (IWL_MVM_PHY_FILTER_CHAIN_C)
		phy_filters->filter_cfg_chain_c =
			cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_C);

	if (IWL_MVM_PHY_FILTER_CHAIN_D)
		phy_filters->filter_cfg_chain_d =
			cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D);
}

#else /* CONFIG_ACPI */

/* No-op stub when ACPI support is compiled out. */
static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
				    struct iwl_phy_specific_cfg *phy_filters)
{
}
#endif /* CONFIG_ACPI */
554
8ca151b5
JB
555static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
556{
c4ace426 557 struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
702e975d 558 enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
c4ace426
GA
559 struct iwl_phy_specific_cfg phy_filters = {};
560 u8 cmd_ver;
561 size_t cmd_size;
8ca151b5 562
bb99ff9b 563 if (iwl_mvm_has_unified_ucode(mvm) &&
d923b020 564 !mvm->trans->cfg->tx_with_siso_diversity)
bb99ff9b 565 return 0;
d923b020
LC
566
567 if (mvm->trans->cfg->tx_with_siso_diversity) {
bb99ff9b
LC
568 /*
569 * TODO: currently we don't set the antenna but letting the NIC
570 * to decide which antenna to use. This should come from BIOS.
571 */
572 phy_cfg_cmd.phy_cfg =
573 cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
574 }
575
8ca151b5 576 /* Set parameters */
a0544272 577 phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
86a2b204
LC
578
579 /* set flags extra PHY configuration flags from the device's cfg */
7897dfa2
LC
580 phy_cfg_cmd.phy_cfg |=
581 cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags);
86a2b204 582
8ca151b5
JB
583 phy_cfg_cmd.calib_control.event_trigger =
584 mvm->fw->default_calib[ucode_type].event_trigger;
585 phy_cfg_cmd.calib_control.flow_trigger =
586 mvm->fw->default_calib[ucode_type].flow_trigger;
587
c4ace426
GA
588 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
589 PHY_CONFIGURATION_CMD);
590 if (cmd_ver == 3) {
591 iwl_mvm_phy_filter_init(mvm, &phy_filters);
592 memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters,
593 sizeof(struct iwl_phy_specific_cfg));
594 }
595
8ca151b5
JB
596 IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
597 phy_cfg_cmd.phy_cfg);
c4ace426
GA
598 cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
599 sizeof(struct iwl_phy_cfg_cmd_v1);
a1022927 600 return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
c4ace426 601 cmd_size, &phy_cfg_cmd);
8ca151b5
JB
602}
603
8ca151b5
JB
604int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
605{
606 struct iwl_notification_wait calib_wait;
6eb031d2 607 static const u16 init_complete[] = {
8ca151b5
JB
608 INIT_COMPLETE_NOTIF,
609 CALIB_RES_NOTIF_PHY_DB
610 };
611 int ret;
612
7d6222e2 613 if (iwl_mvm_has_unified_ucode(mvm))
8c5f47b1
JB
614 return iwl_run_unified_mvm_ucode(mvm, true);
615
8ca151b5
JB
616 lockdep_assert_held(&mvm->mutex);
617
94022562 618 mvm->rfkill_safe_init_done = false;
8ca151b5
JB
619
620 iwl_init_notification_wait(&mvm->notif_wait,
621 &calib_wait,
622 init_complete,
623 ARRAY_SIZE(init_complete),
624 iwl_wait_phy_db_entry,
625 mvm->phy_db);
626
627 /* Will also start the device */
628 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
629 if (ret) {
630 IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
00e0c6c8 631 goto remove_notif;
8ca151b5
JB
632 }
633
7d34a7d7 634 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
b3de3ef4
EG
635 ret = iwl_mvm_send_bt_init_conf(mvm);
636 if (ret)
00e0c6c8 637 goto remove_notif;
b3de3ef4 638 }
931d4160 639
81a67e32 640 /* Read the NVM only at driver load time, no need to do this twice */
8ca151b5 641 if (read_nvm) {
5bd1d2c1 642 ret = iwl_nvm_init(mvm);
8ca151b5
JB
643 if (ret) {
644 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
00e0c6c8 645 goto remove_notif;
8ca151b5
JB
646 }
647 }
648
81a67e32 649 /* In case we read the NVM from external file, load it to the NIC */
e02a9d60 650 if (mvm->nvm_file_name)
81a67e32
EL
651 iwl_mvm_load_nvm_to_nic(mvm);
652
64866e5d
LC
653 WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
654 "Too old NVM version (0x%0x, required = 0x%0x)",
655 mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);
8ca151b5 656
4f59334b
EH
657 /*
658 * abort after reading the nvm in case RF Kill is on, we will complete
659 * the init seq later when RF kill will switch to off
660 */
1a3fe0b2 661 if (iwl_mvm_is_radio_hw_killed(mvm)) {
4f59334b
EH
662 IWL_DEBUG_RF_KILL(mvm,
663 "jump over all phy activities due to RF kill\n");
00e0c6c8 664 goto remove_notif;
4f59334b
EH
665 }
666
b3500b47 667 mvm->rfkill_safe_init_done = true;
31b8b343 668
e07cbb53 669 /* Send TX valid antennas before triggering calibrations */
a0544272 670 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
e07cbb53 671 if (ret)
00e0c6c8 672 goto remove_notif;
e07cbb53 673
8ca151b5
JB
674 ret = iwl_send_phy_cfg_cmd(mvm);
675 if (ret) {
676 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
677 ret);
00e0c6c8 678 goto remove_notif;
8ca151b5
JB
679 }
680
681 /*
682 * Some things may run in the background now, but we
683 * just wait for the calibration complete notification.
684 */
685 ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
00e0c6c8
LC
686 MVM_UCODE_CALIB_TIMEOUT);
687 if (!ret)
688 goto out;
31b8b343 689
00e0c6c8 690 if (iwl_mvm_is_radio_hw_killed(mvm)) {
31b8b343 691 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
00e0c6c8
LC
692 ret = 0;
693 } else {
694 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
695 ret);
31b8b343 696 }
00e0c6c8 697
8ca151b5
JB
698 goto out;
699
00e0c6c8 700remove_notif:
8ca151b5
JB
701 iwl_remove_notification(&mvm->notif_wait, &calib_wait);
702out:
b3500b47 703 mvm->rfkill_safe_init_done = false;
a4082843 704 if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
8ca151b5
JB
705 /* we want to debug INIT and we have no NVM - fake */
706 mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
707 sizeof(struct ieee80211_channel) +
708 sizeof(struct ieee80211_rate),
709 GFP_KERNEL);
710 if (!mvm->nvm_data)
711 return -ENOMEM;
8ca151b5
JB
712 mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
713 mvm->nvm_data->bands[0].n_channels = 1;
714 mvm->nvm_data->bands[0].n_bitrates = 1;
715 mvm->nvm_data->bands[0].bitrates =
716 (void *)mvm->nvm_data->channels + 1;
717 mvm->nvm_data->bands[0].bitrates->hw_value = 10;
718 }
719
720 return ret;
721}
722
84bfffa9
EG
723static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
724{
725 struct iwl_ltr_config_cmd cmd = {
726 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
727 };
728
729 if (!mvm->trans->ltr_enabled)
730 return 0;
731
84bfffa9
EG
732 return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
733 sizeof(cmd), &cmd);
734}
735
c386dacb 736#ifdef CONFIG_ACPI
42ce76d6 737int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
da2830ac 738{
216cdfb5
LC
739 struct iwl_dev_tx_power_cmd cmd = {
740 .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
71e9378b 741 };
9c08cef8 742 __le16 *per_chain;
1edd56e6 743 int ret;
39c1a972 744 u16 len = 0;
fbb7957d
LC
745 u32 n_subbands;
746 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
747 REDUCE_TX_POWER_CMD);
748
749 if (cmd_ver == 6) {
750 len = sizeof(cmd.v6);
751 n_subbands = IWL_NUM_SUB_BANDS_V2;
752 per_chain = cmd.v6.per_chain[0][0];
753 } else if (fw_has_api(&mvm->fw->ucode_capa,
754 IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
0791c2fc 755 len = sizeof(cmd.v5);
fbb7957d 756 n_subbands = IWL_NUM_SUB_BANDS;
9c08cef8
LC
757 per_chain = cmd.v5.per_chain[0][0];
758 } else if (fw_has_capa(&mvm->fw->ucode_capa,
fbb7957d 759 IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
216cdfb5 760 len = sizeof(cmd.v4);
fbb7957d 761 n_subbands = IWL_NUM_SUB_BANDS;
9c08cef8
LC
762 per_chain = cmd.v4.per_chain[0][0];
763 } else {
216cdfb5 764 len = sizeof(cmd.v3);
fbb7957d 765 n_subbands = IWL_NUM_SUB_BANDS;
9c08cef8
LC
766 per_chain = cmd.v3.per_chain[0][0];
767 }
55bfa4b9 768
216cdfb5
LC
769 /* all structs have the same common part, add it */
770 len += sizeof(cmd.common);
da2830ac 771
9c08cef8 772 ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, ACPI_SAR_NUM_TABLES,
fbb7957d 773 n_subbands, prof_a, prof_b);
1edd56e6
LC
774
775 /* return on error or if the profile is disabled (positive number) */
776 if (ret)
777 return ret;
778
42ce76d6 779 IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
42ce76d6
LC
780 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
781}
782
7fe90e0e
HD
783int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
784{
dd2a1256 785 union iwl_geo_tx_power_profiles_cmd geo_tx_cmd;
f604324e 786 struct iwl_geo_tx_power_profiles_resp *resp;
0c3d7282 787 u16 len;
39c1a972 788 int ret;
0c3d7282
HD
789 struct iwl_host_cmd cmd;
790
dd2a1256
LC
791 /* the ops field is at the same spot for all versions, so set in v1 */
792 geo_tx_cmd.v1.ops =
793 cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
794
39c1a972 795 if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
dd2a1256
LC
796 IWL_UCODE_TLV_API_SAR_TABLE_VER))
797 len = sizeof(geo_tx_cmd.v2);
798 else
799 len = sizeof(geo_tx_cmd.v1);
7fe90e0e 800
39c1a972
IZ
801 if (!iwl_sar_geo_support(&mvm->fwrt))
802 return -EOPNOTSUPP;
803
0c3d7282 804 cmd = (struct iwl_host_cmd){
7fe90e0e 805 .id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
0c3d7282 806 .len = { len, },
7fe90e0e 807 .flags = CMD_WANT_SKB,
39c1a972 808 .data = { &geo_tx_cmd },
7fe90e0e
HD
809 };
810
811 ret = iwl_mvm_send_cmd(mvm, &cmd);
812 if (ret) {
813 IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
814 return ret;
815 }
f604324e
LC
816
817 resp = (void *)cmd.resp_pkt->data;
818 ret = le32_to_cpu(resp->profile_idx);
819
820 if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES))
821 ret = -EIO;
822
7fe90e0e
HD
823 iwl_free_resp(&cmd);
824 return ret;
825}
826
a6bff3cb
HD
827static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
828{
dd2a1256 829 union iwl_geo_tx_power_profiles_cmd cmd;
39c1a972 830 u16 len;
0433ae55 831 int ret;
a6bff3cb 832
dd2a1256
LC
833 /* the table is also at the same position both in v1 and v2 */
834 ret = iwl_sar_geo_init(&mvm->fwrt, cmd.v1.table);
a6bff3cb 835
0433ae55
GBA
836 /*
837 * It is a valid scenario to not support SAR, or miss wgds table,
838 * but in that case there is no need to send the command.
839 */
840 if (ret)
841 return 0;
a6bff3cb 842
dd2a1256
LC
843 /* the ops field is at the same spot for all versions, so set in v1 */
844 cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
0c3d7282 845
dd2a1256
LC
846 if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
847 IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
848 len = sizeof(cmd.v2);
849 cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
39c1a972 850 } else {
dd2a1256 851 len = sizeof(cmd.v1);
0c3d7282
HD
852 }
853
dd2a1256
LC
854 return iwl_mvm_send_cmd_pdu(mvm,
855 WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
856 0, len, &cmd);
a6bff3cb
HD
857}
858
6ce1e5c0
GA
859static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
860{
861 union acpi_object *wifi_pkg, *data, *enabled;
f2134f66
GA
862 union iwl_ppag_table_cmd ppag_table;
863 int i, j, ret, tbl_rev, num_sub_bands;
6ce1e5c0 864 int idx = 2;
f2134f66 865 s8 *gain;
6ce1e5c0 866
f2134f66
GA
867 /*
868 * The 'enabled' field is the same in v1 and v2 so we can just
869 * use v1 to access it.
870 */
871 mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
6ce1e5c0
GA
872 data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
873 if (IS_ERR(data))
874 return PTR_ERR(data);
875
f2134f66 876 /* try to read ppag table revision 1 */
6ce1e5c0 877 wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
f2134f66
GA
878 ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
879 if (!IS_ERR(wifi_pkg)) {
880 if (tbl_rev != 1) {
881 ret = -EINVAL;
882 goto out_free;
883 }
884 num_sub_bands = IWL_NUM_SUB_BANDS_V2;
885 gain = mvm->fwrt.ppag_table.v2.gain[0];
886 mvm->fwrt.ppag_ver = 2;
887 IWL_DEBUG_RADIO(mvm, "Reading PPAG table v2 (tbl_rev=1)\n");
888 goto read_table;
6ce1e5c0
GA
889 }
890
f2134f66
GA
891 /* try to read ppag table revision 0 */
892 wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
893 ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
894 if (!IS_ERR(wifi_pkg)) {
895 if (tbl_rev != 0) {
896 ret = -EINVAL;
897 goto out_free;
898 }
899 num_sub_bands = IWL_NUM_SUB_BANDS;
900 gain = mvm->fwrt.ppag_table.v1.gain[0];
901 mvm->fwrt.ppag_ver = 1;
902 IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n");
903 goto read_table;
3ed83da3 904 }
f2134f66
GA
905 ret = PTR_ERR(wifi_pkg);
906 goto out_free;
3ed83da3 907
f2134f66 908read_table:
6ce1e5c0
GA
909 enabled = &wifi_pkg->package.elements[1];
910 if (enabled->type != ACPI_TYPE_INTEGER ||
911 (enabled->integer.value != 0 && enabled->integer.value != 1)) {
912 ret = -EINVAL;
913 goto out_free;
914 }
915
f2134f66
GA
916 ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
917 if (!ppag_table.v1.enabled) {
6ce1e5c0
GA
918 ret = 0;
919 goto out_free;
920 }
921
922 /*
923 * read, verify gain values and save them into the PPAG table.
924 * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
925 * following sub-bands to High-Band (5GHz).
926 */
f2134f66
GA
927 for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
928 for (j = 0; j < num_sub_bands; j++) {
6ce1e5c0
GA
929 union acpi_object *ent;
930
931 ent = &wifi_pkg->package.elements[idx++];
932 if (ent->type != ACPI_TYPE_INTEGER ||
933 (j == 0 && ent->integer.value > ACPI_PPAG_MAX_LB) ||
934 (j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) ||
935 (j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) ||
936 (j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) {
f2134f66 937 ppag_table.v1.enabled = cpu_to_le32(0);
6ce1e5c0
GA
938 ret = -EINVAL;
939 goto out_free;
940 }
f2134f66 941 gain[i * num_sub_bands + j] = ent->integer.value;
6ce1e5c0
GA
942 }
943 }
944 ret = 0;
945out_free:
946 kfree(data);
947 return ret;
948}
949
950int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
951{
f2134f66
GA
952 u8 cmd_ver;
953 int i, j, ret, num_sub_bands, cmd_size;
954 union iwl_ppag_table_cmd ppag_table;
955 s8 *gain;
6ce1e5c0
GA
956
957 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
958 IWL_DEBUG_RADIO(mvm,
959 "PPAG capability not supported by FW, command not sent.\n");
960 return 0;
961 }
f2134f66
GA
962 if (!mvm->fwrt.ppag_table.v1.enabled) {
963 IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n");
160bab43
GA
964 return 0;
965 }
966
f2134f66
GA
967 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
968 PER_PLATFORM_ANT_GAIN_CMD);
969 if (cmd_ver == 1) {
970 num_sub_bands = IWL_NUM_SUB_BANDS;
971 gain = mvm->fwrt.ppag_table.v1.gain[0];
972 cmd_size = sizeof(ppag_table.v1);
973 if (mvm->fwrt.ppag_ver == 2) {
974 IWL_DEBUG_RADIO(mvm,
975 "PPAG table is v2 but FW supports v1, sending truncated table\n");
976 }
977 } else if (cmd_ver == 2) {
978 num_sub_bands = IWL_NUM_SUB_BANDS_V2;
979 gain = mvm->fwrt.ppag_table.v2.gain[0];
980 cmd_size = sizeof(ppag_table.v2);
981 if (mvm->fwrt.ppag_ver == 1) {
982 IWL_DEBUG_RADIO(mvm,
983 "PPAG table is v1 but FW supports v2, sending padded table\n");
984 }
985 } else {
986 IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n");
987 return 0;
988 }
6ce1e5c0 989
f2134f66
GA
990 for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
991 for (j = 0; j < num_sub_bands; j++) {
6ce1e5c0
GA
992 IWL_DEBUG_RADIO(mvm,
993 "PPAG table: chain[%d] band[%d]: gain = %d\n",
f2134f66 994 i, j, gain[i * num_sub_bands + j]);
6ce1e5c0
GA
995 }
996 }
f2134f66 997 IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
6ce1e5c0
GA
998 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
999 PER_PLATFORM_ANT_GAIN_CMD),
f2134f66 1000 0, cmd_size, &ppag_table);
6ce1e5c0
GA
1001 if (ret < 0)
1002 IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
1003 ret);
1004
1005 return ret;
1006}
1007
/*
 * Read the PPAG table from the BIOS (ACPI) and, when present, push it
 * to the firmware.  A missing or invalid table is not fatal.
 */
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
	int ret = iwl_mvm_get_ppag_table(mvm);

	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"PPAG BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* keep going without PPAG */
		return 0;
	}

	return iwl_mvm_ppag_send_cmd(mvm);
}
1021
28dd7ccd
MG
1022static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
1023{
1024 int ret;
1025 struct iwl_tas_config_cmd cmd = {};
1026 int list_size;
1027
1028 BUILD_BUG_ON(ARRAY_SIZE(cmd.black_list_array) <
1029 APCI_WTAS_BLACK_LIST_MAX);
1030
1031 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
1032 IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
1033 return;
1034 }
1035
1036 ret = iwl_acpi_get_tas(&mvm->fwrt, cmd.black_list_array, &list_size);
1037 if (ret < 0) {
1038 IWL_DEBUG_RADIO(mvm,
1039 "TAS table invalid or unavailable. (%d)\n",
1040 ret);
1041 return;
1042 }
1043
1044 if (list_size < 0)
1045 return;
1046
1047 /* list size if TAS enabled can only be non-negative */
1048 cmd.black_list_size = cpu_to_le32((u32)list_size);
1049
1050 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
1051 TAS_CONFIG),
1052 0, sizeof(cmd), &cmd);
1053 if (ret < 0)
1054 IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
1055}
f5b1cb2e 1056
02d31e9b 1057static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
f5b1cb2e
GA
1058{
1059 int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
1060 DSM_FUNC_ENABLE_INDONESIA_5G2);
1061
02d31e9b
GA
1062 if (ret < 0)
1063 IWL_DEBUG_RADIO(mvm,
1064 "Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
1065 ret);
f5b1cb2e 1066
02d31e9b
GA
1067 else if (ret >= DSM_VALUE_INDONESIA_MAX)
1068 IWL_DEBUG_RADIO(mvm,
1069 "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n",
1070 ret);
1071
1072 else if (ret == DSM_VALUE_INDONESIA_ENABLE) {
1073 IWL_DEBUG_RADIO(mvm,
1074 "Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n");
1075 return DSM_VALUE_INDONESIA_ENABLE;
1076 }
1077 /* default behaviour is disabled */
1078 return DSM_VALUE_INDONESIA_DISABLE;
1079}
1080
1081static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
1082{
1083 int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
1084 DSM_FUNC_DISABLE_SRD);
1085
1086 if (ret < 0)
1087 IWL_DEBUG_RADIO(mvm,
1088 "Failed to evaluate DSM function DISABLE_SRD, ret=%d\n",
1089 ret);
1090
1091 else if (ret >= DSM_VALUE_SRD_MAX)
1092 IWL_DEBUG_RADIO(mvm,
1093 "DSM function DISABLE_SRD return invalid value, ret=%d\n",
1094 ret);
1095
1096 else if (ret == DSM_VALUE_SRD_PASSIVE) {
1097 IWL_DEBUG_RADIO(mvm,
1098 "Evaluated DSM function DISABLE_SRD: setting SRD to passive\n");
1099 return DSM_VALUE_SRD_PASSIVE;
1100
1101 } else if (ret == DSM_VALUE_SRD_DISABLE) {
1102 IWL_DEBUG_RADIO(mvm,
1103 "Evaluated DSM function DISABLE_SRD: disabling SRD\n");
1104 return DSM_VALUE_SRD_DISABLE;
1105 }
1106 /* default behaviour is active */
1107 return DSM_VALUE_SRD_ACTIVE;
f5b1cb2e
GA
1108}
1109
1110static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
1111{
02d31e9b
GA
1112 u8 ret;
1113 int cmd_ret;
f5b1cb2e
GA
1114 struct iwl_lari_config_change_cmd cmd = {};
1115
02d31e9b 1116 if (iwl_mvm_eval_dsm_indonesia_5g2(mvm) == DSM_VALUE_INDONESIA_ENABLE)
f5b1cb2e
GA
1117 cmd.config_bitmap |=
1118 cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
1119
02d31e9b
GA
1120 ret = iwl_mvm_eval_dsm_disable_srd(mvm);
1121 if (ret == DSM_VALUE_SRD_PASSIVE)
1122 cmd.config_bitmap |=
1123 cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
1124
1125 else if (ret == DSM_VALUE_SRD_DISABLE)
1126 cmd.config_bitmap |=
1127 cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
1128
f5b1cb2e
GA
1129 /* apply more config masks here */
1130
1131 if (cmd.config_bitmap) {
02d31e9b
GA
1132 IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE\n");
1133 cmd_ret = iwl_mvm_send_cmd_pdu(mvm,
1134 WIDE_ID(REGULATORY_AND_NVM_GROUP,
1135 LARI_CONFIG_CHANGE),
1136 0, sizeof(cmd), &cmd);
1137 if (cmd_ret < 0)
f5b1cb2e
GA
1138 IWL_DEBUG_RADIO(mvm,
1139 "Failed to send LARI_CONFIG_CHANGE (%d)\n",
02d31e9b 1140 cmd_ret);
f5b1cb2e
GA
1141 }
1142}
69964905 1143#else /* CONFIG_ACPI */
69964905 1144
39c1a972
IZ
1145inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
1146 int prof_a, int prof_b)
69964905
LC
1147{
1148 return -ENOENT;
1149}
a6bff3cb 1150
39c1a972 1151inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
5d041c46
LC
1152{
1153 return -ENOENT;
1154}
1155
a6bff3cb
HD
/* Built without ACPI: nothing to configure, report success. */
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	return 0;
}
18f1755d 1160
6ce1e5c0
GA
1161int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
1162{
1163 return -ENOENT;
1164}
1165
/* Built without ACPI: nothing to initialize, report success. */
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
	return 0;
}
28dd7ccd
MG
1170
/* Built without ACPI: TAS configuration is unavailable, do nothing. */
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
}
f5b1cb2e
GA
1174
/* Built without ACPI: no DSM functions to evaluate, do nothing. */
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
}
69964905
LC
1178#endif /* CONFIG_ACPI */
1179
f130bb75
MG
1180void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
1181{
1182 u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
1183 int ret;
1184 u32 resp;
1185
1186 struct iwl_fw_error_recovery_cmd recovery_cmd = {
1187 .flags = cpu_to_le32(flags),
1188 .buf_size = 0,
1189 };
1190 struct iwl_host_cmd host_cmd = {
1191 .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
1192 .flags = CMD_WANT_SKB,
1193 .data = {&recovery_cmd, },
1194 .len = {sizeof(recovery_cmd), },
1195 };
1196
1197 /* no error log was defined in TLV */
1198 if (!error_log_size)
1199 return;
1200
1201 if (flags & ERROR_RECOVERY_UPDATE_DB) {
1202 /* no buf was allocated while HW reset */
1203 if (!mvm->error_recovery_buf)
1204 return;
1205
1206 host_cmd.data[1] = mvm->error_recovery_buf;
1207 host_cmd.len[1] = error_log_size;
1208 host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
1209 recovery_cmd.buf_size = cpu_to_le32(error_log_size);
1210 }
1211
1212 ret = iwl_mvm_send_cmd(mvm, &host_cmd);
1213 kfree(mvm->error_recovery_buf);
1214 mvm->error_recovery_buf = NULL;
1215
1216 if (ret) {
1217 IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
1218 return;
1219 }
1220
1221 /* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */
1222 if (flags & ERROR_RECOVERY_UPDATE_DB) {
1223 resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
1224 if (resp)
1225 IWL_ERR(mvm,
1226 "Failed to send recovery cmd blob was invalid %d\n",
1227 resp);
1228 }
1229}
1230
42ce76d6
LC
1231static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
1232{
1233 int ret;
1234
39c1a972 1235 ret = iwl_sar_get_wrds_table(&mvm->fwrt);
42ce76d6
LC
1236 if (ret < 0) {
1237 IWL_DEBUG_RADIO(mvm,
69964905 1238 "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
42ce76d6 1239 ret);
5d041c46
LC
1240 /*
1241 * If not available, don't fail and don't bother with EWRD.
1242 * Return 1 to tell that we can't use WGDS either.
1243 */
1244 return 1;
42ce76d6
LC
1245 }
1246
39c1a972 1247 ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
69964905
LC
1248 /* if EWRD is not available, we can still use WRDS, so don't fail */
1249 if (ret < 0)
1250 IWL_DEBUG_RADIO(mvm,
1251 "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
1252 ret);
1253
1edd56e6 1254 return iwl_mvm_sar_select_profile(mvm, 1, 1);
da2830ac
LC
1255}
1256
1f370650 1257static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
8ca151b5 1258{
1f370650 1259 int ret;
8ca151b5 1260
7d6222e2 1261 if (iwl_mvm_has_unified_ucode(mvm))
1f370650 1262 return iwl_run_unified_mvm_ucode(mvm, false);
8ca151b5 1263
8d193ca2 1264 ret = iwl_run_init_mvm_ucode(mvm, false);
f2082a53 1265
f2082a53 1266 if (ret) {
8d193ca2 1267 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
f4744258
LK
1268
1269 if (iwlmvm_mod_params.init_dbg)
1270 return 0;
1f370650 1271 return ret;
8d193ca2 1272 }
8ca151b5 1273
203c83d3 1274 iwl_fw_dbg_stop_sync(&mvm->fwrt);
bab3cb92
EG
1275 iwl_trans_stop_device(mvm->trans);
1276 ret = iwl_trans_start_hw(mvm->trans);
f2082a53 1277 if (ret)
1f370650 1278 return ret;
8ca151b5 1279
b108d8c7 1280 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
da2eb669 1281
94022562 1282 mvm->rfkill_safe_init_done = false;
8ca151b5 1283 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
1f370650
SS
1284 if (ret)
1285 return ret;
1286
94022562
EG
1287 mvm->rfkill_safe_init_done = true;
1288
b108d8c7
SM
1289 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
1290 NULL);
da2eb669 1291
702e975d 1292 return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
1f370650
SS
1293}
1294
1295int iwl_mvm_up(struct iwl_mvm *mvm)
1296{
1297 int ret, i;
1298 struct ieee80211_channel *chan;
1299 struct cfg80211_chan_def chandef;
dd36a507 1300 struct ieee80211_supported_band *sband = NULL;
1f370650
SS
1301
1302 lockdep_assert_held(&mvm->mutex);
1303
1304 ret = iwl_trans_start_hw(mvm->trans);
1305 if (ret)
1306 return ret;
1307
1308 ret = iwl_mvm_load_rt_fw(mvm);
8ca151b5
JB
1309 if (ret) {
1310 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
72d3c7bb
JB
1311 if (ret != -ERFKILL)
1312 iwl_fw_dbg_error_collect(&mvm->fwrt,
1313 FW_DBG_TRIGGER_DRIVER);
8ca151b5
JB
1314 goto error;
1315 }
1316
d0b813fc 1317 iwl_get_shared_mem_conf(&mvm->fwrt);
04fd2c28 1318
1f3b0ff8
LE
1319 ret = iwl_mvm_sf_update(mvm, NULL, false);
1320 if (ret)
1321 IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
1322
a1af4c48 1323 if (!iwl_trans_dbg_ini_valid(mvm->trans)) {
7a14c23d
SS
1324 mvm->fwrt.dump.conf = FW_DBG_INVALID;
1325 /* if we have a destination, assume EARLY START */
1326 if (mvm->fw->dbg.dest_tlv)
1327 mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
1328 iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
1329 }
6a951267 1330
a0544272 1331 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
8ca151b5
JB
1332 if (ret)
1333 goto error;
1334
7d6222e2
JB
1335 if (!iwl_mvm_has_unified_ucode(mvm)) {
1336 /* Send phy db control command and then phy db calibration */
1f370650
SS
1337 ret = iwl_send_phy_db_data(mvm->phy_db);
1338 if (ret)
1339 goto error;
1f370650 1340 }
8ca151b5 1341
bb99ff9b
LC
1342 ret = iwl_send_phy_cfg_cmd(mvm);
1343 if (ret)
1344 goto error;
1345
b3de3ef4
EG
1346 ret = iwl_mvm_send_bt_init_conf(mvm);
1347 if (ret)
1348 goto error;
1349
cceb4507
SM
1350 if (fw_has_capa(&mvm->fw->ucode_capa,
1351 IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
a8eb340f 1352 ret = iwl_set_soc_latency(&mvm->fwrt);
cceb4507
SM
1353 if (ret)
1354 goto error;
1355 }
1356
43413a97 1357 /* Init RSS configuration */
286ca8eb 1358 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
8edbfaa1
SS
1359 ret = iwl_configure_rxq(mvm);
1360 if (ret) {
1361 IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
1362 ret);
1363 goto error;
1364 }
1365 }
1366
1367 if (iwl_mvm_has_new_rx_api(mvm)) {
43413a97
SS
1368 ret = iwl_send_rss_cfg_cmd(mvm);
1369 if (ret) {
1370 IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
1371 ret);
1372 goto error;
1373 }
1374 }
1375
8ca151b5 1376 /* init the fw <-> mac80211 STA mapping */
0ae98812 1377 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
8ca151b5
JB
1378 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1379
0ae98812 1380 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1d3c3f63 1381
b2b7875b
JB
1382 /* reset quota debouncing buffer - 0xff will yield invalid data */
1383 memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
1384
79660869
IL
1385 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) {
1386 ret = iwl_mvm_send_dqa_cmd(mvm);
1387 if (ret)
1388 goto error;
1389 }
97d5be7e 1390
8ca151b5
JB
1391 /* Add auxiliary station for scanning */
1392 ret = iwl_mvm_add_aux_sta(mvm);
1393 if (ret)
1394 goto error;
1395
53a9d61e 1396 /* Add all the PHY contexts */
dd36a507
TM
1397 i = 0;
1398 while (!sband && i < NUM_NL80211_BANDS)
1399 sband = mvm->hw->wiphy->bands[i++];
1400
1401 if (WARN_ON_ONCE(!sband))
1402 goto error;
1403
1404 chan = &sband->channels[0];
1405
53a9d61e
IP
1406 cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
1407 for (i = 0; i < NUM_PHY_CTX; i++) {
1408 /*
1409 * The channel used here isn't relevant as it's
1410 * going to be overwritten in the other flows.
1411 * For now use the first channel we have.
1412 */
1413 ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
1414 &chandef, 1, 1);
1415 if (ret)
1416 goto error;
1417 }
8ca151b5 1418
c221daf2
CRI
1419 if (iwl_mvm_is_tt_in_fw(mvm)) {
1420 /* in order to give the responsibility of ct-kill and
1421 * TX backoff to FW we need to send empty temperature reporting
1422 * cmd during init time
1423 */
1424 iwl_mvm_send_temp_report_ths_cmd(mvm);
1425 } else {
1426 /* Initialize tx backoffs to the minimal possible */
1427 iwl_mvm_tt_tx_backoff(mvm, 0);
1428 }
5c89e7bc 1429
242d9c8b 1430#ifdef CONFIG_THERMAL
5c89e7bc 1431 /* TODO: read the budget from BIOS / Platform NVM */
944eafc2
CRI
1432
1433 /*
1434 * In case there is no budget from BIOS / Platform NVM the default
1435 * budget should be 2000mW (cooling state 0).
1436 */
1437 if (iwl_mvm_is_ctdp_supported(mvm)) {
5c89e7bc
CRI
1438 ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
1439 mvm->cooling_dev.cur_state);
75cfe338
LC
1440 if (ret)
1441 goto error;
1442 }
c221daf2 1443#endif
0c0e2c71 1444
aa43ae12
AM
1445 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2))
1446 WARN_ON(iwl_mvm_config_ltr(mvm));
9180ac50 1447
c1cb92fc 1448 ret = iwl_mvm_power_update_device(mvm);
64b928c4
AB
1449 if (ret)
1450 goto error;
1451
f5b1cb2e 1452 iwl_mvm_lari_cfg(mvm);
35af15d1
AN
1453 /*
1454 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
1455 * anyway, so don't init MCC.
1456 */
1457 if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
1458 ret = iwl_mvm_init_mcc(mvm);
1459 if (ret)
1460 goto error;
1461 }
90d4f7db 1462
859d914c 1463 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
4ca87a5f 1464 mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
b66b5817 1465 mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
d2496221
DS
1466 ret = iwl_mvm_config_scan(mvm);
1467 if (ret)
1468 goto error;
1469 }
1470
f130bb75
MG
1471 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1472 iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);
1473
48e775e6
HD
1474 if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
1475 IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");
1476
6ce1e5c0
GA
1477 ret = iwl_mvm_ppag_init(mvm);
1478 if (ret)
1479 goto error;
1480
da2830ac 1481 ret = iwl_mvm_sar_init(mvm);
5d041c46
LC
1482 if (ret == 0) {
1483 ret = iwl_mvm_sar_geo_init(mvm);
1edd56e6 1484 } else if (ret == -ENOENT && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
5d041c46
LC
1485 /*
1486 * If basic SAR is not available, we check for WGDS,
1487 * which should *not* be available either. If it is
1488 * available, issue an error, because we can't use SAR
1489 * Geo without basic SAR.
1490 */
1491 IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
1492 }
da2830ac 1493
5d041c46 1494 if (ret < 0)
a6bff3cb
HD
1495 goto error;
1496
28dd7ccd 1497 iwl_mvm_tas_init(mvm);
7089ae63
JB
1498 iwl_mvm_leds_sync(mvm);
1499
53a9d61e 1500 IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
8ca151b5
JB
1501 return 0;
1502 error:
f4744258 1503 if (!iwlmvm_mod_params.init_dbg || !ret)
de8ba41b 1504 iwl_mvm_stop_device(mvm);
8ca151b5
JB
1505 return ret;
1506}
1507
1508int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
1509{
1510 int ret, i;
1511
1512 lockdep_assert_held(&mvm->mutex);
1513
1514 ret = iwl_trans_start_hw(mvm->trans);
1515 if (ret)
1516 return ret;
1517
1518 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
1519 if (ret) {
1520 IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
1521 goto error;
1522 }
1523
a0544272 1524 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
8ca151b5
JB
1525 if (ret)
1526 goto error;
1527
1528 /* Send phy db control command and then phy db calibration*/
1529 ret = iwl_send_phy_db_data(mvm->phy_db);
1530 if (ret)
1531 goto error;
1532
1533 ret = iwl_send_phy_cfg_cmd(mvm);
1534 if (ret)
1535 goto error;
1536
1537 /* init the fw <-> mac80211 STA mapping */
0ae98812 1538 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
8ca151b5
JB
1539 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1540
1541 /* Add auxiliary station for scanning */
1542 ret = iwl_mvm_add_aux_sta(mvm);
1543 if (ret)
1544 goto error;
1545
1546 return 0;
1547 error:
fcb6b92a 1548 iwl_mvm_stop_device(mvm);
8ca151b5
JB
1549 return ret;
1550}
1551
0416841d
JB
1552void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
1553 struct iwl_rx_cmd_buffer *rxb)
8ca151b5
JB
1554{
1555 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1556 struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
1557 u32 flags = le32_to_cpu(card_state_notif->flags);
1558
1559 IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
1560 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
1561 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
1562 (flags & CT_KILL_CARD_DISABLED) ?
1563 "Reached" : "Not reached");
8ca151b5
JB
1564}
1565
0416841d
JB
1566void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
1567 struct iwl_rx_cmd_buffer *rxb)
30269c12
CRI
1568{
1569 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1570 struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
1571
0c8d0a47
GBA
1572 IWL_DEBUG_INFO(mvm,
1573 "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
1574 le32_to_cpu(mfuart_notif->installed_ver),
1575 le32_to_cpu(mfuart_notif->external_ver),
1576 le32_to_cpu(mfuart_notif->status),
1577 le32_to_cpu(mfuart_notif->duration));
1578
19f63c53
GBA
1579 if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
1580 IWL_DEBUG_INFO(mvm,
0c8d0a47 1581 "MFUART: image size: 0x%08x\n",
19f63c53 1582 le32_to_cpu(mfuart_notif->image_size));
30269c12 1583}