/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"

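/*
 * Editorial note: the timeouts below are in jiffies; HZ is one second,
 * so the ALIVE notification gets one second and calibration gets two.
 */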
#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

static inline const struct fw_img *
iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
{
	if (ucode_type >= IWL_UCODE_TYPE_MAX)
		return NULL;

	return &mvm->fw->img[ucode_type];
}

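/*
 * Illustrative usage of iwl_get_ucode_image() above (a sketch, not part
 * of the original driver): callers select an image by uCode phase, e.g.
 *
 *	fw = iwl_get_ucode_image(mvm, IWL_UCODE_INIT);
 *
 * which returns &mvm->fw->img[IWL_UCODE_INIT], or NULL if the type is
 * out of range. See iwl_mvm_load_ucode_wait_alive() below.
 */
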
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
			     IWL_RSS_HASH_TYPE_IPV6_TCP |
			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
	};

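	/*
	 * Spread RSS-hashed traffic across the RX queues round-robin:
	 * with, say, 4 RX queues (an assumed value, for illustration),
	 * the indirection table below becomes 0,1,2,3,0,1,2,3,...
	 */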
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
	memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}

void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
	int i;

	if (!mvm->fw_paging_db[0].fw_paging_block)
		return;

	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
		if (!mvm->fw_paging_db[i].fw_paging_block) {
			IWL_DEBUG_FW(mvm,
				     "Paging: block %d already freed, continue to next page\n",
				     i);

			continue;
		}

		__free_pages(mvm->fw_paging_db[i].fw_paging_block,
			     get_order(mvm->fw_paging_db[i].fw_paging_size));
	}
	kfree(mvm->trans->paging_download_buf);
	mvm->trans->paging_download_buf = NULL;

	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}

static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
	int sec_idx, idx;
	u32 offset = 0;

	/*
	 * Find where the paging image starts:
	 * if CPU2 exists and is in paging format, the image looks like this:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
	 *	CPU2 sections from the CPU2 paging sections
	 * CPU2 paging CSS
	 * CPU2 paging image (including instructions and data)
	 */
	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	/*
	 * If paging is enabled, there should be at least 2 more sections left
	 * (one for the CSS and one for the paging data).
	 */
	if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
		IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
		iwl_free_fw_paging(mvm);
		return -EINVAL;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       mvm->fw_paging_db[0].fw_paging_size);

	IWL_DEBUG_FW(mvm,
		     "Paging: copied %d CSS bytes to first block\n",
		     mvm->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * Copy the paging blocks to the dram.
	 * The loop index starts from 1 since the CSS block was already copied
	 * to dram above and occupies index 0.
	 * The loop stops at num_of_paging_blk since the last block may not be
	 * full and is copied separately below.
	 */
	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       mvm->fw_paging_db[idx].fw_paging_size);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d paging bytes to block %d\n",
			     mvm->fw_paging_db[idx].fw_paging_size,
			     idx);

		offset += mvm->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (mvm->num_of_pages_in_last_blk > 0) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d pages in the last block %d\n",
			     mvm->num_of_pages_in_last_blk, idx);
	}

	return 0;
}

static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx = 0;
	int order, num_of_pages;
	int dma_enabled;

	if (mvm->fw_paging_db[0].fw_paging_block)
		return 0;

	dma_enabled = is_device_dma_capable(mvm->trans->dev);

	/* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

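	/*
	 * num_of_paging_blk below is ceil(num_of_pages /
	 * NUM_OF_PAGE_PER_GROUP). Worked example (assumed sizes, for
	 * illustration only): a 1 MB paging_mem_size with 4 KB FW pages
	 * gives 256 pages; at 8 pages per block that is 32 blocks, with
	 * all 8 pages used in the last one.
	 */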
	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	mvm->num_of_paging_blk = ((num_of_pages - 1) /
				    NUM_OF_PAGE_PER_GROUP) + 1;

	mvm->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);

	IWL_DEBUG_FW(mvm,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     mvm->num_of_paging_blk,
		     mvm->num_of_pages_in_last_blk);

	/* allocate block of 4Kbytes for paging CSS */
	order = get_order(FW_PAGING_SIZE);
	block = alloc_pages(GFP_KERNEL, order);
	if (!block) {
		/* free all the previous pages since we failed */
		iwl_free_fw_paging(mvm);
		return -ENOMEM;
	}

	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;

	if (dma_enabled) {
		phys = dma_map_page(mvm->trans->dev, block, 0,
				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(mvm->trans->dev, phys)) {
			/*
			 * free the previous pages and the current one since
			 * we failed to map_page.
			 */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}
		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
	} else {
		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
			blk_idx << BLOCK_2_EXP_SIZE;
	}

	IWL_DEBUG_FW(mvm,
		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
		     order);

	/*
	 * Allocate blocks in dram.
	 * Since the CSS was allocated in fw_paging_db[0], the loop starts
	 * from index 1.
	 */
	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		/* allocate block of PAGING_BLOCK_SIZE (32K) */
		order = get_order(PAGING_BLOCK_SIZE);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}

		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;

		if (dma_enabled) {
			phys = dma_map_page(mvm->trans->dev, block, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(mvm->trans->dev, phys)) {
				/*
				 * free the previous pages and the current one
				 * since we failed to map_page.
				 */
				iwl_free_fw_paging(mvm);
				return -ENOMEM;
			}
			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
		} else {
			mvm->fw_paging_db[blk_idx].fw_paging_phys =
				PAGING_ADDR_SIG |
				blk_idx << BLOCK_2_EXP_SIZE;
		}

		IWL_DEBUG_FW(mvm,
			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
			     order);
	}

	return 0;
}

static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(mvm, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(mvm, fw);
}

/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	int blk_idx;
	__le32 dev_phy_addr;
	struct iwl_fw_paging_cmd fw_paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};

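	/*
	 * The device expects the block addresses in units of FW pages
	 * rather than bytes, hence the shift by PAGE_2_EXP_SIZE below
	 * (presumably the log2 of the FW page size; an inference, not
	 * stated here).
	 */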
	/* loop over all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dev_phy_addr =
			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
				    PAGE_2_EXP_SIZE);
		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}

/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	/* Add an extra page for headers */
	mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
						  FW_PAGING_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;

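	/*
	 * The ALIVE notification carries no explicit version field; the
	 * three layouts are told apart purely by payload length, as the
	 * size checks below show.
	 */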
	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}

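/*
 * Notification-wait callback: returning true ends the wait. Calibration
 * results (CALIB_RES_NOTIF_PHY_DB) are stored in the phy db and return
 * false, so the wait keeps running until INIT_COMPLETE_NOTIF arrives.
 */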
static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));

	return false;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

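	/*
	 * If the debug configuration asks for a usniffer image to run
	 * from ALIVE and the firmware does not provide a unified
	 * usniffer image, substitute the REGULAR_USNIFFER image for the
	 * REGULAR one.
	 */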
	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * Update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Configure and operate the fw paging mechanism.
	 * The driver configures the paging flow only once; the CPU2 paging
	 * image is included in the IWL_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/*
		 * When dma is not enabled, the driver needs to copy / write
		 * the downloaded / uploaded page to / from the smem.
		 * This gets the location of the place where the pages are
		 * stored.
		 */
		if (!is_device_dma_capable(mvm->trans->dev)) {
			ret = iwl_trans_get_paging_item(mvm);
			if (ret) {
				IWL_ERR(mvm, "failed to get FW paging item\n");
				return ret;
			}
		}

		ret = iwl_save_fw_paging(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to save the FW paging image\n");
			return ret;
		}

		ret = iwl_send_paging_cmd(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to send the paging cmd\n");
			iwl_free_fw_paging(mvm);
			return ret;
		}
	}

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * Abort after reading the NVM in case RF Kill is on; we will complete
	 * the init sequence later, when RF kill switches off.
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send the phy configuration command to the init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = SHARED_MEM_CFG,
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_rx_packet *pkt;
	struct iwl_shared_mem_cfg *mem_cfg;
	u32 i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	mem_cfg = (void *)pkt->data;

	mvm->shared_mem_cfg.shared_mem_addr =
		le32_to_cpu(mem_cfg->shared_mem_addr);
	mvm->shared_mem_cfg.shared_mem_size =
		le32_to_cpu(mem_cfg->shared_mem_size);
	mvm->shared_mem_cfg.sample_buff_addr =
		le32_to_cpu(mem_cfg->sample_buff_addr);
	mvm->shared_mem_cfg.sample_buff_size =
		le32_to_cpu(mem_cfg->sample_buff_size);
	mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);
	mvm->shared_mem_cfg.page_buff_addr =
		le32_to_cpu(mem_cfg->page_buff_addr);
	mvm->shared_mem_cfg.page_buff_size =
		le32_to_cpu(mem_cfg->page_buff_size);
	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}

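/*
 * LTR (PCIe Latency Tolerance Reporting) lets the platform choose deeper
 * power states based on the device's tolerated latency; the command below
 * enables the feature only when the transport reports LTR support.
 */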
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}

int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	/*
	 * If we haven't completed the run of the init ucode during
	 * module loading, load init ucode now
	 * (for example, if we were in RFKILL)
	 */
	ret = iwl_run_init_mvm_ucode(mvm, false);
	if (ret && !iwlmvm_mod_params.init_dbg) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		goto error;
	}
	if (!iwlmvm_mod_params.init_dbg) {
		/*
		 * Stop and start the transport without entering low power
		 * mode. This will save the state of other components on the
		 * device that are triggered by the INIT firmware (MFUART).
		 */
		_iwl_trans_stop_device(mvm->trans, false);
		ret = _iwl_trans_start_hw(mvm->trans, false);
		if (ret)
			goto error;
	}

	if (iwlmvm_mod_params.init_dbg)
		return 0;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* Init RSS configuration */
	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send an empty temperature
		 * reporting cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */
	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

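	/*
	 * Enable HW RX checksum offload; 0x3 presumably sets the two
	 * enable bits in the RX_EN_CSUM register (an assumption, the bit
	 * layout is not visible here).
	 */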
	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}

void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));
}