ath10k: remove redundant -ve check against u32 integer size
drivers/net/wireless/ath/ath10k/pci.c
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
8b1083d6 3 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5e3dd157
KV
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/pci.h>
19#include <linux/module.h>
20#include <linux/interrupt.h>
21#include <linux/spinlock.h>
650b91fb 22#include <linux/bitops.h>
5e3dd157
KV
23
24#include "core.h"
25#include "debug.h"
f25b9f28 26#include "coredump.h"
5e3dd157
KV
27
28#include "targaddrs.h"
29#include "bmi.h"
30
31#include "hif.h"
32#include "htc.h"
33
34#include "ce.h"
35#include "pci.h"
36
35098463
KV
37enum ath10k_pci_reset_mode {
38 ATH10K_PCI_RESET_AUTO = 0,
39 ATH10K_PCI_RESET_WARM_ONLY = 1,
40};
41
cfe9c45b 42static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
35098463 43static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
cfe9c45b 44
cfe9c45b
MK
45module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
46MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
47
35098463
KV
48module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
49MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
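/* Note: with 0644 permissions both parameters are also exposed under
 * /sys/module/ath10k_pci/parameters/ (assuming the module is built as
 * ath10k_pci.ko); e.g. "modprobe ath10k_pci irq_mode=2 reset_mode=1".
 */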
50
0399eca8
KV
51/* how long to wait for the target to initialise, in ms */
52#define ATH10K_PCI_TARGET_WAIT 3000
61c95cea 53#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
0399eca8 54
703f261d
AL
55/* Maximum number of bytes that can be handled atomically by
56 * diag read and write.
57 */
58#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
59
9baa3c34 60static const struct pci_device_id ath10k_pci_id_table[] = {
5e3dd157 61 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
36582e5d 62 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
d63955b3 63 { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
8a055a8a 64 { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
e565c312 65 { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
651b4cdc 66 { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
a226b519 67 { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
6fd3dd71 68 { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
5e3dd157
KV
69 {0}
70};
71
7505f7c3
MK
72static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
73 /* QCA988X pre 2.0 chips are not supported because they need some nasty
74 * hacks which ath10k does not implement; without them these devices
75 * crash horribly.
76 */
77 { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
36582e5d
MK
78
79 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
80 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
81 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
82 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
83 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
84
d63955b3
MK
85 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
86 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
87 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
88 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
89 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
36582e5d 90
8a055a8a 91 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
034074f3 92
651b4cdc
VT
93 { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
94
e565c312
AK
95 { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
96
034074f3 97 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
12551ced 98 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
651b4cdc 99
6fd3dd71 100 { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
7505f7c3
MK
101};
102
728f95ee 103static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
fc36e3ff 104static int ath10k_pci_cold_reset(struct ath10k *ar);
6e4202c3 105static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
fc15ca13
MK
106static int ath10k_pci_init_irq(struct ath10k *ar);
107static int ath10k_pci_deinit_irq(struct ath10k *ar);
108static int ath10k_pci_request_irq(struct ath10k *ar);
109static void ath10k_pci_free_irq(struct ath10k *ar);
6bb099b0
BG
110static int ath10k_pci_bmi_wait(struct ath10k *ar,
111 struct ath10k_ce_pipe *tx_pipe,
85622cde
MK
112 struct ath10k_ce_pipe *rx_pipe,
113 struct bmi_xfer *xfer);
6e4202c3 114static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
0e5b2950 115static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
9d9bdbb0 116static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
a70587b3
RM
117static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
118static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
6419fdbb 119static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
afb0bf7f 120static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
5e3dd157 121
2727a743 122static struct ce_attr host_ce_config_wlan[] = {
48e9c225
KV
123 /* CE0: host->target HTC control and raw streams */
124 {
125 .flags = CE_ATTR_FLAGS,
126 .src_nentries = 16,
127 .src_sz_max = 256,
128 .dest_nentries = 0,
0e5b2950 129 .send_cb = ath10k_pci_htc_tx_cb,
48e9c225
KV
130 },
131
132 /* CE1: target->host HTT + HTC control */
133 {
134 .flags = CE_ATTR_FLAGS,
135 .src_nentries = 0,
63838640 136 .src_sz_max = 2048,
48e9c225 137 .dest_nentries = 512,
6419fdbb 138 .recv_cb = ath10k_pci_htt_htc_rx_cb,
48e9c225
KV
139 },
140
141 /* CE2: target->host WMI */
142 {
143 .flags = CE_ATTR_FLAGS,
144 .src_nentries = 0,
145 .src_sz_max = 2048,
30abb330 146 .dest_nentries = 128,
9d9bdbb0 147 .recv_cb = ath10k_pci_htc_rx_cb,
48e9c225
KV
148 },
149
150 /* CE3: host->target WMI */
151 {
152 .flags = CE_ATTR_FLAGS,
153 .src_nentries = 32,
154 .src_sz_max = 2048,
155 .dest_nentries = 0,
0e5b2950 156 .send_cb = ath10k_pci_htc_tx_cb,
48e9c225
KV
157 },
158
159 /* CE4: host->target HTT */
160 {
161 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
162 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
163 .src_sz_max = 256,
164 .dest_nentries = 0,
a70587b3 165 .send_cb = ath10k_pci_htt_tx_cb,
48e9c225
KV
166 },
167
a70587b3 168 /* CE5: target->host HTT (HIF->HTT) */
48e9c225
KV
169 {
170 .flags = CE_ATTR_FLAGS,
171 .src_nentries = 0,
a70587b3
RM
172 .src_sz_max = 512,
173 .dest_nentries = 512,
174 .recv_cb = ath10k_pci_htt_rx_cb,
48e9c225
KV
175 },
176
177 /* CE6: target autonomous hif_memcpy */
178 {
179 .flags = CE_ATTR_FLAGS,
180 .src_nentries = 0,
181 .src_sz_max = 0,
182 .dest_nentries = 0,
183 },
184
185 /* CE7: ce_diag, the Diagnostic Window */
186 {
187 .flags = CE_ATTR_FLAGS,
188 .src_nentries = 2,
189 .src_sz_max = DIAG_TRANSFER_LIMIT,
190 .dest_nentries = 2,
191 },
050af069
VT
192
193 /* CE8: target->host pktlog */
194 {
195 .flags = CE_ATTR_FLAGS,
196 .src_nentries = 0,
197 .src_sz_max = 2048,
198 .dest_nentries = 128,
afb0bf7f 199 .recv_cb = ath10k_pci_pktlog_rx_cb,
050af069
VT
200 },
201
202 /* CE9 target autonomous qcache memcpy */
203 {
204 .flags = CE_ATTR_FLAGS,
205 .src_nentries = 0,
206 .src_sz_max = 0,
207 .dest_nentries = 0,
208 },
209
210 /* CE10: target autonomous hif memcpy */
211 {
212 .flags = CE_ATTR_FLAGS,
213 .src_nentries = 0,
214 .src_sz_max = 0,
215 .dest_nentries = 0,
216 },
217
218 /* CE11: target autonomous hif memcpy */
219 {
220 .flags = CE_ATTR_FLAGS,
221 .src_nentries = 0,
222 .src_sz_max = 0,
223 .dest_nentries = 0,
224 },
5e3dd157
KV
225};
226
227/* Target firmware's Copy Engine configuration. */
2727a743 228static struct ce_pipe_config target_ce_config_wlan[] = {
d88effba
KV
229 /* CE0: host->target HTC control and raw streams */
230 {
0fdc14e4
MK
231 .pipenum = __cpu_to_le32(0),
232 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
233 .nentries = __cpu_to_le32(32),
234 .nbytes_max = __cpu_to_le32(256),
235 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
236 .reserved = __cpu_to_le32(0),
d88effba
KV
237 },
238
239 /* CE1: target->host HTT + HTC control */
240 {
0fdc14e4
MK
241 .pipenum = __cpu_to_le32(1),
242 .pipedir = __cpu_to_le32(PIPEDIR_IN),
243 .nentries = __cpu_to_le32(32),
63838640 244 .nbytes_max = __cpu_to_le32(2048),
0fdc14e4
MK
245 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
246 .reserved = __cpu_to_le32(0),
d88effba
KV
247 },
248
249 /* CE2: target->host WMI */
250 {
0fdc14e4
MK
251 .pipenum = __cpu_to_le32(2),
252 .pipedir = __cpu_to_le32(PIPEDIR_IN),
30abb330 253 .nentries = __cpu_to_le32(64),
0fdc14e4
MK
254 .nbytes_max = __cpu_to_le32(2048),
255 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
256 .reserved = __cpu_to_le32(0),
d88effba
KV
257 },
258
259 /* CE3: host->target WMI */
260 {
0fdc14e4
MK
261 .pipenum = __cpu_to_le32(3),
262 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
263 .nentries = __cpu_to_le32(32),
264 .nbytes_max = __cpu_to_le32(2048),
265 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
266 .reserved = __cpu_to_le32(0),
d88effba
KV
267 },
268
269 /* CE4: host->target HTT */
270 {
0fdc14e4
MK
271 .pipenum = __cpu_to_le32(4),
272 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
273 .nentries = __cpu_to_le32(256),
274 .nbytes_max = __cpu_to_le32(256),
275 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
276 .reserved = __cpu_to_le32(0),
d88effba
KV
277 },
278
5e3dd157 279 /* NB: 50% of src nentries, since tx has 2 frags */
d88effba 280
a70587b3 281 /* CE5: target->host HTT (HIF->HTT) */
d88effba 282 {
0fdc14e4 283 .pipenum = __cpu_to_le32(5),
a70587b3 284 .pipedir = __cpu_to_le32(PIPEDIR_IN),
0fdc14e4 285 .nentries = __cpu_to_le32(32),
a70587b3 286 .nbytes_max = __cpu_to_le32(512),
0fdc14e4
MK
287 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
288 .reserved = __cpu_to_le32(0),
d88effba
KV
289 },
290
291 /* CE6: Reserved for target autonomous hif_memcpy */
292 {
0fdc14e4
MK
293 .pipenum = __cpu_to_le32(6),
294 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
295 .nentries = __cpu_to_le32(32),
296 .nbytes_max = __cpu_to_le32(4096),
297 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
298 .reserved = __cpu_to_le32(0),
d88effba
KV
299 },
300
5e3dd157 301 /* CE7 used only by Host */
050af069
VT
302 {
303 .pipenum = __cpu_to_le32(7),
304 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
305 .nentries = __cpu_to_le32(0),
306 .nbytes_max = __cpu_to_le32(0),
307 .flags = __cpu_to_le32(0),
308 .reserved = __cpu_to_le32(0),
309 },
310
311 /* CE8 target->host pktlog */
312 {
313 .pipenum = __cpu_to_le32(8),
314 .pipedir = __cpu_to_le32(PIPEDIR_IN),
315 .nentries = __cpu_to_le32(64),
316 .nbytes_max = __cpu_to_le32(2048),
317 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
318 .reserved = __cpu_to_le32(0),
319 },
320
321 /* CE9 target autonomous qcache memcpy */
322 {
323 .pipenum = __cpu_to_le32(9),
324 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
325 .nentries = __cpu_to_le32(32),
326 .nbytes_max = __cpu_to_le32(2048),
327 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
328 .reserved = __cpu_to_le32(0),
329 },
330
331 /* It is not necessary to send the target wlan configuration for CE10 & CE11
332 * as these CEs are not actively used in target.
333 */
5e3dd157
KV
334};
335
d7bfb7aa
MK
336/*
337 * Map from service/endpoint to Copy Engine.
338 * This table is derived from the CE_PCI TABLE, above.
339 * It is passed to the Target at startup for use by firmware.
340 */
2727a743 341static struct service_to_pipe target_service_to_ce_map_wlan[] = {
d7bfb7aa 342 {
0fdc14e4
MK
343 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
344 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
345 __cpu_to_le32(3),
d7bfb7aa
MK
346 },
347 {
0fdc14e4
MK
348 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
349 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
350 __cpu_to_le32(2),
d7bfb7aa
MK
351 },
352 {
0fdc14e4
MK
353 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
354 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
355 __cpu_to_le32(3),
d7bfb7aa
MK
356 },
357 {
0fdc14e4
MK
358 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
359 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
360 __cpu_to_le32(2),
d7bfb7aa
MK
361 },
362 {
0fdc14e4
MK
363 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
364 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
365 __cpu_to_le32(3),
d7bfb7aa
MK
366 },
367 {
0fdc14e4
MK
368 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
369 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
370 __cpu_to_le32(2),
d7bfb7aa
MK
371 },
372 {
0fdc14e4
MK
373 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
374 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
375 __cpu_to_le32(3),
d7bfb7aa
MK
376 },
377 {
0fdc14e4
MK
378 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
379 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
380 __cpu_to_le32(2),
d7bfb7aa
MK
381 },
382 {
0fdc14e4
MK
383 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
384 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
385 __cpu_to_le32(3),
d7bfb7aa
MK
386 },
387 {
0fdc14e4
MK
388 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
389 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
390 __cpu_to_le32(2),
d7bfb7aa
MK
391 },
392 {
0fdc14e4
MK
393 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
394 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
395 __cpu_to_le32(0),
d7bfb7aa
MK
396 },
397 {
0fdc14e4
MK
398 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
399 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
400 __cpu_to_le32(1),
d7bfb7aa 401 },
0fdc14e4
MK
402 { /* not used */
403 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
404 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
405 __cpu_to_le32(0),
d7bfb7aa 406 },
0fdc14e4
MK
407 { /* not used */
408 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
409 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
410 __cpu_to_le32(1),
d7bfb7aa
MK
411 },
412 {
0fdc14e4
MK
413 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
414 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
415 __cpu_to_le32(4),
d7bfb7aa
MK
416 },
417 {
0fdc14e4
MK
418 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
419 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
a70587b3 420 __cpu_to_le32(5),
d7bfb7aa
MK
421 },
422
423 /* (Additions here) */
424
0fdc14e4
MK
425 { /* must be last */
426 __cpu_to_le32(0),
427 __cpu_to_le32(0),
428 __cpu_to_le32(0),
d7bfb7aa
MK
429 },
430};
431
77258d40
MK
432static bool ath10k_pci_is_awake(struct ath10k *ar)
433{
434 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
435 u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
436 RTC_STATE_ADDRESS);
437
438 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
439}
440
441static void __ath10k_pci_wake(struct ath10k *ar)
442{
443 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
444
445 lockdep_assert_held(&ar_pci->ps_lock);
446
447 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
448 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
449
450 iowrite32(PCIE_SOC_WAKE_V_MASK,
451 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
452 PCIE_SOC_WAKE_ADDRESS);
453}
454
455static void __ath10k_pci_sleep(struct ath10k *ar)
456{
457 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
458
459 lockdep_assert_held(&ar_pci->ps_lock);
460
461 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
462 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
463
464 iowrite32(PCIE_SOC_WAKE_RESET,
465 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
466 PCIE_SOC_WAKE_ADDRESS);
467 ar_pci->ps_awake = false;
468}
469
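/* Poll the RTC state register until the target reports it is awake.
 * The per-iteration delay starts at 5 us and grows in 5 us steps up to
 * 50 us; once the accumulated delay exceeds PCIE_WAKE_TIMEOUT the wait
 * is abandoned with -ETIMEDOUT. A warning is logged if the wakeup
 * succeeded but took longer than PCIE_WAKE_LATE_US.
 */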
470static int ath10k_pci_wake_wait(struct ath10k *ar)
471{
472 int tot_delay = 0;
473 int curr_delay = 5;
474
475 while (tot_delay < PCIE_WAKE_TIMEOUT) {
39b91b81
MK
476 if (ath10k_pci_is_awake(ar)) {
477 if (tot_delay > PCIE_WAKE_LATE_US)
478 ath10k_warn(ar, "device wakeup took %d ms which is unusually long; otherwise the device works normally\n",
39b91b81 479 tot_delay / 1000);
77258d40 480 return 0;
39b91b81 481 }
77258d40
MK
482
483 udelay(curr_delay);
484 tot_delay += curr_delay;
485
486 if (curr_delay < 50)
487 curr_delay += 5;
488 }
489
490 return -ETIMEDOUT;
491}
492
1aaf8efb
AK
493static int ath10k_pci_force_wake(struct ath10k *ar)
494{
495 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
496 unsigned long flags;
497 int ret = 0;
498
d9d6a5ae
RM
499 if (ar_pci->pci_ps)
500 return ret;
501
1aaf8efb
AK
502 spin_lock_irqsave(&ar_pci->ps_lock, flags);
503
504 if (!ar_pci->ps_awake) {
505 iowrite32(PCIE_SOC_WAKE_V_MASK,
506 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
507 PCIE_SOC_WAKE_ADDRESS);
508
509 ret = ath10k_pci_wake_wait(ar);
510 if (ret == 0)
511 ar_pci->ps_awake = true;
512 }
513
514 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
515
516 return ret;
517}
518
519static void ath10k_pci_force_sleep(struct ath10k *ar)
520{
521 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
522 unsigned long flags;
523
524 spin_lock_irqsave(&ar_pci->ps_lock, flags);
525
526 iowrite32(PCIE_SOC_WAKE_RESET,
527 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
528 PCIE_SOC_WAKE_ADDRESS);
529 ar_pci->ps_awake = false;
530
531 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
532}
533
77258d40
MK
534static int ath10k_pci_wake(struct ath10k *ar)
535{
536 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
537 unsigned long flags;
538 int ret = 0;
539
1aaf8efb
AK
540 if (ar_pci->pci_ps == 0)
541 return ret;
542
77258d40
MK
543 spin_lock_irqsave(&ar_pci->ps_lock, flags);
544
545 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
546 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
547
548 /* This function can be called very frequently. To avoid excessive
549 * CPU stalls for MMIO reads use a cache var to hold the device state.
550 */
551 if (!ar_pci->ps_awake) {
552 __ath10k_pci_wake(ar);
553
554 ret = ath10k_pci_wake_wait(ar);
555 if (ret == 0)
556 ar_pci->ps_awake = true;
557 }
558
559 if (ret == 0) {
560 ar_pci->ps_wake_refcount++;
561 WARN_ON(ar_pci->ps_wake_refcount == 0);
562 }
563
564 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
565
566 return ret;
567}
568
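/* Drop one wake reference. The actual transition to sleep is deferred:
 * ps_timer is (re)armed for ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC and only
 * puts the chip to sleep if no new references were taken in the
 * meantime, so bursts of register accesses do not bounce the SoC power
 * state.
 */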
569static void ath10k_pci_sleep(struct ath10k *ar)
570{
571 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
572 unsigned long flags;
573
1aaf8efb
AK
574 if (ar_pci->pci_ps == 0)
575 return;
576
77258d40
MK
577 spin_lock_irqsave(&ar_pci->ps_lock, flags);
578
579 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
580 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
581
582 if (WARN_ON(ar_pci->ps_wake_refcount == 0))
583 goto skip;
584
585 ar_pci->ps_wake_refcount--;
586
587 mod_timer(&ar_pci->ps_timer, jiffies +
588 msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
589
590skip:
591 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
592}
593
7ac76764 594static void ath10k_pci_ps_timer(struct timer_list *t)
77258d40 595{
7ac76764
KC
596 struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
597 struct ath10k *ar = ar_pci->ar;
77258d40
MK
598 unsigned long flags;
599
600 spin_lock_irqsave(&ar_pci->ps_lock, flags);
601
602 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
603 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
604
605 if (ar_pci->ps_wake_refcount > 0)
606 goto skip;
607
608 __ath10k_pci_sleep(ar);
609
610skip:
611 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
612}
613
614static void ath10k_pci_sleep_sync(struct ath10k *ar)
615{
616 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
617 unsigned long flags;
618
1aaf8efb
AK
619 if (ar_pci->pci_ps == 0) {
620 ath10k_pci_force_sleep(ar);
621 return;
622 }
623
77258d40
MK
624 del_timer_sync(&ar_pci->ps_timer);
625
626 spin_lock_irqsave(&ar_pci->ps_lock, flags);
627 WARN_ON(ar_pci->ps_wake_refcount > 0);
628 __ath10k_pci_sleep(ar);
629 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
630}
631
4ddb3299 632static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
77258d40
MK
633{
634 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
635 int ret;
636
aeae5b4c
MK
637 if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
638 ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
639 offset, offset + sizeof(value), ar_pci->mem_len);
640 return;
641 }
642
77258d40
MK
643 ret = ath10k_pci_wake(ar);
644 if (ret) {
645 ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
646 value, offset, ret);
647 return;
648 }
649
650 iowrite32(value, ar_pci->mem + offset);
651 ath10k_pci_sleep(ar);
652}
653
4ddb3299 654static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
77258d40
MK
655{
656 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
657 u32 val;
658 int ret;
659
aeae5b4c
MK
660 if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
661 ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
662 offset, offset + sizeof(val), ar_pci->mem_len);
663 return 0;
664 }
665
77258d40
MK
666 ret = ath10k_pci_wake(ar);
667 if (ret) {
668 ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
669 offset, ret);
670 return 0xffffffff;
671 }
672
673 val = ioread32(ar_pci->mem + offset);
674 ath10k_pci_sleep(ar);
675
676 return val;
677}
678
4ddb3299
RM
679inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
680{
641fe28a 681 struct ath10k_ce *ce = ath10k_ce_priv(ar);
4ddb3299 682
641fe28a 683 ce->bus_ops->write32(ar, offset, value);
4ddb3299
RM
684}
685
686inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
687{
641fe28a 688 struct ath10k_ce *ce = ath10k_ce_priv(ar);
4ddb3299 689
641fe28a 690 return ce->bus_ops->read32(ar, offset);
4ddb3299
RM
691}
692
77258d40
MK
693u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
694{
695 return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
696}
697
698void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
699{
700 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
701}
702
703u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
704{
705 return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
706}
707
708void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
709{
710 ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
711}
712
f52f5171 713bool ath10k_pci_irq_pending(struct ath10k *ar)
e539887b
MK
714{
715 u32 cause;
716
717 /* Check if the shared legacy irq is for us */
718 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
719 PCIE_INTR_CAUSE_ADDRESS);
720 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
721 return true;
722
723 return false;
724}
725
f52f5171 726void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
2685218b
MK
727{
728 /* IMPORTANT: the INTR_CLR register has to be written after
729 * INTR_ENABLE is set to 0, otherwise the interrupt cannot
730 * actually be cleared.
731 */
2685218b
MK
732 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
733 0);
734 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
735 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
736
737 /* IMPORTANT: this extra read transaction is required to
d6dfe25c
MR
738 * flush the posted write buffer.
739 */
cfbc06a9
KV
740 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
741 PCIE_INTR_ENABLE_ADDRESS);
2685218b
MK
742}
743
f52f5171 744void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
2685218b
MK
745{
746 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
747 PCIE_INTR_ENABLE_ADDRESS,
748 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
749
750 /* IMPORTANT: this extra read transaction is required to
d6dfe25c
MR
751 * flush the posted write buffer.
752 */
cfbc06a9
KV
753 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
754 PCIE_INTR_ENABLE_ADDRESS);
2685218b
MK
755}
756
403d627b 757static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
ab977bd0 758{
ab977bd0
MK
759 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
760
cfe9011a 761 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
403d627b 762 return "msi";
d8bb26b9
KV
763
764 return "legacy";
ab977bd0
MK
765}
766
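/* Allocate one rx skb, DMA-map it for device->host transfers and post
 * it to the pipe's CE destination ring. The CE lock serializes this
 * against completions; on mapping or enqueue failure the skb is freed
 * and the error is returned to the caller.
 */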
728f95ee 767static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
ab977bd0 768{
728f95ee 769 struct ath10k *ar = pipe->hif_ce_state;
641fe28a 770 struct ath10k_ce *ce = ath10k_ce_priv(ar);
728f95ee
MK
771 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
772 struct sk_buff *skb;
773 dma_addr_t paddr;
ab977bd0
MK
774 int ret;
775
728f95ee
MK
776 skb = dev_alloc_skb(pipe->buf_sz);
777 if (!skb)
778 return -ENOMEM;
779
780 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
781
782 paddr = dma_map_single(ar->dev, skb->data,
783 skb->len + skb_tailroom(skb),
784 DMA_FROM_DEVICE);
785 if (unlikely(dma_mapping_error(ar->dev, paddr))) {
7aa7a72a 786 ath10k_warn(ar, "failed to dma map pci rx buf\n");
728f95ee
MK
787 dev_kfree_skb_any(skb);
788 return -EIO;
789 }
790
8582bf3b 791 ATH10K_SKB_RXCB(skb)->paddr = paddr;
728f95ee 792
641fe28a 793 spin_lock_bh(&ce->ce_lock);
2a1e1ad3 794 ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
641fe28a 795 spin_unlock_bh(&ce->ce_lock);
ab977bd0 796 if (ret) {
728f95ee
MK
797 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
798 DMA_FROM_DEVICE);
799 dev_kfree_skb_any(skb);
ab977bd0
MK
800 return ret;
801 }
802
803 return 0;
804}
805
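/* Top up a pipe's destination ring with as many rx buffers as there are
 * free entries. -ENOSPC simply means the ring filled up; any other
 * failure arms the rx_post_retry timer so the refill is retried later.
 */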
ab4e3db0 806static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
ab977bd0 807{
728f95ee
MK
808 struct ath10k *ar = pipe->hif_ce_state;
809 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
641fe28a 810 struct ath10k_ce *ce = ath10k_ce_priv(ar);
728f95ee
MK
811 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
812 int ret, num;
813
728f95ee
MK
814 if (pipe->buf_sz == 0)
815 return;
816
817 if (!ce_pipe->dest_ring)
818 return;
819
641fe28a 820 spin_lock_bh(&ce->ce_lock);
728f95ee 821 num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
641fe28a 822 spin_unlock_bh(&ce->ce_lock);
128abd09
RM
823
824 while (num >= 0) {
728f95ee
MK
825 ret = __ath10k_pci_rx_post_buf(pipe);
826 if (ret) {
ab4e3db0
RM
827 if (ret == -ENOSPC)
828 break;
7aa7a72a 829 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
728f95ee
MK
830 mod_timer(&ar_pci->rx_post_retry, jiffies +
831 ATH10K_PCI_RX_POST_RETRY_MS);
832 break;
833 }
128abd09 834 num--;
728f95ee
MK
835 }
836}
837
f52f5171 838void ath10k_pci_rx_post(struct ath10k *ar)
728f95ee
MK
839{
840 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
841 int i;
842
728f95ee 843 for (i = 0; i < CE_COUNT; i++)
ab4e3db0 844 ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
728f95ee
MK
845}
846
7ac76764 847void ath10k_pci_rx_replenish_retry(struct timer_list *t)
728f95ee 848{
7ac76764
KC
849 struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
850 struct ath10k *ar = ar_pci->ar;
728f95ee
MK
851
852 ath10k_pci_rx_post(ar);
ab977bd0
MK
853}
854
7f622593 855static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
418ca599 856{
7f622593 857 u32 val = 0, region = addr & 0xfffff;
418ca599 858
7f622593
ARN
859 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
860 & 0x7ff) << 21;
861 val |= 0x100000 | region;
862 return val;
863}
864
865static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
866{
867 u32 val = 0, region = addr & 0xfffff;
418ca599 868
7f622593
ARN
869 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
870 val |= 0x100000 | region;
418ca599
VT
871 return val;
872}
873
7f622593
ARN
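/* Convert a target CPU virtual address into the CE address space by
 * dispatching to the hw-specific targ_cpu_to_ce_addr helper
 * (QCA988X-style CORE_CTRL windowing vs. QCA99X0-style BAR register);
 * warns and bails out if no helper has been set up.
 */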
874static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
875{
876 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
877
878 if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
879 return -ENOTSUPP;
880
881 return ar_pci->targ_cpu_to_ce_addr(ar, addr);
882}
883
5e3dd157
KV
884/*
885 * Diagnostic read/write access is provided for startup/config/debug usage.
886 * Caller must guarantee proper alignment, when applicable, and single user
887 * at any moment.
888 */
889static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
890 int nbytes)
891{
892 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
641fe28a 893 struct ath10k_ce *ce = ath10k_ce_priv(ar);
5e3dd157 894 int ret = 0;
24d9ef5e 895 u32 *buf;
1e56d512 896 unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
2aa39115 897 struct ath10k_ce_pipe *ce_diag;
5e3dd157
KV
898 /* Host buffer address in CE space */
899 u32 ce_data;
900 dma_addr_t ce_data_base = 0;
901 void *data_buf = NULL;
902 int i;
903
641fe28a 904 spin_lock_bh(&ce->ce_lock);
eef25405 905
5e3dd157
KV
906 ce_diag = ar_pci->ce_diag;
907
908 /*
909 * Allocate a temporary bounce buffer to hold caller's data
910 * to be DMA'ed from Target. This guarantees
911 * 1) 4-byte alignment
912 * 2) Buffer in DMA-able space
913 */
1e56d512
ARN
914 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
915
0de4df5b 916 data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev,
1e56d512 917 alloc_nbytes,
68c03249
MK
918 &ce_data_base,
919 GFP_ATOMIC);
5e3dd157
KV
920
921 if (!data_buf) {
922 ret = -ENOMEM;
923 goto done;
924 }
5e3dd157 925
1e56d512 926 remaining_bytes = nbytes;
5e3dd157
KV
927 ce_data = ce_data_base;
928 while (remaining_bytes) {
929 nbytes = min_t(unsigned int, remaining_bytes,
930 DIAG_TRANSFER_LIMIT);
931
2a1e1ad3 932 ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data);
5e3dd157
KV
933 if (ret != 0)
934 goto done;
935
936 /* Request CE to send from Target(!) address to Host buffer */
937 /*
938 * The address supplied by the caller is in the
939 * Target CPU virtual address space.
940 *
941 * In order to use this address with the diagnostic CE,
942 * convert it from Target CPU virtual address space
943 * to CE address space
944 */
418ca599 945 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
5e3dd157 946
eef25405
KV
947 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
948 0);
5e3dd157
KV
949 if (ret)
950 goto done;
951
952 i = 0;
765952e4
RM
953 while (ath10k_ce_completed_send_next_nolock(ce_diag,
954 NULL) != 0) {
5e3dd157
KV
955 mdelay(1);
956 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
957 ret = -EBUSY;
958 goto done;
959 }
960 }
961
5e3dd157 962 i = 0;
24d9ef5e
RM
963 while (ath10k_ce_completed_recv_next_nolock(ce_diag,
964 (void **)&buf,
965 &completed_nbytes)
966 != 0) {
5e3dd157
KV
967 mdelay(1);
968
969 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
970 ret = -EBUSY;
971 goto done;
972 }
973 }
974
975 if (nbytes != completed_nbytes) {
976 ret = -EIO;
977 goto done;
978 }
979
24d9ef5e 980 if (*buf != ce_data) {
5e3dd157
KV
981 ret = -EIO;
982 goto done;
983 }
984
985 remaining_bytes -= nbytes;
1e56d512
ARN
986 memcpy(data, data_buf, nbytes);
987
5e3dd157 988 address += nbytes;
1e56d512 989 data += nbytes;
5e3dd157
KV
990 }
991
992done:
5e3dd157
KV
993
994 if (data_buf)
1e56d512 995 dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
68c03249 996 ce_data_base);
5e3dd157 997
641fe28a 998 spin_unlock_bh(&ce->ce_lock);
eef25405 999
5e3dd157
KV
1000 return ret;
1001}
1002
3d29a3e0
KV
1003static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
1004{
0fdc14e4
MK
1005 __le32 val = 0;
1006 int ret;
1007
1008 ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
1009 *value = __le32_to_cpu(val);
1010
1011 return ret;
3d29a3e0
KV
1012}
1013
1014static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
1015 u32 src, u32 len)
1016{
1017 u32 host_addr, addr;
1018 int ret;
1019
1020 host_addr = host_interest_item_address(src);
1021
1022 ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
1023 if (ret != 0) {
7aa7a72a 1024 ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
3d29a3e0
KV
1025 src, ret);
1026 return ret;
1027 }
1028
1029 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
1030 if (ret != 0) {
7aa7a72a 1031 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
3d29a3e0
KV
1032 addr, len, ret);
1033 return ret;
1034 }
1035
1036 return 0;
1037}
1038
1039#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
8cc7f26c 1040 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
3d29a3e0 1041
f52f5171
RM
1042int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1043 const void *data, int nbytes)
5e3dd157
KV
1044{
1045 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
641fe28a 1046 struct ath10k_ce *ce = ath10k_ce_priv(ar);
5e3dd157 1047 int ret = 0;
24d9ef5e 1048 u32 *buf;
5e3dd157 1049 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
2aa39115 1050 struct ath10k_ce_pipe *ce_diag;
5e3dd157
KV
1051 void *data_buf = NULL;
1052 u32 ce_data; /* Host buffer address in CE space */
1053 dma_addr_t ce_data_base = 0;
1054 int i;
1055
641fe28a 1056 spin_lock_bh(&ce->ce_lock);
eef25405 1057
5e3dd157
KV
1058 ce_diag = ar_pci->ce_diag;
1059
1060 /*
1061 * Allocate a temporary bounce buffer to hold caller's data
1062 * to be DMA'ed to Target. This guarantees
1063 * 1) 4-byte alignment
1064 * 2) Buffer in DMA-able space
1065 */
1066 orig_nbytes = nbytes;
68c03249
MK
1067 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
1068 orig_nbytes,
1069 &ce_data_base,
1070 GFP_ATOMIC);
5e3dd157
KV
1071 if (!data_buf) {
1072 ret = -ENOMEM;
1073 goto done;
1074 }
1075
1076 /* Copy caller's data to allocated DMA buf */
0fdc14e4 1077 memcpy(data_buf, data, orig_nbytes);
5e3dd157
KV
1078
1079 /*
1080 * The address supplied by the caller is in the
1081 * Target CPU virtual address space.
1082 *
1083 * In order to use this address with the diagnostic CE,
1084 * convert it from
1085 * Target CPU virtual address space
1086 * to
1087 * CE address space
1088 */
418ca599 1089 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
5e3dd157
KV
1090
1091 remaining_bytes = orig_nbytes;
1092 ce_data = ce_data_base;
1093 while (remaining_bytes) {
1094 /* FIXME: check cast */
1095 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1096
1097 /* Set up to receive directly into Target(!) address */
2a1e1ad3 1098 ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
5e3dd157
KV
1099 if (ret != 0)
1100 goto done;
1101
1102 /*
1103 * Request CE to send caller-supplied data that
1104 * was copied to bounce buffer to Target(!) address.
1105 */
eef25405
KV
1106 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
1107 nbytes, 0, 0);
5e3dd157
KV
1108 if (ret != 0)
1109 goto done;
1110
1111 i = 0;
765952e4
RM
1112 while (ath10k_ce_completed_send_next_nolock(ce_diag,
1113 NULL) != 0) {
5e3dd157
KV
1114 mdelay(1);
1115
1116 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
1117 ret = -EBUSY;
1118 goto done;
1119 }
1120 }
1121
5e3dd157 1122 i = 0;
24d9ef5e
RM
1123 while (ath10k_ce_completed_recv_next_nolock(ce_diag,
1124 (void **)&buf,
1125 &completed_nbytes)
1126 != 0) {
5e3dd157
KV
1127 mdelay(1);
1128
1129 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
1130 ret = -EBUSY;
1131 goto done;
1132 }
1133 }
1134
1135 if (nbytes != completed_nbytes) {
1136 ret = -EIO;
1137 goto done;
1138 }
1139
24d9ef5e 1140 if (*buf != address) {
5e3dd157
KV
1141 ret = -EIO;
1142 goto done;
1143 }
1144
1145 remaining_bytes -= nbytes;
1146 address += nbytes;
1147 ce_data += nbytes;
1148 }
1149
1150done:
1151 if (data_buf) {
68c03249
MK
1152 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
1153 ce_data_base);
5e3dd157
KV
1154 }
1155
1156 if (ret != 0)
7aa7a72a 1157 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
50f87a67 1158 address, ret);
5e3dd157 1159
641fe28a 1160 spin_unlock_bh(&ce->ce_lock);
eef25405 1161
5e3dd157
KV
1162 return ret;
1163}
1164
0fdc14e4
MK
1165static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
1166{
1167 __le32 val = __cpu_to_le32(value);
1168
1169 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
1170}
1171
5e3dd157 1172/* Called by lower (CE) layer when a send to Target completes. */
0e5b2950 1173static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
5e3dd157
KV
1174{
1175 struct ath10k *ar = ce_state->ar;
1cb86d47
MK
1176 struct sk_buff_head list;
1177 struct sk_buff *skb;
5e3dd157 1178
1cb86d47 1179 __skb_queue_head_init(&list);
765952e4 1180 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
a16942e6 1181 /* no need to call tx completion for NULL pointers */
1cb86d47 1182 if (skb == NULL)
726346fc
MK
1183 continue;
1184
1cb86d47 1185 __skb_queue_tail(&list, skb);
5440ce25 1186 }
1cb86d47
MK
1187
1188 while ((skb = __skb_dequeue(&list)))
0e5b2950 1189 ath10k_htc_tx_completion_handler(ar, skb);
5e3dd157
KV
1190}
1191
a70587b3
RM
1192static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1193 void (*callback)(struct ath10k *ar,
1194 struct sk_buff *skb))
5e3dd157
KV
1195{
1196 struct ath10k *ar = ce_state->ar;
1197 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
87263e5b 1198 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
5e3dd157 1199 struct sk_buff *skb;
1cb86d47 1200 struct sk_buff_head list;
5440ce25 1201 void *transfer_context;
2f5280da 1202 unsigned int nbytes, max_nbytes;
5e3dd157 1203
1cb86d47 1204 __skb_queue_head_init(&list);
5440ce25 1205 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
24d9ef5e 1206 &nbytes) == 0) {
5e3dd157 1207 skb = transfer_context;
2f5280da 1208 max_nbytes = skb->len + skb_tailroom(skb);
8582bf3b 1209 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
2f5280da
MK
1210 max_nbytes, DMA_FROM_DEVICE);
1211
1212 if (unlikely(max_nbytes < nbytes)) {
7aa7a72a 1213 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
2f5280da
MK
1214 nbytes, max_nbytes);
1215 dev_kfree_skb_any(skb);
1216 continue;
1217 }
5e3dd157 1218
2f5280da 1219 skb_put(skb, nbytes);
1cb86d47
MK
1220 __skb_queue_tail(&list, skb);
1221 }
a360e54c 1222
1cb86d47 1223 while ((skb = __skb_dequeue(&list))) {
a360e54c
MK
1224 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1225 ce_state->id, skb->len);
1226 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1227 skb->data, skb->len);
1228
a70587b3 1229 callback(ar, skb);
2f5280da 1230 }
c29a380e 1231
728f95ee 1232 ath10k_pci_rx_post_pipe(pipe_info);
5e3dd157
KV
1233}
1234
128abd09
RM
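/* Fast-path rx processing for the HTT pipe (CE5): buffers are only
 * dma_sync'ed rather than unmapped, and after the callback has consumed
 * the payload each skb is reset and handed straight back to the
 * destination ring via ath10k_ce_rx_update_write_idx().
 */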
1235static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
1236 void (*callback)(struct ath10k *ar,
1237 struct sk_buff *skb))
1238{
1239 struct ath10k *ar = ce_state->ar;
1240 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1241 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
1242 struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
1243 struct sk_buff *skb;
1244 struct sk_buff_head list;
1245 void *transfer_context;
1246 unsigned int nbytes, max_nbytes, nentries;
1247 int orig_len;
1248
1249 /* No need to acquire ce_lock for CE5, since this is the only place CE5
1250 * is processed other than init and deinit. Before releasing CE5
1251 * buffers, interrupts are disabled. Thus CE5 access is serialized.
1252 */
1253 __skb_queue_head_init(&list);
1254 while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
1255 &nbytes) == 0) {
1256 skb = transfer_context;
1257 max_nbytes = skb->len + skb_tailroom(skb);
1258
1259 if (unlikely(max_nbytes < nbytes)) {
1260 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1261 nbytes, max_nbytes);
1262 continue;
1263 }
1264
1265 dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1266 max_nbytes, DMA_FROM_DEVICE);
1267 skb_put(skb, nbytes);
1268 __skb_queue_tail(&list, skb);
1269 }
1270
1271 nentries = skb_queue_len(&list);
1272 while ((skb = __skb_dequeue(&list))) {
1273 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1274 ce_state->id, skb->len);
1275 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1276 skb->data, skb->len);
1277
1278 orig_len = skb->len;
1279 callback(ar, skb);
1280 skb_push(skb, orig_len - skb->len);
1281 skb_reset_tail_pointer(skb);
1282 skb_trim(skb, 0);
1283
1284 /* let the device gain the buffer again */
1285 dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1286 skb->len + skb_tailroom(skb),
1287 DMA_FROM_DEVICE);
1288 }
1289 ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
1290}
1291
a70587b3
RM
1292/* Called by lower (CE) layer when data is received from the Target. */
1293static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1294{
1295 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
6419fdbb
RM
1296}
1297
1298static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1299{
1300 /* CE4 polling needs to be done whenever the CE pipe which transports
1301 * HTT Rx (target->host) is processed.
1302 */
1303 ath10k_ce_per_engine_service(ce_state->ar, 4);
1304
1305 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
a70587b3
RM
1306}
1307
afb0bf7f
VN
1308/* Called by lower (CE) layer when data is received from the Target.
1309 * Only 10.4 firmware uses separate CE to transfer pktlog data.
1310 */
1311static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
1312{
1313 ath10k_pci_process_rx_cb(ce_state,
1314 ath10k_htt_rx_pktlog_completion_handler);
1315}
1316
a70587b3
RM
1317/* Called by lower (CE) layer when a send to HTT Target completes. */
1318static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1319{
1320 struct ath10k *ar = ce_state->ar;
1321 struct sk_buff *skb;
a70587b3 1322
765952e4 1323 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
a70587b3
RM
1324 /* no need to call tx completion for NULL pointers */
1325 if (!skb)
1326 continue;
1327
1328 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
1329 skb->len, DMA_TO_DEVICE);
1330 ath10k_htt_hif_tx_complete(ar, skb);
1331 }
1332}
1333
1334static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
1335{
1336 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
1337 ath10k_htt_t2h_msg_handler(ar, skb);
1338}
1339
1340/* Called by lower (CE) layer when HTT data is received from the Target. */
1341static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
1342{
1343 /* CE4 polling needs to be done whenever the CE pipe which transports
1344 * HTT Rx (target->host) is processed.
1345 */
1346 ath10k_ce_per_engine_service(ce_state->ar, 4);
1347
128abd09 1348 ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
a70587b3
RM
1349}
1350
f52f5171
RM
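/* Send a scatter-gather list on the given pipe's CE source ring. The
 * transfer is all-or-nothing: -ENOBUFS is returned if the ring cannot
 * hold all n_items, every item but the last is queued with
 * CE_SEND_FLAG_GATHER, and a mid-list failure reverts the descriptors
 * already queued before returning the error.
 */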
1351int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1352 struct ath10k_hif_sg_item *items, int n_items)
5e3dd157 1353{
5e3dd157 1354 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
641fe28a 1355 struct ath10k_ce *ce = ath10k_ce_priv(ar);
726346fc
MK
1356 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1357 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1358 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
7147a131
MK
1359 unsigned int nentries_mask;
1360 unsigned int sw_index;
1361 unsigned int write_index;
08b8aa09 1362 int err, i = 0;
5e3dd157 1363
641fe28a 1364 spin_lock_bh(&ce->ce_lock);
5e3dd157 1365
7147a131
MK
1366 nentries_mask = src_ring->nentries_mask;
1367 sw_index = src_ring->sw_index;
1368 write_index = src_ring->write_index;
1369
726346fc
MK
1370 if (unlikely(CE_RING_DELTA(nentries_mask,
1371 write_index, sw_index - 1) < n_items)) {
1372 err = -ENOBUFS;
08b8aa09 1373 goto err;
726346fc 1374 }
5e3dd157 1375
726346fc 1376 for (i = 0; i < n_items - 1; i++) {
7aa7a72a 1377 ath10k_dbg(ar, ATH10K_DBG_PCI,
726346fc
MK
1378 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1379 i, items[i].paddr, items[i].len, n_items);
7aa7a72a 1380 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
726346fc 1381 items[i].vaddr, items[i].len);
5e3dd157 1382
726346fc
MK
1383 err = ath10k_ce_send_nolock(ce_pipe,
1384 items[i].transfer_context,
1385 items[i].paddr,
1386 items[i].len,
1387 items[i].transfer_id,
1388 CE_SEND_FLAG_GATHER);
1389 if (err)
08b8aa09 1390 goto err;
726346fc
MK
1391 }
1392
1393 /* `i` is equal to `n_items -1` after for() */
1394
7aa7a72a 1395 ath10k_dbg(ar, ATH10K_DBG_PCI,
726346fc
MK
1396 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1397 i, items[i].paddr, items[i].len, n_items);
7aa7a72a 1398 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
726346fc
MK
1399 items[i].vaddr, items[i].len);
1400
1401 err = ath10k_ce_send_nolock(ce_pipe,
1402 items[i].transfer_context,
1403 items[i].paddr,
1404 items[i].len,
1405 items[i].transfer_id,
1406 0);
1407 if (err)
08b8aa09
MK
1408 goto err;
1409
641fe28a 1410 spin_unlock_bh(&ce->ce_lock);
08b8aa09
MK
1411 return 0;
1412
1413err:
1414 for (; i > 0; i--)
1415 __ath10k_ce_send_revert(ce_pipe);
726346fc 1416
641fe28a 1417 spin_unlock_bh(&ce->ce_lock);
726346fc 1418 return err;
5e3dd157
KV
1419}
1420
f52f5171
RM
1421int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1422 size_t buf_len)
eef25405
KV
1423{
1424 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1425}
1426
f52f5171 1427u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
5e3dd157
KV
1428{
1429 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
50f87a67 1430
7aa7a72a 1431 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
50f87a67 1432
3efcb3b4 1433 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
5e3dd157
KV
1434}
1435
384914b2
BG
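/* Read REG_DUMP_COUNT_QCA988X words of the firmware's hi_failure_state
 * area through the diagnostic window, print them as the "firmware
 * register dump" and, when a coredump is being collected, copy them
 * into crash_data->registers. Caller must hold ar->data_lock.
 */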
1436static void ath10k_pci_dump_registers(struct ath10k *ar,
1437 struct ath10k_fw_crash_data *crash_data)
5e3dd157 1438{
0fdc14e4
MK
1439 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1440 int i, ret;
5e3dd157 1441
384914b2 1442 lockdep_assert_held(&ar->data_lock);
5e3dd157 1443
3d29a3e0
KV
1444 ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1445 hi_failure_state,
0fdc14e4 1446 REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1d2b48d6 1447 if (ret) {
7aa7a72a 1448 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
5e3dd157
KV
1449 return;
1450 }
1451
1452 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1453
7aa7a72a 1454 ath10k_err(ar, "firmware register dump:\n");
5e3dd157 1455 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
7aa7a72a 1456 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
5e3dd157 1457 i,
0fdc14e4
MK
1458 __le32_to_cpu(reg_dump_values[i]),
1459 __le32_to_cpu(reg_dump_values[i + 1]),
1460 __le32_to_cpu(reg_dump_values[i + 2]),
1461 __le32_to_cpu(reg_dump_values[i + 3]));
affd3217 1462
1bbb119d
MK
1463 if (!crash_data)
1464 return;
1465
384914b2 1466 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
0fdc14e4 1467 crash_data->registers[i] = reg_dump_values[i];
384914b2
BG
1468}
1469
703f261d
AL
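/* Dump one memory region section by section. Gaps before and between
 * the sections are filled with ATH10K_MAGIC_NOT_COPIED so the region
 * keeps its layout in the ramdump buffer. Returns the number of bytes
 * accounted for (section data plus inter-section padding), or 0 when
 * the region description is malformed.
 */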
1470static int ath10k_pci_dump_memory_section(struct ath10k *ar,
1471 const struct ath10k_mem_region *mem_region,
1472 u8 *buf, size_t buf_len)
1473{
1474 const struct ath10k_mem_section *cur_section, *next_section;
1475 unsigned int count, section_size, skip_size;
1476 int ret, i, j;
1477
1478 if (!mem_region || !buf)
1479 return 0;
1480
703f261d
AL
1481 cur_section = &mem_region->section_table.sections[0];
1482
1483 if (mem_region->start > cur_section->start) {
cdd4743e 1484 ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
703f261d
AL
1485 mem_region->start, cur_section->start);
1486 return 0;
1487 }
1488
1489 skip_size = cur_section->start - mem_region->start;
1490
1491 /* fill the gap between the first register section and register
1492 * start address
1493 */
1494 for (i = 0; i < skip_size; i++) {
1495 *buf = ATH10K_MAGIC_NOT_COPIED;
1496 buf++;
1497 }
1498
1499 count = 0;
1500
1501 for (i = 0; cur_section != NULL; i++) {
1502 section_size = cur_section->end - cur_section->start;
1503
1504 if (section_size <= 0) {
1505 ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
1506 cur_section->start,
1507 cur_section->end);
1508 break;
1509 }
1510
1511 if ((i + 1) == mem_region->section_table.size) {
1512 /* last section */
1513 next_section = NULL;
1514 skip_size = 0;
1515 } else {
1516 next_section = cur_section + 1;
1517
1518 if (cur_section->end > next_section->start) {
1519 ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
1520 next_section->start,
1521 cur_section->end);
1522 break;
1523 }
1524
1525 skip_size = next_section->start - cur_section->end;
1526 }
1527
1528 if (buf_len < (skip_size + section_size)) {
1529 ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
1530 break;
1531 }
1532
1533 buf_len -= skip_size + section_size;
1534
1535 /* read section to dest memory */
1536 ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
1537 buf, section_size);
1538 if (ret) {
1539 ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
1540 cur_section->start, ret);
1541 break;
1542 }
1543
1544 buf += section_size;
1545 count += section_size;
1546
1547 /* fill in the gap between this section and the next */
1548 for (j = 0; j < skip_size; j++) {
1549 *buf = ATH10K_MAGIC_NOT_COPIED;
1550 buf++;
1551 }
1552
1553 count += skip_size;
1554
1555 if (!next_section)
1556 /* this was the last section */
1557 break;
1558
1559 cur_section = next_section;
1560 }
1561
1562 return count;
1563}
1564
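/* Switch the target RAM configuration (e.g. from DRAM to IRAM for an
 * IRAM dump) by writing FW_RAM_CONFIG_ADDRESS and reading it back to
 * verify the target accepted the new value; returns -EIO on mismatch.
 */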
1565static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
1566{
1567 u32 val;
1568
1569 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1570 FW_RAM_CONFIG_ADDRESS, config);
1571
1572 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1573 FW_RAM_CONFIG_ADDRESS);
1574 if (val != config) {
1575 ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
1576 val, config);
1577 return -EIO;
1578 }
1579
1580 return 0;
1581}
1582
1583static void ath10k_pci_dump_memory(struct ath10k *ar,
1584 struct ath10k_fw_crash_data *crash_data)
1585{
1586 const struct ath10k_hw_mem_layout *mem_layout;
1587 const struct ath10k_mem_region *current_region;
1588 struct ath10k_dump_ram_data_hdr *hdr;
1589 u32 count, shift;
1590 size_t buf_len;
1591 int ret, i;
1592 u8 *buf;
1593
1594 lockdep_assert_held(&ar->data_lock);
1595
1596 if (!crash_data)
1597 return;
1598
1599 mem_layout = ath10k_coredump_get_mem_layout(ar);
1600 if (!mem_layout)
1601 return;
1602
1603 current_region = &mem_layout->region_table.regions[0];
1604
1605 buf = crash_data->ramdump_buf;
1606 buf_len = crash_data->ramdump_buf_len;
1607
1608 memset(buf, 0, buf_len);
1609
1610 for (i = 0; i < mem_layout->region_table.size; i++) {
1611 count = 0;
1612
1613 if (current_region->len > buf_len) {
1614 ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n",
1615 current_region->name,
1616 current_region->len,
1617 buf_len);
1618 break;
1619 }
1620
1621 /* To get IRAM dump, the host driver needs to switch target
1622 * ram config from DRAM to IRAM.
1623 */
1624 if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
1625 current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
1626 shift = current_region->start >> 20;
1627
1628 ret = ath10k_pci_set_ram_config(ar, shift);
1629 if (ret) {
1630 ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
1631 current_region->name, ret);
1632 break;
1633 }
1634 }
1635
1636 /* Reserve space for the header. */
1637 hdr = (void *)buf;
1638 buf += sizeof(*hdr);
1639 buf_len -= sizeof(*hdr);
1640
1641 if (current_region->section_table.size > 0) {
1642 /* Copy each section individually. */
1643 count = ath10k_pci_dump_memory_section(ar,
1644 current_region,
1645 buf,
1646 current_region->len);
1647 } else {
1648 /* No individual memory sections defined so we can
1649 * copy the entire memory region.
1650 */
1651 ret = ath10k_pci_diag_read_mem(ar,
1652 current_region->start,
1653 buf,
1654 current_region->len);
1655 if (ret) {
1656 ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
1657 current_region->name, ret);
1658 break;
1659 }
1660
1661 count = current_region->len;
1662 }
1663
1664 hdr->region_type = cpu_to_le32(current_region->type);
1665 hdr->start = cpu_to_le32(current_region->start);
1666 hdr->length = cpu_to_le32(count);
1667
1668 if (count == 0)
1669 /* Note: the header remains, just with zero length. */
1670 break;
1671
1672 buf += count;
1673 buf_len -= count;
1674
1675 current_region++;
1676 }
1677}
1678
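/* Collect a firmware crash report: bump the crash counter, allocate
 * coredump state, log the crash guid and driver info, dump CE and PCI
 * registers plus target memory, then schedule restart_work to recover
 * the device.
 */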
0e9848c0 1679static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
384914b2
BG
1680{
1681 struct ath10k_fw_crash_data *crash_data;
ab3f9c88 1682 char guid[UUID_STRING_LEN + 1];
384914b2
BG
1683
1684 spin_lock_bh(&ar->data_lock);
1685
f51dbe73
BG
1686 ar->stats.fw_crash_counter++;
1687
f25b9f28 1688 crash_data = ath10k_coredump_new(ar);
384914b2
BG
1689
1690 if (crash_data)
ab3f9c88 1691 scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
384914b2 1692 else
ab3f9c88 1693 scnprintf(guid, sizeof(guid), "n/a");
384914b2 1694
ab3f9c88 1695 ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
8a0c797e 1696 ath10k_print_driver_info(ar);
384914b2 1697 ath10k_pci_dump_registers(ar, crash_data);
c75c398b 1698 ath10k_ce_dump_registers(ar, crash_data);
703f261d 1699 ath10k_pci_dump_memory(ar, crash_data);
384914b2 1700
384914b2 1701 spin_unlock_bh(&ar->data_lock);
affd3217 1702
5e90de86 1703 queue_work(ar->workqueue, &ar->restart_work);
5e3dd157
KV
1704}
1705
f52f5171
RM
1706void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1707 int force)
5e3dd157 1708{
7aa7a72a 1709 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
50f87a67 1710
5e3dd157
KV
1711 if (!force) {
1712 int resources;
1713 /*
1714 * Decide whether to actually poll for completions, or just
1715 * wait for a later chance.
1716 * If there seem to be plenty of resources left, then just wait
1717 * since checking involves reading a CE register, which is a
1718 * relatively expensive operation.
1719 */
1720 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1721
1722 /*
1723 * If at least 50% of the total resources are still available,
1724 * don't bother checking again yet.
1725 */
1726 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1727 return;
1728 }
1729 ath10k_ce_per_engine_service(ar, pipe);
1730}
1731
3c97f5de 1732static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
5e3dd157
KV
1733{
1734 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 1735
728f95ee 1736 del_timer_sync(&ar_pci->rx_post_retry);
96a9d0dc
MK
1737}
1738
f52f5171
RM
1739int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1740 u8 *ul_pipe, u8 *dl_pipe)
5e3dd157 1741{
7c6aa25d
MK
1742 const struct service_to_pipe *entry;
1743 bool ul_set = false, dl_set = false;
1744 int i;
5e3dd157 1745
7aa7a72a 1746 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
50f87a67 1747
7c6aa25d
MK
1748 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1749 entry = &target_service_to_ce_map_wlan[i];
5e3dd157 1750
0fdc14e4 1751 if (__le32_to_cpu(entry->service_id) != service_id)
7c6aa25d 1752 continue;
5e3dd157 1753
0fdc14e4 1754 switch (__le32_to_cpu(entry->pipedir)) {
7c6aa25d
MK
1755 case PIPEDIR_NONE:
1756 break;
1757 case PIPEDIR_IN:
1758 WARN_ON(dl_set);
0fdc14e4 1759 *dl_pipe = __le32_to_cpu(entry->pipenum);
7c6aa25d
MK
1760 dl_set = true;
1761 break;
1762 case PIPEDIR_OUT:
1763 WARN_ON(ul_set);
0fdc14e4 1764 *ul_pipe = __le32_to_cpu(entry->pipenum);
7c6aa25d
MK
1765 ul_set = true;
1766 break;
1767 case PIPEDIR_INOUT:
1768 WARN_ON(dl_set);
1769 WARN_ON(ul_set);
0fdc14e4
MK
1770 *dl_pipe = __le32_to_cpu(entry->pipenum);
1771 *ul_pipe = __le32_to_cpu(entry->pipenum);
7c6aa25d
MK
1772 dl_set = true;
1773 ul_set = true;
1774 break;
1775 }
5e3dd157 1776 }
5e3dd157 1777
7c6aa25d
MK
1778 if (WARN_ON(!ul_set || !dl_set))
1779 return -ENOENT;
5e3dd157 1780
7c6aa25d 1781 return 0;
5e3dd157
KV
1782}
1783
f52f5171
RM
1784void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1785 u8 *ul_pipe, u8 *dl_pipe)
5e3dd157 1786{
7aa7a72a 1787 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
50f87a67 1788
5e3dd157
KV
1789 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1790 ATH10K_HTC_SVC_ID_RSVD_CTRL,
400143e4 1791 ul_pipe, dl_pipe);
5e3dd157
KV
1792}
1793
3c97f5de 1794void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
5e3dd157 1795{
7c0f0e3c
MK
1796 u32 val;
1797
6e4202c3
VT
1798 switch (ar->hw_rev) {
1799 case ATH10K_HW_QCA988X:
6fd3dd71 1800 case ATH10K_HW_QCA9887:
6e4202c3 1801 case ATH10K_HW_QCA6174:
a226b519 1802 case ATH10K_HW_QCA9377:
6e4202c3
VT
1803 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1804 CORE_CTRL_ADDRESS);
1805 val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1806 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1807 CORE_CTRL_ADDRESS, val);
1808 break;
1809 case ATH10K_HW_QCA99X0:
651b4cdc 1810 case ATH10K_HW_QCA9984:
e565c312 1811 case ATH10K_HW_QCA9888:
0b523ced 1812 case ATH10K_HW_QCA4019:
6e4202c3
VT
1813 /* TODO: Find appropriate register configuration for QCA99X0
1814 * to mask irq/MSI.
1815 */
39bfe9f7 1816 break;
f9e18304
GS
1817 case ATH10K_HW_WCN3990:
1818 break;
6e4202c3 1819 }
7c0f0e3c
MK
1820}
1821
1822static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1823{
1824 u32 val;
1825
6e4202c3
VT
1826 switch (ar->hw_rev) {
1827 case ATH10K_HW_QCA988X:
6fd3dd71 1828 case ATH10K_HW_QCA9887:
6e4202c3 1829 case ATH10K_HW_QCA6174:
a226b519 1830 case ATH10K_HW_QCA9377:
6e4202c3
VT
1831 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1832 CORE_CTRL_ADDRESS);
1833 val |= CORE_CTRL_PCIE_REG_31_MASK;
1834 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1835 CORE_CTRL_ADDRESS, val);
1836 break;
1837 case ATH10K_HW_QCA99X0:
651b4cdc 1838 case ATH10K_HW_QCA9984:
e565c312 1839 case ATH10K_HW_QCA9888:
0b523ced 1840 case ATH10K_HW_QCA4019:
6e4202c3
VT
1841 /* TODO: Find appropriate register configuration for QCA99X0
1842 * to unmask irq/MSI.
1843 */
1844 break;
f9e18304
GS
1845 case ATH10K_HW_WCN3990:
1846 break;
6e4202c3 1847 }
7c0f0e3c 1848}
5e3dd157 1849
7c0f0e3c
MK
1850static void ath10k_pci_irq_disable(struct ath10k *ar)
1851{
ec5ba4d3 1852 ath10k_ce_disable_interrupts(ar);
e75db4e3 1853 ath10k_pci_disable_and_clear_legacy_irq(ar);
7c0f0e3c
MK
1854 ath10k_pci_irq_msi_fw_mask(ar);
1855}
1856
1857static void ath10k_pci_irq_sync(struct ath10k *ar)
1858{
1859 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 1860
cfe9011a 1861 synchronize_irq(ar_pci->pdev->irq);
5e3dd157
KV
1862}
1863
ec5ba4d3 1864static void ath10k_pci_irq_enable(struct ath10k *ar)
5e3dd157 1865{
ec5ba4d3 1866 ath10k_ce_enable_interrupts(ar);
e75db4e3 1867 ath10k_pci_enable_legacy_irq(ar);
7c0f0e3c 1868 ath10k_pci_irq_msi_fw_unmask(ar);
5e3dd157
KV
1869}
1870
1871static int ath10k_pci_hif_start(struct ath10k *ar)
1872{
76d870ed 1873 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
9a14969f 1874
7aa7a72a 1875 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
5e3dd157 1876
1427228d
KV
1877 napi_enable(&ar->napi);
1878
ec5ba4d3 1879 ath10k_pci_irq_enable(ar);
728f95ee 1880 ath10k_pci_rx_post(ar);
50f87a67 1881
76d870ed
JD
1882 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1883 ar_pci->link_ctl);
1884
5e3dd157
KV
1885 return 0;
1886}
1887
099ac7ce 1888static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
5e3dd157
KV
1889{
1890 struct ath10k *ar;
099ac7ce
MK
1891 struct ath10k_ce_pipe *ce_pipe;
1892 struct ath10k_ce_ring *ce_ring;
1893 struct sk_buff *skb;
1894 int i;
5e3dd157 1895
099ac7ce
MK
1896 ar = pci_pipe->hif_ce_state;
1897 ce_pipe = pci_pipe->ce_hdl;
1898 ce_ring = ce_pipe->dest_ring;
5e3dd157 1899
099ac7ce 1900 if (!ce_ring)
5e3dd157
KV
1901 return;
1902
099ac7ce
MK
1903 if (!pci_pipe->buf_sz)
1904 return;
5e3dd157 1905
099ac7ce
MK
1906 for (i = 0; i < ce_ring->nentries; i++) {
1907 skb = ce_ring->per_transfer_context[i];
1908 if (!skb)
1909 continue;
1910
1911 ce_ring->per_transfer_context[i] = NULL;
1912
8582bf3b 1913 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
099ac7ce 1914 skb->len + skb_tailroom(skb),
5e3dd157 1915 DMA_FROM_DEVICE);
099ac7ce 1916 dev_kfree_skb_any(skb);
5e3dd157
KV
1917 }
1918}
1919
099ac7ce 1920static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
5e3dd157
KV
1921{
1922 struct ath10k *ar;
099ac7ce
MK
1923 struct ath10k_ce_pipe *ce_pipe;
1924 struct ath10k_ce_ring *ce_ring;
099ac7ce 1925 struct sk_buff *skb;
099ac7ce 1926 int i;
5e3dd157 1927
099ac7ce 1928 ar = pci_pipe->hif_ce_state;
099ac7ce
MK
1929 ce_pipe = pci_pipe->ce_hdl;
1930 ce_ring = ce_pipe->src_ring;
5e3dd157 1931
099ac7ce 1932 if (!ce_ring)
5e3dd157
KV
1933 return;
1934
099ac7ce
MK
1935 if (!pci_pipe->buf_sz)
1936 return;
5e3dd157 1937
099ac7ce
MK
1938 for (i = 0; i < ce_ring->nentries; i++) {
1939 skb = ce_ring->per_transfer_context[i];
1940 if (!skb)
2415fc16 1941 continue;
2415fc16 1942
099ac7ce 1943 ce_ring->per_transfer_context[i] = NULL;
099ac7ce 1944
0e5b2950 1945 ath10k_htc_tx_completion_handler(ar, skb);
5e3dd157
KV
1946 }
1947}
1948
1949/*
1950 * Cleanup residual buffers for device shutdown:
1951 * buffers that were enqueued for receive
1952 * buffers that were to be sent
1953 * Note: Buffers that had completed but which were
1954 * not yet processed are on a completion queue. They
1955 * are handled when the completion thread shuts down.
1956 */
1957static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1958{
1959 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1960 int pipe_num;
1961
fad6ed78 1962 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
87263e5b 1963 struct ath10k_pci_pipe *pipe_info;
5e3dd157
KV
1964
1965 pipe_info = &ar_pci->pipe_info[pipe_num];
1966 ath10k_pci_rx_pipe_cleanup(pipe_info);
1967 ath10k_pci_tx_pipe_cleanup(pipe_info);
1968 }
1969}
1970
f52f5171 1971void ath10k_pci_ce_deinit(struct ath10k *ar)
5e3dd157 1972{
25d0dbcb 1973 int i;
5e3dd157 1974
25d0dbcb
MK
1975 for (i = 0; i < CE_COUNT; i++)
1976 ath10k_ce_deinit_pipe(ar, i);
5e3dd157
KV
1977}
1978
f52f5171 1979void ath10k_pci_flush(struct ath10k *ar)
5e3dd157 1980{
3c97f5de 1981 ath10k_pci_rx_retry_sync(ar);
728f95ee
MK
1982 ath10k_pci_buffer_cleanup(ar);
1983}
5e3dd157 1984
5e3dd157
KV
1985static void ath10k_pci_hif_stop(struct ath10k *ar)
1986{
77258d40
MK
1987 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1988 unsigned long flags;
1989
7aa7a72a 1990 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
5e3dd157 1991
10d23db4
MK
1992 /* Most likely the device has HTT Rx ring configured. The only way to
1993 * prevent the device from accessing (and possibly corrupting) host
1994 * memory is to reset the chip now.
e75db4e3
MK
1995 *
1996 * There's also no known way of masking MSI interrupts on the device.
1997 * For ranged MSI the CE-related interrupts can be masked. However,
1998 * regardless of how many MSI interrupts are assigned, the first one
1999 * is always used for firmware indications (crashes) and cannot be
2000 * masked. To prevent the device from asserting the interrupt, reset it
2001 * before proceeding with cleanup.
10d23db4 2002 */
6e4202c3 2003 ath10k_pci_safe_chip_reset(ar);
e75db4e3
MK
2004
2005 ath10k_pci_irq_disable(ar);
7c0f0e3c 2006 ath10k_pci_irq_sync(ar);
e75db4e3 2007 ath10k_pci_flush(ar);
3c97f5de
RM
2008 napi_synchronize(&ar->napi);
2009 napi_disable(&ar->napi);
77258d40
MK
2010
2011 spin_lock_irqsave(&ar_pci->ps_lock, flags);
2012 WARN_ON(ar_pci->ps_wake_refcount > 0);
2013 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
5e3dd157
KV
2014}
2015
f52f5171
RM
2016int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
2017 void *req, u32 req_len,
2018 void *resp, u32 *resp_len)
5e3dd157
KV
2019{
2020 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2aa39115
MK
2021 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
2022 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
2023 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
2024 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
5e3dd157
KV
2025 dma_addr_t req_paddr = 0;
2026 dma_addr_t resp_paddr = 0;
2027 struct bmi_xfer xfer = {};
2028 void *treq, *tresp = NULL;
2029 int ret = 0;
2030
85622cde
MK
2031 might_sleep();
2032
5e3dd157
KV
2033 if (resp && !resp_len)
2034 return -EINVAL;
2035
2036 if (resp && resp_len && *resp_len == 0)
2037 return -EINVAL;
2038
2039 treq = kmemdup(req, req_len, GFP_KERNEL);
2040 if (!treq)
2041 return -ENOMEM;
2042
2043 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
2044 ret = dma_mapping_error(ar->dev, req_paddr);
5e55e3cb
MK
2045 if (ret) {
2046 ret = -EIO;
5e3dd157 2047 goto err_dma;
5e55e3cb 2048 }
5e3dd157
KV
2049
2050 if (resp && resp_len) {
2051 tresp = kzalloc(*resp_len, GFP_KERNEL);
2052 if (!tresp) {
2053 ret = -ENOMEM;
2054 goto err_req;
2055 }
2056
2057 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
2058 DMA_FROM_DEVICE);
2059 ret = dma_mapping_error(ar->dev, resp_paddr);
5e55e3cb 2060 if (ret) {
22baa980 2061 ret = -EIO;
5e3dd157 2062 goto err_req;
5e55e3cb 2063 }
5e3dd157
KV
2064
2065 xfer.wait_for_resp = true;
2066 xfer.resp_len = 0;
2067
728f95ee 2068 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
5e3dd157
KV
2069 }
2070
5e3dd157
KV
2071 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
2072 if (ret)
2073 goto err_resp;
2074
6bb099b0 2075 ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
85622cde 2076 if (ret) {
5dac5f37 2077 dma_addr_t unused_buffer;
5e3dd157
KV
2078 unsigned int unused_nbytes;
2079 unsigned int unused_id;
2080
5e3dd157
KV
2081 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
2082 &unused_nbytes, &unused_id);
2083 } else {
2084 /* non-zero means we did not time out */
2085 ret = 0;
2086 }
2087
2088err_resp:
2089 if (resp) {
5dac5f37 2090 dma_addr_t unused_buffer;
5e3dd157
KV
2091
2092 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
2093 dma_unmap_single(ar->dev, resp_paddr,
2094 *resp_len, DMA_FROM_DEVICE);
2095 }
2096err_req:
2097 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
2098
2099 if (ret == 0 && resp_len) {
2100 *resp_len = min(*resp_len, xfer.resp_len);
2101 memcpy(resp, tresp, xfer.resp_len);
2102 }
2103err_dma:
2104 kfree(treq);
2105 kfree(tresp);
2106
2107 return ret;
2108}
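
/* Illustrative sketch (hypothetical caller, not part of the driver): how the
 * BMI exchange path above is typically used. The request/response layout here
 * is invented purely for illustration; the real callers go through the HIF
 * layer from bmi.c using the firmware's BMI command structures.
 */
static int example_bmi_query(struct ath10k *ar)
{
	u32 req[2] = { 0, 0 };		/* hypothetical request words */
	u32 resp[4] = { 0 };		/* hypothetical response buffer */
	u32 resp_len = sizeof(resp);	/* in: buffer size, out: bytes received */
	int ret;

	ret = ath10k_pci_hif_exchange_bmi_msg(ar, req, sizeof(req),
					      resp, &resp_len);
	if (ret)
		return ret;	/* e.g. -ETIMEDOUT, -EIO, -ENOMEM */

	/* On success resp_len holds the number of response bytes copied. */
	return 0;
}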
2109
5440ce25 2110static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
5e3dd157 2111{
5440ce25 2112 struct bmi_xfer *xfer;
5440ce25 2113
765952e4 2114 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
5440ce25 2115 return;
5e3dd157 2116
2374b186 2117 xfer->tx_done = true;
5e3dd157
KV
2118}
2119
5440ce25 2120static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
5e3dd157 2121{
7aa7a72a 2122 struct ath10k *ar = ce_state->ar;
5440ce25 2123 struct bmi_xfer *xfer;
5440ce25 2124 unsigned int nbytes;
5440ce25 2125
24d9ef5e
RM
2126 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
2127 &nbytes))
5440ce25 2128 return;
5e3dd157 2129
04ed9dfe
MK
2130 if (WARN_ON_ONCE(!xfer))
2131 return;
2132
5e3dd157 2133 if (!xfer->wait_for_resp) {
7aa7a72a 2134 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
5e3dd157
KV
2135 return;
2136 }
2137
2138 xfer->resp_len = nbytes;
2374b186 2139 xfer->rx_done = true;
5e3dd157
KV
2140}
2141
6bb099b0
BG
2142static int ath10k_pci_bmi_wait(struct ath10k *ar,
2143 struct ath10k_ce_pipe *tx_pipe,
85622cde
MK
2144 struct ath10k_ce_pipe *rx_pipe,
2145 struct bmi_xfer *xfer)
2146{
2147 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
6bb099b0
BG
2148 unsigned long started = jiffies;
2149 unsigned long dur;
2150 int ret;
85622cde
MK
2151
2152 while (time_before_eq(jiffies, timeout)) {
2153 ath10k_pci_bmi_send_done(tx_pipe);
2154 ath10k_pci_bmi_recv_data(rx_pipe);
2155
6bb099b0
BG
2156 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
2157 ret = 0;
2158 goto out;
2159 }
85622cde
MK
2160
2161 schedule();
2162 }
5e3dd157 2163
6bb099b0
BG
2164 ret = -ETIMEDOUT;
2165
2166out:
2167 dur = jiffies - started;
2168 if (dur > HZ)
2169 ath10k_dbg(ar, ATH10K_DBG_BMI,
2170 "bmi cmd took %lu jiffies hz %d ret %d\n",
2171 dur, HZ, ret);
2172 return ret;
85622cde 2173}
5e3dd157
KV
2174
2175/*
2176 * Send an interrupt to the device to wake up the Target CPU
2177 * so it has an opportunity to notice any changed state.
2178 */
2179static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
2180{
9e264945 2181 u32 addr, val;
5e3dd157 2182
1ad38fd7 2183 addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
9e264945
MK
2184 val = ath10k_pci_read32(ar, addr);
2185 val |= CORE_CTRL_CPU_INTR_MASK;
2186 ath10k_pci_write32(ar, addr, val);
5e3dd157 2187
1d2b48d6 2188 return 0;
5e3dd157
KV
2189}
2190
d63955b3
MK
2191static int ath10k_pci_get_num_banks(struct ath10k *ar)
2192{
2193 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2194
2195 switch (ar_pci->pdev->device) {
2196 case QCA988X_2_0_DEVICE_ID:
8bd47021 2197 case QCA99X0_2_0_DEVICE_ID:
e565c312 2198 case QCA9888_2_0_DEVICE_ID:
651b4cdc 2199 case QCA9984_1_0_DEVICE_ID:
6fd3dd71 2200 case QCA9887_1_0_DEVICE_ID:
d63955b3 2201 return 1;
36582e5d 2202 case QCA6164_2_1_DEVICE_ID:
d63955b3
MK
2203 case QCA6174_2_1_DEVICE_ID:
2204 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
2205 case QCA6174_HW_1_0_CHIP_ID_REV:
2206 case QCA6174_HW_1_1_CHIP_ID_REV:
11a002ef
MK
2207 case QCA6174_HW_2_1_CHIP_ID_REV:
2208 case QCA6174_HW_2_2_CHIP_ID_REV:
d63955b3
MK
2209 return 3;
2210 case QCA6174_HW_1_3_CHIP_ID_REV:
2211 return 2;
d63955b3
MK
2212 case QCA6174_HW_3_0_CHIP_ID_REV:
2213 case QCA6174_HW_3_1_CHIP_ID_REV:
2214 case QCA6174_HW_3_2_CHIP_ID_REV:
2215 return 9;
2216 }
2217 break;
a226b519 2218 case QCA9377_1_0_DEVICE_ID:
77cf13ad 2219 return 4;
d63955b3
MK
2220 }
2221
2222 ath10k_warn(ar, "unknown number of banks, assuming 1\n");
2223 return 1;
2224}
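
/* Illustrative sketch (not part of the driver): the mask-and-shift field
 * extraction that MS(ar->chip_id, SOC_CHIP_ID_REV) relies on above. The
 * EXAMPLE_* macros and the bit layout are hypothetical; the real mask and
 * shift for SOC_CHIP_ID_REV come from the ath10k register headers.
 */
#define EXAMPLE_CHIP_ID_REV_MASK 0x00000f00	/* assumed 4-bit field */
#define EXAMPLE_CHIP_ID_REV_LSB  8

static inline unsigned int example_chip_id_rev(unsigned int chip_id)
{
	/* Same pattern as the driver's MS() helper: mask, then shift down. */
	return (chip_id & EXAMPLE_CHIP_ID_REV_MASK) >> EXAMPLE_CHIP_ID_REV_LSB;
}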
2225
4ddb3299
RM
2226static int ath10k_bus_get_num_banks(struct ath10k *ar)
2227{
641fe28a 2228 struct ath10k_ce *ce = ath10k_ce_priv(ar);
4ddb3299 2229
641fe28a 2230 return ce->bus_ops->get_num_banks(ar);
4ddb3299
RM
2231}
2232
f52f5171 2233int ath10k_pci_init_config(struct ath10k *ar)
5e3dd157
KV
2234{
2235 u32 interconnect_targ_addr;
2236 u32 pcie_state_targ_addr = 0;
2237 u32 pipe_cfg_targ_addr = 0;
2238 u32 svc_to_pipe_map = 0;
2239 u32 pcie_config_flags = 0;
2240 u32 ealloc_value;
2241 u32 ealloc_targ_addr;
2242 u32 flag2_value;
2243 u32 flag2_targ_addr;
2244 int ret = 0;
2245
2246 /* Download to Target the CE Config and the service-to-CE map */
2247 interconnect_targ_addr =
2248 host_interest_item_address(HI_ITEM(hi_interconnect_state));
2249
2250 /* Supply Target-side CE configuration */
9e264945
MK
2251 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
2252 &pcie_state_targ_addr);
5e3dd157 2253 if (ret != 0) {
7aa7a72a 2254 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
5e3dd157
KV
2255 return ret;
2256 }
2257
2258 if (pcie_state_targ_addr == 0) {
2259 ret = -EIO;
7aa7a72a 2260 ath10k_err(ar, "Invalid pcie state addr\n");
5e3dd157
KV
2261 return ret;
2262 }
2263
9e264945 2264 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
5e3dd157 2265 offsetof(struct pcie_state,
9e264945
MK
2266 pipe_cfg_addr)),
2267 &pipe_cfg_targ_addr);
5e3dd157 2268 if (ret != 0) {
7aa7a72a 2269 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
5e3dd157
KV
2270 return ret;
2271 }
2272
2273 if (pipe_cfg_targ_addr == 0) {
2274 ret = -EIO;
7aa7a72a 2275 ath10k_err(ar, "Invalid pipe cfg addr\n");
5e3dd157
KV
2276 return ret;
2277 }
2278
2279 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
5b07e07f 2280 target_ce_config_wlan,
050af069
VT
2281 sizeof(struct ce_pipe_config) *
2282 NUM_TARGET_CE_CONFIG_WLAN);
5e3dd157
KV
2283
2284 if (ret != 0) {
7aa7a72a 2285 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
5e3dd157
KV
2286 return ret;
2287 }
2288
9e264945 2289 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
5e3dd157 2290 offsetof(struct pcie_state,
9e264945
MK
2291 svc_to_pipe_map)),
2292 &svc_to_pipe_map);
5e3dd157 2293 if (ret != 0) {
7aa7a72a 2294 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
5e3dd157
KV
2295 return ret;
2296 }
2297
2298 if (svc_to_pipe_map == 0) {
2299 ret = -EIO;
7aa7a72a 2300 ath10k_err(ar, "Invalid svc_to_pipe map\n");
5e3dd157
KV
2301 return ret;
2302 }
2303
2304 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
5b07e07f
KV
2305 target_service_to_ce_map_wlan,
2306 sizeof(target_service_to_ce_map_wlan));
5e3dd157 2307 if (ret != 0) {
7aa7a72a 2308 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
5e3dd157
KV
2309 return ret;
2310 }
2311
9e264945 2312 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
5e3dd157 2313 offsetof(struct pcie_state,
9e264945
MK
2314 config_flags)),
2315 &pcie_config_flags);
5e3dd157 2316 if (ret != 0) {
7aa7a72a 2317 ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
5e3dd157
KV
2318 return ret;
2319 }
2320
2321 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2322
9e264945
MK
2323 ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2324 offsetof(struct pcie_state,
2325 config_flags)),
2326 pcie_config_flags);
5e3dd157 2327 if (ret != 0) {
7aa7a72a 2328 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
5e3dd157
KV
2329 return ret;
2330 }
2331
2332 /* configure early allocation */
2333 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2334
9e264945 2335 ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
5e3dd157 2336 if (ret != 0) {
9165dabb 2337 ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
5e3dd157
KV
2338 return ret;
2339 }
2340
2341 /* first bank is switched to IRAM */
2342 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2343 HI_EARLY_ALLOC_MAGIC_MASK);
4ddb3299 2344 ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
d63955b3 2345 HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
5e3dd157
KV
2346 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2347
9e264945 2348 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
5e3dd157 2349 if (ret != 0) {
7aa7a72a 2350 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
5e3dd157
KV
2351 return ret;
2352 }
2353
2354 /* Tell Target to proceed with initialization */
2355 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2356
9e264945 2357 ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
5e3dd157 2358 if (ret != 0) {
7aa7a72a 2359 ath10k_err(ar, "Failed to get option val: %d\n", ret);
5e3dd157
KV
2360 return ret;
2361 }
2362
2363 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2364
9e264945 2365 ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
5e3dd157 2366 if (ret != 0) {
7aa7a72a 2367 ath10k_err(ar, "Failed to set option val: %d\n", ret);
5e3dd157
KV
2368 return ret;
2369 }
2370
2371 return 0;
2372}
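
/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * diag-window read-modify-write pattern used repeatedly in
 * ath10k_pci_init_config() above, e.g. for hi_option_flag2. The address and
 * flag are passed in here; real callers derive the address from
 * host_interest_item_address() as shown above.
 */
static int example_diag_set_flag(struct ath10k *ar, u32 addr, u32 flag)
{
	u32 val;
	int ret;

	ret = ath10k_pci_diag_read32(ar, addr, &val);
	if (ret)
		return ret;

	val |= flag;

	return ath10k_pci_diag_write32(ar, addr, val);
}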
2373
2727a743
RH
2374static void ath10k_pci_override_ce_config(struct ath10k *ar)
2375{
2376 struct ce_attr *attr;
2377 struct ce_pipe_config *config;
2378
2379 /* For QCA6174 we're overriding the Copy Engine 5 configuration,
2380 * since it is currently used by another feature.
2381 */
2382
2383 /* Override Host's Copy Engine 5 configuration */
2384 attr = &host_ce_config_wlan[5];
2385 attr->src_sz_max = 0;
2386 attr->dest_nentries = 0;
2387
2388 /* Override Target firmware's Copy Engine configuration */
2389 config = &target_ce_config_wlan[5];
2390 config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2391 config->nbytes_max = __cpu_to_le32(2048);
2392
2393 /* Map from service/endpoint to Copy Engine */
2394 target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2395}
2396
f52f5171 2397int ath10k_pci_alloc_pipes(struct ath10k *ar)
25d0dbcb 2398{
84cbf3a7
MK
2399 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2400 struct ath10k_pci_pipe *pipe;
641fe28a 2401 struct ath10k_ce *ce = ath10k_ce_priv(ar);
25d0dbcb
MK
2402 int i, ret;
2403
2404 for (i = 0; i < CE_COUNT; i++) {
84cbf3a7 2405 pipe = &ar_pci->pipe_info[i];
641fe28a 2406 pipe->ce_hdl = &ce->ce_states[i];
84cbf3a7
MK
2407 pipe->pipe_num = i;
2408 pipe->hif_ce_state = ar;
2409
9d9bdbb0 2410 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
25d0dbcb 2411 if (ret) {
7aa7a72a 2412 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
25d0dbcb
MK
2413 i, ret);
2414 return ret;
2415 }
84cbf3a7
MK
2416
2417 /* Last CE is Diagnostic Window */
050af069 2418 if (i == CE_DIAG_PIPE) {
84cbf3a7
MK
2419 ar_pci->ce_diag = pipe->ce_hdl;
2420 continue;
2421 }
2422
2423 pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
25d0dbcb
MK
2424 }
2425
2426 return 0;
2427}
2428
f52f5171 2429void ath10k_pci_free_pipes(struct ath10k *ar)
25d0dbcb
MK
2430{
2431 int i;
5e3dd157 2432
25d0dbcb
MK
2433 for (i = 0; i < CE_COUNT; i++)
2434 ath10k_ce_free_pipe(ar, i);
2435}
5e3dd157 2436
f52f5171 2437int ath10k_pci_init_pipes(struct ath10k *ar)
5e3dd157 2438{
84cbf3a7 2439 int i, ret;
5e3dd157 2440
84cbf3a7
MK
2441 for (i = 0; i < CE_COUNT; i++) {
2442 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
25d0dbcb 2443 if (ret) {
7aa7a72a 2444 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
84cbf3a7 2445 i, ret);
25d0dbcb 2446 return ret;
5e3dd157 2447 }
5e3dd157
KV
2448 }
2449
5e3dd157
KV
2450 return 0;
2451}
2452
5c771e74 2453static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
5e3dd157 2454{
5c771e74
MK
2455 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2456 FW_IND_EVENT_PENDING;
2457}
5e3dd157 2458
5c771e74
MK
2459static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2460{
2461 u32 val;
5e3dd157 2462
5c771e74
MK
2463 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2464 val &= ~FW_IND_EVENT_PENDING;
2465 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
5e3dd157
KV
2466}
2467
fb7caaba
MSS
2468static bool ath10k_pci_has_device_gone(struct ath10k *ar)
2469{
2470 u32 val;
2471
2472 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2473 return (val == 0xffffffff);
2474}
2475
de01357b
MK
2476/* this function effectively clears target memory controller assert line */
2477static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2478{
2479 u32 val;
2480
2481 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2482 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2483 val | SOC_RESET_CONTROL_SI0_RST_MASK);
2484 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2485
2486 msleep(10);
2487
2488 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2489 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2490 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2491 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2492
2493 msleep(10);
2494}
2495
61c1648b 2496static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
fc36e3ff 2497{
fc36e3ff
MK
2498 u32 val;
2499
b39712ce 2500 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
fc36e3ff 2501
fc36e3ff 2502 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
61c1648b
MK
2503 SOC_RESET_CONTROL_ADDRESS);
2504 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2505 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2506}
2507
2508static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2509{
2510 u32 val;
fc36e3ff 2511
fc36e3ff
MK
2512 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2513 SOC_RESET_CONTROL_ADDRESS);
61c1648b 2514
fc36e3ff
MK
2515 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2516 val | SOC_RESET_CONTROL_CE_RST_MASK);
fc36e3ff 2517 msleep(10);
fc36e3ff
MK
2518 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2519 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
61c1648b
MK
2520}
2521
2522static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2523{
2524 u32 val;
2525
fc36e3ff 2526 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
61c1648b
MK
2527 SOC_LF_TIMER_CONTROL0_ADDRESS);
2528 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2529 SOC_LF_TIMER_CONTROL0_ADDRESS,
2530 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2531}
fc36e3ff 2532
61c1648b
MK
2533static int ath10k_pci_warm_reset(struct ath10k *ar)
2534{
2535 int ret;
2536
2537 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
de01357b 2538
61c1648b
MK
2539 spin_lock_bh(&ar->data_lock);
2540 ar->stats.fw_warm_reset_counter++;
2541 spin_unlock_bh(&ar->data_lock);
fc36e3ff 2542
61c1648b 2543 ath10k_pci_irq_disable(ar);
fc36e3ff 2544
61c1648b
MK
2545 /* Make sure the target CPU is not doing anything dangerous, e.g. if it
2546 * were to access the copy engine while the host performs a copy engine
2547 * reset then it is possible for the device to confuse the PCIe controller
2548 * to the point of bringing the host system to a complete stop (i.e. hang).
2549 */
2550 ath10k_pci_warm_reset_si0(ar);
2551 ath10k_pci_warm_reset_cpu(ar);
2552 ath10k_pci_init_pipes(ar);
2553 ath10k_pci_wait_for_target_init(ar);
fc36e3ff 2554
61c1648b
MK
2555 ath10k_pci_warm_reset_clear_lf(ar);
2556 ath10k_pci_warm_reset_ce(ar);
2557 ath10k_pci_warm_reset_cpu(ar);
2558 ath10k_pci_init_pipes(ar);
fc36e3ff 2559
61c1648b
MK
2560 ret = ath10k_pci_wait_for_target_init(ar);
2561 if (ret) {
2562 ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2563 return ret;
2564 }
fc36e3ff 2565
7aa7a72a 2566 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
fc36e3ff 2567
c0c378f9 2568 return 0;
fc36e3ff
MK
2569}
2570
0fc7e270
VT
2571static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
2572{
2573 ath10k_pci_irq_disable(ar);
2574 return ath10k_pci_qca99x0_chip_reset(ar);
2575}
2576
6e4202c3
VT
2577static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2578{
0fc7e270
VT
2579 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2580
2581 if (!ar_pci->pci_soft_reset)
6e4202c3 2582 return -ENOTSUPP;
0fc7e270
VT
2583
2584 return ar_pci->pci_soft_reset(ar);
6e4202c3
VT
2585}
2586
d63955b3 2587static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
0bc14d06
MK
2588{
2589 int i, ret;
2590 u32 val;
2591
d63955b3 2592 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
0bc14d06
MK
2593
2594 /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
2595 * It is thus preferred to use warm reset which is safer but may not be
2596 * able to recover the device from all possible fail scenarios.
2597 *
2598 * Warm reset doesn't always work on first try so attempt it a few
2599 * times before giving up.
2600 */
2601 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2602 ret = ath10k_pci_warm_reset(ar);
2603 if (ret) {
2604 ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2605 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2606 ret);
2607 continue;
2608 }
2609
2610 /* FIXME: Sometimes copy engine doesn't recover after warm
2611 * reset. In most cases this needs cold reset. In some of these
2612 * cases the device is in such a state that a cold reset may
2613 * lock up the host.
2614 *
2615 * Reading any host interest register via copy engine is
2616 * sufficient to verify if device is capable of booting
2617 * firmware blob.
2618 */
2619 ret = ath10k_pci_init_pipes(ar);
2620 if (ret) {
2621 ath10k_warn(ar, "failed to init copy engine: %d\n",
2622 ret);
2623 continue;
2624 }
2625
2626 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2627 &val);
2628 if (ret) {
2629 ath10k_warn(ar, "failed to poke copy engine: %d\n",
2630 ret);
2631 continue;
2632 }
2633
2634 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2635 return 0;
2636 }
2637
2638 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2639 ath10k_warn(ar, "refusing cold reset as requested\n");
2640 return -EPERM;
2641 }
2642
2643 ret = ath10k_pci_cold_reset(ar);
2644 if (ret) {
2645 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2646 return ret;
2647 }
2648
2649 ret = ath10k_pci_wait_for_target_init(ar);
2650 if (ret) {
2651 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2652 ret);
2653 return ret;
2654 }
2655
d63955b3
MK
2656 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2657
2658 return 0;
2659}
2660
2661static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2662{
2663 int ret;
2664
2665 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2666
2667 /* FIXME: QCA6174 requires cold + warm reset to work. */
2668
2669 ret = ath10k_pci_cold_reset(ar);
2670 if (ret) {
2671 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2672 return ret;
2673 }
2674
2675 ret = ath10k_pci_wait_for_target_init(ar);
2676 if (ret) {
2677 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
617b0f4d 2678 ret);
d63955b3
MK
2679 return ret;
2680 }
2681
2682 ret = ath10k_pci_warm_reset(ar);
2683 if (ret) {
2684 ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2685 return ret;
2686 }
2687
2688 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
0bc14d06
MK
2689
2690 return 0;
2691}
2692
6e4202c3
VT
2693static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2694{
2695 int ret;
2696
2697 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2698
2699 ret = ath10k_pci_cold_reset(ar);
2700 if (ret) {
2701 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2702 return ret;
2703 }
2704
2705 ret = ath10k_pci_wait_for_target_init(ar);
2706 if (ret) {
2707 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2708 ret);
2709 return ret;
2710 }
2711
2712 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2713
2714 return 0;
2715}
2716
d63955b3
MK
2717static int ath10k_pci_chip_reset(struct ath10k *ar)
2718{
0fc7e270
VT
2719 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2720
2721 if (WARN_ON(!ar_pci->pci_hard_reset))
d63955b3 2722 return -ENOTSUPP;
0fc7e270
VT
2723
2724 return ar_pci->pci_hard_reset(ar);
d63955b3
MK
2725}
2726
0bc14d06 2727static int ath10k_pci_hif_power_up(struct ath10k *ar)
8c5c5368 2728{
76d870ed 2729 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
8c5c5368
MK
2730 int ret;
2731
0bc14d06
MK
2732 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2733
76d870ed
JD
2734 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2735 &ar_pci->link_ctl);
2736 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2737 ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2738
8c5c5368
MK
2739 /*
2740 * Bring the target up cleanly.
2741 *
2742 * The target may be in an undefined state with an AUX-powered Target
2743 * and a Host in WoW mode. If the Host crashes, loses power, or is
2744 * restarted (without unloading the driver) then the Target is left
2745 * (aux) powered and running. On a subsequent driver load, the Target
2746 * is in an unexpected state. We try to catch that here in order to
2747 * reset the Target and retry the probe.
2748 */
0bc14d06 2749 ret = ath10k_pci_chip_reset(ar);
5b2589fc 2750 if (ret) {
a2fa8800
MK
2751 if (ath10k_pci_has_fw_crashed(ar)) {
2752 ath10k_warn(ar, "firmware crashed during chip reset\n");
2753 ath10k_pci_fw_crashed_clear(ar);
2754 ath10k_pci_fw_crashed_dump(ar);
2755 }
2756
0bc14d06 2757 ath10k_err(ar, "failed to reset chip: %d\n", ret);
707b1bbd 2758 goto err_sleep;
5b2589fc 2759 }
8c5c5368 2760
84cbf3a7 2761 ret = ath10k_pci_init_pipes(ar);
1d2b48d6 2762 if (ret) {
7aa7a72a 2763 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
707b1bbd 2764 goto err_sleep;
ab977bd0
MK
2765 }
2766
98563d5a
MK
2767 ret = ath10k_pci_init_config(ar);
2768 if (ret) {
7aa7a72a 2769 ath10k_err(ar, "failed to setup init config: %d\n", ret);
5c771e74 2770 goto err_ce;
98563d5a 2771 }
8c5c5368
MK
2772
2773 ret = ath10k_pci_wake_target_cpu(ar);
2774 if (ret) {
7aa7a72a 2775 ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
5c771e74 2776 goto err_ce;
8c5c5368
MK
2777 }
2778
2779 return 0;
2780
2781err_ce:
2782 ath10k_pci_ce_deinit(ar);
61c95cea 2783
707b1bbd 2784err_sleep:
61c95cea
MK
2785 return ret;
2786}
2787
f52f5171 2788void ath10k_pci_hif_power_down(struct ath10k *ar)
8c5c5368 2789{
7aa7a72a 2790 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
50f87a67 2791
c011b281
MK
2792 /* Currently hif_power_up performs effectively a reset and hif_stop
2793 * resets the chip as well so there's no point in resetting here.
2794 */
8c5c5368
MK
2795}
2796
8cd13cad 2797static int ath10k_pci_hif_suspend(struct ath10k *ar)
96378bd2
BN
2798{
2799 /* Nothing to do; the important stuff is in the driver suspend. */
2800 return 0;
2801}
2802
2803static int ath10k_pci_suspend(struct ath10k *ar)
8cd13cad 2804{
77258d40
MK
2805 /* The grace timer can still be counting down and ar->ps_awake may still be true.
2806 * It is known that the device may be asleep after resuming regardless
2807 * of the SoC powersave state before suspending. Hence make sure the
2808 * device is asleep before proceeding.
2809 */
2810 ath10k_pci_sleep_sync(ar);
320e14b8 2811
8cd13cad
MK
2812 return 0;
2813}
2814
2815static int ath10k_pci_hif_resume(struct ath10k *ar)
96378bd2
BN
2816{
2817 /* Nothing to do; the important stuff is in the driver resume. */
2818 return 0;
2819}
2820
2821static int ath10k_pci_resume(struct ath10k *ar)
8cd13cad
MK
2822{
2823 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2824 struct pci_dev *pdev = ar_pci->pdev;
2825 u32 val;
1aaf8efb
AK
2826 int ret = 0;
2827
d9d6a5ae
RM
2828 ret = ath10k_pci_force_wake(ar);
2829 if (ret) {
2830 ath10k_err(ar, "failed to wake up target: %d\n", ret);
2831 return ret;
1aaf8efb 2832 }
8cd13cad 2833
9ff4be96
MK
2834 /* Suspend/Resume resets the PCI configuration space, so we have to
2835 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2836 * from interfering with C3 CPU state. pci_restore_state won't help
2837 * here since it only restores the first 64 bytes of the PCI config header.
2838 */
2839 pci_read_config_dword(pdev, 0x40, &val);
2840 if ((val & 0x0000ff00) != 0)
2841 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
8cd13cad 2842
1aaf8efb 2843 return ret;
8cd13cad 2844}
8cd13cad 2845
6847f967
SE
2846static bool ath10k_pci_validate_cal(void *data, size_t size)
2847{
2848 __le16 *cal_words = data;
2849 u16 checksum = 0;
2850 size_t i;
2851
2852 if (size % 2 != 0)
2853 return false;
2854
2855 for (i = 0; i < size / 2; i++)
2856 checksum ^= le16_to_cpu(cal_words[i]);
2857
2858 return checksum == 0xffff;
2859}
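
/* Illustrative demonstration (host-endian, standalone, not part of the
 * driver) of the checksum rule checked above: a valid calibration image XORs
 * to 0xffff across all of its 16-bit words. The sample words are invented;
 * the last one is chosen so the XOR of all four comes out to 0xffff.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t words[4] = { 0x1234, 0xabcd, 0x0f0f, 0 };
	uint16_t checksum = 0;
	size_t i;

	/* Force a valid image by construction. */
	words[3] = 0x1234 ^ 0xabcd ^ 0x0f0f ^ 0xffff;

	for (i = 0; i < 4; i++)
		checksum ^= words[i];

	printf("checksum 0x%04x -> %s\n", checksum,
	       checksum == 0xffff ? "valid" : "invalid");
	return 0;
}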
2860
2861static void ath10k_pci_enable_eeprom(struct ath10k *ar)
2862{
2863 /* Enable SI clock */
2864 ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
2865
2866 /* Configure GPIOs for I2C operation */
2867 ath10k_pci_write32(ar,
2868 GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2869 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
2870 SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
2871 GPIO_PIN0_CONFIG) |
2872 SM(1, GPIO_PIN0_PAD_PULL));
2873
2874 ath10k_pci_write32(ar,
2875 GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2876 4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
2877 SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
2878 SM(1, GPIO_PIN0_PAD_PULL));
2879
2880 ath10k_pci_write32(ar,
2881 GPIO_BASE_ADDRESS +
2882 QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
2883 1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
2884
2885 /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
2886 ath10k_pci_write32(ar,
2887 SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
2888 SM(1, SI_CONFIG_ERR_INT) |
2889 SM(1, SI_CONFIG_BIDIR_OD_DATA) |
2890 SM(1, SI_CONFIG_I2C) |
2891 SM(1, SI_CONFIG_POS_SAMPLE) |
2892 SM(1, SI_CONFIG_INACTIVE_DATA) |
2893 SM(1, SI_CONFIG_INACTIVE_CLK) |
2894 SM(8, SI_CONFIG_DIVIDER));
2895}
2896
2897static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
2898{
2899 u32 reg;
2900 int wait_limit;
2901
2902 /* set device select byte and address for the read operation */
2903 reg = QCA9887_EEPROM_SELECT_READ |
2904 SM(addr, QCA9887_EEPROM_ADDR_LO) |
2905 SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
2906 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
2907
2908 /* write transmit data, transfer length, and START bit */
2909 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
2910 SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
2911 SM(4, SI_CS_TX_CNT));
2912
2913 /* wait max 1 sec */
2914 wait_limit = 100000;
2915
2916 /* wait for SI_CS_DONE_INT */
2917 do {
2918 reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
2919 if (MS(reg, SI_CS_DONE_INT))
2920 break;
2921
2922 wait_limit--;
2923 udelay(10);
2924 } while (wait_limit > 0);
2925
2926 if (!MS(reg, SI_CS_DONE_INT)) {
2927 ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
2928 addr);
2929 return -ETIMEDOUT;
2930 }
2931
2932 /* clear SI_CS_DONE_INT */
2933 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
2934
2935 if (MS(reg, SI_CS_DONE_ERR)) {
2936 ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
2937 return -EIO;
2938 }
2939
2940 /* extract receive data */
2941 reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
2942 *out = reg;
2943
2944 return 0;
2945}
2946
2947static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
2948 size_t *data_len)
2949{
2950 u8 *caldata = NULL;
2951 size_t calsize, i;
2952 int ret;
2953
2954 if (!QCA_REV_9887(ar))
2955 return -EOPNOTSUPP;
2956
2957 calsize = ar->hw_params.cal_data_len;
2958 caldata = kmalloc(calsize, GFP_KERNEL);
2959 if (!caldata)
2960 return -ENOMEM;
2961
2962 ath10k_pci_enable_eeprom(ar);
2963
2964 for (i = 0; i < calsize; i++) {
2965 ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
2966 if (ret)
2967 goto err_free;
2968 }
2969
2970 if (!ath10k_pci_validate_cal(caldata, calsize))
2971 goto err_free;
2972
2973 *data = caldata;
2974 *data_len = calsize;
2975
2976 return 0;
2977
2978err_free:
5f4761dd 2979 kfree(caldata);
6847f967
SE
2980
2981 return -EINVAL;
2982}
2983
5e3dd157 2984static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
726346fc 2985 .tx_sg = ath10k_pci_hif_tx_sg,
eef25405 2986 .diag_read = ath10k_pci_hif_diag_read,
9f65ad25 2987 .diag_write = ath10k_pci_diag_write_mem,
5e3dd157
KV
2988 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2989 .start = ath10k_pci_hif_start,
2990 .stop = ath10k_pci_hif_stop,
2991 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2992 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2993 .send_complete_check = ath10k_pci_hif_send_complete_check,
5e3dd157 2994 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
8c5c5368
MK
2995 .power_up = ath10k_pci_hif_power_up,
2996 .power_down = ath10k_pci_hif_power_down,
077a3804
YL
2997 .read32 = ath10k_pci_read32,
2998 .write32 = ath10k_pci_write32,
8cd13cad
MK
2999 .suspend = ath10k_pci_hif_suspend,
3000 .resume = ath10k_pci_hif_resume,
6847f967 3001 .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
5e3dd157
KV
3002};
3003
5e3dd157
KV
3004/*
3005 * Top-level interrupt handler for all PCI interrupts from a Target.
3006 * When a block of MSI interrupts is allocated, this top-level handler
3007 * is not used; instead, we directly call the correct sub-handler.
3008 */
3009static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
3010{
3011 struct ath10k *ar = arg;
3012 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1aaf8efb
AK
3013 int ret;
3014
fb7caaba
MSS
3015 if (ath10k_pci_has_device_gone(ar))
3016 return IRQ_NONE;
3017
d9d6a5ae
RM
3018 ret = ath10k_pci_force_wake(ar);
3019 if (ret) {
3020 ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
3021 return IRQ_NONE;
1aaf8efb 3022 }
5e3dd157 3023
3c97f5de
RM
3024 if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
3025 !ath10k_pci_irq_pending(ar))
3026 return IRQ_NONE;
5e3dd157 3027
3c97f5de
RM
3028 ath10k_pci_disable_and_clear_legacy_irq(ar);
3029 ath10k_pci_irq_msi_fw_mask(ar);
3030 napi_schedule(&ar->napi);
5e3dd157
KV
3031
3032 return IRQ_HANDLED;
3033}
3034
3c97f5de 3035static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
ab977bd0 3036{
3c97f5de
RM
3037 struct ath10k *ar = container_of(ctx, struct ath10k, napi);
3038 int done = 0;
ab977bd0 3039
5c771e74
MK
3040 if (ath10k_pci_has_fw_crashed(ar)) {
3041 ath10k_pci_fw_crashed_clear(ar);
0e9848c0 3042 ath10k_pci_fw_crashed_dump(ar);
3c97f5de
RM
3043 napi_complete(ctx);
3044 return done;
ab977bd0
MK
3045 }
3046
5e3dd157
KV
3047 ath10k_ce_per_engine_service_any(ar);
3048
3c97f5de
RM
3049 done = ath10k_htt_txrx_compl_task(ar, budget);
3050
3051 if (done < budget) {
6ad20165 3052 napi_complete_done(ctx, done);
3c97f5de
RM
3053 /* In case of MSI, it is possible that interrupts are received
3054 * while the NAPI poll is in progress. Pending interrupts received
3055 * after all copy engine pipes have been processed by the NAPI poll
3056 * would then never be handled again, which has been seen to prevent
3057 * the boot sequence from completing on x86 platforms. So before
3058 * enabling interrupts it is safer to check for pending interrupts
3059 * and service them immediately.
3060 */
641fe28a 3061 if (ath10k_ce_interrupt_summary(ar)) {
3c97f5de
RM
3062 napi_reschedule(ctx);
3063 goto out;
3064 }
2685218b 3065 ath10k_pci_enable_legacy_irq(ar);
3c97f5de
RM
3066 ath10k_pci_irq_msi_fw_unmask(ar);
3067 }
3068
3069out:
3070 return done;
5e3dd157
KV
3071}
3072
fc15ca13 3073static int ath10k_pci_request_irq_msi(struct ath10k *ar)
5e3dd157
KV
3074{
3075 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3076 int ret;
3077
5e3dd157
KV
3078 ret = request_irq(ar_pci->pdev->irq,
3079 ath10k_pci_interrupt_handler,
3080 IRQF_SHARED, "ath10k_pci", ar);
fc15ca13 3081 if (ret) {
7aa7a72a 3082 ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
fc15ca13 3083 ar_pci->pdev->irq, ret);
5e3dd157
KV
3084 return ret;
3085 }
3086
5e3dd157
KV
3087 return 0;
3088}
3089
fc15ca13 3090static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
5e3dd157
KV
3091{
3092 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3093 int ret;
3094
3095 ret = request_irq(ar_pci->pdev->irq,
3096 ath10k_pci_interrupt_handler,
3097 IRQF_SHARED, "ath10k_pci", ar);
f3782744 3098 if (ret) {
7aa7a72a 3099 ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
fc15ca13 3100 ar_pci->pdev->irq, ret);
5e3dd157 3101 return ret;
f3782744 3102 }
5e3dd157 3103
5e3dd157
KV
3104 return 0;
3105}
3106
fc15ca13
MK
3107static int ath10k_pci_request_irq(struct ath10k *ar)
3108{
3109 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 3110
cfe9011a
RM
3111 switch (ar_pci->oper_irq_mode) {
3112 case ATH10K_PCI_IRQ_LEGACY:
fc15ca13 3113 return ath10k_pci_request_irq_legacy(ar);
cfe9011a 3114 case ATH10K_PCI_IRQ_MSI:
fc15ca13 3115 return ath10k_pci_request_irq_msi(ar);
b8402d82 3116 default:
cfe9011a 3117 return -EINVAL;
fc15ca13 3118 }
5e3dd157
KV
3119}
3120
fc15ca13
MK
3121static void ath10k_pci_free_irq(struct ath10k *ar)
3122{
3123 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
fc15ca13 3124
cfe9011a 3125 free_irq(ar_pci->pdev->irq, ar);
fc15ca13
MK
3126}
3127
3c97f5de 3128void ath10k_pci_init_napi(struct ath10k *ar)
5e3dd157 3129{
3c97f5de
RM
3130 netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
3131 ATH10K_NAPI_BUDGET);
fc15ca13
MK
3132}
3133
3134static int ath10k_pci_init_irq(struct ath10k *ar)
3135{
3136 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3137 int ret;
5e3dd157 3138
3c97f5de 3139 ath10k_pci_init_napi(ar);
5e3dd157 3140
403d627b 3141 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
7aa7a72a
MK
3142 ath10k_info(ar, "limiting irq mode to: %d\n",
3143 ath10k_pci_irq_mode);
5e3dd157 3144
fc15ca13 3145 /* Try MSI */
cfe9c45b 3146 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
cfe9011a 3147 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
cfe9c45b 3148 ret = pci_enable_msi(ar_pci->pdev);
5e3dd157 3149 if (ret == 0)
cfe9c45b 3150 return 0;
5e3dd157 3151
cfe9c45b 3152 /* fall-through */
5e3dd157
KV
3153 }
3154
fc15ca13
MK
3155 /* Try legacy irq
3156 *
3157 * A potential race occurs here: The CORE_BASE write
3158 * depends on target correctly decoding AXI address but
3159 * host won't know when target writes BAR to CORE_CTRL.
3160 * This write might get lost if target has NOT written BAR.
3161 * For now, fix the race by repeating the write in the below
d6dfe25c
MR
3162 * synchronization check.
3163 */
cfe9011a 3164 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
5e3dd157 3165
fc15ca13
MK
3166 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3167 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
fc15ca13
MK
3168
3169 return 0;
5e3dd157
KV
3170}
3171
c0c378f9 3172static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
5e3dd157 3173{
fc15ca13
MK
3174 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3175 0);
5e3dd157
KV
3176}
3177
fc15ca13 3178static int ath10k_pci_deinit_irq(struct ath10k *ar)
5e3dd157
KV
3179{
3180 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
5e3dd157 3181
cfe9011a
RM
3182 switch (ar_pci->oper_irq_mode) {
3183 case ATH10K_PCI_IRQ_LEGACY:
c0c378f9 3184 ath10k_pci_deinit_irq_legacy(ar);
b8402d82 3185 break;
bb8b621a
AG
3186 default:
3187 pci_disable_msi(ar_pci->pdev);
b8402d82 3188 break;
fc15ca13
MK
3189 }
3190
b8402d82 3191 return 0;
5e3dd157
KV
3192}
3193
f52f5171 3194int ath10k_pci_wait_for_target_init(struct ath10k *ar)
5e3dd157
KV
3195{
3196 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0399eca8 3197 unsigned long timeout;
0399eca8 3198 u32 val;
5e3dd157 3199
7aa7a72a 3200 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
5e3dd157 3201
0399eca8
KV
3202 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
3203
3204 do {
3205 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
3206
7aa7a72a
MK
3207 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
3208 val);
50f87a67 3209
0399eca8
KV
3210 /* target should never return this */
3211 if (val == 0xffffffff)
3212 continue;
3213
7710cd2e
MK
3214 /* the device has crashed so don't bother trying anymore */
3215 if (val & FW_IND_EVENT_PENDING)
3216 break;
3217
0399eca8
KV
3218 if (val & FW_IND_INITIALIZED)
3219 break;
3220
cfe9011a 3221 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
5e3dd157 3222 /* Fix potential race by repeating CORE_BASE writes */
a428249d 3223 ath10k_pci_enable_legacy_irq(ar);
0399eca8 3224
5e3dd157 3225 mdelay(10);
0399eca8 3226 } while (time_before(jiffies, timeout));
5e3dd157 3227
a428249d 3228 ath10k_pci_disable_and_clear_legacy_irq(ar);
7c0f0e3c 3229 ath10k_pci_irq_msi_fw_mask(ar);
a428249d 3230
6a4f6e1d 3231 if (val == 0xffffffff) {
7aa7a72a 3232 ath10k_err(ar, "failed to read device register, device is gone\n");
c0c378f9 3233 return -EIO;
6a4f6e1d
MK
3234 }
3235
7710cd2e 3236 if (val & FW_IND_EVENT_PENDING) {
7aa7a72a 3237 ath10k_warn(ar, "device has crashed during init\n");
c0c378f9 3238 return -ECOMM;
7710cd2e
MK
3239 }
3240
6a4f6e1d 3241 if (!(val & FW_IND_INITIALIZED)) {
7aa7a72a 3242 ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
0399eca8 3243 val);
c0c378f9 3244 return -ETIMEDOUT;
5e3dd157
KV
3245 }
3246
7aa7a72a 3247 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
c0c378f9 3248 return 0;
5e3dd157
KV
3249}
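
/* Illustrative sketch (hypothetical caller, not part of the driver): the
 * distinct return codes of ath10k_pci_wait_for_target_init() above allow a
 * caller to tell the failure modes apart: -EIO means register reads return
 * 0xffffffff (device gone), -ECOMM means FW_IND_EVENT_PENDING was set
 * (firmware crashed during init), and -ETIMEDOUT means FW_IND_INITIALIZED
 * never appeared within the wait.
 */
static int example_wait_and_classify(struct ath10k *ar)
{
	int ret = ath10k_pci_wait_for_target_init(ar);

	switch (ret) {
	case 0:
		return 0;	/* target is up */
	case -EIO:
		return ret;	/* device has disappeared from the bus */
	case -ECOMM:
		return ret;	/* firmware crashed during init */
	case -ETIMEDOUT:
	default:
		return ret;	/* no init indication within the wait */
	}
}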
3250
fc36e3ff 3251static int ath10k_pci_cold_reset(struct ath10k *ar)
5e3dd157 3252{
5e3dd157
KV
3253 u32 val;
3254
7aa7a72a 3255 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
5e3dd157 3256
f51dbe73
BG
3257 spin_lock_bh(&ar->data_lock);
3258
3259 ar->stats.fw_cold_reset_counter++;
3260
3261 spin_unlock_bh(&ar->data_lock);
3262
5e3dd157 3263 /* Put Target, including PCIe, into RESET. */
e479ed43 3264 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
5e3dd157 3265 val |= 1;
e479ed43 3266 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
5e3dd157 3267
acd19580
VT
3268 /* After writing into SOC_GLOBAL_RESET to put the device into
3269 * reset and then pulling it out of reset, PCIe may not be stable
3270 * for any immediate PCIe register access and can cause bus errors;
3271 * add a delay before any PCIe access request to avoid this issue.
3272 */
3273 msleep(20);
5e3dd157
KV
3274
3275 /* Pull Target, including PCIe, out of RESET. */
3276 val &= ~1;
e479ed43 3277 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
5e3dd157 3278
acd19580 3279 msleep(20);
5e3dd157 3280
7aa7a72a 3281 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
50f87a67 3282
5b2589fc 3283 return 0;
5e3dd157
KV
3284}
3285
2986e3ef 3286static int ath10k_pci_claim(struct ath10k *ar)
5e3dd157 3287{
2986e3ef
MK
3288 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3289 struct pci_dev *pdev = ar_pci->pdev;
2986e3ef 3290 int ret;
5e3dd157
KV
3291
3292 pci_set_drvdata(pdev, ar);
3293
5e3dd157
KV
3294 ret = pci_enable_device(pdev);
3295 if (ret) {
7aa7a72a 3296 ath10k_err(ar, "failed to enable pci device: %d\n", ret);
2986e3ef 3297 return ret;
5e3dd157
KV
3298 }
3299
5e3dd157
KV
3300 ret = pci_request_region(pdev, BAR_NUM, "ath");
3301 if (ret) {
7aa7a72a 3302 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
2986e3ef 3303 ret);
5e3dd157
KV
3304 goto err_device;
3305 }
3306
2986e3ef 3307 /* Target expects 32 bit DMA. Enforce it. */
5e3dd157
KV
3308 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3309 if (ret) {
7aa7a72a 3310 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
5e3dd157
KV
3311 goto err_region;
3312 }
3313
3314 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3315 if (ret) {
7aa7a72a 3316 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
2986e3ef 3317 ret);
5e3dd157
KV
3318 goto err_region;
3319 }
3320
5e3dd157
KV
3321 pci_set_master(pdev);
3322
5e3dd157 3323 /* Arrange for access to Target SoC registers. */
aeae5b4c 3324 ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
2986e3ef
MK
3325 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
3326 if (!ar_pci->mem) {
7aa7a72a 3327 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
5e3dd157
KV
3328 ret = -EIO;
3329 goto err_master;
3330 }
3331
75b34800 3332 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
2986e3ef
MK
3333 return 0;
3334
3335err_master:
3336 pci_clear_master(pdev);
3337
3338err_region:
3339 pci_release_region(pdev, BAR_NUM);
3340
3341err_device:
3342 pci_disable_device(pdev);
3343
3344 return ret;
3345}
3346
3347static void ath10k_pci_release(struct ath10k *ar)
3348{
3349 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3350 struct pci_dev *pdev = ar_pci->pdev;
3351
3352 pci_iounmap(pdev, ar_pci->mem);
3353 pci_release_region(pdev, BAR_NUM);
3354 pci_clear_master(pdev);
3355 pci_disable_device(pdev);
3356}
3357
7505f7c3
MK
3358static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
3359{
3360 const struct ath10k_pci_supp_chip *supp_chip;
3361 int i;
3362 u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
3363
3364 for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
3365 supp_chip = &ath10k_pci_supp_chips[i];
3366
3367 if (supp_chip->dev_id == dev_id &&
3368 supp_chip->rev_id == rev_id)
3369 return true;
3370 }
3371
3372 return false;
3373}
3374
90188f80
RM
3375int ath10k_pci_setup_resource(struct ath10k *ar)
3376{
3377 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
641fe28a 3378 struct ath10k_ce *ce = ath10k_ce_priv(ar);
90188f80
RM
3379 int ret;
3380
641fe28a 3381 spin_lock_init(&ce->ce_lock);
90188f80
RM
3382 spin_lock_init(&ar_pci->ps_lock);
3383
7ac76764 3384 timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
90188f80 3385
b08b5b53 3386 if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
90188f80
RM
3387 ath10k_pci_override_ce_config(ar);
3388
3389 ret = ath10k_pci_alloc_pipes(ar);
3390 if (ret) {
3391 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
3392 ret);
3393 return ret;
3394 }
3395
3396 return 0;
3397}
3398
3399void ath10k_pci_release_resource(struct ath10k *ar)
3400{
3c97f5de
RM
3401 ath10k_pci_rx_retry_sync(ar);
3402 netif_napi_del(&ar->napi);
90188f80
RM
3403 ath10k_pci_ce_deinit(ar);
3404 ath10k_pci_free_pipes(ar);
3405}
3406
4ddb3299
RM
3407static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
3408 .read32 = ath10k_bus_pci_read32,
3409 .write32 = ath10k_bus_pci_write32,
3410 .get_num_banks = ath10k_pci_get_num_banks,
3411};
3412
2986e3ef
MK
3413static int ath10k_pci_probe(struct pci_dev *pdev,
3414 const struct pci_device_id *pci_dev)
3415{
3416 int ret = 0;
3417 struct ath10k *ar;
3418 struct ath10k_pci *ar_pci;
d63955b3 3419 enum ath10k_hw_rev hw_rev;
2986e3ef 3420 u32 chip_id;
1aaf8efb 3421 bool pci_ps;
0fc7e270
VT
3422 int (*pci_soft_reset)(struct ath10k *ar);
3423 int (*pci_hard_reset)(struct ath10k *ar);
7f622593 3424 u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
2986e3ef 3425
d63955b3
MK
3426 switch (pci_dev->device) {
3427 case QCA988X_2_0_DEVICE_ID:
3428 hw_rev = ATH10K_HW_QCA988X;
1aaf8efb 3429 pci_ps = false;
0fc7e270
VT
3430 pci_soft_reset = ath10k_pci_warm_reset;
3431 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
7f622593 3432 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
d63955b3 3433 break;
6fd3dd71 3434 case QCA9887_1_0_DEVICE_ID:
6fd3dd71
SE
3435 hw_rev = ATH10K_HW_QCA9887;
3436 pci_ps = false;
3437 pci_soft_reset = ath10k_pci_warm_reset;
3438 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
7f622593 3439 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
6fd3dd71 3440 break;
36582e5d 3441 case QCA6164_2_1_DEVICE_ID:
d63955b3
MK
3442 case QCA6174_2_1_DEVICE_ID:
3443 hw_rev = ATH10K_HW_QCA6174;
1aaf8efb 3444 pci_ps = true;
0fc7e270
VT
3445 pci_soft_reset = ath10k_pci_warm_reset;
3446 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
7f622593 3447 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
d63955b3 3448 break;
8bd47021
VT
3449 case QCA99X0_2_0_DEVICE_ID:
3450 hw_rev = ATH10K_HW_QCA99X0;
1aaf8efb 3451 pci_ps = false;
0fc7e270
VT
3452 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3453 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
7f622593 3454 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
8bd47021 3455 break;
651b4cdc
VT
3456 case QCA9984_1_0_DEVICE_ID:
3457 hw_rev = ATH10K_HW_QCA9984;
3458 pci_ps = false;
3459 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3460 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
7f622593 3461 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
651b4cdc 3462 break;
e565c312
AK
3463 case QCA9888_2_0_DEVICE_ID:
3464 hw_rev = ATH10K_HW_QCA9888;
3465 pci_ps = false;
3466 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3467 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
7f622593 3468 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
e565c312 3469 break;
a226b519
BM
3470 case QCA9377_1_0_DEVICE_ID:
3471 hw_rev = ATH10K_HW_QCA9377;
3472 pci_ps = true;
0fc7e270
VT
3473 pci_soft_reset = NULL;
3474 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
7f622593 3475 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
a226b519 3476 break;
d63955b3
MK
3477 default:
3478 WARN_ON(1);
3479 return -ENOTSUPP;
3480 }
3481
3482 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3483 hw_rev, &ath10k_pci_hif_ops);
2986e3ef 3484 if (!ar) {
7aa7a72a 3485 dev_err(&pdev->dev, "failed to allocate core\n");
2986e3ef
MK
3486 return -ENOMEM;
3487 }
3488
0a51b343
MP
3489 ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3490 pdev->vendor, pdev->device,
3491 pdev->subsystem_vendor, pdev->subsystem_device);
7aa7a72a 3492
2986e3ef
MK
3493 ar_pci = ath10k_pci_priv(ar);
3494 ar_pci->pdev = pdev;
3495 ar_pci->dev = &pdev->dev;
3496 ar_pci->ar = ar;
36582e5d 3497 ar->dev_id = pci_dev->device;
1aaf8efb 3498 ar_pci->pci_ps = pci_ps;
641fe28a 3499 ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
0fc7e270
VT
3500 ar_pci->pci_soft_reset = pci_soft_reset;
3501 ar_pci->pci_hard_reset = pci_hard_reset;
7f622593 3502 ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
641fe28a 3503 ar->ce_priv = &ar_pci->ce;
5e3dd157 3504
0a51b343
MP
3505 ar->id.vendor = pdev->vendor;
3506 ar->id.device = pdev->device;
3507 ar->id.subsystem_vendor = pdev->subsystem_vendor;
3508 ar->id.subsystem_device = pdev->subsystem_device;
de57e2c8 3509
7ac76764 3510 timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
5e3dd157 3511
90188f80 3512 ret = ath10k_pci_setup_resource(ar);
e01ae68c 3513 if (ret) {
90188f80 3514 ath10k_err(ar, "failed to setup resource: %d\n", ret);
2986e3ef 3515 goto err_core_destroy;
e01ae68c
KV
3516 }
3517
90188f80 3518 ret = ath10k_pci_claim(ar);
25d0dbcb 3519 if (ret) {
90188f80
RM
3520 ath10k_err(ar, "failed to claim device: %d\n", ret);
3521 goto err_free_pipes;
25d0dbcb
MK
3522 }
3523
d9d6a5ae
RM
3524 ret = ath10k_pci_force_wake(ar);
3525 if (ret) {
3526 ath10k_warn(ar, "failed to wake up device : %d\n", ret);
90188f80 3527 goto err_sleep;
1aaf8efb
AK
3528 }
3529
aa538aca
RM
3530 ath10k_pci_ce_deinit(ar);
3531 ath10k_pci_irq_disable(ar);
3532
403d627b 3533 ret = ath10k_pci_init_irq(ar);
5e3dd157 3534 if (ret) {
7aa7a72a 3535 ath10k_err(ar, "failed to init irqs: %d\n", ret);
90188f80 3536 goto err_sleep;
5e3dd157
KV
3537 }
3538
cfe9011a
RM
3539 ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3540 ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
403d627b
MK
3541 ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3542
5c771e74
MK
3543 ret = ath10k_pci_request_irq(ar);
3544 if (ret) {
7aa7a72a 3545 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
5c771e74
MK
3546 goto err_deinit_irq;
3547 }
3548
1a7fecb7
MK
3549 ret = ath10k_pci_chip_reset(ar);
3550 if (ret) {
3551 ath10k_err(ar, "failed to reset chip: %d\n", ret);
3552 goto err_free_irq;
3553 }
3554
3555 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3556 if (chip_id == 0xffffffff) {
3557 ath10k_err(ar, "failed to get chip id\n");
3558 goto err_free_irq;
3559 }
3560
3561 if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
3562 ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
3563 pdev->device, chip_id);
d9585a92 3564 goto err_free_irq;
1a7fecb7
MK
3565 }
3566
e01ae68c 3567 ret = ath10k_core_register(ar, chip_id);
5e3dd157 3568 if (ret) {
7aa7a72a 3569 ath10k_err(ar, "failed to register driver core: %d\n", ret);
5c771e74 3570 goto err_free_irq;
5e3dd157
KV
3571 }
3572
3573 return 0;
3574
5c771e74
MK
3575err_free_irq:
3576 ath10k_pci_free_irq(ar);
3c97f5de 3577 ath10k_pci_rx_retry_sync(ar);
5c771e74 3578
403d627b
MK
3579err_deinit_irq:
3580 ath10k_pci_deinit_irq(ar);
3581
c0c378f9 3582err_sleep:
0bcbbe67 3583 ath10k_pci_sleep_sync(ar);
2986e3ef
MK
3584 ath10k_pci_release(ar);
3585
90188f80
RM
3586err_free_pipes:
3587 ath10k_pci_free_pipes(ar);
3588
e7b54194 3589err_core_destroy:
5e3dd157 3590 ath10k_core_destroy(ar);
5e3dd157
KV
3591
3592 return ret;
3593}
3594
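/* Device teardown mirrors probe: unregister from the core first so
 * mac80211 stops using the device, then free IRQs and release PCI
 * resources.
 */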
3595static void ath10k_pci_remove(struct pci_dev *pdev)
3596{
3597 struct ath10k *ar = pci_get_drvdata(pdev);
3598 struct ath10k_pci *ar_pci;
3599
3600 if (!ar)
3601 return;
3602
7aa7a72a 3603 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
5e3dd157
KV
3604
3605 ar_pci = ath10k_pci_priv(ar);
3606
3607 if (!ar_pci)
3608 return;
3609
5e3dd157 3610 ath10k_core_unregister(ar);
5c771e74 3611 ath10k_pci_free_irq(ar);
403d627b 3612 ath10k_pci_deinit_irq(ar);
90188f80 3613 ath10k_pci_release_resource(ar);
77258d40 3614 ath10k_pci_sleep_sync(ar);
2986e3ef 3615 ath10k_pci_release(ar);
5e3dd157 3616 ath10k_core_destroy(ar);
5e3dd157
KV
3617}
3618
5e3dd157
KV
3619MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3620
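/* System sleep hooks: these simply forward to the HIF suspend/resume
 * helpers. SIMPLE_DEV_PM_OPS() below only wires them up when
 * CONFIG_PM_SLEEP is enabled, hence the __maybe_unused annotations.
 */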
6af1de2e 3621static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
32faa3f0
RH
3622{
3623 struct ath10k *ar = dev_get_drvdata(dev);
3624 int ret;
3625
96378bd2 3626 ret = ath10k_pci_suspend(ar);
32faa3f0
RH
3627 if (ret)
3628 ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
3629
3630 return ret;
3631}
3632
6af1de2e 3633static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
32faa3f0
RH
3634{
3635 struct ath10k *ar = dev_get_drvdata(dev);
3636 int ret;
3637
96378bd2 3638 ret = ath10k_pci_resume(ar);
32faa3f0
RH
3639 if (ret)
3640 ath10k_warn(ar, "failed to resume hif: %d\n", ret);
3641
3642 return ret;
3643}
3644
3645static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
3646 ath10k_pci_pm_suspend,
3647 ath10k_pci_pm_resume);
32faa3f0 3648
5e3dd157
KV
3649static struct pci_driver ath10k_pci_driver = {
3650 .name = "ath10k_pci",
3651 .id_table = ath10k_pci_id_table,
3652 .probe = ath10k_pci_probe,
3653 .remove = ath10k_pci_remove,
32faa3f0
RH
3654#ifdef CONFIG_PM
3655 .driver.pm = &ath10k_pci_pm_ops,
3656#endif
5e3dd157
KV
3657};
3658
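/* The AHB (QCA4019) support lives in this module as well, so module
 * init/exit register and unregister both the PCI and the AHB glue.
 */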
3659static int __init ath10k_pci_init(void)
3660{
3661 int ret;
3662
3663 ret = pci_register_driver(&ath10k_pci_driver);
3664 if (ret)
7aa7a72a
MK
3665 printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
3666 ret);
5e3dd157 3667
0b523ced
RM
3668 ret = ath10k_ahb_init();
3669 if (ret)
3670 printk(KERN_ERR "ahb init failed: %d\n", ret);
3671
5e3dd157
KV
3672 return ret;
3673}
3674module_init(ath10k_pci_init);
3675
3676static void __exit ath10k_pci_exit(void)
3677{
3678 pci_unregister_driver(&ath10k_pci_driver);
0b523ced 3679 ath10k_ahb_exit();
5e3dd157
KV
3680}
3681
3682module_exit(ath10k_pci_exit);
3683
3684MODULE_AUTHOR("Qualcomm Atheros");
b855de0f 3685MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
5e3dd157 3686MODULE_LICENSE("Dual BSD/GPL");
5c427f5c
BM
3687
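/* The MODULE_FIRMWARE() entries below advertise which firmware and
 * board files this driver may request at runtime, so that tools such
 * as initramfs generators can bundle them; the core tries the newest
 * firmware API first and falls back to older ones.
 */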
3688/* QCA988x 2.0 firmware files */
8026cae7
BM
3689MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3690MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
5c427f5c 3691MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
53513c30 3692MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
5e3dd157 3693MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
0a51b343 3694MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
5c427f5c 3695
6fd3dd71
SE
3696/* QCA9887 1.0 firmware files */
3697MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3698MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
3699MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3700
5c427f5c
BM
3701/* QCA6174 2.1 firmware files */
3702MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
e451c1db 3703MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
5c427f5c 3704MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
0a51b343 3705MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
5c427f5c
BM
3706
3707/* QCA6174 3.1 firmware files */
3708MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
e451c1db 3709MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
aad1fd7f 3710MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
5c427f5c 3711MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
0a51b343 3712MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
a226b519
BM
3713
3714/* QCA9377 1.0 firmware files */
3715MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3716MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);