/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
        ATH10K_PCI_IRQ_AUTO = 0,
        ATH10K_PCI_IRQ_LEGACY = 1,
        ATH10K_PCI_IRQ_MSI = 2,
};

static unsigned int ath10k_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;

module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
static void ath10k_pci_cleanup_ce(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};
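
/*
 * The host-side CE attributes above must stay in sync, pipe for pipe,
 * with the target firmware pipe configuration below.
 */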

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};
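
/*
 * With a shared legacy (INTx) interrupt, the cause register has to be
 * consulted to tell whether the line fired for us at all; MSI does not
 * need this. The helpers below check, mask/acknowledge and unmask the
 * legacy interrupt sources (firmware and Copy Engine).
 */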

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: the INTR_CLR register has to be written after
         * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
         * cleared reliably. */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}
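
/*
 * The early interrupt handler is installed before the full CE interrupt
 * machinery is set up. It does no real work itself; it only schedules
 * the early tasklet so firmware error indications raised during startup
 * are not lost.
 */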

static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->num_msi_intrs == 0) {
                if (!ath10k_pci_irq_pending(ar))
                        return IRQ_NONE;

                ath10k_pci_disable_and_clear_legacy_irq(ar);
        }

        tasklet_schedule(&ar_pci->early_irq_tasklet);

        return IRQ_HANDLED;
}

static int ath10k_pci_request_early_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        /* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
         * first interrupt from the irq vector is triggered in all cases for
         * FW indication/errors */
        ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
                          IRQF_SHARED, "ath10k_pci (early)", ar);
        if (ret) {
                ath10k_warn("failed to request early irq: %d\n", ret);
                return ret;
        }

        return 0;
}

static void ath10k_pci_free_early_irq(struct ath10k *ar)
{
        free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
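/*
 * Both helpers below go through the dedicated diagnostic CE (CE7): data
 * is staged in a DMA-coherent host bounce buffer and moved to/from
 * target memory in chunks of at most DIAG_TRANSFER_LIMIT bytes, with
 * the CE polled for completion.
 */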
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read fn but preserve the multi-word read capability of
         * this fn.
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                          ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = pci_alloc_consistent(ar_pci->pdev, orig_nbytes,
                                        &ce_data_base);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32)address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else {
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);
        }

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = pci_alloc_consistent(ar_pci->pdev, orig_nbytes,
                                        &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;

        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}
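
/*
 * Wake/sleep calls are reference counted via keep_awake_count: the
 * first ath10k_do_pci_wake() forces the target AWAKE, and the target is
 * only allowed to sleep again once every wake has been balanced by an
 * ath10k_do_pci_sleep().
 */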

int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[pipe_id];
        struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        unsigned int len;
        u32 flags = 0;
        int ret;

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long)skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
                             flags);
        if (ret)
                ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);

        return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        ret = ath10k_pci_diag_read_mem(ar, host_addr,
                                       &reg_dump_area, sizeof(u32));
        if (ret) {
                ath10k_err("failed to read FW dump area address: %d\n", ret);
                return;
        }

        ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("failed to read FW dump area: %d\n", ret);
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target register dump\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_alloc_compl(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ar_pci->ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries)
                        completions += attr->src_nentries;

                if (attr->dest_nentries)
                        completions += attr->dest_nentries;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_cleanup_ce(ar);
                                return -ENOMEM;
                        }

                        compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}

static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num, disable_interrupts;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ar_pci->ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                }

                if (attr->dest_nentries)
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
        }

        return 0;
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        tasklet_kill(&ar_pci->intr_tq);
        tasklet_kill(&ar_pci->msi_fw_err);
        tasklet_kill(&ar_pci->early_irq_tasklet);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = compl->skb;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = compl->skb;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}
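
/*
 * Drain the shared completion queue, invoking the HIF tx/rx completion
 * callbacks, and recycle each entry back onto its pipe's free list.
 * Only one context processes completions at a time (compl_processing).
 */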

static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                switch (compl->state) {
                case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
                                          compl->skb,
                                          compl->transfer_id);
                        send_done = 1;
                        break;
                case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
                                            compl->pipe_info->pipe_num, ret);
                                break;
                        }

                        skb = compl->skb;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                        break;
                case ATH10K_PCI_COMPL_FREE:
                        ath10k_warn("free completion cannot be processed\n");
                        break;
                default:
                        ath10k_warn("invalid completion state (%d)\n",
                                    compl->state);
                        break;
                }

                compl->state = ATH10K_PCI_COMPL_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services.  So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -EINVAL;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("failed to allocate skbuff for pipe %d\n",
                                    pipe_info->pipe_num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("failed to DMA map sk_buff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("failed to enqueue to pipe %d: %d\n",
                                    pipe_info->pipe_num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}
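
/*
 * Prime every RX-capable pipe with dest_nentries - 1 buffers; one ring
 * slot is left empty so that a completely full ring can be told apart
 * from an empty one.
 */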

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
                                    pipe_num, ret);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret, ret_early;

        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);

        ret = ath10k_pci_alloc_compl(ar);
        if (ret) {
                ath10k_warn("failed to allocate CE completions: %d\n", ret);
                goto err_early_irq;
        }

        ret = ath10k_pci_request_irq(ar);
        if (ret) {
                ath10k_warn("failed to request irqs: %d\n", ret);
                goto err_free_compl;
        }

        ret = ath10k_pci_setup_ce_irq(ar);
        if (ret) {
                ath10k_warn("failed to setup CE interrupts: %d\n", ret);
                goto err_stop;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("failed to post RX buffers for all pipes: %d\n",
                            ret);
                goto err_stop;
        }

        ar_pci->started = 1;
        return 0;

err_stop:
        ath10k_ce_disable_interrupts(ar);
        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_stop_ce(ar);
        ath10k_pci_process_ce(ar);
err_free_compl:
        ath10k_pci_cleanup_ce(ar);
err_early_irq:
        /* Though there should be no interrupts (device was reset)
         * power_down() expects the early IRQ to be installed as per the
         * driver lifecycle. */
        ret_early = ath10k_pci_request_early_irq(ar);
        if (ret_early)
                ath10k_warn("failed to re-enable early irq: %d\n", ret_early);

        return ret;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                /*
                 * Indicate the completion to higher layer to free
                 * the buffer
                 */

                if (!netbuf) {
                        ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
                                    ce_hdl->id);
                        continue;
                }

                ATH10K_SKB_CB(netbuf)->is_aborted = true;
                ar_pci->msg_callbacks_current.tx_completion(ar,
                                                            netbuf,
                                                            id);
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

1472 static void ath10k_pci_hif_stop(struct ath10k *ar)
1473 {
1474         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1475         int ret;
1476
1477         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1478
1479         ret = ath10k_ce_disable_interrupts(ar);
1480         if (ret)
1481                 ath10k_warn("failed to disable CE interrupts: %d\n", ret);
1482
1483         ath10k_pci_free_irq(ar);
1484         ath10k_pci_kill_tasklet(ar);
1485         ath10k_pci_stop_ce(ar);
1486
1487         ret = ath10k_pci_request_early_irq(ar);
1488         if (ret)
1489                 ath10k_warn("failed to re-enable early irq: %d\n", ret);
1490
1491         /* At this point the asynchronous threads are stopped and the target
1492          * should neither DMA nor interrupt. Process the leftovers and then
1493          * free everything else up. */
1494
1495         ath10k_pci_process_ce(ar);
1496         ath10k_pci_cleanup_ce(ar);
1497         ath10k_pci_buffer_cleanup(ar);
1498
1499         /* Make sure the device won't access any structures on the host by
1500          * resetting it. The device was fed the PCI CE ringbuffer
1501          * configuration during init. If the ringbuffers were freed and the
1502          * device were to access them, this could lead to memory corruption
1503          * on the host. */
1504         ath10k_pci_warm_reset(ar);
1505
1506         ar_pci->started = 0;
1507 }
1508
1509 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1510                                            void *req, u32 req_len,
1511                                            void *resp, u32 *resp_len)
1512 {
1513         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1514         struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1515         struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1516         struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1517         struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1518         dma_addr_t req_paddr = 0;
1519         dma_addr_t resp_paddr = 0;
1520         struct bmi_xfer xfer = {};
1521         void *treq, *tresp = NULL;
1522         int ret = 0;
1523
1524         might_sleep();
1525
1526         if (resp && !resp_len)
1527                 return -EINVAL;
1528
1529         if (resp && resp_len && *resp_len == 0)
1530                 return -EINVAL;
1531
1532         treq = kmemdup(req, req_len, GFP_KERNEL);
1533         if (!treq)
1534                 return -ENOMEM;
1535
1536         req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1537         ret = dma_mapping_error(ar->dev, req_paddr);
1538         if (ret)
1539                 goto err_dma;
1540
1541         if (resp && resp_len) {
1542                 tresp = kzalloc(*resp_len, GFP_KERNEL);
1543                 if (!tresp) {
1544                         ret = -ENOMEM;
1545                         goto err_req;
1546                 }
1547
1548                 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1549                                             DMA_FROM_DEVICE);
1550                 ret = dma_mapping_error(ar->dev, resp_paddr);
1551                 if (ret)
1552                         goto err_req;
1553
1554                 xfer.wait_for_resp = true;
1555                 xfer.resp_len = 0;
1556
1557                 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1558         }
1559
1560         init_completion(&xfer.done);
1561
1562         ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1563         if (ret)
1564                 goto err_resp;
1565
1566         ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1567         if (ret) {
1568                 u32 unused_buffer;
1569                 unsigned int unused_nbytes;
1570                 unsigned int unused_id;
1571
1572                 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1573                                            &unused_nbytes, &unused_id);
1574         } else {
1575                 /* zero means the transfer completed before the timeout */
1576                 ret = 0;
1577         }
1578
1579 err_resp:
1580         if (resp) {
1581                 u32 unused_buffer;
1582
1583                 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1584                 dma_unmap_single(ar->dev, resp_paddr,
1585                                  *resp_len, DMA_FROM_DEVICE);
1586         }
1587 err_req:
1588         dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1589
1590         if (ret == 0 && resp_len) {
1591                 *resp_len = min(*resp_len, xfer.resp_len);
1592                 memcpy(resp, tresp, *resp_len);
1593         }
1594 err_dma:
1595         kfree(treq);
1596         kfree(tresp);
1597
1598         return ret;
1599 }
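
/*
 * Illustrative sketch (compiled out): the streaming DMA pattern used by
 * ath10k_pci_hif_exchange_bmi_msg() above, reduced to its essentials.
 * example_send_to_device() is hypothetical; kmemdup(), dma_map_single(),
 * dma_mapping_error() and dma_unmap_single() are the standard kernel APIs
 * used by the function above.
 */
#if 0
static int example_dma_send(struct device *dev, const void *buf, size_t len)
{
	dma_addr_t paddr;
	void *bounce;
	int ret;

	/* copy into a kmalloc'ed bounce buffer that is safe to DMA-map */
	bounce = kmemdup(buf, len, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	/* map for device reads and always check for mapping failure */
	paddr = dma_map_single(dev, bounce, len, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, paddr);
	if (ret)
		goto out_free;

	ret = example_send_to_device(dev, paddr, len); /* hypothetical */

	dma_unmap_single(dev, paddr, len, DMA_TO_DEVICE);
out_free:
	kfree(bounce);
	return ret;
}
#endif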
1600
1601 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1602 {
1603         struct bmi_xfer *xfer;
1604         u32 ce_data;
1605         unsigned int nbytes;
1606         unsigned int transfer_id;
1607
1608         if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1609                                           &nbytes, &transfer_id))
1610                 return;
1611
1612         if (xfer->wait_for_resp)
1613                 return;
1614
1615         complete(&xfer->done);
1616 }
1617
1618 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1619 {
1620         struct bmi_xfer *xfer;
1621         u32 ce_data;
1622         unsigned int nbytes;
1623         unsigned int transfer_id;
1624         unsigned int flags;
1625
1626         if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1627                                           &nbytes, &transfer_id, &flags))
1628                 return;
1629
1630         if (!xfer->wait_for_resp) {
1631                 ath10k_warn("unexpected: BMI data received; ignoring\n");
1632                 return;
1633         }
1634
1635         xfer->resp_len = nbytes;
1636         complete(&xfer->done);
1637 }
1638
1639 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1640                                struct ath10k_ce_pipe *rx_pipe,
1641                                struct bmi_xfer *xfer)
1642 {
1643         unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1644
1645         while (time_before_eq(jiffies, timeout)) {
1646                 ath10k_pci_bmi_send_done(tx_pipe);
1647                 ath10k_pci_bmi_recv_data(rx_pipe);
1648
1649                 if (completion_done(&xfer->done))
1650                         return 0;
1651
1652                 schedule();
1653         }
1654
1655         return -ETIMEDOUT;
1656 }
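
/*
 * Illustrative sketch (compiled out): the jiffies-based polling idiom used
 * by ath10k_pci_bmi_wait() above, in generic form. The kick() callback is
 * hypothetical; time_before_eq(), completion_done() and schedule() are
 * standard kernel primitives.
 */
#if 0
static int example_poll_completion(struct completion *done,
				   void (*kick)(void *arg), void *arg,
				   unsigned long timeout_hz)
{
	unsigned long timeout = jiffies + timeout_hz;

	while (time_before_eq(jiffies, timeout)) {
		kick(arg);		/* service the pipes by hand */

		if (completion_done(done))
			return 0;

		schedule();		/* yield between polls */
	}

	return -ETIMEDOUT;
}
#endif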
1657
1658 /*
1659  * Map from service/endpoint to Copy Engine.
1660  * This table is derived from the CE_PCI TABLE, above.
1661  * It is passed to the Target at startup for use by firmware.
1662  */
1663 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1664         {
1665                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1666                  PIPEDIR_OUT,           /* out = UL = host -> target */
1667                  3,
1668         },
1669         {
1670                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1671                  PIPEDIR_IN,            /* in = DL = target -> host */
1672                  2,
1673         },
1674         {
1675                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1676                  PIPEDIR_OUT,           /* out = UL = host -> target */
1677                  3,
1678         },
1679         {
1680                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1681                  PIPEDIR_IN,            /* in = DL = target -> host */
1682                  2,
1683         },
1684         {
1685                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1686                  PIPEDIR_OUT,           /* out = UL = host -> target */
1687                  3,
1688         },
1689         {
1690                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1691                  PIPEDIR_IN,            /* in = DL = target -> host */
1692                  2,
1693         },
1694         {
1695                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1696                  PIPEDIR_OUT,           /* out = UL = host -> target */
1697                  3,
1698         },
1699         {
1700                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1701                  PIPEDIR_IN,            /* in = DL = target -> host */
1702                  2,
1703         },
1704         {
1705                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1706                  PIPEDIR_OUT,           /* out = UL = host -> target */
1707                  3,
1708         },
1709         {
1710                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1711                  PIPEDIR_IN,            /* in = DL = target -> host */
1712                  2,
1713         },
1714         {
1715                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1716                  PIPEDIR_OUT,           /* out = UL = host -> target */
1717                  0,             /* could be moved to 3 (share with WMI) */
1718         },
1719         {
1720                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1721                  PIPEDIR_IN,            /* in = DL = target -> host */
1722                  1,
1723         },
1724         {
1725                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1726                  PIPEDIR_OUT,           /* out = UL = host -> target */
1727                  0,
1728         },
1729         {
1730                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1731                  PIPEDIR_IN,            /* in = DL = target -> host */
1732                  1,
1733         },
1734         {
1735                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1736                  PIPEDIR_OUT,           /* out = UL = host -> target */
1737                  4,
1738         },
1739         {
1740                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1741                  PIPEDIR_IN,            /* in = DL = target -> host */
1742                  1,
1743         },
1744
1745         /* (Additions here) */
1746
1747         {                               /* Must be last */
1748                  0,
1749                  0,
1750                  0,
1751         },
1752 };
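
/*
 * Illustrative sketch (compiled out): how a terminated map like the one
 * above can be walked to resolve a service/direction pair to a pipe. The
 * helper is hypothetical and the struct service_to_pipe field names
 * (service_id, pipedir, pipenum) are assumed from the positional
 * initializers above; the driver's real lookup is
 * ath10k_pci_hif_map_service_to_pipe().
 */
#if 0
static int example_svc_to_pipe(const struct service_to_pipe *map,
			       u32 service_id, u32 pipedir, u32 *pipenum)
{
	/* the all-zero entry terminates the table */
	for (; map->service_id; map++) {
		if (map->service_id == service_id &&
		    map->pipedir == pipedir) {
			*pipenum = map->pipenum;
			return 0;
		}
	}

	return -ENOENT;
}
#endif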
1753
1754 /*
1755  * Send an interrupt to the device to wake up the Target CPU
1756  * so it has an opportunity to notice any changed state.
1757  */
1758 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1759 {
1760         int ret;
1761         u32 core_ctrl;
1762
1763         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1764                                               CORE_CTRL_ADDRESS,
1765                                           &core_ctrl);
1766         if (ret) {
1767                 ath10k_warn("failed to read core_ctrl: %d\n", ret);
1768                 return ret;
1769         }
1770
1771         /* A_INUM_FIRMWARE interrupt to Target CPU */
1772         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1773
1774         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1775                                                CORE_CTRL_ADDRESS,
1776                                            core_ctrl);
1777         if (ret) {
1778                 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1779                             ret);
1780                 return ret;
1781         }
1782
1783         return 0;
1784 }
1785
1786 static int ath10k_pci_init_config(struct ath10k *ar)
1787 {
1788         u32 interconnect_targ_addr;
1789         u32 pcie_state_targ_addr = 0;
1790         u32 pipe_cfg_targ_addr = 0;
1791         u32 svc_to_pipe_map = 0;
1792         u32 pcie_config_flags = 0;
1793         u32 ealloc_value;
1794         u32 ealloc_targ_addr;
1795         u32 flag2_value;
1796         u32 flag2_targ_addr;
1797         int ret = 0;
1798
1799         /* Download to Target the CE Config and the service-to-CE map */
1800         interconnect_targ_addr =
1801                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1802
1803         /* Supply Target-side CE configuration */
1804         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1805                                           &pcie_state_targ_addr);
1806         if (ret != 0) {
1807                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1808                 return ret;
1809         }
1810
1811         if (pcie_state_targ_addr == 0) {
1812                 ret = -EIO;
1813                 ath10k_err("Invalid pcie state addr\n");
1814                 return ret;
1815         }
1816
1817         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1818                                           offsetof(struct pcie_state,
1819                                                    pipe_cfg_addr),
1820                                           &pipe_cfg_targ_addr);
1821         if (ret != 0) {
1822                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1823                 return ret;
1824         }
1825
1826         if (pipe_cfg_targ_addr == 0) {
1827                 ret = -EIO;
1828                 ath10k_err("Invalid pipe cfg addr\n");
1829                 return ret;
1830         }
1831
1832         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1833                                  target_ce_config_wlan,
1834                                  sizeof(target_ce_config_wlan));
1835
1836         if (ret != 0) {
1837                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1838                 return ret;
1839         }
1840
1841         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1842                                           offsetof(struct pcie_state,
1843                                                    svc_to_pipe_map),
1844                                           &svc_to_pipe_map);
1845         if (ret != 0) {
1846                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1847                 return ret;
1848         }
1849
1850         if (svc_to_pipe_map == 0) {
1851                 ret = -EIO;
1852                 ath10k_err("Invalid svc_to_pipe map\n");
1853                 return ret;
1854         }
1855
1856         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1857                                  target_service_to_ce_map_wlan,
1858                                  sizeof(target_service_to_ce_map_wlan));
1859         if (ret != 0) {
1860                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1861                 return ret;
1862         }
1863
1864         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1865                                           offsetof(struct pcie_state,
1866                                                    config_flags),
1867                                           &pcie_config_flags);
1868         if (ret != 0) {
1869                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1870                 return ret;
1871         }
1872
1873         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1874
1875         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1876                                  offsetof(struct pcie_state, config_flags),
1877                                  &pcie_config_flags,
1878                                  sizeof(pcie_config_flags));
1879         if (ret != 0) {
1880                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1881                 return ret;
1882         }
1883
1884         /* configure early allocation */
1885         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1886
1887         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1888         if (ret != 0) {
1889                 ath10k_err("Failed to get early alloc val: %d\n", ret);
1890                 return ret;
1891         }
1892
1893         /* first bank is switched to IRAM */
1894         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1895                          HI_EARLY_ALLOC_MAGIC_MASK);
1896         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1897                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1898
1899         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1900         if (ret != 0) {
1901                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1902                 return ret;
1903         }
1904
1905         /* Tell Target to proceed with initialization */
1906         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1907
1908         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1909         if (ret != 0) {
1910                 ath10k_err("Failed to get option val: %d\n", ret);
1911                 return ret;
1912         }
1913
1914         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1915
1916         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1917         if (ret != 0) {
1918                 ath10k_err("Failed to set option val: %d\n", ret);
1919                 return ret;
1920         }
1921
1922         return 0;
1923 }
1924
1925
1926
1927 static int ath10k_pci_ce_init(struct ath10k *ar)
1928 {
1929         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1930         struct ath10k_pci_pipe *pipe_info;
1931         const struct ce_attr *attr;
1932         int pipe_num;
1933
1934         for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1935                 pipe_info = &ar_pci->pipe_info[pipe_num];
1936                 pipe_info->pipe_num = pipe_num;
1937                 pipe_info->hif_ce_state = ar;
1938                 attr = &host_ce_config_wlan[pipe_num];
1939
1940                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1941                 if (pipe_info->ce_hdl == NULL) {
1942                         ath10k_err("failed to initialize CE for pipe: %d\n",
1943                                    pipe_num);
1944
1945                         /* It is safe to call this here; it checks
1946                          * whether ce_hdl is valid for each pipe. */
1947                         ath10k_pci_ce_deinit(ar);
1948                         return -ENOMEM;
1949                 }
1950
1951                 if (pipe_num == CE_COUNT - 1) {
1952                         /*
1953                          * Reserve the last CE for
1954                          * diagnostic window support.
1955                          */
1956                         ar_pci->ce_diag = pipe_info->ce_hdl;
1957                         continue;
1958                 }
1959
1960                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1961         }
1962
1963         return 0;
1964 }
1965
1966 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1967 {
1968         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1969         u32 fw_indicator_address, fw_indicator;
1970
1971         ath10k_pci_wake(ar);
1972
1973         fw_indicator_address = ar_pci->fw_indicator_address;
1974         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1975
1976         if (fw_indicator & FW_IND_EVENT_PENDING) {
1977                 /* ACK: clear Target-side pending event */
1978                 ath10k_pci_write32(ar, fw_indicator_address,
1979                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1980
1981                 if (ar_pci->started) {
1982                         ath10k_pci_hif_dump_area(ar);
1983                 } else {
1984                         /*
1985                          * Probable Target failure before we're prepared
1986                          * to handle it.  Generally unexpected.
1987                          */
1988                         ath10k_warn("early firmware event indicated\n");
1989                 }
1990         }
1991
1992         ath10k_pci_sleep(ar);
1993 }
1994
1995 static int ath10k_pci_warm_reset(struct ath10k *ar)
1996 {
1997         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1998         int ret = 0;
1999         u32 val;
2000
2001         ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
2002
2003         ret = ath10k_do_pci_wake(ar);
2004         if (ret) {
2005                 ath10k_err("failed to wake up target: %d\n", ret);
2006                 return ret;
2007         }
2008
2009         /* debug: log the pre-reset interrupt state */
2010         val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
2011                                 PCIE_INTR_CAUSE_ADDRESS);
2012         ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
2013
2014         val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
2015                                 CPU_INTR_ADDRESS);
2016         ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
2017                    val);
2018
2019         /* disable pending irqs */
2020         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2021                            PCIE_INTR_ENABLE_ADDRESS, 0);
2022
2023         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2024                            PCIE_INTR_CLR_ADDRESS, ~0);
2025
2026         msleep(100);
2027
2028         /* clear fw indicator */
2029         ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
2030
2031         /* clear target LF timer interrupts */
2032         val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2033                                 SOC_LF_TIMER_CONTROL0_ADDRESS);
2034         ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2035                            SOC_LF_TIMER_CONTROL0_ADDRESS,
2036                            val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2037
2038         /* reset CE */
2039         val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2040                                 SOC_RESET_CONTROL_ADDRESS);
2041         ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2042                            val | SOC_RESET_CONTROL_CE_RST_MASK);
2043         val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2044                                 SOC_RESET_CONTROL_ADDRESS);
2045         msleep(10);
2046
2047         /* unreset CE */
2048         ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2049                            val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2050         val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2051                                 SOC_RESET_CONTROL_ADDRESS);
2052         msleep(10);
2053
2054         /* debug: log the interrupt state after the CE reset */
2055         val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
2056                                 PCIE_INTR_CAUSE_ADDRESS);
2057         ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
2058
2059         val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
2060                                 CPU_INTR_ADDRESS);
2061         ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
2062                    val);
2063
2064         /* CPU warm reset */
2065         val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2066                                 SOC_RESET_CONTROL_ADDRESS);
2067         ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2068                            val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2069
2070         val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2071                                 SOC_RESET_CONTROL_ADDRESS);
2072         ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
2073
2074         msleep(100);
2075
2076         ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
2077
2078         ath10k_do_pci_sleep(ar);
2079         return ret;
2080 }
2081
2082 static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
2083 {
2084         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2085         const char *irq_mode;
2086         int ret;
2087
2088         /*
2089          * Bring the target up cleanly.
2090          *
2091          * The target may be in an undefined state with an AUX-powered Target
2092          * and a Host in WoW mode. If the Host crashes, loses power, or is
2093          * restarted (without unloading the driver) then the Target is left
2094          * (aux) powered and running. On a subsequent driver load, the Target
2095          * is in an unexpected state. We try to catch that here in order to
2096          * reset the Target and retry the probe.
2097          */
2098         if (cold_reset)
2099                 ret = ath10k_pci_cold_reset(ar);
2100         else
2101                 ret = ath10k_pci_warm_reset(ar);
2102
2103         if (ret) {
2104                 ath10k_err("failed to reset target: %d\n", ret);
2105                 goto err;
2106         }
2107
2108         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2109                 /* Force AWAKE forever */
2110                 ath10k_do_pci_wake(ar);
2111
2112         ret = ath10k_pci_ce_init(ar);
2113         if (ret) {
2114                 ath10k_err("failed to initialize CE: %d\n", ret);
2115                 goto err_ps;
2116         }
2117
2118         ret = ath10k_ce_disable_interrupts(ar);
2119         if (ret) {
2120                 ath10k_err("failed to disable CE interrupts: %d\n", ret);
2121                 goto err_ce;
2122         }
2123
2124         ret = ath10k_pci_init_irq(ar);
2125         if (ret) {
2126                 ath10k_err("failed to init irqs: %d\n", ret);
2127                 goto err_ce;
2128         }
2129
2130         ret = ath10k_pci_request_early_irq(ar);
2131         if (ret) {
2132                 ath10k_err("failed to request early irq: %d\n", ret);
2133                 goto err_deinit_irq;
2134         }
2135
2136         ret = ath10k_pci_wait_for_target_init(ar);
2137         if (ret) {
2138                 ath10k_err("failed to wait for target to init: %d\n", ret);
2139                 goto err_free_early_irq;
2140         }
2141
2142         ret = ath10k_pci_init_config(ar);
2143         if (ret) {
2144                 ath10k_err("failed to setup init config: %d\n", ret);
2145                 goto err_free_early_irq;
2146         }
2147
2148         ret = ath10k_pci_wake_target_cpu(ar);
2149         if (ret) {
2150                 ath10k_err("could not wake up target CPU: %d\n", ret);
2151                 goto err_free_early_irq;
2152         }
2153
2154         if (ar_pci->num_msi_intrs > 1)
2155                 irq_mode = "MSI-X";
2156         else if (ar_pci->num_msi_intrs == 1)
2157                 irq_mode = "MSI";
2158         else
2159                 irq_mode = "legacy";
2160
2161         if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2162                 ath10k_info("pci irq %s\n", irq_mode);
2163
2164         return 0;
2165
2166 err_free_early_irq:
2167         ath10k_pci_free_early_irq(ar);
2168 err_deinit_irq:
2169         ath10k_pci_deinit_irq(ar);
2170 err_ce:
2171         ath10k_pci_ce_deinit(ar);
2172         ath10k_pci_warm_reset(ar);
2173 err_ps:
2174         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2175                 ath10k_do_pci_sleep(ar);
2176 err:
2177         return ret;
2178 }
2179
2180 static int ath10k_pci_hif_power_up(struct ath10k *ar)
2181 {
2182         int ret;
2183
2184         /*
2185          * Hardware CUS232 version 2 has some issues with cold reset and the
2186          * preferred (and safer) way to perform a device reset is through a
2187          * warm reset.
2188          *
2189          * Warm reset doesn't always work though (notably after a firmware
2190          * crash) so fall back to cold reset if necessary.
2191          */
2192         ret = __ath10k_pci_hif_power_up(ar, false);
2193         if (ret) {
2194                 ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
2195                             ret);
2196
2197                 ret = __ath10k_pci_hif_power_up(ar, true);
2198                 if (ret) {
2199                         ath10k_err("failed to power up target using cold reset too (%d)\n",
2200                                    ret);
2201                         return ret;
2202                 }
2203         }
2204
2205         return 0;
2206 }
2207
2208 static void ath10k_pci_hif_power_down(struct ath10k *ar)
2209 {
2210         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2211
2212         ath10k_pci_free_early_irq(ar);
2213         ath10k_pci_kill_tasklet(ar);
2214         ath10k_pci_deinit_irq(ar);
2215         ath10k_pci_warm_reset(ar);
2216
2217         ath10k_pci_ce_deinit(ar);
2218         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2219                 ath10k_do_pci_sleep(ar);
2220 }
2221
2222 #ifdef CONFIG_PM
2223
2224 #define ATH10K_PCI_PM_CONTROL 0x44
2225
2226 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2227 {
2228         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2229         struct pci_dev *pdev = ar_pci->pdev;
2230         u32 val;
2231
2232         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2233
2234         if ((val & 0x000000ff) != 0x3) {
2235                 pci_save_state(pdev);
2236                 pci_disable_device(pdev);
2237                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2238                                        (val & 0xffffff00) | 0x03);
2239         }
2240
2241         return 0;
2242 }
2243
2244 static int ath10k_pci_hif_resume(struct ath10k *ar)
2245 {
2246         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2247         struct pci_dev *pdev = ar_pci->pdev;
2248         u32 val;
2249
2250         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2251
2252         if ((val & 0x000000ff) != 0) {
2253                 pci_restore_state(pdev);
2254                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2255                                        val & 0xffffff00);
2256                 /*
2257                  * Suspend/Resume resets the PCI configuration space,
2258                  * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2259                  * to keep PCI Tx retries from interfering with C3 CPU state
2260                  */
2261                 pci_read_config_dword(pdev, 0x40, &val);
2262
2263                 if ((val & 0x0000ff00) != 0)
2264                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2265         }
2266
2267         return 0;
2268 }
2269 #endif
2270
2271 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2272         .send_head              = ath10k_pci_hif_send_head,
2273         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
2274         .start                  = ath10k_pci_hif_start,
2275         .stop                   = ath10k_pci_hif_stop,
2276         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
2277         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
2278         .send_complete_check    = ath10k_pci_hif_send_complete_check,
2279         .set_callbacks          = ath10k_pci_hif_set_callbacks,
2280         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
2281         .power_up               = ath10k_pci_hif_power_up,
2282         .power_down             = ath10k_pci_hif_power_down,
2283 #ifdef CONFIG_PM
2284         .suspend                = ath10k_pci_hif_suspend,
2285         .resume                 = ath10k_pci_hif_resume,
2286 #endif
2287 };
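
/*
 * Illustrative sketch (compiled out): the ops table above decouples the
 * core from the PCI transport, so callers reach the backend through thin
 * wrappers rather than calling it directly. The wrapper name is
 * hypothetical and the ar->hif.ops layout is assumed here.
 */
#if 0
static inline int example_hif_power_up(struct ath10k *ar)
{
	return ar->hif.ops->power_up(ar);
}
#endif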
2288
2289 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2290 {
2291         struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2292         struct ath10k_pci *ar_pci = pipe->ar_pci;
2293
2294         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2295 }
2296
2297 static void ath10k_msi_err_tasklet(unsigned long data)
2298 {
2299         struct ath10k *ar = (struct ath10k *)data;
2300
2301         ath10k_pci_fw_interrupt_handler(ar);
2302 }
2303
2304 /*
2305  * Handler for a per-engine interrupt on a PARTICULAR CE.
2306  * This is used in cases where each CE has a private MSI interrupt.
2307  */
2308 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2309 {
2310         struct ath10k *ar = arg;
2311         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2312         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2313
2314         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2315                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2316                 return IRQ_HANDLED;
2317         }
2318
2319         /*
2320          * NOTE: We are able to derive ce_id from irq because we
2321          * use a one-to-one mapping for CE's 0..5.
2322          * CE's 6 & 7 do not use interrupts at all.
2323          *
2324          * This mapping must be kept in sync with the mapping
2325          * used by firmware.
2326          */
2327         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2328         return IRQ_HANDLED;
2329 }
2330
2331 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2332 {
2333         struct ath10k *ar = arg;
2334         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2335
2336         tasklet_schedule(&ar_pci->msi_fw_err);
2337         return IRQ_HANDLED;
2338 }
2339
2340 /*
2341  * Top-level interrupt handler for all PCI interrupts from a Target.
2342  * When a block of MSI interrupts is allocated, this top-level handler
2343  * is not used; instead, we directly call the correct sub-handler.
2344  */
2345 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2346 {
2347         struct ath10k *ar = arg;
2348         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2349
2350         if (ar_pci->num_msi_intrs == 0) {
2351                 if (!ath10k_pci_irq_pending(ar))
2352                         return IRQ_NONE;
2353
2354                 ath10k_pci_disable_and_clear_legacy_irq(ar);
2355         }
2356
2357         tasklet_schedule(&ar_pci->intr_tq);
2358
2359         return IRQ_HANDLED;
2360 }
2361
2362 static void ath10k_pci_early_irq_tasklet(unsigned long data)
2363 {
2364         struct ath10k *ar = (struct ath10k *)data;
2365         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2366         u32 fw_ind;
2367         int ret;
2368
2369         ret = ath10k_pci_wake(ar);
2370         if (ret) {
2371                 ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2372                             ret);
2373                 return;
2374         }
2375
2376         fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
2377         if (fw_ind & FW_IND_EVENT_PENDING) {
2378                 ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
2379                                    fw_ind & ~FW_IND_EVENT_PENDING);
2380
2381                 /* Some structures are unavailable during early boot or at
2382                  * driver teardown, so just print that the device has crashed. */
2383                 ath10k_warn("device crashed - no diagnostics available\n");
2384         }
2385
2386         ath10k_pci_sleep(ar);
2387         ath10k_pci_enable_legacy_irq(ar);
2388 }
2389
2390 static void ath10k_pci_tasklet(unsigned long data)
2391 {
2392         struct ath10k *ar = (struct ath10k *)data;
2393         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2394
2395         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2396         ath10k_ce_per_engine_service_any(ar);
2397
2398         /* Re-enable legacy irq that was disabled in the irq handler */
2399         if (ar_pci->num_msi_intrs == 0)
2400                 ath10k_pci_enable_legacy_irq(ar);
2401 }
2402
2403 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2404 {
2405         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2406         int ret, i;
2407
2408         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2409                           ath10k_pci_msi_fw_handler,
2410                           IRQF_SHARED, "ath10k_pci", ar);
2411         if (ret) {
2412                 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2413                             ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2414                 return ret;
2415         }
2416
2417         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2418                 ret = request_irq(ar_pci->pdev->irq + i,
2419                                   ath10k_pci_per_engine_handler,
2420                                   IRQF_SHARED, "ath10k_pci", ar);
2421                 if (ret) {
2422                         ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2423                                     ar_pci->pdev->irq + i, ret);
2424
2425                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2426                                 free_irq(ar_pci->pdev->irq + i, ar);
2427
2428                         free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2429                         return ret;
2430                 }
2431         }
2432
2433         return 0;
2434 }
2435
2436 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2437 {
2438         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2439         int ret;
2440
2441         ret = request_irq(ar_pci->pdev->irq,
2442                           ath10k_pci_interrupt_handler,
2443                           IRQF_SHARED, "ath10k_pci", ar);
2444         if (ret) {
2445                 ath10k_warn("failed to request MSI irq %d: %d\n",
2446                             ar_pci->pdev->irq, ret);
2447                 return ret;
2448         }
2449
2450         return 0;
2451 }
2452
2453 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2454 {
2455         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2456         int ret;
2457
2458         ret = request_irq(ar_pci->pdev->irq,
2459                           ath10k_pci_interrupt_handler,
2460                           IRQF_SHARED, "ath10k_pci", ar);
2461         if (ret) {
2462                 ath10k_warn("failed to request legacy irq %d: %d\n",
2463                             ar_pci->pdev->irq, ret);
2464                 return ret;
2465         }
2466
2467         return 0;
2468 }
2469
2470 static int ath10k_pci_request_irq(struct ath10k *ar)
2471 {
2472         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2473
2474         switch (ar_pci->num_msi_intrs) {
2475         case 0:
2476                 return ath10k_pci_request_irq_legacy(ar);
2477         case 1:
2478                 return ath10k_pci_request_irq_msi(ar);
2479         case MSI_NUM_REQUEST:
2480                 return ath10k_pci_request_irq_msix(ar);
2481         }
2482
2483         ath10k_warn("unknown irq configuration upon request\n");
2484         return -EINVAL;
2485 }
2486
2487 static void ath10k_pci_free_irq(struct ath10k *ar)
2488 {
2489         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2490         int i;
2491
2492         /* There's at least one interrupt regardless of whether it's a legacy
2493          * INTR, MSI or MSI-X */
2494         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2495                 free_irq(ar_pci->pdev->irq + i, ar);
2496 }
2497
2498 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2499 {
2500         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2501         int i;
2502
2503         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2504         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2505                      (unsigned long)ar);
2506         tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2507                      (unsigned long)ar);
2508
2509         for (i = 0; i < CE_COUNT; i++) {
2510                 ar_pci->pipe_info[i].ar_pci = ar_pci;
2511                 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2512                              (unsigned long)&ar_pci->pipe_info[i]);
2513         }
2514 }
2515
2516 static int ath10k_pci_init_irq(struct ath10k *ar)
2517 {
2518         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2519         bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
2520                                        ar_pci->features);
2521         int ret;
2522
2523         ath10k_pci_init_irq_tasklets(ar);
2524
2525         if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2526             !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2527                 ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
2528
2529         /* Try MSI-X */
2530         if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2531                 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2532                 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2533                                            ar_pci->num_msi_intrs);
2534                 if (ret > 0)
2535                         return 0;
2536
2537                 /* fall-through */
2538         }
2539
2540         /* Try MSI */
2541         if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2542                 ar_pci->num_msi_intrs = 1;
2543                 ret = pci_enable_msi(ar_pci->pdev);
2544                 if (ret == 0)
2545                         return 0;
2546
2547                 /* fall-through */
2548         }
2549
2550         /* Try legacy irq
2551          *
2552          * A potential race occurs here: the CORE_BASE write
2553          * depends on the target correctly decoding the AXI address,
2554          * but the host won't know when the target writes BAR to
2555          * CORE_CTRL. This write might get lost if the target has
2556          * NOT written BAR yet. For now, work around the race by
2557          * repeating the write in the synchronization check below. */
2558         ar_pci->num_msi_intrs = 0;
2559
2560         ret = ath10k_pci_wake(ar);
2561         if (ret) {
2562                 ath10k_warn("failed to wake target: %d\n", ret);
2563                 return ret;
2564         }
2565
2566         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2567                            PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2568         ath10k_pci_sleep(ar);
2569
2570         return 0;
2571 }
2572
2573 static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2574 {
2575         int ret;
2576
2577         ret = ath10k_pci_wake(ar);
2578         if (ret) {
2579                 ath10k_warn("failed to wake target: %d\n", ret);
2580                 return ret;
2581         }
2582
2583         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2584                            0);
2585         ath10k_pci_sleep(ar);
2586
2587         return 0;
2588 }
2589
2590 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2591 {
2592         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2593
2594         switch (ar_pci->num_msi_intrs) {
2595         case 0:
2596                 return ath10k_pci_deinit_irq_legacy(ar);
2597         case 1:
2598                 /* fall-through */
2599         case MSI_NUM_REQUEST:
2600                 pci_disable_msi(ar_pci->pdev);
2601                 return 0;
2602         default:
2603                 pci_disable_msi(ar_pci->pdev);
2604         }
2605
2606         ath10k_warn("unknown irq configuration upon deinit\n");
2607         return -EINVAL;
2608 }
2609
2610 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2611 {
2612         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2613         int wait_limit = 300; /* 3 sec */
2614         int ret;
2615
2616         ret = ath10k_pci_wake(ar);
2617         if (ret) {
2618                 ath10k_err("failed to wake up target: %d\n", ret);
2619                 return ret;
2620         }
2621
2622         while (wait_limit-- &&
2623                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2624                  FW_IND_INITIALIZED)) {
2625                 if (ar_pci->num_msi_intrs == 0)
2626                         /* Fix potential race by repeating CORE_BASE writes */
2627                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2628                                   PCIE_INTR_CE_MASK_ALL,
2629                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2630                                                  PCIE_INTR_ENABLE_ADDRESS));
2631                 mdelay(10);
2632         }
2633
2634         if (wait_limit < 0) {
2635                 ath10k_err("target stalled\n");
2636                 ret = -EIO;
2637                 goto out;
2638         }
2639
2640 out:
2641         ath10k_pci_sleep(ar);
2642         return ret;
2643 }
2644
2645 static int ath10k_pci_cold_reset(struct ath10k *ar)
2646 {
2647         int i, ret;
2648         u32 val;
2649
2650         ret = ath10k_do_pci_wake(ar);
2651         if (ret) {
2652                 ath10k_err("failed to wake up target: %d\n",
2653                            ret);
2654                 return ret;
2655         }
2656
2657         /* Put Target, including PCIe, into RESET. */
2658         val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2659         val |= 1;
2660         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2661
2662         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2663                 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2664                                           RTC_STATE_COLD_RESET_MASK)
2665                         break;
2666                 msleep(1);
2667         }
2668
2669         /* Pull Target, including PCIe, out of RESET. */
2670         val &= ~1;
2671         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2672
2673         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2674                 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2675                                             RTC_STATE_COLD_RESET_MASK))
2676                         break;
2677                 msleep(1);
2678         }
2679
2680         ath10k_do_pci_sleep(ar);
2681         return 0;
2682 }
2683
2684 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2685 {
2686         int i;
2687
2688         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2689                 if (!test_bit(i, ar_pci->features))
2690                         continue;
2691
2692                 switch (i) {
2693                 case ATH10K_PCI_FEATURE_MSI_X:
2694                         ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2695                         break;
2696                 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2697                         ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2698                         break;
2699                 }
2700         }
2701 }
2702
2703 static int ath10k_pci_probe(struct pci_dev *pdev,
2704                             const struct pci_device_id *pci_dev)
2705 {
2706         void __iomem *mem;
2707         int ret = 0;
2708         struct ath10k *ar;
2709         struct ath10k_pci *ar_pci;
2710         u32 lcr_val, chip_id;
2711
2712         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2713
2714         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2715         if (ar_pci == NULL)
2716                 return -ENOMEM;
2717
2718         ar_pci->pdev = pdev;
2719         ar_pci->dev = &pdev->dev;
2720
2721         switch (pci_dev->device) {
2722         case QCA988X_2_0_DEVICE_ID:
2723                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2724                 break;
2725         default:
2726                 ret = -ENODEV;
2727                 ath10k_err("Unknown device ID: 0x%x\n", pci_dev->device);
2728                 goto err_ar_pci;
2729         }
2730
2731         if (ath10k_target_ps)
2732                 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2733
2734         ath10k_pci_dump_features(ar_pci);
2735
2736         ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2737         if (!ar) {
2738                 ath10k_err("failed to create driver core\n");
2739                 ret = -EINVAL;
2740                 goto err_ar_pci;
2741         }
2742
2743         ar_pci->ar = ar;
2744         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2745         atomic_set(&ar_pci->keep_awake_count, 0);
2746
2747         pci_set_drvdata(pdev, ar);
2748
2749         /*
2750          * The Target may have been reset or power cycled without the Host's
2751          * knowledge, so its Config Space may no longer reflect the PCI
2752          * address space that was assigned earlier by the PCI infrastructure.
2753          * Refresh it now.
2754          */
2755         ret = pci_assign_resource(pdev, BAR_NUM);
2756         if (ret) {
2757                 ath10k_err("failed to assign PCI space: %d\n", ret);
2758                 goto err_ar;
2759         }
2760
2761         ret = pci_enable_device(pdev);
2762         if (ret) {
2763                 ath10k_err("failed to enable PCI device: %d\n", ret);
2764                 goto err_ar;
2765         }
2766
2767         /* Request MMIO resources */
2768         ret = pci_request_region(pdev, BAR_NUM, "ath");
2769         if (ret) {
2770                 ath10k_err("failed to request MMIO region: %d\n", ret);
2771                 goto err_device;
2772         }
2773
2774         /*
2775          * Target structures have a limit of 32 bit DMA pointers.
2776          * DMA pointers can be wider than 32 bits by default on some systems.
2777          */
2778         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2779         if (ret) {
2780                 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
2781                 goto err_region;
2782         }
2783
2784         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2785         if (ret) {
2786                 ath10k_err("failed to set consistent DMA mask to 32-bit\n");
2787                 goto err_region;
2788         }
2789
2790         /* Set bus master bit in PCI_COMMAND to enable DMA */
2791         pci_set_master(pdev);
2792
2793         /*
2794          * Temporary FIX: disable ASPM
2795          * Will be removed after the OTP is programmed
2796          */
2797         pci_read_config_dword(pdev, 0x80, &lcr_val);
2798         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2799
2800         /* Arrange for access to Target SoC registers. */
2801         mem = pci_iomap(pdev, BAR_NUM, 0);
2802         if (!mem) {
2803                 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
2804                 ret = -EIO;
2805                 goto err_master;
2806         }
2807
2808         ar_pci->mem = mem;
2809
2810         spin_lock_init(&ar_pci->ce_lock);
2811
2812         ret = ath10k_do_pci_wake(ar);
2813         if (ret) {
2814                 ath10k_err("Failed to get chip id: %d\n", ret);
2815                 goto err_iomap;
2816         }
2817
2818         chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2819
2820         ath10k_do_pci_sleep(ar);
2821
2822         ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2823
2824         ret = ath10k_core_register(ar, chip_id);
2825         if (ret) {
2826                 ath10k_err("failed to register driver core: %d\n", ret);
2827                 goto err_iomap;
2828         }
2829
2830         return 0;
2831
2832 err_iomap:
2833         pci_iounmap(pdev, mem);
2834 err_master:
2835         pci_clear_master(pdev);
2836 err_region:
2837         pci_release_region(pdev, BAR_NUM);
2838 err_device:
2839         pci_disable_device(pdev);
2840 err_ar:
2841         ath10k_core_destroy(ar);
2842 err_ar_pci:
2843         /* call HIF PCI free here */
2844         kfree(ar_pci);
2845
2846         return ret;
2847 }
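
/*
 * Illustrative sketch (compiled out): the goto-based error unwinding used
 * by ath10k_pci_probe() above. Each failure jumps to the label that
 * releases everything acquired so far, in reverse order. All names here
 * are hypothetical.
 */
#if 0
static int example_probe(struct device *dev)
{
	int ret;

	ret = example_acquire_a(dev);	/* hypothetical resource A */
	if (ret)
		return ret;

	ret = example_acquire_b(dev);	/* hypothetical resource B */
	if (ret)
		goto err_a;

	return 0;

err_a:
	example_release_a(dev);
	return ret;
}
#endif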
2848
2849 static void ath10k_pci_remove(struct pci_dev *pdev)
2850 {
2851         struct ath10k *ar = pci_get_drvdata(pdev);
2852         struct ath10k_pci *ar_pci;
2853
2854         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2855
2856         if (!ar)
2857                 return;
2858
2859         ar_pci = ath10k_pci_priv(ar);
2860
2861         if (!ar_pci)
2862                 return;
2863
2864         tasklet_kill(&ar_pci->msi_fw_err);
2865
2866         ath10k_core_unregister(ar);
2867
2868         pci_iounmap(pdev, ar_pci->mem);
2869         pci_release_region(pdev, BAR_NUM);
2870         pci_clear_master(pdev);
2871         pci_disable_device(pdev);
2872
2873         ath10k_core_destroy(ar);
2874         kfree(ar_pci);
2875 }
2876
2877 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2878
2879 static struct pci_driver ath10k_pci_driver = {
2880         .name = "ath10k_pci",
2881         .id_table = ath10k_pci_id_table,
2882         .probe = ath10k_pci_probe,
2883         .remove = ath10k_pci_remove,
2884 };
2885
2886 static int __init ath10k_pci_init(void)
2887 {
2888         int ret;
2889
2890         ret = pci_register_driver(&ath10k_pci_driver);
2891         if (ret)
2892                 ath10k_err("failed to register PCI driver: %d\n", ret);
2893
2894         return ret;
2895 }
2896 module_init(ath10k_pci_init);
2897
2898 static void __exit ath10k_pci_exit(void)
2899 {
2900         pci_unregister_driver(&ath10k_pci_driver);
2901 }
2902
2903 module_exit(ath10k_pci_exit);
2904
2905 MODULE_AUTHOR("Qualcomm Atheros");
2906 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2907 MODULE_LICENSE("Dual BSD/GPL");
2908 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2909 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2910 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);