NTB: Add parameters for Intel SNB B2B addresses
drivers/ntb/hw/intel/ntb_hw_intel.c
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  *   redistributing this file, you may do so under either license.
4  *
5  *   GPL LICENSE SUMMARY
6  *
7  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
8  *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
9  *
10  *   This program is free software; you can redistribute it and/or modify
11  *   it under the terms of version 2 of the GNU General Public License as
12  *   published by the Free Software Foundation.
13  *
14  *   BSD LICENSE
15  *
16  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
17  *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
18  *
19  *   Redistribution and use in source and binary forms, with or without
20  *   modification, are permitted provided that the following conditions
21  *   are met:
22  *
23  *     * Redistributions of source code must retain the above copyright
24  *       notice, this list of conditions and the following disclaimer.
25  *     * Redistributions in binary form must reproduce the above copy
26  *       notice, this list of conditions and the following disclaimer in
27  *       the documentation and/or other materials provided with the
28  *       distribution.
29  *     * Neither the name of Intel Corporation nor the names of its
30  *       contributors may be used to endorse or promote products derived
31  *       from this software without specific prior written permission.
32  *
33  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
36  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
39  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
40  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
41  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
42  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
43  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44  *
45  * Intel PCIe NTB Linux driver
46  *
47  * Contact Information:
48  * Jon Mason <jon.mason@intel.com>
49  */
50
51 #include <linux/debugfs.h>
52 #include <linux/delay.h>
53 #include <linux/init.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56 #include <linux/pci.h>
57 #include <linux/random.h>
58 #include <linux/slab.h>
59 #include <linux/ntb.h>
60
61 #include "ntb_hw_intel.h"
62
63 #define NTB_NAME        "ntb_hw_intel"
64 #define NTB_DESC        "Intel(R) PCI-E Non-Transparent Bridge Driver"
65 #define NTB_VER         "2.0"
66
67 MODULE_DESCRIPTION(NTB_DESC);
68 MODULE_VERSION(NTB_VER);
69 MODULE_LICENSE("Dual BSD/GPL");
70 MODULE_AUTHOR("Intel Corporation");
71
72 #define bar0_off(base, bar) ((base) + ((bar) << 2))
73 #define bar2_off(base, bar) bar0_off(base, (bar) - 2)
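/*
 * Example: for a block of 32-bit registers starting at offset 0x10,
 * bar0_off(0x10, 2) is 0x18, and bar2_off(0x10, 2) is bar0_off(0x10, 0),
 * i.e. 0x10: bar2_off() addresses the same style of block whose first
 * register belongs to bar 2.
 */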
74
75 static const struct intel_ntb_reg bwd_reg;
76 static const struct intel_ntb_alt_reg bwd_pri_reg;
77 static const struct intel_ntb_alt_reg bwd_sec_reg;
78 static const struct intel_ntb_alt_reg bwd_b2b_reg;
79 static const struct intel_ntb_xlat_reg bwd_pri_xlat;
80 static const struct intel_ntb_xlat_reg bwd_sec_xlat;
81 static const struct intel_ntb_reg snb_reg;
82 static const struct intel_ntb_alt_reg snb_pri_reg;
83 static const struct intel_ntb_alt_reg snb_sec_reg;
84 static const struct intel_ntb_alt_reg snb_b2b_reg;
85 static const struct intel_ntb_xlat_reg snb_pri_xlat;
86 static const struct intel_ntb_xlat_reg snb_sec_xlat;
87 static struct intel_b2b_addr snb_b2b_usd_addr;
88 static struct intel_b2b_addr snb_b2b_dsd_addr;
89 static const struct ntb_dev_ops intel_ntb_ops;
90
91 static const struct file_operations intel_ntb_debugfs_info;
92 static struct dentry *debugfs_dir;
93
94 static int b2b_mw_idx = -1;
95 module_param(b2b_mw_idx, int, 0644);
96 MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
97                  "zero or positive value counts from the first mw idx; a "
98                  "negative value counts back from the last mw idx.  Both "
99                  "sides MUST set the same value here!");
100
101 static unsigned int b2b_mw_share;
102 module_param(b2b_mw_share, uint, 0644);
103 MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
104                  "ntb so that the peer ntb only occupies the first half of "
105                  "the mw, leaving the second half usable as a mw.  Both "
106                  "sides MUST set the same value here!");
107
108 module_param_named(snb_b2b_usd_bar2_addr64,
109                    snb_b2b_usd_addr.bar2_addr64, ullong, 0644);
110 MODULE_PARM_DESC(snb_b2b_usd_bar2_addr64,
111                  "SNB B2B USD BAR 2 64-bit address");
112
113 module_param_named(snb_b2b_usd_bar4_addr64,
114                    snb_b2b_usd_addr.bar4_addr64, ullong, 0644);
115 MODULE_PARM_DESC(snb_b2b_usd_bar4_addr64,
116                  "SNB B2B USD BAR 4 64-bit address");
117
118 module_param_named(snb_b2b_usd_bar4_addr32,
119                    snb_b2b_usd_addr.bar4_addr32, ullong, 0644);
120 MODULE_PARM_DESC(snb_b2b_usd_bar4_addr32,
121                  "SNB B2B USD split-BAR 4 32-bit address");
122
123 module_param_named(snb_b2b_usd_bar5_addr32,
124                    snb_b2b_usd_addr.bar5_addr32, ullong, 0644);
125 MODULE_PARM_DESC(snb_b2b_usd_bar5_addr32,
126                  "SNB B2B USD split-BAR 5 32-bit address");
127
128 module_param_named(snb_b2b_dsd_bar2_addr64,
129                    snb_b2b_dsd_addr.bar2_addr64, ullong, 0644);
130 MODULE_PARM_DESC(snb_b2b_dsd_bar2_addr64,
131                  "SNB B2B DSD BAR 2 64-bit address");
132
133 module_param_named(snb_b2b_dsd_bar4_addr64,
134                    snb_b2b_dsd_addr.bar4_addr64, ullong, 0644);
135 MODULE_PARM_DESC(snb_b2b_dsd_bar4_addr64,
136                  "SNB B2B DSD BAR 4 64-bit address");
137
138 module_param_named(snb_b2b_dsd_bar4_addr32,
139                    snb_b2b_dsd_addr.bar4_addr32, ullong, 0644);
140 MODULE_PARM_DESC(snb_b2b_dsd_bar4_addr32,
141                  "SNB B2B DSD split-BAR 4 32-bit address");
142
143 module_param_named(snb_b2b_dsd_bar5_addr32,
144                    snb_b2b_dsd_addr.bar5_addr32, ullong, 0644);
145 MODULE_PARM_DESC(snb_b2b_dsd_bar5_addr32,
146                  "SNB B2B DSD split-BAR 5 32-bit address");
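/*
 * Illustrative usage (the addresses here are made-up examples, not
 * defaults):
 *
 *   modprobe ntb_hw_intel b2b_mw_idx=-1 b2b_mw_share=1 \
 *           snb_b2b_usd_bar2_addr64=0x1000000000 \
 *           snb_b2b_dsd_bar2_addr64=0x2000000000
 *
 * As the descriptions above stress, b2b_mw_idx and b2b_mw_share must be
 * given the same values on both hosts.
 */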
147
148 #ifndef ioread64
149 #ifdef readq
150 #define ioread64 readq
151 #else
152 #define ioread64 _ioread64
153 static inline u64 _ioread64(void __iomem *mmio)
154 {
155         u64 low, high;
156
157         low = ioread32(mmio);
158         high = ioread32(mmio + sizeof(u32));
159         return low | (high << 32);
160 }
161 #endif
162 #endif
163
164 #ifndef iowrite64
165 #ifdef writeq
166 #define iowrite64 writeq
167 #else
168 #define iowrite64 _iowrite64
169 static inline void _iowrite64(u64 val, void __iomem *mmio)
170 {
171         iowrite32(val, mmio);
172         iowrite32(val >> 32, mmio + sizeof(u32));
173 }
174 #endif
175 #endif
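/*
 * Note: these fallbacks split each 64-bit access into two 32-bit accesses,
 * low dword first, so unlike a native readq/writeq the combined access is
 * not atomic with respect to the device.
 */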
176
177 static inline int pdev_is_bwd(struct pci_dev *pdev)
178 {
179         switch (pdev->device) {
180         case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
181                 return 1;
182         }
183         return 0;
184 }
185
186 static inline int pdev_is_snb(struct pci_dev *pdev)
187 {
188         switch (pdev->device) {
189         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
190         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
191         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
192         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
193         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
194         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
195         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
196         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
197         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
198         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
199         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
200         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
201                 return 1;
202         }
203         return 0;
204 }
205
206 static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
207 {
208         ndev->unsafe_flags = 0;
209         ndev->unsafe_flags_ignore = 0;
210
211         /* Only B2B has a workaround to avoid SDOORBELL */
212         if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
213                 if (!ntb_topo_is_b2b(ndev->ntb.topo))
214                         ndev->unsafe_flags |= NTB_UNSAFE_DB;
215
216         /* No low level workaround to avoid SB01BASE */
217         if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
218                 ndev->unsafe_flags |= NTB_UNSAFE_DB;
219                 ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
220         }
221 }
222
223 static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
224                                  unsigned long flag)
225 {
226         return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
227 }
228
229 static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
230                                      unsigned long flag)
231 {
232         flag &= ndev->unsafe_flags;
233         ndev->unsafe_flags_ignore |= flag;
234
235         return !!flag;
236 }
237
238 static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
239 {
240         if (idx < 0 || idx > ndev->mw_count)
241                 return -EINVAL;
242         return ndev->reg->mw_bar[idx];
243 }
244
245 static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
246                                phys_addr_t *db_addr, resource_size_t *db_size,
247                                phys_addr_t reg_addr, unsigned long reg)
248 {
249         WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
250
251         if (db_addr) {
252                 *db_addr = reg_addr + reg;
253                 dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
254         }
255
256         if (db_size) {
257                 *db_size = ndev->reg->db_size;
258                 dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
259         }
260
261         return 0;
262 }
263
264 static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
265                                void __iomem *mmio)
266 {
267         WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
268
269         return ndev->reg->db_ioread(mmio);
270 }
271
272 static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
273                                 void __iomem *mmio)
274 {
275         WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
276
277         if (db_bits & ~ndev->db_valid_mask)
278                 return -EINVAL;
279
280         ndev->reg->db_iowrite(db_bits, mmio);
281
282         return 0;
283 }
284
285 static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
286                                    void __iomem *mmio)
287 {
288         unsigned long irqflags;
289
290         WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
291
292         if (db_bits & ~ndev->db_valid_mask)
293                 return -EINVAL;
294
295         spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
296         {
297                 ndev->db_mask |= db_bits;
298                 ndev->reg->db_iowrite(ndev->db_mask, mmio);
299         }
300         spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
301
302         return 0;
303 }
304
305 static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
306                                      void __iomem *mmio)
307 {
308         unsigned long irqflags;
309
310         WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
311
312         if (db_bits & ~ndev->db_valid_mask)
313                 return -EINVAL;
314
315         spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
316         {
317                 ndev->db_mask &= ~db_bits;
318                 ndev->reg->db_iowrite(ndev->db_mask, mmio);
319         }
320         spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
321
322         return 0;
323 }
324
325 static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
326 {
327         u64 shift, mask;
328
329         shift = ndev->db_vec_shift;
330         mask = BIT_ULL(shift) - 1;
331
332         return mask << (shift * db_vector);
333 }
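/*
 * Worked example (the shift value is illustrative): with db_vec_shift == 4,
 * each vector owns four doorbell bits, so ndev_vec_mask(ndev, 1) returns
 * 0xf << 4 == 0xf0, i.e. doorbell bits 4 through 7.
 */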
334
335 static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
336                                  phys_addr_t *spad_addr, phys_addr_t reg_addr,
337                                  unsigned long reg)
338 {
339         WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));
340
341         if (idx < 0 || idx >= ndev->spad_count)
342                 return -EINVAL;
343
344         if (spad_addr) {
345                 *spad_addr = reg_addr + reg + (idx << 2);
346                 dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
347         }
348
349         return 0;
350 }
351
352 static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
353                                  void __iomem *mmio)
354 {
355         WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));
356
357         if (idx < 0 || idx >= ndev->spad_count)
358                 return 0;
359
360         return ioread32(mmio + (idx << 2));
361 }
362
363 static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
364                                   void __iomem *mmio)
365 {
366         WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));
367
368         if (idx < 0 || idx >= ndev->spad_count)
369                 return -EINVAL;
370
371         iowrite32(val, mmio + (idx << 2));
372
373         return 0;
374 }
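/* The scratchpads are a contiguous array of 32-bit registers, hence the
 * (idx << 2) byte offset used by the accessors above.
 */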
375
376 static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
377 {
378         u64 vec_mask;
379
380         vec_mask = ndev_vec_mask(ndev, vec);
381
382         dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);
383
384         ndev->last_ts = jiffies;
385
386         if (vec_mask & ndev->db_link_mask) {
387                 if (ndev->reg->poll_link(ndev))
388                         ntb_link_event(&ndev->ntb);
389         }
390
391         if (vec_mask & ndev->db_valid_mask)
392                 ntb_db_event(&ndev->ntb, vec);
393
394         return IRQ_HANDLED;
395 }
396
397 static irqreturn_t ndev_vec_isr(int irq, void *dev)
398 {
399         struct intel_ntb_vec *nvec = dev;
400
401         return ndev_interrupt(nvec->ndev, nvec->num);
402 }
403
404 static irqreturn_t ndev_irq_isr(int irq, void *dev)
405 {
406         struct intel_ntb_dev *ndev = dev;
407
408         return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
409 }
410
411 static int ndev_init_isr(struct intel_ntb_dev *ndev,
412                          int msix_min, int msix_max,
413                          int msix_shift, int total_shift)
414 {
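        /*
         * Interrupt setup tries msix first, then msi, then intx.  The error
         * labels below deliberately fall through from one strategy to the
         * next: a failure during msix setup drops into the msi attempt, and
         * a msi failure drops into the intx attempt.
         */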
415         struct pci_dev *pdev;
416         int rc, i, msix_count;
417
418         pdev = ndev_pdev(ndev);
419
420         /* Mask all doorbell interrupts */
421         ndev->db_mask = ndev->db_valid_mask;
422         ndev->reg->db_iowrite(ndev->db_mask,
423                               ndev->self_mmio +
424                               ndev->self_reg->db_mask);
425
426         /* Try to set up msix irq */
427
428         ndev->vec = kcalloc(msix_max, sizeof(*ndev->vec), GFP_KERNEL);
429         if (!ndev->vec)
430                 goto err_msix_vec_alloc;
431
432         ndev->msix = kcalloc(msix_max, sizeof(*ndev->msix), GFP_KERNEL);
433         if (!ndev->msix)
434                 goto err_msix_alloc;
435
436         for (i = 0; i < msix_max; ++i)
437                 ndev->msix[i].entry = i;
438
439         msix_count = pci_enable_msix_range(pdev, ndev->msix,
440                                            msix_min, msix_max);
441         if (msix_count < 0)
442                 goto err_msix_enable;
443
444         for (i = 0; i < msix_count; ++i) {
445                 ndev->vec[i].ndev = ndev;
446                 ndev->vec[i].num = i;
447                 rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
448                                  "ndev_vec_isr", &ndev->vec[i]);
449                 if (rc)
450                         goto err_msix_request;
451         }
452
453         dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
454         ndev->db_vec_count = msix_count;
455         ndev->db_vec_shift = msix_shift;
456         return 0;
457
458 err_msix_request:
459         while (i-- > 0)
460                 free_irq(ndev->msix[i].vector, &ndev->vec[i]);
461         pci_disable_msix(pdev);
462 err_msix_enable:
463         kfree(ndev->msix);
464 err_msix_alloc:
465         kfree(ndev->vec);
466 err_msix_vec_alloc:
467         ndev->msix = NULL;
468         ndev->vec = NULL;
469
470         /* Try to set up msi irq */
471
472         rc = pci_enable_msi(pdev);
473         if (rc)
474                 goto err_msi_enable;
475
476         rc = request_irq(pdev->irq, ndev_irq_isr, 0,
477                          "ndev_irq_isr", ndev);
478         if (rc)
479                 goto err_msi_request;
480
481         dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
482         ndev->db_vec_count = 1;
483         ndev->db_vec_shift = total_shift;
484         return 0;
485
486 err_msi_request:
487         pci_disable_msi(pdev);
488 err_msi_enable:
489
490         /* Try to set up intx irq */
491
492         pci_intx(pdev, 1);
493
494         rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
495                          "ndev_irq_isr", ndev);
496         if (rc)
497                 goto err_intx_request;
498
499         dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
500         ndev->db_vec_count = 1;
501         ndev->db_vec_shift = total_shift;
502         return 0;
503
504 err_intx_request:
505         return rc;
506 }
507
508 static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
509 {
510         struct pci_dev *pdev;
511         int i;
512
513         pdev = ndev_pdev(ndev);
514
515         /* Mask all doorbell interrupts */
516         ndev->db_mask = ndev->db_valid_mask;
517         ndev->reg->db_iowrite(ndev->db_mask,
518                               ndev->self_mmio +
519                               ndev->self_reg->db_mask);
520
521         if (ndev->msix) {
522                 i = ndev->db_vec_count;
523                 while (i--)
524                         free_irq(ndev->msix[i].vector, &ndev->vec[i]);
525                 pci_disable_msix(pdev);
526                 kfree(ndev->msix);
527                 kfree(ndev->vec);
528         } else {
529                 free_irq(pdev->irq, ndev);
530                 if (pci_dev_msi_enabled(pdev))
531                         pci_disable_msi(pdev);
532         }
533 }
534
535 static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
536                                  size_t count, loff_t *offp)
537 {
538         struct intel_ntb_dev *ndev;
539         void __iomem *mmio;
540         char *buf;
541         size_t buf_size;
542         ssize_t ret, off;
543         union { u64 v64; u32 v32; u16 v16; } u;
544
545         ndev = filp->private_data;
546         mmio = ndev->self_mmio;
547
548         buf_size = min(count, 0x800ul);
549
550         buf = kmalloc(buf_size, GFP_KERNEL);
551         if (!buf)
552                 return -ENOMEM;
553
554         off = 0;
555
556         off += scnprintf(buf + off, buf_size - off,
557                          "NTB Device Information:\n");
558
559         off += scnprintf(buf + off, buf_size - off,
560                          "Connection Topology -\t%s\n",
561                          ntb_topo_string(ndev->ntb.topo));
562
563         off += scnprintf(buf + off, buf_size - off,
564                          "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
565         off += scnprintf(buf + off, buf_size - off,
566                          "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
567         off += scnprintf(buf + off, buf_size - off,
568                          "BAR4 Split -\t\t%s\n",
569                          ndev->bar4_split ? "yes" : "no");
570
571         off += scnprintf(buf + off, buf_size - off,
572                          "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
573         off += scnprintf(buf + off, buf_size - off,
574                          "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
575
576         if (!ndev->reg->link_is_up(ndev)) {
577                 off += scnprintf(buf + off, buf_size - off,
578                                  "Link Status -\t\tDown\n");
579         } else {
580                 off += scnprintf(buf + off, buf_size - off,
581                                  "Link Status -\t\tUp\n");
582                 off += scnprintf(buf + off, buf_size - off,
583                                  "Link Speed -\t\tPCI-E Gen %u\n",
584                                  NTB_LNK_STA_SPEED(ndev->lnk_sta));
585                 off += scnprintf(buf + off, buf_size - off,
586                                  "Link Width -\t\tx%u\n",
587                                  NTB_LNK_STA_WIDTH(ndev->lnk_sta));
588         }
589
590         off += scnprintf(buf + off, buf_size - off,
591                          "Memory Window Count -\t%u\n", ndev->mw_count);
592         off += scnprintf(buf + off, buf_size - off,
593                          "Scratchpad Count -\t%u\n", ndev->spad_count);
594         off += scnprintf(buf + off, buf_size - off,
595                          "Doorbell Count -\t%u\n", ndev->db_count);
596         off += scnprintf(buf + off, buf_size - off,
597                          "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
598         off += scnprintf(buf + off, buf_size - off,
599                          "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
600
601         off += scnprintf(buf + off, buf_size - off,
602                          "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
603         off += scnprintf(buf + off, buf_size - off,
604                          "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
605         off += scnprintf(buf + off, buf_size - off,
606                          "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
607
608         u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
609         off += scnprintf(buf + off, buf_size - off,
610                          "Doorbell Mask -\t\t%#llx\n", u.v64);
611
612         u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
613         off += scnprintf(buf + off, buf_size - off,
614                          "Doorbell Bell -\t\t%#llx\n", u.v64);
615
616         off += scnprintf(buf + off, buf_size - off,
617                          "\nNTB Incoming XLAT:\n");
618
619         u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
620         off += scnprintf(buf + off, buf_size - off,
621                          "XLAT23 -\t\t%#018llx\n", u.v64);
622
623         u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
624         off += scnprintf(buf + off, buf_size - off,
625                          "XLAT45 -\t\t%#018llx\n", u.v64);
626
627         u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
628         off += scnprintf(buf + off, buf_size - off,
629                          "LMT23 -\t\t\t%#018llx\n", u.v64);
630
631         u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
632         off += scnprintf(buf + off, buf_size - off,
633                          "LMT45 -\t\t\t%#018llx\n", u.v64);
634
635         if (pdev_is_snb(ndev->ntb.pdev)) {
636                 if (ntb_topo_is_b2b(ndev->ntb.topo)) {
637                         off += scnprintf(buf + off, buf_size - off,
638                                          "\nNTB Outgoing B2B XLAT:\n");
639
640                         u.v64 = ioread64(mmio + SNB_PBAR23XLAT_OFFSET);
641                         off += scnprintf(buf + off, buf_size - off,
642                                          "B2B XLAT23 -\t\t%#018llx\n", u.v64);
643
644                         u.v64 = ioread64(mmio + SNB_PBAR45XLAT_OFFSET);
645                         off += scnprintf(buf + off, buf_size - off,
646                                          "B2B XLAT45 -\t\t%#018llx\n", u.v64);
647
648                         u.v64 = ioread64(mmio + SNB_PBAR23LMT_OFFSET);
649                         off += scnprintf(buf + off, buf_size - off,
650                                          "B2B LMT23 -\t\t%#018llx\n", u.v64);
651
652                         u.v64 = ioread64(mmio + SNB_PBAR45LMT_OFFSET);
653                         off += scnprintf(buf + off, buf_size - off,
654                                          "B2B LMT45 -\t\t%#018llx\n", u.v64);
655
656                         off += scnprintf(buf + off, buf_size - off,
657                                          "\nNTB Secondary BAR:\n");
658
659                         u.v64 = ioread64(mmio + SNB_SBAR0BASE_OFFSET);
660                         off += scnprintf(buf + off, buf_size - off,
661                                          "SBAR01 -\t\t%#018llx\n", u.v64);
662
663                         u.v64 = ioread64(mmio + SNB_SBAR23BASE_OFFSET);
664                         off += scnprintf(buf + off, buf_size - off,
665                                          "SBAR23 -\t\t%#018llx\n", u.v64);
666
667                         u.v64 = ioread64(mmio + SNB_SBAR45BASE_OFFSET);
668                         off += scnprintf(buf + off, buf_size - off,
669                                          "SBAR45 -\t\t%#018llx\n", u.v64);
670                 }
671
672                 off += scnprintf(buf + off, buf_size - off,
673                                  "\nSNB NTB Statistics:\n");
674
675                 u.v16 = ioread16(mmio + SNB_USMEMMISS_OFFSET);
676                 off += scnprintf(buf + off, buf_size - off,
677                                  "Upstream Memory Miss -\t%u\n", u.v16);
678
679                 off += scnprintf(buf + off, buf_size - off,
680                                  "\nSNB NTB Hardware Errors:\n");
681
682                 if (!pci_read_config_word(ndev->ntb.pdev,
683                                           SNB_DEVSTS_OFFSET, &u.v16))
684                         off += scnprintf(buf + off, buf_size - off,
685                                          "DEVSTS -\t\t%#06x\n", u.v16);
686
687                 if (!pci_read_config_word(ndev->ntb.pdev,
688                                           SNB_LINK_STATUS_OFFSET, &u.v16))
689                         off += scnprintf(buf + off, buf_size - off,
690                                          "LNKSTS -\t\t%#06x\n", u.v16);
691
692                 if (!pci_read_config_dword(ndev->ntb.pdev,
693                                            SNB_UNCERRSTS_OFFSET, &u.v32))
694                         off += scnprintf(buf + off, buf_size - off,
695                                          "UNCERRSTS -\t\t%#06x\n", u.v32);
696
697                 if (!pci_read_config_dword(ndev->ntb.pdev,
698                                            SNB_CORERRSTS_OFFSET, &u.v32))
699                         off += scnprintf(buf + off, buf_size - off,
700                                          "CORERRSTS -\t\t%#06x\n", u.v32);
701         }
702
703         ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
704         kfree(buf);
705         return ret;
706 }
707
708 static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
709 {
710         if (!debugfs_dir) {
711                 ndev->debugfs_dir = NULL;
712                 ndev->debugfs_info = NULL;
713         } else {
714                 ndev->debugfs_dir =
715                         debugfs_create_dir(ndev_name(ndev), debugfs_dir);
716                 if (!ndev->debugfs_dir)
717                         ndev->debugfs_info = NULL;
718                 else
719                         ndev->debugfs_info =
720                                 debugfs_create_file("info", S_IRUSR,
721                                                     ndev->debugfs_dir, ndev,
722                                                     &intel_ntb_debugfs_info);
723         }
724 }
725
726 static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
727 {
728         debugfs_remove_recursive(ndev->debugfs_dir);
729 }
730
731 static int intel_ntb_mw_count(struct ntb_dev *ntb)
732 {
733         return ntb_ndev(ntb)->mw_count;
734 }
735
736 static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
737                                   phys_addr_t *base,
738                                   resource_size_t *size,
739                                   resource_size_t *align,
740                                   resource_size_t *align_size)
741 {
742         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
743         int bar;
744
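        /* If the b2b mw consumed a whole bar (b2b_off == 0), that bar is
         * hidden from clients, so skip over it when mapping mw idx to bar.
         */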
745         if (idx >= ndev->b2b_idx && !ndev->b2b_off)
746                 idx += 1;
747
748         bar = ndev_mw_to_bar(ndev, idx);
749         if (bar < 0)
750                 return bar;
751
752         if (base)
753                 *base = pci_resource_start(ndev->ntb.pdev, bar) +
754                         (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
755
756         if (size)
757                 *size = pci_resource_len(ndev->ntb.pdev, bar) -
758                         (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
759
760         if (align)
761                 *align = pci_resource_len(ndev->ntb.pdev, bar);
762
763         if (align_size)
764                 *align_size = 1;
765
766         return 0;
767 }
768
769 static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
770                                   dma_addr_t addr, resource_size_t size)
771 {
772         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
773         unsigned long base_reg, xlat_reg, limit_reg;
774         resource_size_t bar_size, mw_size;
775         void __iomem *mmio;
776         u64 base, limit, reg_val;
777         int bar;
778
779         if (idx >= ndev->b2b_idx && !ndev->b2b_off)
780                 idx += 1;
781
782         bar = ndev_mw_to_bar(ndev, idx);
783         if (bar < 0)
784                 return bar;
785
786         bar_size = pci_resource_len(ndev->ntb.pdev, bar);
787
788         if (idx == ndev->b2b_idx)
789                 mw_size = bar_size - ndev->b2b_off;
790         else
791                 mw_size = bar_size;
792
793         /* hardware requires that addr is aligned to bar size */
794         if (addr & (bar_size - 1))
795                 return -EINVAL;
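        /* For example, a 1 MiB bar (bar_size == 0x100000) only accepts addr
         * values with the low 20 bits clear, such as 0x100000 or 0x3f00000.
         */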
796
797         /* make sure the range fits in the usable mw size */
798         if (size > mw_size)
799                 return -EINVAL;
800
801         mmio = ndev->self_mmio;
802         base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
803         xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
804         limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);
805
806         if (bar < 4 || !ndev->bar4_split) {
807                 base = ioread64(mmio + base_reg);
808
809                 /* Set the limit if supported, if size is not mw_size */
810                 if (limit_reg && size != mw_size)
811                         limit = base + size;
812                 else
813                         limit = 0;
814
815                 /* set and verify setting the translation address */
816                 iowrite64(addr, mmio + xlat_reg);
817                 reg_val = ioread64(mmio + xlat_reg);
818                 if (reg_val != addr) {
819                         iowrite64(0, mmio + xlat_reg);
820                         return -EIO;
821                 }
822
823                 /* set and verify setting the limit */
824                 iowrite64(limit, mmio + limit_reg);
825                 reg_val = ioread64(mmio + limit_reg);
826                 if (reg_val != limit) {
827                         iowrite64(base, mmio + limit_reg);
828                         iowrite64(0, mmio + xlat_reg);
829                         return -EIO;
830                 }
831         } else {
832                 /* split bar addr range must all be 32 bit */
833                 if (addr & (~0ull << 32))
834                         return -EINVAL;
835                 if ((addr + size) & (~0ull << 32))
836                         return -EINVAL;
837
838                 base = ioread32(mmio + base_reg);
839
840                 /* Set the limit if supported, if size is not mw_size */
841                 if (limit_reg && size != mw_size)
842                         limit = base + size;
843                 else
844                         limit = 0;
845
846                 /* set and verify setting the translation address */
847                 iowrite32(addr, mmio + xlat_reg);
848                 reg_val = ioread32(mmio + xlat_reg);
849                 if (reg_val != addr) {
850                         iowrite32(0, mmio + xlat_reg);
851                         return -EIO;
852                 }
853
854                 /* set and verify setting the limit */
855                 iowrite32(limit, mmio + limit_reg);
856                 reg_val = ioread32(mmio + limit_reg);
857                 if (reg_val != limit) {
858                         iowrite32(base, mmio + limit_reg);
859                         iowrite32(0, mmio + xlat_reg);
860                         return -EIO;
861                 }
862         }
863
864         return 0;
865 }
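/*
 * A minimal client sketch of the two calls above via the ntb.h wrappers
 * (error handling omitted; buf_addr is a DMA address the caller obtained
 * elsewhere, e.g. from dma_alloc_coherent()):
 *
 *   phys_addr_t base;
 *   resource_size_t size, align, align_size;
 *
 *   ntb_mw_get_range(ntb, 0, &base, &size, &align, &align_size);
 *   ntb_mw_set_trans(ntb, 0, buf_addr, size);
 *
 * After this, peer writes into its outgoing window land in the buffer at
 * buf_addr.
 */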
866
867 static int intel_ntb_link_is_up(struct ntb_dev *ntb,
868                                 enum ntb_speed *speed,
869                                 enum ntb_width *width)
870 {
871         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
872
873         if (ndev->reg->link_is_up(ndev)) {
874                 if (speed)
875                         *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
876                 if (width)
877                         *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
878                 return 1;
879         } else {
880                 /* TODO MAYBE: is it possible to observe the link speed and
881                  * width while link is training? */
882                 if (speed)
883                         *speed = NTB_SPEED_NONE;
884                 if (width)
885                         *width = NTB_WIDTH_NONE;
886                 return 0;
887         }
888 }
889
890 static int intel_ntb_link_enable(struct ntb_dev *ntb,
891                                  enum ntb_speed max_speed,
892                                  enum ntb_width max_width)
893 {
894         struct intel_ntb_dev *ndev;
895         u32 ntb_ctl;
896
897         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
898
899         if (ndev->ntb.topo == NTB_TOPO_SEC)
900                 return -EINVAL;
901
902         dev_dbg(ndev_dev(ndev),
903                 "Enabling link with max_speed %d max_width %d\n",
904                 max_speed, max_width);
905         if (max_speed != NTB_SPEED_AUTO)
906                 dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
907         if (max_width != NTB_WIDTH_AUTO)
908                 dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);
909
910         ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
911         ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
912         ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
913         ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
914         if (ndev->bar4_split)
915                 ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
916         iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
917
918         return 0;
919 }
920
921 static int intel_ntb_link_disable(struct ntb_dev *ntb)
922 {
923         struct intel_ntb_dev *ndev;
924         u32 ntb_cntl;
925
926         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
927
928         if (ndev->ntb.topo == NTB_TOPO_SEC)
929                 return -EINVAL;
930
931         dev_dbg(ndev_dev(ndev), "Disabling link\n");
932
933         /* Bring NTB link down */
934         ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
935         ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
936         ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
937         if (ndev->bar4_split)
938                 ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
939         ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
940         iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
941
942         return 0;
943 }
944
945 static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
946 {
947         return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
948 }
949
950 static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
951 {
952         return ntb_ndev(ntb)->db_valid_mask;
953 }
954
955 static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
956 {
957         struct intel_ntb_dev *ndev;
958
959         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
960
961         return ndev->db_vec_count;
962 }
963
964 static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
965 {
966         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
967
968         if (db_vector < 0 || db_vector > ndev->db_vec_count)
969                 return 0;
970
971         return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
972 }
973
974 static u64 intel_ntb_db_read(struct ntb_dev *ntb)
975 {
976         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
977
978         return ndev_db_read(ndev,
979                             ndev->self_mmio +
980                             ndev->self_reg->db_bell);
981 }
982
983 static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
984 {
985         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
986
987         return ndev_db_write(ndev, db_bits,
988                              ndev->self_mmio +
989                              ndev->self_reg->db_bell);
990 }
991
992 static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
993 {
994         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
995
996         return ndev_db_set_mask(ndev, db_bits,
997                                 ndev->self_mmio +
998                                 ndev->self_reg->db_mask);
999 }
1000
1001 static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
1002 {
1003         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1004
1005         return ndev_db_clear_mask(ndev, db_bits,
1006                                   ndev->self_mmio +
1007                                   ndev->self_reg->db_mask);
1008 }
1009
1010 static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
1011                                   phys_addr_t *db_addr,
1012                                   resource_size_t *db_size)
1013 {
1014         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1015
1016         return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
1017                             ndev->peer_reg->db_bell);
1018 }
1019
1020 static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1021 {
1022         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1023
1024         return ndev_db_write(ndev, db_bits,
1025                              ndev->peer_mmio +
1026                              ndev->peer_reg->db_bell);
1027 }
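/*
 * For example, a client calling ntb_peer_db_set(ntb, BIT_ULL(0)) through
 * the ntb.h wrapper ends up here and rings doorbell bit 0 on the peer by
 * writing the peer's doorbell register across the bridge.
 */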
1028
1029 static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
1030 {
1031         return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
1032 }
1033
1034 static int intel_ntb_spad_count(struct ntb_dev *ntb)
1035 {
1036         struct intel_ntb_dev *ndev;
1037
1038         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1039
1040         return ndev->spad_count;
1041 }
1042
1043 static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
1044 {
1045         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1046
1047         return ndev_spad_read(ndev, idx,
1048                               ndev->self_mmio +
1049                               ndev->self_reg->spad);
1050 }
1051
1052 static int intel_ntb_spad_write(struct ntb_dev *ntb,
1053                                 int idx, u32 val)
1054 {
1055         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1056
1057         return ndev_spad_write(ndev, idx, val,
1058                                ndev->self_mmio +
1059                                ndev->self_reg->spad);
1060 }
1061
1062 static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
1063                                     phys_addr_t *spad_addr)
1064 {
1065         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1066
1067         return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
1068                               ndev->peer_reg->spad);
1069 }
1070
1071 static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
1072 {
1073         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1074
1075         return ndev_spad_read(ndev, idx,
1076                               ndev->peer_mmio +
1077                               ndev->peer_reg->spad);
1078 }
1079
1080 static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
1081                                      int idx, u32 val)
1082 {
1083         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1084
1085         return ndev_spad_write(ndev, idx, val,
1086                                ndev->peer_mmio +
1087                                ndev->peer_reg->spad);
1088 }
1089
1090 /* BWD */
1091
1092 static u64 bwd_db_ioread(void __iomem *mmio)
1093 {
1094         return ioread64(mmio);
1095 }
1096
1097 static void bwd_db_iowrite(u64 bits, void __iomem *mmio)
1098 {
1099         iowrite64(bits, mmio);
1100 }
1101
1102 static int bwd_poll_link(struct intel_ntb_dev *ndev)
1103 {
1104         u32 ntb_ctl;
1105
1106         ntb_ctl = ioread32(ndev->self_mmio + BWD_NTBCNTL_OFFSET);
1107
1108         if (ntb_ctl == ndev->ntb_ctl)
1109                 return 0;
1110
1111         ndev->ntb_ctl = ntb_ctl;
1112
1113         ndev->lnk_sta = ioread32(ndev->self_mmio + BWD_LINK_STATUS_OFFSET);
1114
1115         return 1;
1116 }
1117
1118 static int bwd_link_is_up(struct intel_ntb_dev *ndev)
1119 {
1120         return BWD_NTB_CTL_ACTIVE(ndev->ntb_ctl);
1121 }
1122
1123 static int bwd_link_is_err(struct intel_ntb_dev *ndev)
1124 {
1125         if (ioread32(ndev->self_mmio + BWD_LTSSMSTATEJMP_OFFSET)
1126             & BWD_LTSSMSTATEJMP_FORCEDETECT)
1127                 return 1;
1128
1129         if (ioread32(ndev->self_mmio + BWD_IBSTERRRCRVSTS0_OFFSET)
1130             & BWD_IBIST_ERR_OFLOW)
1131                 return 1;
1132
1133         return 0;
1134 }
1135
1136 static inline enum ntb_topo bwd_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
1137 {
1138         switch (ppd & BWD_PPD_TOPO_MASK) {
1139         case BWD_PPD_TOPO_B2B_USD:
1140                 dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
1141                 return NTB_TOPO_B2B_USD;
1142
1143         case BWD_PPD_TOPO_B2B_DSD:
1144                 dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
1145                 return NTB_TOPO_B2B_DSD;
1146
1147         case BWD_PPD_TOPO_PRI_USD:
1148         case BWD_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
1149         case BWD_PPD_TOPO_SEC_USD:
1150         case BWD_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
1151                 dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
1152                 return NTB_TOPO_NONE;
1153         }
1154
1155         dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
1156         return NTB_TOPO_NONE;
1157 }
1158
1159 static void bwd_link_hb(struct work_struct *work)
1160 {
1161         struct intel_ntb_dev *ndev = hb_ndev(work);
1162         unsigned long poll_ts;
1163         void __iomem *mmio;
1164         u32 status32;
1165
1166         poll_ts = ndev->last_ts + BWD_LINK_HB_TIMEOUT;
1167
1168         /* Delay polling the link status if an interrupt was received,
1169          * unless the cached link status says the link is down.
1170          */
1171         if (time_after(poll_ts, jiffies) && bwd_link_is_up(ndev)) {
1172                 schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
1173                 return;
1174         }
1175
1176         if (bwd_poll_link(ndev))
1177                 ntb_link_event(&ndev->ntb);
1178
1179         if (bwd_link_is_up(ndev) || !bwd_link_is_err(ndev)) {
1180                 schedule_delayed_work(&ndev->hb_timer, BWD_LINK_HB_TIMEOUT);
1181                 return;
1182         }
1183
1184         /* Link is down with error: recover the link! */
1185
1186         mmio = ndev->self_mmio;
1187
1188         /* Driver resets the NTB ModPhy lanes - magic! */
1189         iowrite8(0xe0, mmio + BWD_MODPHY_PCSREG6);
1190         iowrite8(0x40, mmio + BWD_MODPHY_PCSREG4);
1191         iowrite8(0x60, mmio + BWD_MODPHY_PCSREG4);
1192         iowrite8(0x60, mmio + BWD_MODPHY_PCSREG6);
1193
1194         /* Driver waits 100ms to allow the NTB ModPhy to settle */
1195         msleep(100);
1196
1197         /* Clear AER Errors, write to clear */
1198         status32 = ioread32(mmio + BWD_ERRCORSTS_OFFSET);
1199         dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
1200         status32 &= PCI_ERR_COR_REP_ROLL;
1201         iowrite32(status32, mmio + BWD_ERRCORSTS_OFFSET);
1202
1203         /* Clear unexpected electrical idle event in LTSSM, write to clear */
1204         status32 = ioread32(mmio + BWD_LTSSMERRSTS0_OFFSET);
1205         dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
1206         status32 |= BWD_LTSSMERRSTS0_UNEXPECTEDEI;
1207         iowrite32(status32, mmio + BWD_LTSSMERRSTS0_OFFSET);
1208
1209         /* Clear DeSkew Buffer error, write to clear */
1210         status32 = ioread32(mmio + BWD_DESKEWSTS_OFFSET);
1211         dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
1212         status32 |= BWD_DESKEWSTS_DBERR;
1213         iowrite32(status32, mmio + BWD_DESKEWSTS_OFFSET);
1214
1215         status32 = ioread32(mmio + BWD_IBSTERRRCRVSTS0_OFFSET);
1216         dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
1217         status32 &= BWD_IBIST_ERR_OFLOW;
1218         iowrite32(status32, mmio + BWD_IBSTERRRCRVSTS0_OFFSET);
1219
1220         /* Releases the NTB state machine to allow the link to retrain */
1221         status32 = ioread32(mmio + BWD_LTSSMSTATEJMP_OFFSET);
1222         dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
1223         status32 &= ~BWD_LTSSMSTATEJMP_FORCEDETECT;
1224         iowrite32(status32, mmio + BWD_LTSSMSTATEJMP_OFFSET);
1225
1226         /* There is a potential race between the 2 NTB devices recovering at the
1227          * same time.  If the times are the same, the link will not recover and
1228          * the driver will be stuck in this loop forever.  Add a random interval
1229          * to the recovery time to prevent this race.
1230          */
1231         schedule_delayed_work(&ndev->hb_timer, BWD_LINK_RECOVERY_TIME
1232                               + prandom_u32() % BWD_LINK_RECOVERY_TIME);
1233 }
1234
1235 static int bwd_init_isr(struct intel_ntb_dev *ndev)
1236 {
1237         int rc;
1238
1239         rc = ndev_init_isr(ndev, 1, BWD_DB_MSIX_VECTOR_COUNT,
1240                            BWD_DB_MSIX_VECTOR_SHIFT, BWD_DB_TOTAL_SHIFT);
1241         if (rc)
1242                 return rc;
1243
1244         /* BWD doesn't have link status interrupt, poll on that platform */
1245         ndev->last_ts = jiffies;
1246         INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_hb);
1247         schedule_delayed_work(&ndev->hb_timer, BWD_LINK_HB_TIMEOUT);
1248
1249         return 0;
1250 }
1251
1252 static void bwd_deinit_isr(struct intel_ntb_dev *ndev)
1253 {
1254         cancel_delayed_work_sync(&ndev->hb_timer);
1255         ndev_deinit_isr(ndev);
1256 }
1257
1258 static int bwd_init_ntb(struct intel_ntb_dev *ndev)
1259 {
1260         ndev->mw_count = BWD_MW_COUNT;
1261         ndev->spad_count = BWD_SPAD_COUNT;
1262         ndev->db_count = BWD_DB_COUNT;
1263
1264         switch (ndev->ntb.topo) {
1265         case NTB_TOPO_B2B_USD:
1266         case NTB_TOPO_B2B_DSD:
1267                 ndev->self_reg = &bwd_pri_reg;
1268                 ndev->peer_reg = &bwd_b2b_reg;
1269                 ndev->xlat_reg = &bwd_sec_xlat;
1270
1271                 /* Enable Bus Master and Memory Space on the secondary side */
1272                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1273                           ndev->self_mmio + BWD_SPCICMD_OFFSET);
1274
1275                 break;
1276
1277         default:
1278                 return -EINVAL;
1279         }
1280
1281         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
1282
1283         return 0;
1284 }
1285
1286 static int bwd_init_dev(struct intel_ntb_dev *ndev)
1287 {
1288         u32 ppd;
1289         int rc;
1290
1291         rc = pci_read_config_dword(ndev->ntb.pdev, BWD_PPD_OFFSET, &ppd);
1292         if (rc)
1293                 return -EIO;
1294
1295         ndev->ntb.topo = bwd_ppd_topo(ndev, ppd);
1296         if (ndev->ntb.topo == NTB_TOPO_NONE)
1297                 return -EINVAL;
1298
1299         rc = bwd_init_ntb(ndev);
1300         if (rc)
1301                 return rc;
1302
1303         rc = bwd_init_isr(ndev);
1304         if (rc)
1305                 return rc;
1306
1307         if (ndev->ntb.topo != NTB_TOPO_SEC) {
1308                 /* Initiate PCI-E link training */
1309                 rc = pci_write_config_dword(ndev->ntb.pdev, BWD_PPD_OFFSET,
1310                                             ppd | BWD_PPD_INIT_LINK);
1311                 if (rc)
1312                         return rc;
1313         }
1314
1315         return 0;
1316 }
1317
1318 static void bwd_deinit_dev(struct intel_ntb_dev *ndev)
1319 {
1320         bwd_deinit_isr(ndev);
1321 }
1322
1323 /* SNB */
1324
1325 static u64 snb_db_ioread(void __iomem *mmio)
1326 {
1327         return (u64)ioread16(mmio);
1328 }
1329
1330 static void snb_db_iowrite(u64 bits, void __iomem *mmio)
1331 {
1332         iowrite16((u16)bits, mmio);
1333 }
1334
1335 static int snb_poll_link(struct intel_ntb_dev *ndev)
1336 {
1337         u16 reg_val;
1338         int rc;
1339
1340         ndev->reg->db_iowrite(ndev->db_link_mask,
1341                               ndev->self_mmio +
1342                               ndev->self_reg->db_bell);
1343
1344         rc = pci_read_config_word(ndev->ntb.pdev,
1345                                   SNB_LINK_STATUS_OFFSET, &reg_val);
1346         if (rc)
1347                 return 0;
1348
1349         if (reg_val == ndev->lnk_sta)
1350                 return 0;
1351
1352         ndev->lnk_sta = reg_val;
1353
1354         return 1;
1355 }
1356
1357 static int snb_link_is_up(struct intel_ntb_dev *ndev)
1358 {
1359         if (ndev->ntb.topo == NTB_TOPO_SEC)
1360                 return 1;
1361
1362         return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
1363 }
1364
1365 static inline enum ntb_topo snb_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
1366 {
1367         switch (ppd & SNB_PPD_TOPO_MASK) {
1368         case SNB_PPD_TOPO_B2B_USD:
1369                 return NTB_TOPO_B2B_USD;
1370
1371         case SNB_PPD_TOPO_B2B_DSD:
1372                 return NTB_TOPO_B2B_DSD;
1373
1374         case SNB_PPD_TOPO_PRI_USD:
1375         case SNB_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
1376                 return NTB_TOPO_PRI;
1377
1378         case SNB_PPD_TOPO_SEC_USD:
1379         case SNB_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
1380                 return NTB_TOPO_SEC;
1381         }
1382
1383         return NTB_TOPO_NONE;
1384 }
1385
1386 static inline int snb_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
1387 {
1388         if (ppd & SNB_PPD_SPLIT_BAR_MASK) {
1389                 dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
1390                 return 1;
1391         }
1392         return 0;
1393 }
1394
1395 static int snb_init_isr(struct intel_ntb_dev *ndev)
1396 {
1397         return ndev_init_isr(ndev, SNB_DB_MSIX_VECTOR_COUNT,
1398                              SNB_DB_MSIX_VECTOR_COUNT,
1399                              SNB_DB_MSIX_VECTOR_SHIFT,
1400                              SNB_DB_TOTAL_SHIFT);
1401 }
1402
1403 static void snb_deinit_isr(struct intel_ntb_dev *ndev)
1404 {
1405         ndev_deinit_isr(ndev);
1406 }
1407
1408 static int snb_setup_b2b_mw(struct intel_ntb_dev *ndev,
1409                             const struct intel_b2b_addr *addr,
1410                             const struct intel_b2b_addr *peer_addr)
1411 {
1412         struct pci_dev *pdev;
1413         void __iomem *mmio;
1414         resource_size_t bar_size;
1415         phys_addr_t bar_addr;
1416         int b2b_bar;
1417         u8 bar_sz;
1418
1419         pdev = ndev_pdev(ndev);
1420         mmio = ndev->self_mmio;
1421
1422         if (ndev->b2b_idx >= ndev->mw_count) {
1423                 dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
1424                 b2b_bar = 0;
1425                 ndev->b2b_off = 0;
1426         } else {
1427                 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
1428                 if (b2b_bar < 0)
1429                         return -EIO;
1430
1431                 dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
1432
1433                 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
1434
1435                 dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
1436
1437                 if (b2b_mw_share && SNB_B2B_MIN_SIZE <= bar_size >> 1) {
1438                         dev_dbg(ndev_dev(ndev),
1439                                 "b2b using first half of bar\n");
1440                         ndev->b2b_off = bar_size >> 1;
1441                 } else if (SNB_B2B_MIN_SIZE <= bar_size) {
1442                         dev_dbg(ndev_dev(ndev),
1443                                 "b2b using whole bar\n");
1444                         ndev->b2b_off = 0;
1445                         --ndev->mw_count;
1446                 } else {
1447                         dev_dbg(ndev_dev(ndev),
1448                                 "b2b bar size is too small\n");
1449                         return -EIO;
1450                 }
1451         }
1452
1453         /* Reset the secondary bar sizes to match the primary bar sizes,
1454          * except disable or halve the size of the b2b secondary bar.
1455          *
1456          * Note: each bar size register is handled by its own code, because the
1457          * register offsets are not in a consistent order (bar5sz comes after ppd, odd).
1458          */
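        /* The bar size config registers hold the size as a power-of-two
         * exponent, so the "bar_sz -= 1" below halves the secondary bar.
         */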
1459         pci_read_config_byte(pdev, SNB_PBAR23SZ_OFFSET, &bar_sz);
1460         dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
1461         if (b2b_bar == 2) {
1462                 if (ndev->b2b_off)
1463                         bar_sz -= 1;
1464                 else
1465                         bar_sz = 0;
1466         }
1467         pci_write_config_byte(pdev, SNB_SBAR23SZ_OFFSET, bar_sz);
1468         pci_read_config_byte(pdev, SNB_SBAR23SZ_OFFSET, &bar_sz);
1469         dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);
1470
1471         if (!ndev->bar4_split) {
1472                 pci_read_config_byte(pdev, SNB_PBAR45SZ_OFFSET, &bar_sz);
1473                 dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
1474                 if (b2b_bar == 4) {
1475                         if (ndev->b2b_off)
1476                                 bar_sz -= 1;
1477                         else
1478                                 bar_sz = 0;
1479                 }
1480                 pci_write_config_byte(pdev, SNB_SBAR45SZ_OFFSET, bar_sz);
1481                 pci_read_config_byte(pdev, SNB_SBAR45SZ_OFFSET, &bar_sz);
1482                 dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
1483         } else {
1484                 pci_read_config_byte(pdev, SNB_PBAR4SZ_OFFSET, &bar_sz);
1485                 dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
1486                 if (b2b_bar == 4) {
1487                         if (ndev->b2b_off)
1488                                 bar_sz -= 1;
1489                         else
1490                                 bar_sz = 0;
1491                 }
1492                 pci_write_config_byte(pdev, SNB_SBAR4SZ_OFFSET, bar_sz);
1493                 pci_read_config_byte(pdev, SNB_SBAR4SZ_OFFSET, &bar_sz);
1494                 dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);
1495
1496                 pci_read_config_byte(pdev, SNB_PBAR5SZ_OFFSET, &bar_sz);
1497                 dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
1498                 if (b2b_bar == 5) {
1499                         if (ndev->b2b_off)
1500                                 bar_sz -= 1;
1501                         else
1502                                 bar_sz = 0;
1503                 }
1504                 pci_write_config_byte(pdev, SNB_SBAR5SZ_OFFSET, bar_sz);
1505                 pci_read_config_byte(pdev, SNB_SBAR5SZ_OFFSET, &bar_sz);
1506                 dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
1507         }
1508
1509         /* SBAR01 is hit by the first part of the b2b bar */
1510         if (b2b_bar == 0)
1511                 bar_addr = addr->bar0_addr;
1512         else if (b2b_bar == 2)
1513                 bar_addr = addr->bar2_addr64;
1514         else if (b2b_bar == 4 && !ndev->bar4_split)
1515                 bar_addr = addr->bar4_addr64;
1516         else if (b2b_bar == 4)
1517                 bar_addr = addr->bar4_addr32;
1518         else if (b2b_bar == 5)
1519                 bar_addr = addr->bar5_addr32;
1520         else
1521                 return -EIO;
1522
1523         dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
1524         iowrite64(bar_addr, mmio + SNB_SBAR0BASE_OFFSET);
1525
1526         /* The other SBARs are normally hit by the PBAR xlat, except for the
1527          * b2b bar.  The b2b bar is either disabled above, or configured at
1528          * half size, and it starts at the PBAR xlat + offset.
1529          */
1530
1531         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1532         iowrite64(bar_addr, mmio + SNB_SBAR23BASE_OFFSET);
1533         bar_addr = ioread64(mmio + SNB_SBAR23BASE_OFFSET);
1534         dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);
1535
1536         if (!ndev->bar4_split) {
1537                 bar_addr = addr->bar4_addr64 +
1538                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1539                 iowrite64(bar_addr, mmio + SNB_SBAR45BASE_OFFSET);
1540                 bar_addr = ioread64(mmio + SNB_SBAR45BASE_OFFSET);
1541                 dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
1542         } else {
1543                 bar_addr = addr->bar4_addr32 +
1544                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1545                 iowrite32(bar_addr, mmio + SNB_SBAR4BASE_OFFSET);
1546                 bar_addr = ioread32(mmio + SNB_SBAR4BASE_OFFSET);
1547                 dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);
1548
1549                 bar_addr = addr->bar5_addr32 +
1550                         (b2b_bar == 5 ? ndev->b2b_off : 0);
1551                 iowrite32(bar_addr, mmio + SNB_SBAR5BASE_OFFSET);
1552                 bar_addr = ioread32(mmio + SNB_SBAR5BASE_OFFSET);
1553                 dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
1554         }
1555
1556         /* set up incoming bar limits == base addrs (zero-length windows) */
1557
1558         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1559         iowrite64(bar_addr, mmio + SNB_SBAR23LMT_OFFSET);
1560         bar_addr = ioread64(mmio + SNB_SBAR23LMT_OFFSET);
1561         dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);
1562
1563         if (!ndev->bar4_split) {
1564                 bar_addr = addr->bar4_addr64 +
1565                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1566                 iowrite64(bar_addr, mmio + SNB_SBAR45LMT_OFFSET);
1567                 bar_addr = ioread64(mmio + SNB_SBAR45LMT_OFFSET);
1568                 dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
1569         } else {
1570                 bar_addr = addr->bar4_addr32 +
1571                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1572                 iowrite32(bar_addr, mmio + SNB_SBAR4LMT_OFFSET);
1573                 bar_addr = ioread32(mmio + SNB_SBAR4LMT_OFFSET);
1574                 dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);
1575
1576                 bar_addr = addr->bar5_addr32 +
1577                         (b2b_bar == 5 ? ndev->b2b_off : 0);
1578                 iowrite32(bar_addr, mmio + SNB_SBAR5LMT_OFFSET);
1579                 bar_addr = ioread32(mmio + SNB_SBAR5LMT_OFFSET);
1580                 dev_dbg(ndev_dev(ndev), "SBAR5LMT %#010llx\n", bar_addr);
1581         }
1582
1583         /* zero incoming translation addrs */
1584         iowrite64(0, mmio + SNB_SBAR23XLAT_OFFSET);
1585
1586         if (!ndev->bar4_split) {
1587                 iowrite64(0, mmio + SNB_SBAR45XLAT_OFFSET);
1588         } else {
1589                 iowrite32(0, mmio + SNB_SBAR4XLAT_OFFSET);
1590                 iowrite32(0, mmio + SNB_SBAR5XLAT_OFFSET);
1591         }
1592
1593         /* zero outgoing translation limits (whole bar size windows) */
1594         iowrite64(0, mmio + SNB_PBAR23LMT_OFFSET);
1595         if (!ndev->bar4_split) {
1596                 iowrite64(0, mmio + SNB_PBAR45LMT_OFFSET);
1597         } else {
1598                 iowrite32(0, mmio + SNB_PBAR4LMT_OFFSET);
1599                 iowrite32(0, mmio + SNB_PBAR5LMT_OFFSET);
1600         }
1601
1602         /* set outgoing translation offsets */
1603         bar_addr = peer_addr->bar2_addr64;
1604         iowrite64(bar_addr, mmio + SNB_PBAR23XLAT_OFFSET);
1605         bar_addr = ioread64(mmio + SNB_PBAR23XLAT_OFFSET);
1606         dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);
1607
1608         if (!ndev->bar4_split) {
1609                 bar_addr = peer_addr->bar4_addr64;
1610                 iowrite64(bar_addr, mmio + SNB_PBAR45XLAT_OFFSET);
1611                 bar_addr = ioread64(mmio + SNB_PBAR45XLAT_OFFSET);
1612                 dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
1613         } else {
1614                 bar_addr = peer_addr->bar4_addr32;
1615                 iowrite32(bar_addr, mmio + SNB_PBAR4XLAT_OFFSET);
1616                 bar_addr = ioread32(mmio + SNB_PBAR4XLAT_OFFSET);
1617                 dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);
1618
1619                 bar_addr = peer_addr->bar5_addr32;
1620                 iowrite32(bar_addr, mmio + SNB_PBAR5XLAT_OFFSET);
1621                 bar_addr = ioread32(mmio + SNB_PBAR5XLAT_OFFSET);
1622                 dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
1623         }
1624
1625         /* set the translation offset for b2b registers */
1626         if (b2b_bar == 0)
1627                 bar_addr = peer_addr->bar0_addr;
1628         else if (b2b_bar == 2)
1629                 bar_addr = peer_addr->bar2_addr64;
1630         else if (b2b_bar == 4 && !ndev->bar4_split)
1631                 bar_addr = peer_addr->bar4_addr64;
1632         else if (b2b_bar == 4)
1633                 bar_addr = peer_addr->bar4_addr32;
1634         else if (b2b_bar == 5)
1635                 bar_addr = peer_addr->bar5_addr32;
1636         else
1637                 return -EIO;
1638
1639         /* B2B_XLAT_OFFSET is 64-bit, but can only take 32-bit writes */
1640         dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
1641         iowrite32(bar_addr, mmio + SNB_B2B_XLAT_OFFSETL);
1642         iowrite32(bar_addr >> 32, mmio + SNB_B2B_XLAT_OFFSETU);
1643
1644         if (b2b_bar) {
1645                 /* map peer ntb mmio config space registers */
1646                 ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
1647                                             SNB_B2B_MIN_SIZE);
1648                 if (!ndev->peer_mmio)
1649                         return -EIO;
1650         }
1651
1652         return 0;
1653 }
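
/* A hypothetical invocation exercising the knobs used above (parameter names
 * assumed from the module_param declarations earlier in this file):
 *
 *	modprobe ntb_hw_intel b2b_mw_idx=-1 b2b_mw_share=1
 *
 * requests the last memory window for b2b and, when that bar is large enough,
 * shares half of it as a reduced-size memory window.
 */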
1654
1655 static int snb_init_ntb(struct intel_ntb_dev *ndev)
1656 {
1657         int rc;
1658         u32 ntb_ctl;
1659
1660         if (ndev->bar4_split)
1661                 ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
1662         else
1663                 ndev->mw_count = SNB_MW_COUNT;
1664
1665         ndev->spad_count = SNB_SPAD_COUNT;
1666         ndev->db_count = SNB_DB_COUNT;
1667         ndev->db_link_mask = SNB_DB_LINK_BIT;
1668
1669         switch (ndev->ntb.topo) {
1670         case NTB_TOPO_PRI:
1671                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1672                         dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
1673                         return -EINVAL;
1674                 }
1675
1676                 /* enable the link to allow the secondary side device to appear */
1677                 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1678                 ntb_ctl &= ~NTB_CTL_DISABLE;
1679                 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
1680
1681                 /* use half the spads for the peer */
1682                 ndev->spad_count >>= 1;
1683                 ndev->self_reg = &snb_pri_reg;
1684                 ndev->peer_reg = &snb_sec_reg;
1685                 ndev->xlat_reg = &snb_sec_xlat;
1686                 break;
1687
1688         case NTB_TOPO_SEC:
1689                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1690                         dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
1691                         return -EINVAL;
1692                 }
1693                 /* use half the spads for the peer */
1694                 ndev->spad_count >>= 1;
1695                 ndev->self_reg = &snb_sec_reg;
1696                 ndev->peer_reg = &snb_pri_reg;
1697                 ndev->xlat_reg = &snb_pri_xlat;
1698                 break;
1699
1700         case NTB_TOPO_B2B_USD:
1701         case NTB_TOPO_B2B_DSD:
1702                 ndev->self_reg = &snb_pri_reg;
1703                 ndev->peer_reg = &snb_b2b_reg;
1704                 ndev->xlat_reg = &snb_sec_xlat;
1705
1706                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1707                         ndev->peer_reg = &snb_pri_reg;
1708
1709                         if (b2b_mw_idx < 0)
1710                                 ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
1711                         else
1712                                 ndev->b2b_idx = b2b_mw_idx;
1713
1714                         dev_dbg(ndev_dev(ndev),
1715                                 "setting up b2b mw idx %d (resolved to %d)\n",
1716                                 b2b_mw_idx, ndev->b2b_idx);
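                        /* e.g., b2b_mw_idx == -1 with mw_count == 2 resolves
                         * to b2b_idx == 1, the last memory window.
                         */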
1717
1718                 } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
1719                         dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
1720                         ndev->db_count -= 1;
1721                 }
1722
1723                 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
1724                         rc = snb_setup_b2b_mw(ndev,
1725                                               &snb_b2b_dsd_addr,
1726                                               &snb_b2b_usd_addr);
1727                 } else {
1728                         rc = snb_setup_b2b_mw(ndev,
1729                                               &snb_b2b_usd_addr,
1730                                               &snb_b2b_dsd_addr);
1731                 }
1732                 if (rc)
1733                         return rc;
1734
1735                 /* Enable Bus Master and Memory Space on the secondary side */
1736                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1737                           ndev->self_mmio + SNB_SPCICMD_OFFSET);
1738
1739                 break;
1740
1741         default:
1742                 return -EINVAL;
1743         }
1744
1745         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
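        /* e.g., db_count == 15 (SNB) yields db_valid_mask == 0x7fff; with the
         * bit-14 erratum handled above, db_count == 14 yields 0x3fff.
         */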
1746
1747         ndev->reg->db_iowrite(ndev->db_valid_mask,
1748                               ndev->self_mmio +
1749                               ndev->self_reg->db_mask);
1750
1751         return 0;
1752 }
1753
1754 static int snb_init_dev(struct intel_ntb_dev *ndev)
1755 {
1756         struct pci_dev *pdev;
1757         u8 ppd;
1758         int rc, mem;
1759
1760         pdev = ndev_pdev(ndev);
1761
1762         switch (pdev->device) {
1763         /* There is a Xeon hardware erratum related to writes to SDOORBELL or
1764          * B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
1765          * which may hang the system.  To work around this, use the second
1766          * memory window to access the interrupt and scratchpad registers on
1767          * the remote system.
1768          */
1769         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
1770         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
1771         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
1772         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
1773         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
1774         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
1775         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1776         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1777         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1778         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1779         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1780         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1781                 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
1782                 break;
1783         }
1784
1785         switch (pdev->device) {
1786         /* There is a hardware erratum related to accessing any register in
1787          * SB01BASE in the presence of bidirectional traffic crossing the NTB.
1788          */
1789         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1790         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1791         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1792         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1793         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1794         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1795                 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
1796                 break;
1797         }
1798
1799         switch (pdev->device) {
1800         /* HW erratum on bit 14 of the b2bdoorbell register: writes to that
1801          * bit will not be mirrored to the remote system.  Shrink the number of
1802          * doorbell bits by one, since bit 14 is the last bit.
1803          */
1804         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
1805         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
1806         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
1807         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
1808         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
1809         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
1810         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1811         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1812         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1813         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1814         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1815         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1816                 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
1817                 break;
1818         }
1819
1820         ndev->reg = &snb_reg;
1821
1822         rc = pci_read_config_byte(pdev, SNB_PPD_OFFSET, &ppd);
1823         if (rc)
1824                 return -EIO;
1825
1826         ndev->ntb.topo = snb_ppd_topo(ndev, ppd);
1827         dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
1828                 ntb_topo_string(ndev->ntb.topo));
1829         if (ndev->ntb.topo == NTB_TOPO_NONE)
1830                 return -EINVAL;
1831
1832         if (ndev->ntb.topo != NTB_TOPO_SEC) {
1833                 ndev->bar4_split = snb_ppd_bar4_split(ndev, ppd);
1834                 dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
1835                         ppd, ndev->bar4_split);
1836         } else {
1837                 /* This is how the transparent side figures out whether split
1838                  * BAR is in use: the hw on the transparent side has no way to
1839                  * know, and does not set the PPD, so count the memory BARs.
1840                  */
1841                 mem = pci_select_bars(pdev, IORESOURCE_MEM);
1842                 ndev->bar4_split = hweight32(mem) ==
1843                         HSX_SPLIT_BAR_MW_COUNT + 1;
1844                 dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
1845                         mem, ndev->bar4_split);
1846         }
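        /* e.g., with memory BARs 0, 2, 4 and 5 present, hweight32(mem) == 4 ==
         * HSX_SPLIT_BAR_MW_COUNT + 1 (bar0 plus three windows), so split BAR
         * mode is inferred.
         */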
1847
1848         rc = snb_init_ntb(ndev);
1849         if (rc)
1850                 return rc;
1851
1852         return snb_init_isr(ndev);
1853 }
1854
1855 static void snb_deinit_dev(struct intel_ntb_dev *ndev)
1856 {
1857         snb_deinit_isr(ndev);
1858 }
1859
1860 static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
1861 {
1862         int rc;
1863
1864         pci_set_drvdata(pdev, ndev);
1865
1866         rc = pci_enable_device(pdev);
1867         if (rc)
1868                 goto err_pci_enable;
1869
1870         rc = pci_request_regions(pdev, NTB_NAME);
1871         if (rc)
1872                 goto err_pci_regions;
1873
1874         pci_set_master(pdev);
1875
1876         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1877         if (rc) {
1878                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1879                 if (rc)
1880                         goto err_dma_mask;
1881                 dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
1882         }
1883
1884         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1885         if (rc) {
1886                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1887                 if (rc)
1888                         goto err_dma_mask;
1889                 dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
1890         }
1891
1892         ndev->self_mmio = pci_iomap(pdev, 0, 0);
1893         if (!ndev->self_mmio) {
1894                 rc = -EIO;
1895                 goto err_mmio;
1896         }
1897         ndev->peer_mmio = ndev->self_mmio;
1898
1899         return 0;
1900
1901 err_mmio:
1902 err_dma_mask:
1903         pci_clear_master(pdev);
1904         pci_release_regions(pdev);
1905 err_pci_regions:
1906         pci_disable_device(pdev);
1907 err_pci_enable:
1908         pci_set_drvdata(pdev, NULL);
1909         return rc;
1910 }
1911
1912 static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
1913 {
1914         struct pci_dev *pdev = ndev_pdev(ndev);
1915
1916         if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
1917                 pci_iounmap(pdev, ndev->peer_mmio);
1918         pci_iounmap(pdev, ndev->self_mmio);
1919
1920         pci_clear_master(pdev);
1921         pci_release_regions(pdev);
1922         pci_disable_device(pdev);
1923         pci_set_drvdata(pdev, NULL);
1924 }
1925
1926 static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
1927                                     struct pci_dev *pdev)
1928 {
1929         ndev->ntb.pdev = pdev;
1930         ndev->ntb.topo = NTB_TOPO_NONE;
1931         ndev->ntb.ops = &intel_ntb_ops;
1932
1933         ndev->b2b_off = 0;
1934         ndev->b2b_idx = INT_MAX;
1935
1936         ndev->bar4_split = 0;
1937
1938         ndev->mw_count = 0;
1939         ndev->spad_count = 0;
1940         ndev->db_count = 0;
1941         ndev->db_vec_count = 0;
1942         ndev->db_vec_shift = 0;
1943
1944         ndev->ntb_ctl = 0;
1945         ndev->lnk_sta = 0;
1946
1947         ndev->db_valid_mask = 0;
1948         ndev->db_link_mask = 0;
1949         ndev->db_mask = 0;
1950
1951         spin_lock_init(&ndev->db_mask_lock);
1952 }
1953
1954 static int intel_ntb_pci_probe(struct pci_dev *pdev,
1955                                const struct pci_device_id *id)
1956 {
1957         struct intel_ntb_dev *ndev;
1958         int rc;
1959
1960         if (pdev_is_bwd(pdev)) {
1961                 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
1962                 if (!ndev) {
1963                         rc = -ENOMEM;
1964                         goto err_ndev;
1965                 }
1966
1967                 ndev_init_struct(ndev, pdev);
1968
1969                 rc = intel_ntb_init_pci(ndev, pdev);
1970                 if (rc)
1971                         goto err_init_pci;
1972
1973                 rc = bwd_init_dev(ndev);
1974                 if (rc)
1975                         goto err_init_dev;
1976
1977         } else if (pdev_is_snb(pdev)) {
1978                 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
1979                 if (!ndev) {
1980                         rc = -ENOMEM;
1981                         goto err_ndev;
1982                 }
1983
1984                 ndev_init_struct(ndev, pdev);
1985
1986                 rc = intel_ntb_init_pci(ndev, pdev);
1987                 if (rc)
1988                         goto err_init_pci;
1989
1990                 rc = snb_init_dev(ndev);
1991                 if (rc)
1992                         goto err_init_dev;
1993
1994         } else {
1995                 rc = -EINVAL;
1996                 goto err_ndev;
1997         }
1998
1999         ndev_reset_unsafe_flags(ndev);
2000
2001         ndev->reg->poll_link(ndev);
2002
2003         ndev_init_debugfs(ndev);
2004
2005         rc = ntb_register_device(&ndev->ntb);
2006         if (rc)
2007                 goto err_register;
2008
2009         return 0;
2010
2011 err_register:
2012         ndev_deinit_debugfs(ndev);
2013         if (pdev_is_bwd(pdev))
2014                 bwd_deinit_dev(ndev);
2015         else if (pdev_is_snb(pdev))
2016                 snb_deinit_dev(ndev);
2017 err_init_dev:
2018         intel_ntb_deinit_pci(ndev);
2019 err_init_pci:
2020         kfree(ndev);
2021 err_ndev:
2022         return rc;
2023 }
2024
2025 static void intel_ntb_pci_remove(struct pci_dev *pdev)
2026 {
2027         struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
2028
2029         ntb_unregister_device(&ndev->ntb);
2030         ndev_deinit_debugfs(ndev);
2031         if (pdev_is_bwd(pdev))
2032                 bwd_deinit_dev(ndev);
2033         else if (pdev_is_snb(pdev))
2034                 snb_deinit_dev(ndev);
2035         intel_ntb_deinit_pci(ndev);
2036         kfree(ndev);
2037 }
2038
2039 static const struct intel_ntb_reg bwd_reg = {
2040         .poll_link              = bwd_poll_link,
2041         .link_is_up             = bwd_link_is_up,
2042         .db_ioread              = bwd_db_ioread,
2043         .db_iowrite             = bwd_db_iowrite,
2044         .db_size                = sizeof(u64),
2045         .ntb_ctl                = BWD_NTBCNTL_OFFSET,
2046         .mw_bar                 = {2, 4},
2047 };
2048
2049 static const struct intel_ntb_alt_reg bwd_pri_reg = {
2050         .db_bell                = BWD_PDOORBELL_OFFSET,
2051         .db_mask                = BWD_PDBMSK_OFFSET,
2052         .spad                   = BWD_SPAD_OFFSET,
2053 };
2054
2055 static const struct intel_ntb_alt_reg bwd_b2b_reg = {
2056         .db_bell                = BWD_B2B_DOORBELL_OFFSET,
2057         .spad                   = BWD_B2B_SPAD_OFFSET,
2058 };
2059
2060 static const struct intel_ntb_xlat_reg bwd_sec_xlat = {
2061         /* FIXME : .bar0_base   = BWD_SBAR0BASE_OFFSET, */
2062         /* FIXME : .bar2_limit  = BWD_SBAR2LMT_OFFSET, */
2063         .bar2_xlat              = BWD_SBAR2XLAT_OFFSET,
2064 };
2065
2066 static const struct intel_ntb_reg snb_reg = {
2067         .poll_link              = snb_poll_link,
2068         .link_is_up             = snb_link_is_up,
2069         .db_ioread              = snb_db_ioread,
2070         .db_iowrite             = snb_db_iowrite,
2071         .db_size                = sizeof(u32),
2072         .ntb_ctl                = SNB_NTBCNTL_OFFSET,
2073         .mw_bar                 = {2, 4, 5},
2074 };
2075
2076 static const struct intel_ntb_alt_reg snb_pri_reg = {
2077         .db_bell                = SNB_PDOORBELL_OFFSET,
2078         .db_mask                = SNB_PDBMSK_OFFSET,
2079         .spad                   = SNB_SPAD_OFFSET,
2080 };
2081
2082 static const struct intel_ntb_alt_reg snb_sec_reg = {
2083         .db_bell                = SNB_SDOORBELL_OFFSET,
2084         .db_mask                = SNB_SDBMSK_OFFSET,
2085         /* second half of the scratchpads */
2086         .spad                   = SNB_SPAD_OFFSET + (SNB_SPAD_COUNT << 1),
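        /* e.g., with SNB_SPAD_COUNT == 16 this lands 32 bytes in, just past
         * the eight 4-byte scratchpads of the first half.
         */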
2087 };
2088
2089 static const struct intel_ntb_alt_reg snb_b2b_reg = {
2090         .db_bell                = SNB_B2B_DOORBELL_OFFSET,
2091         .spad                   = SNB_B2B_SPAD_OFFSET,
2092 };
2093
2094 static const struct intel_ntb_xlat_reg snb_pri_xlat = {
2095         /* Note: no primary .bar0_base is visible to the secondary side.
2096          *
2097          * The secondary side cannot read the base address stored in the
2098          * primary bars, and the base address is needed to set the limit
2099          * register to any value other than zero, or unlimited.
2100          *
2101          * Without the base address, the secondary side cannot disable the
2102          * window by setting the limit equal to the base, nor can it limit
2103          * the size of the memory window by setting the limit to base + size.
2104          */
2105         .bar2_limit             = SNB_PBAR23LMT_OFFSET,
2106         .bar2_xlat              = SNB_PBAR23XLAT_OFFSET,
2107 };
2108
2109 static const struct intel_ntb_xlat_reg snb_sec_xlat = {
2110         .bar0_base              = SNB_SBAR0BASE_OFFSET,
2111         .bar2_limit             = SNB_SBAR23LMT_OFFSET,
2112         .bar2_xlat              = SNB_SBAR23XLAT_OFFSET,
2113 };
2114
2115 static struct intel_b2b_addr snb_b2b_usd_addr = {
2116         .bar2_addr64            = SNB_B2B_BAR2_USD_ADDR64,
2117         .bar4_addr64            = SNB_B2B_BAR4_USD_ADDR64,
2118         .bar4_addr32            = SNB_B2B_BAR4_USD_ADDR32,
2119         .bar5_addr32            = SNB_B2B_BAR5_USD_ADDR32,
2120 };
2121
2122 static struct intel_b2b_addr snb_b2b_dsd_addr = {
2123         .bar2_addr64            = SNB_B2B_BAR2_DSD_ADDR64,
2124         .bar4_addr64            = SNB_B2B_BAR4_DSD_ADDR64,
2125         .bar4_addr32            = SNB_B2B_BAR4_DSD_ADDR32,
2126         .bar5_addr32            = SNB_B2B_BAR5_DSD_ADDR32,
2127 };
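
/* Note how the two address sets cross over in snb_init_ntb: each side programs
 * its secondary BAR bases from one set and its outgoing PBAR translations from
 * the other, so one side's outgoing translations land exactly on the peer's
 * secondary BAR bases.
 */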
2128
2129 /* operations for primary side of local ntb */
2130 static const struct ntb_dev_ops intel_ntb_ops = {
2131         .mw_count               = intel_ntb_mw_count,
2132         .mw_get_range           = intel_ntb_mw_get_range,
2133         .mw_set_trans           = intel_ntb_mw_set_trans,
2134         .link_is_up             = intel_ntb_link_is_up,
2135         .link_enable            = intel_ntb_link_enable,
2136         .link_disable           = intel_ntb_link_disable,
2137         .db_is_unsafe           = intel_ntb_db_is_unsafe,
2138         .db_valid_mask          = intel_ntb_db_valid_mask,
2139         .db_vector_count        = intel_ntb_db_vector_count,
2140         .db_vector_mask         = intel_ntb_db_vector_mask,
2141         .db_read                = intel_ntb_db_read,
2142         .db_clear               = intel_ntb_db_clear,
2143         .db_set_mask            = intel_ntb_db_set_mask,
2144         .db_clear_mask          = intel_ntb_db_clear_mask,
2145         .peer_db_addr           = intel_ntb_peer_db_addr,
2146         .peer_db_set            = intel_ntb_peer_db_set,
2147         .spad_is_unsafe         = intel_ntb_spad_is_unsafe,
2148         .spad_count             = intel_ntb_spad_count,
2149         .spad_read              = intel_ntb_spad_read,
2150         .spad_write             = intel_ntb_spad_write,
2151         .peer_spad_addr         = intel_ntb_peer_spad_addr,
2152         .peer_spad_read         = intel_ntb_peer_spad_read,
2153         .peer_spad_write        = intel_ntb_peer_spad_write,
2154 };
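
/* A hedged sketch of how a client might reach these ops through the ntb.h
 * wrappers (hypothetical client code, not part of this driver):
 *
 *	if (ntb_link_is_up(ntb, NULL, NULL))
 *		ntb_peer_spad_write(ntb, 0, 0x1234);
 *	ntb_peer_db_set(ntb, BIT_ULL(0));
 */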
2155
2156 static const struct file_operations intel_ntb_debugfs_info = {
2157         .owner = THIS_MODULE,
2158         .open = simple_open,
2159         .read = ndev_debugfs_read,
2160 };
2161
2162 static const struct pci_device_id intel_ntb_pci_tbl[] = {
2163         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
2164         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
2165         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
2166         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
2167         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
2168         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
2169         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
2170         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
2171         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
2172         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
2173         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
2174         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
2175         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
2176         {0}
2177 };
2178 MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
2179
2180 static struct pci_driver intel_ntb_pci_driver = {
2181         .name = KBUILD_MODNAME,
2182         .id_table = intel_ntb_pci_tbl,
2183         .probe = intel_ntb_pci_probe,
2184         .remove = intel_ntb_pci_remove,
2185 };
2186
2187 static int __init intel_ntb_pci_driver_init(void)
2188 {
2189         if (debugfs_initialized())
2190                 debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
2191
2192         return pci_register_driver(&intel_ntb_pci_driver);
2193 }
2194 module_init(intel_ntb_pci_driver_init);
2195
2196 static void __exit intel_ntb_pci_driver_exit(void)
2197 {
2198         pci_unregister_driver(&intel_ntb_pci_driver);
2199
2200         debugfs_remove_recursive(debugfs_dir);
2201 }
2202 module_exit(intel_ntb_pci_driver_exit);
2203