NTB: Check the device ID to set errata flags
drivers/ntb/hw/intel/ntb_hw_intel.c
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"

#define NTB_NAME        "ntb_hw_intel"
#define NTB_DESC        "Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER         "2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

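/* Register offset helpers: BAR-indexed registers are packed at a 4-byte
 * stride, so a 64-bit BAR occupies two index slots.  bar2_off() rebases the
 * index for register blocks that begin at BAR2.
 */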
#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)

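/* Module parameter usage (illustrative values; both hosts on the link MUST
 * agree):  modprobe ntb_hw_intel b2b_mw_idx=-1 b2b_mw_share=1
 */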
static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
                 "value of zero or positive starts from first mw idx, and a "
                 "negative value starts from last mw idx.  Both sides MUST "
                 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
                 "ntb so that the peer ntb only occupies the first half of "
                 "the mw, so the second half can still be used as a mw.  Both "
                 "sides MUST set the same value here!");

static const struct intel_ntb_reg bwd_reg;
static const struct intel_ntb_alt_reg bwd_pri_reg;
static const struct intel_ntb_alt_reg bwd_sec_reg;
static const struct intel_ntb_alt_reg bwd_b2b_reg;
static const struct intel_ntb_xlat_reg bwd_pri_xlat;
static const struct intel_ntb_xlat_reg bwd_sec_xlat;
static const struct intel_ntb_reg snb_reg;
static const struct intel_ntb_alt_reg snb_pri_reg;
static const struct intel_ntb_alt_reg snb_sec_reg;
static const struct intel_ntb_alt_reg snb_b2b_reg;
static const struct intel_ntb_xlat_reg snb_pri_xlat;
static const struct intel_ntb_xlat_reg snb_sec_xlat;
static const struct intel_b2b_addr snb_b2b_usd_addr;
static const struct intel_b2b_addr snb_b2b_dsd_addr;

static const struct ntb_dev_ops intel_ntb_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

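/* Fall back to two 32-bit accesses, low dword first, on architectures that
 * do not provide readq/writeq.
 */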
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
        u64 low, high;

        low = ioread32(mmio);
        high = ioread32(mmio + sizeof(u32));
        return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
        iowrite32(val, mmio);
        iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif

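/* Device ID classifiers: bwd_* covers the Atom NTB; snb_* covers the Xeon
 * NTB family (JSF, SNB, IVT, HSX) in its SS, PS, and B2B flavors.
 */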
static inline int pdev_is_bwd(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
                return 1;
        }
        return 0;
}

static inline int pdev_is_snb(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
                return 1;
        }
        return 0;
}

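/* Translate hardware errata flags (hwerr_flags) into unsafe flags: clients
 * are steered away from the affected doorbell or scratchpad registers
 * unless they explicitly choose to ignore the hazard.
 */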
static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
        ndev->unsafe_flags = 0;
        ndev->unsafe_flags_ignore = 0;

        /* Only B2B has a workaround to avoid SDOORBELL */
        if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
                if (!ntb_topo_is_b2b(ndev->ntb.topo))
                        ndev->unsafe_flags |= NTB_UNSAFE_DB;

        /* No low level workaround to avoid SB01BASE */
        if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
                ndev->unsafe_flags |= NTB_UNSAFE_DB;
                ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
        }
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
                                 unsigned long flag)
{
        return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
                                     unsigned long flag)
{
        flag &= ndev->unsafe_flags;
        ndev->unsafe_flags_ignore |= flag;

        return !!flag;
}

static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
        if (idx < 0 || idx >= ndev->mw_count)
                return -EINVAL;
        return ndev->reg->mw_bar[idx];
}

static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
                               phys_addr_t *db_addr, resource_size_t *db_size,
                               phys_addr_t reg_addr, unsigned long reg)
{
        WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

        if (db_addr) {
                *db_addr = reg_addr + reg;
                dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
        }

        if (db_size) {
                *db_size = ndev->reg->db_size;
                dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
        }

        return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
                               void __iomem *mmio)
{
        WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

        return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
                                void __iomem *mmio)
{
        WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        ndev->reg->db_iowrite(db_bits, mmio);

        return 0;
}

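/* db_mask_lock serializes the read-modify-write updates of the cached
 * doorbell mask in the two helpers below.
 */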
static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                   void __iomem *mmio)
{
        unsigned long irqflags;

        WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask |= db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                     void __iomem *mmio)
{
        unsigned long irqflags;

        WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask &= ~db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}

static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
        u64 shift, mask;

        shift = ndev->db_vec_shift;
        mask = BIT_ULL(shift) - 1;

        return mask << (shift * db_vector);
}

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
                                 phys_addr_t *spad_addr, phys_addr_t reg_addr,
                                 unsigned long reg)
{
        WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        if (spad_addr) {
                *spad_addr = reg_addr + reg + (idx << 2);
                dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
        }

        return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
                                 void __iomem *mmio)
{
        WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

        if (idx < 0 || idx >= ndev->spad_count)
                return 0;

        return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
                                  void __iomem *mmio)
{
        WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        iowrite32(val, mmio + (idx << 2));

        return 0;
}

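/* Demultiplex one interrupt vector: its doorbell mask determines whether it
 * signals a link event, client doorbell bits, or both.
 */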
static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
        u64 vec_mask;

        vec_mask = ndev_vec_mask(ndev, vec);

        dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);

        ndev->last_ts = jiffies;

        if (vec_mask & ndev->db_link_mask) {
                if (ndev->reg->poll_link(ndev))
                        ntb_link_event(&ndev->ntb);
        }

        if (vec_mask & ndev->db_valid_mask)
                ntb_db_event(&ndev->ntb, vec);

        return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
        struct intel_ntb_vec *nvec = dev;

        return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
        struct intel_ntb_dev *ndev = dev;

        return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

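/* Set up interrupts, preferring msix, then msi, then legacy intx.  All
 * doorbells are masked first so that no handler runs before the device
 * state is fully initialized.
 */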
static int ndev_init_isr(struct intel_ntb_dev *ndev,
                         int msix_min, int msix_max,
                         int msix_shift, int total_shift)
{
        struct pci_dev *pdev;
        int rc, i, msix_count;

        pdev = ndev_pdev(ndev);

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        /* Try to set up msix irq */

        ndev->vec = kcalloc(msix_max, sizeof(*ndev->vec), GFP_KERNEL);
        if (!ndev->vec)
                goto err_msix_vec_alloc;

        ndev->msix = kcalloc(msix_max, sizeof(*ndev->msix), GFP_KERNEL);
        if (!ndev->msix)
                goto err_msix_alloc;

        for (i = 0; i < msix_max; ++i)
                ndev->msix[i].entry = i;

        msix_count = pci_enable_msix_range(pdev, ndev->msix,
                                           msix_min, msix_max);
        if (msix_count < 0)
                goto err_msix_enable;

        for (i = 0; i < msix_count; ++i) {
                ndev->vec[i].ndev = ndev;
                ndev->vec[i].num = i;
                rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
                                 "ndev_vec_isr", &ndev->vec[i]);
                if (rc)
                        goto err_msix_request;
        }

        dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
        ndev->db_vec_count = msix_count;
        ndev->db_vec_shift = msix_shift;
        return 0;

err_msix_request:
        while (i-- > 0)
                free_irq(ndev->msix[i].vector, &ndev->vec[i]);
        pci_disable_msix(pdev);
err_msix_enable:
        kfree(ndev->msix);
err_msix_alloc:
        kfree(ndev->vec);
err_msix_vec_alloc:
        ndev->msix = NULL;
        ndev->vec = NULL;

        /* Try to set up msi irq */

        rc = pci_enable_msi(pdev);
        if (rc)
                goto err_msi_enable;

        rc = request_irq(pdev->irq, ndev_irq_isr, 0,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_msi_request;

        dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_msi_request:
        pci_disable_msi(pdev);
err_msi_enable:

        /* Try to set up intx irq */

        pci_intx(pdev, 1);

        rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_intx_request;

        dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_intx_request:
        return rc;
}

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
        struct pci_dev *pdev;
        int i;

        pdev = ndev_pdev(ndev);

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        if (ndev->msix) {
                i = ndev->db_vec_count;
                while (i--)
                        free_irq(ndev->msix[i].vector, &ndev->vec[i]);
                pci_disable_msix(pdev);
                kfree(ndev->msix);
                kfree(ndev->vec);
        } else {
                free_irq(pdev->irq, ndev);
                if (pci_dev_msi_enabled(pdev))
                        pci_disable_msi(pdev);
        }
}

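/* debugfs "info" read: dump the topology, link state, and a snapshot of the
 * doorbell, translation, and (on SNB) b2b and error registers.
 */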
static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
                                 size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev;
        void __iomem *mmio;
        char *buf;
        size_t buf_size;
        ssize_t ret, off;
        union { u64 v64; u32 v32; u16 v16; } u;

        ndev = filp->private_data;
        mmio = ndev->self_mmio;

        buf_size = min(count, 0x800ul);

        buf = kmalloc(buf_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        off = 0;

        off += scnprintf(buf + off, buf_size - off,
                         "NTB Device Information:\n");

        off += scnprintf(buf + off, buf_size - off,
                         "Connection Topology -\t%s\n",
                         ntb_topo_string(ndev->ntb.topo));

        off += scnprintf(buf + off, buf_size - off,
                         "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
        off += scnprintf(buf + off, buf_size - off,
                         "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
        off += scnprintf(buf + off, buf_size - off,
                         "BAR4 Split -\t\t%s\n",
                         ndev->bar4_split ? "yes" : "no");

        off += scnprintf(buf + off, buf_size - off,
                         "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
        off += scnprintf(buf + off, buf_size - off,
                         "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

        if (!ndev->reg->link_is_up(ndev)) {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tDown\n");
        } else {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tUp\n");
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Speed -\t\tPCI-E Gen %u\n",
                                 NTB_LNK_STA_SPEED(ndev->lnk_sta));
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Width -\t\tx%u\n",
                                 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
        }

        off += scnprintf(buf + off, buf_size - off,
                         "Memory Window Count -\t%u\n", ndev->mw_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Scratchpad Count -\t%u\n", ndev->spad_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Count -\t%u\n", ndev->db_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask -\t\t%#llx\n", u.v64);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Bell -\t\t%#llx\n", u.v64);

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Incoming XLAT:\n");

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "XLAT23 -\t\t%#018llx\n", u.v64);

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
        off += scnprintf(buf + off, buf_size - off,
                         "XLAT45 -\t\t%#018llx\n", u.v64);

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "LMT23 -\t\t\t%#018llx\n", u.v64);

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
        off += scnprintf(buf + off, buf_size - off,
                         "LMT45 -\t\t\t%#018llx\n", u.v64);

        if (pdev_is_snb(ndev->ntb.pdev)) {
                if (ntb_topo_is_b2b(ndev->ntb.topo)) {
                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Outgoing B2B XLAT:\n");

                        u.v64 = ioread64(mmio + SNB_PBAR23XLAT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B XLAT23 -\t\t%#018llx\n", u.v64);

                        u.v64 = ioread64(mmio + SNB_PBAR45XLAT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B XLAT45 -\t\t%#018llx\n", u.v64);

                        u.v64 = ioread64(mmio + SNB_PBAR23LMT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B LMT23 -\t\t%#018llx\n", u.v64);

                        u.v64 = ioread64(mmio + SNB_PBAR45LMT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B LMT45 -\t\t%#018llx\n", u.v64);

                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Secondary BAR:\n");

                        u.v64 = ioread64(mmio + SNB_SBAR0BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR01 -\t\t%#018llx\n", u.v64);

                        u.v64 = ioread64(mmio + SNB_SBAR23BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR23 -\t\t%#018llx\n", u.v64);

                        u.v64 = ioread64(mmio + SNB_SBAR45BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR45 -\t\t%#018llx\n", u.v64);
                }

                off += scnprintf(buf + off, buf_size - off,
                                 "\nSNB NTB Statistics:\n");

                u.v16 = ioread16(mmio + SNB_USMEMMISS_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "Upstream Memory Miss -\t%u\n", u.v16);

                off += scnprintf(buf + off, buf_size - off,
                                 "\nSNB NTB Hardware Errors:\n");

                if (!pci_read_config_word(ndev->ntb.pdev,
                                          SNB_DEVSTS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "DEVSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_word(ndev->ntb.pdev,
                                          SNB_LINK_STATUS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "LNKSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_dword(ndev->ntb.pdev,
                                           SNB_UNCERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "UNCERRSTS -\t\t%#06x\n", u.v32);

                if (!pci_read_config_dword(ndev->ntb.pdev,
                                           SNB_CORERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "CORERRSTS -\t\t%#06x\n", u.v32);
        }

        ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
        kfree(buf);
        return ret;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
        if (!debugfs_dir) {
                ndev->debugfs_dir = NULL;
                ndev->debugfs_info = NULL;
        } else {
                ndev->debugfs_dir =
                        debugfs_create_dir(ndev_name(ndev), debugfs_dir);
                if (!ndev->debugfs_dir)
                        ndev->debugfs_info = NULL;
                else
                        ndev->debugfs_info =
                                debugfs_create_file("info", S_IRUSR,
                                                    ndev->debugfs_dir, ndev,
                                                    &intel_ntb_debugfs_info);
        }
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
        debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb)
{
        return ntb_ndev(ntb)->mw_count;
}

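/* When the b2b window consumed an entire BAR (b2b_off == 0), that window is
 * hidden from clients, so client indices at or above b2b_idx are shifted up
 * by one to skip it.
 */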
static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
                                  phys_addr_t *base,
                                  resource_size_t *size,
                                  resource_size_t *align,
                                  resource_size_t *align_size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        int bar;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        if (base)
                *base = pci_resource_start(ndev->ntb.pdev, bar) +
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        if (size)
                *size = pci_resource_len(ndev->ntb.pdev, bar) -
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        if (align)
                *align = pci_resource_len(ndev->ntb.pdev, bar);

        if (align_size)
                *align_size = 1;

        return 0;
}

static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
                                  dma_addr_t addr, resource_size_t size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        unsigned long base_reg, xlat_reg, limit_reg;
        resource_size_t bar_size, mw_size;
        void __iomem *mmio;
        u64 base, limit, reg_val;
        int bar;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        bar_size = pci_resource_len(ndev->ntb.pdev, bar);

        if (idx == ndev->b2b_idx)
                mw_size = bar_size - ndev->b2b_off;
        else
                mw_size = bar_size;

        /* hardware requires that addr is aligned to bar size */
        if (addr & (bar_size - 1))
                return -EINVAL;

        /* make sure the range fits in the usable mw size */
        if (size > mw_size)
                return -EINVAL;

        mmio = ndev->self_mmio;
        base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
        xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
        limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

        if (bar < 4 || !ndev->bar4_split) {
                base = ioread64(mmio + base_reg);

                /* Set the limit if supported, if size is not mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite64(addr, mmio + xlat_reg);
                reg_val = ioread64(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite64(limit, mmio + limit_reg);
                reg_val = ioread64(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite64(base, mmio + limit_reg);
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }
        } else {
                /* split bar addr range must all be 32 bit */
                if (addr & (~0ull << 32))
                        return -EINVAL;
                if ((addr + size) & (~0ull << 32))
                        return -EINVAL;

                base = ioread32(mmio + base_reg);

                /* Set the limit if supported, if size is not mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite32(addr, mmio + xlat_reg);
                reg_val = ioread32(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite32(limit, mmio + limit_reg);
                reg_val = ioread32(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite32(base, mmio + limit_reg);
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }
        }

        return 0;
}

static int intel_ntb_link_is_up(struct ntb_dev *ntb,
                                enum ntb_speed *speed,
                                enum ntb_width *width)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        if (ndev->reg->link_is_up(ndev)) {
                if (speed)
                        *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
                if (width)
                        *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
                return 1;
        } else {
                /* TODO MAYBE: is it possible to observe the link speed and
                 * width while link is training? */
                if (speed)
                        *speed = NTB_SPEED_NONE;
                if (width)
                        *width = NTB_WIDTH_NONE;
                return 0;
        }
}

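/* Enabling the link clears the disable and config-lock bits and enables
 * snooping on the memory window BARs; speed and width are negotiated by the
 * hardware, so the max_speed and max_width hints are only logged.
 */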
static int intel_ntb_link_enable(struct ntb_dev *ntb,
                                 enum ntb_speed max_speed,
                                 enum ntb_width max_width)
{
        struct intel_ntb_dev *ndev;
        u32 ntb_ctl;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return -EINVAL;

        dev_dbg(ndev_dev(ndev),
                "Enabling link with max_speed %d max_width %d\n",
                max_speed, max_width);
        if (max_speed != NTB_SPEED_AUTO)
                dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
        if (max_width != NTB_WIDTH_AUTO)
                dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

        ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
        ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
        ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
        ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
        if (ndev->bar4_split)
                ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
        iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

        return 0;
}

static int intel_ntb_link_disable(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;
        u32 ntb_cntl;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return -EINVAL;

        dev_dbg(ndev_dev(ndev), "Disabling link\n");

        /* Bring NTB link down */
        ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
        ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
        ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
        if (ndev->bar4_split)
                ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
        ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
        iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

        return 0;
}

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
        return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
        return ntb_ndev(ntb)->db_valid_mask;
}

static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        return ndev->db_vec_count;
}

static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        if (db_vector < 0 || db_vector > ndev->db_vec_count)
                return 0;

        return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_read(ndev,
                            ndev->self_mmio +
                            ndev->self_reg->db_bell);
}

static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_write(ndev, db_bits,
                             ndev->self_mmio +
                             ndev->self_reg->db_bell);
}

static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_set_mask(ndev, db_bits,
                                ndev->self_mmio +
                                ndev->self_reg->db_mask);
}

static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_clear_mask(ndev, db_bits,
                                  ndev->self_mmio +
                                  ndev->self_reg->db_mask);
}

static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
                                  phys_addr_t *db_addr,
                                  resource_size_t *db_size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
                            ndev->peer_reg->db_bell);
}

static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_write(ndev, db_bits,
                             ndev->peer_mmio +
                             ndev->peer_reg->db_bell);
}

static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
        return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}

static int intel_ntb_spad_count(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        return ndev->spad_count;
}

static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_read(ndev, idx,
                              ndev->self_mmio +
                              ndev->self_reg->spad);
}

static int intel_ntb_spad_write(struct ntb_dev *ntb,
                                int idx, u32 val)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_write(ndev, idx, val,
                               ndev->self_mmio +
                               ndev->self_reg->spad);
}

static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
                                    phys_addr_t *spad_addr)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
                              ndev->peer_reg->spad);
}

static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_read(ndev, idx,
                              ndev->peer_mmio +
                              ndev->peer_reg->spad);
}

static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
                                     int idx, u32 val)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_write(ndev, idx, val,
                               ndev->peer_mmio +
                               ndev->peer_reg->spad);
}

/* BWD */

static u64 bwd_db_ioread(void __iomem *mmio)
{
        return ioread64(mmio);
}

static void bwd_db_iowrite(u64 bits, void __iomem *mmio)
{
        iowrite64(bits, mmio);
}

static int bwd_poll_link(struct intel_ntb_dev *ndev)
{
        u32 ntb_ctl;

        ntb_ctl = ioread32(ndev->self_mmio + BWD_NTBCNTL_OFFSET);

        if (ntb_ctl == ndev->ntb_ctl)
                return 0;

        ndev->ntb_ctl = ntb_ctl;

        ndev->lnk_sta = ioread32(ndev->self_mmio + BWD_LINK_STATUS_OFFSET);

        return 1;
}

static int bwd_link_is_up(struct intel_ntb_dev *ndev)
{
        return BWD_NTB_CTL_ACTIVE(ndev->ntb_ctl);
}

static int bwd_link_is_err(struct intel_ntb_dev *ndev)
{
        if (ioread32(ndev->self_mmio + BWD_LTSSMSTATEJMP_OFFSET)
            & BWD_LTSSMSTATEJMP_FORCEDETECT)
                return 1;

        if (ioread32(ndev->self_mmio + BWD_IBSTERRRCRVSTS0_OFFSET)
            & BWD_IBIST_ERR_OFLOW)
                return 1;

        return 0;
}

static inline enum ntb_topo bwd_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
        switch (ppd & BWD_PPD_TOPO_MASK) {
        case BWD_PPD_TOPO_B2B_USD:
                dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
                return NTB_TOPO_B2B_USD;

        case BWD_PPD_TOPO_B2B_DSD:
                dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
                return NTB_TOPO_B2B_DSD;

        case BWD_PPD_TOPO_PRI_USD:
        case BWD_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
        case BWD_PPD_TOPO_SEC_USD:
        case BWD_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
                dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
                return NTB_TOPO_NONE;
        }

        dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
        return NTB_TOPO_NONE;
}

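/* Heartbeat work: BWD has no link status interrupt, so poll the link and,
 * if it is down with an error latched, run the recovery sequence below.
 */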
static void bwd_link_hb(struct work_struct *work)
{
        struct intel_ntb_dev *ndev = hb_ndev(work);
        unsigned long poll_ts;
        void __iomem *mmio;
        u32 status32;

        poll_ts = ndev->last_ts + BWD_LINK_HB_TIMEOUT;

        /* Delay polling the link status if an interrupt was received,
         * unless the cached link status says the link is down.
         */
        if (time_after(poll_ts, jiffies) && bwd_link_is_up(ndev)) {
                schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
                return;
        }

        if (bwd_poll_link(ndev))
                ntb_link_event(&ndev->ntb);

        if (bwd_link_is_up(ndev) || !bwd_link_is_err(ndev)) {
                schedule_delayed_work(&ndev->hb_timer, BWD_LINK_HB_TIMEOUT);
                return;
        }

        /* Link is down with error: recover the link! */

        mmio = ndev->self_mmio;

        /* Driver resets the NTB ModPhy lanes - magic! */
        iowrite8(0xe0, mmio + BWD_MODPHY_PCSREG6);
        iowrite8(0x40, mmio + BWD_MODPHY_PCSREG4);
        iowrite8(0x60, mmio + BWD_MODPHY_PCSREG4);
        iowrite8(0x60, mmio + BWD_MODPHY_PCSREG6);

        /* Driver waits 100ms to allow the NTB ModPhy to settle */
        msleep(100);

        /* Clear AER Errors, write to clear */
        status32 = ioread32(mmio + BWD_ERRCORSTS_OFFSET);
        dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
        status32 &= PCI_ERR_COR_REP_ROLL;
        iowrite32(status32, mmio + BWD_ERRCORSTS_OFFSET);

        /* Clear unexpected electrical idle event in LTSSM, write to clear */
        status32 = ioread32(mmio + BWD_LTSSMERRSTS0_OFFSET);
        dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
        status32 |= BWD_LTSSMERRSTS0_UNEXPECTEDEI;
        iowrite32(status32, mmio + BWD_LTSSMERRSTS0_OFFSET);

        /* Clear DeSkew Buffer error, write to clear */
        status32 = ioread32(mmio + BWD_DESKEWSTS_OFFSET);
        dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
        status32 |= BWD_DESKEWSTS_DBERR;
        iowrite32(status32, mmio + BWD_DESKEWSTS_OFFSET);

        status32 = ioread32(mmio + BWD_IBSTERRRCRVSTS0_OFFSET);
        dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
        status32 &= BWD_IBIST_ERR_OFLOW;
        iowrite32(status32, mmio + BWD_IBSTERRRCRVSTS0_OFFSET);

        /* Releases the NTB state machine to allow the link to retrain */
        status32 = ioread32(mmio + BWD_LTSSMSTATEJMP_OFFSET);
        dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
        status32 &= ~BWD_LTSSMSTATEJMP_FORCEDETECT;
        iowrite32(status32, mmio + BWD_LTSSMSTATEJMP_OFFSET);

        /* There is a potential race between the 2 NTB devices recovering at the
         * same time.  If the times are the same, the link will not recover and
         * the driver will be stuck in this loop forever.  Add a random interval
         * to the recovery time to prevent this race.
         */
        schedule_delayed_work(&ndev->hb_timer, BWD_LINK_RECOVERY_TIME
                              + prandom_u32() % BWD_LINK_RECOVERY_TIME);
}

static int bwd_init_isr(struct intel_ntb_dev *ndev)
{
        int rc;

        rc = ndev_init_isr(ndev, 1, BWD_DB_MSIX_VECTOR_COUNT,
                           BWD_DB_MSIX_VECTOR_SHIFT, BWD_DB_TOTAL_SHIFT);
        if (rc)
                return rc;

        /* BWD doesn't have link status interrupt, poll on that platform */
        ndev->last_ts = jiffies;
        INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_hb);
        schedule_delayed_work(&ndev->hb_timer, BWD_LINK_HB_TIMEOUT);

        return 0;
}

static void bwd_deinit_isr(struct intel_ntb_dev *ndev)
{
        cancel_delayed_work_sync(&ndev->hb_timer);
        ndev_deinit_isr(ndev);
}

static int bwd_init_ntb(struct intel_ntb_dev *ndev)
{
        ndev->mw_count = BWD_MW_COUNT;
        ndev->spad_count = BWD_SPAD_COUNT;
        ndev->db_count = BWD_DB_COUNT;

        switch (ndev->ntb.topo) {
        case NTB_TOPO_B2B_USD:
        case NTB_TOPO_B2B_DSD:
                ndev->self_reg = &bwd_pri_reg;
                ndev->peer_reg = &bwd_b2b_reg;
                ndev->xlat_reg = &bwd_sec_xlat;

                /* Enable Bus Master and Memory Space on the secondary side */
                iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
                          ndev->self_mmio + BWD_SPCICMD_OFFSET);

                break;

        default:
                return -EINVAL;
        }

        ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

        return 0;
}

static int bwd_init_dev(struct intel_ntb_dev *ndev)
{
        u32 ppd;
        int rc;

        rc = pci_read_config_dword(ndev->ntb.pdev, BWD_PPD_OFFSET, &ppd);
        if (rc)
                return -EIO;

        ndev->ntb.topo = bwd_ppd_topo(ndev, ppd);
        if (ndev->ntb.topo == NTB_TOPO_NONE)
                return -EINVAL;

        rc = bwd_init_ntb(ndev);
        if (rc)
                return rc;

        rc = bwd_init_isr(ndev);
        if (rc)
                return rc;

        if (ndev->ntb.topo != NTB_TOPO_SEC) {
                /* Initiate PCI-E link training */
                rc = pci_write_config_dword(ndev->ntb.pdev, BWD_PPD_OFFSET,
                                            ppd | BWD_PPD_INIT_LINK);
                if (rc)
                        return rc;
        }

        return 0;
}

static void bwd_deinit_dev(struct intel_ntb_dev *ndev)
{
        bwd_deinit_isr(ndev);
}

/* SNB */

static u64 snb_db_ioread(void __iomem *mmio)
{
        return (u64)ioread16(mmio);
}

static void snb_db_iowrite(u64 bits, void __iomem *mmio)
{
        iowrite16((u16)bits, mmio);
}

static int snb_poll_link(struct intel_ntb_dev *ndev)
{
        u16 reg_val;
        int rc;

        ndev->reg->db_iowrite(ndev->db_link_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_bell);

        rc = pci_read_config_word(ndev->ntb.pdev,
                                  SNB_LINK_STATUS_OFFSET, &reg_val);
        if (rc)
                return 0;

        if (reg_val == ndev->lnk_sta)
                return 0;

        ndev->lnk_sta = reg_val;

        return 1;
}

static int snb_link_is_up(struct intel_ntb_dev *ndev)
{
        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return 1;

        return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}

static inline enum ntb_topo snb_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
{
        switch (ppd & SNB_PPD_TOPO_MASK) {
        case SNB_PPD_TOPO_B2B_USD:
                return NTB_TOPO_B2B_USD;

        case SNB_PPD_TOPO_B2B_DSD:
                return NTB_TOPO_B2B_DSD;

        case SNB_PPD_TOPO_PRI_USD:
        case SNB_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
                return NTB_TOPO_PRI;

        case SNB_PPD_TOPO_SEC_USD:
        case SNB_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
                return NTB_TOPO_SEC;
        }

        return NTB_TOPO_NONE;
}

static inline int snb_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{
        if (ppd & SNB_PPD_SPLIT_BAR_MASK) {
                dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
                return 1;
        }
        return 0;
}

static int snb_init_isr(struct intel_ntb_dev *ndev)
{
        return ndev_init_isr(ndev, SNB_DB_MSIX_VECTOR_COUNT,
                             SNB_DB_MSIX_VECTOR_COUNT,
                             SNB_DB_MSIX_VECTOR_SHIFT,
                             SNB_DB_TOTAL_SHIFT);
}

static void snb_deinit_isr(struct intel_ntb_dev *ndev)
{
        ndev_deinit_isr(ndev);
}

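/* Configure the b2b memory window and the secondary BARs for the chosen
 * topology: addr describes this side's secondary address space, peer_addr
 * the outgoing translation toward the peer.
 */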
1369 static int snb_setup_b2b_mw(struct intel_ntb_dev *ndev,
1370                             const struct intel_b2b_addr *addr,
1371                             const struct intel_b2b_addr *peer_addr)
1372 {
1373         struct pci_dev *pdev;
1374         void __iomem *mmio;
1375         resource_size_t bar_size;
1376         phys_addr_t bar_addr;
1377         int b2b_bar;
1378         u8 bar_sz;
1379
1380         pdev = ndev_pdev(ndev);
1381         mmio = ndev->self_mmio;
1382
1383         if (ndev->b2b_idx >= ndev->mw_count) {
1384                 dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
1385                 b2b_bar = 0;
1386                 ndev->b2b_off = 0;
1387         } else {
1388                 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
1389                 if (b2b_bar < 0)
1390                         return -EIO;
1391
1392                 dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
1393
1394                 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
1395
1396                 dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
1397
1398                 if (b2b_mw_share && SNB_B2B_MIN_SIZE <= bar_size >> 1) {
1399                         dev_dbg(ndev_dev(ndev),
1400                                 "b2b using first half of bar\n");
1401                         ndev->b2b_off = bar_size >> 1;
1402                 } else if (SNB_B2B_MIN_SIZE <= bar_size) {
1403                         dev_dbg(ndev_dev(ndev),
1404                                 "b2b using whole bar\n");
1405                         ndev->b2b_off = 0;
1406                         --ndev->mw_count;
1407                 } else {
1408                         dev_dbg(ndev_dev(ndev),
1409                                 "b2b bar size is too small\n");
1410                         return -EIO;
1411                 }
1412         }
1413
1414         /* Reset the secondary bar sizes to match the primary bar sizes,
1415          * except disable or halve the size of the b2b secondary bar.
1416          *
1417          * Note: code for each specific bar size register, because the register
1418          * offsets are not in a consistent order (bar5sz comes after ppd, odd).
1419          */
1420         pci_read_config_byte(pdev, SNB_PBAR23SZ_OFFSET, &bar_sz);
1421         dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
1422         if (b2b_bar == 2) {
1423                 if (ndev->b2b_off)
1424                         bar_sz -= 1;
1425                 else
1426                         bar_sz = 0;
1427         }
1428         pci_write_config_byte(pdev, SNB_SBAR23SZ_OFFSET, bar_sz);
1429         pci_read_config_byte(pdev, SNB_SBAR23SZ_OFFSET, &bar_sz);
1430         dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);
1431
1432         if (!ndev->bar4_split) {
1433                 pci_read_config_byte(pdev, SNB_PBAR45SZ_OFFSET, &bar_sz);
1434                 dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
1435                 if (b2b_bar == 4) {
1436                         if (ndev->b2b_off)
1437                                 bar_sz -= 1;
1438                         else
1439                                 bar_sz = 0;
1440                 }
1441                 pci_write_config_byte(pdev, SNB_SBAR45SZ_OFFSET, bar_sz);
1442                 pci_read_config_byte(pdev, SNB_SBAR45SZ_OFFSET, &bar_sz);
1443                 dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
1444         } else {
1445                 pci_read_config_byte(pdev, SNB_PBAR4SZ_OFFSET, &bar_sz);
1446                 dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
1447                 if (b2b_bar == 4) {
1448                         if (ndev->b2b_off)
1449                                 bar_sz -= 1;
1450                         else
1451                                 bar_sz = 0;
1452                 }
1453                 pci_write_config_byte(pdev, SNB_SBAR4SZ_OFFSET, bar_sz);
1454                 pci_read_config_byte(pdev, SNB_SBAR4SZ_OFFSET, &bar_sz);
1455                 dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);
1456
1457                 pci_read_config_byte(pdev, SNB_PBAR5SZ_OFFSET, &bar_sz);
1458                 dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
1459                 if (b2b_bar == 5) {
1460                         if (ndev->b2b_off)
1461                                 bar_sz -= 1;
1462                         else
1463                                 bar_sz = 0;
1464                 }
1465                 pci_write_config_byte(pdev, SNB_SBAR5SZ_OFFSET, bar_sz);
1466                 pci_read_config_byte(pdev, SNB_SBAR5SZ_OFFSET, &bar_sz);
1467                 dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
1468         }
1469
1470         /* SBAR01 hit by first part of the b2b bar */
1471         if (b2b_bar == 0)
1472                 bar_addr = addr->bar0_addr;
1473         else if (b2b_bar == 2)
1474                 bar_addr = addr->bar2_addr64;
1475         else if (b2b_bar == 4 && !ndev->bar4_split)
1476                 bar_addr = addr->bar4_addr64;
1477         else if (b2b_bar == 4)
1478                 bar_addr = addr->bar4_addr32;
1479         else if (b2b_bar == 5)
1480                 bar_addr = addr->bar5_addr32;
1481         else
1482                 return -EIO;
1483
1484         dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
1485         iowrite64(bar_addr, mmio + SNB_SBAR0BASE_OFFSET);
1486
1487         /* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
1488          * The b2b bar is either disabled above, or configured half-size, and
1489          * it starts at the PBAR xlat + offset.
1490          */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + SNB_SBAR23BASE_OFFSET);
	bar_addr = ioread64(mmio + SNB_SBAR23BASE_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + SNB_SBAR45BASE_OFFSET);
		bar_addr = ioread64(mmio + SNB_SBAR45BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR4BASE_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR4BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR5BASE_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR5BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
	}

	/* set up incoming bar limits == base addrs (zero-length windows) */
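	/* with limit == base, inbound accesses have nowhere to land until a
	 * client sets a translation and widens the limit
	 */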

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + SNB_SBAR23LMT_OFFSET);
	bar_addr = ioread64(mmio + SNB_SBAR23LMT_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + SNB_SBAR45LMT_OFFSET);
		bar_addr = ioread64(mmio + SNB_SBAR45LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR4LMT_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR4LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR5LMT_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR5LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5LMT %#010llx\n", bar_addr);
	}

	/* zero incoming translation addrs */
	iowrite64(0, mmio + SNB_SBAR23XLAT_OFFSET);

	if (!ndev->bar4_split) {
		iowrite64(0, mmio + SNB_SBAR45XLAT_OFFSET);
	} else {
		iowrite32(0, mmio + SNB_SBAR4XLAT_OFFSET);
		iowrite32(0, mmio + SNB_SBAR5XLAT_OFFSET);
	}

	/* zero outgoing translation limits (windows span the whole bar) */
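	/* a limit of zero is treated as unlimited, so outgoing accesses may
	 * span the entire primary BAR
	 */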
	iowrite64(0, mmio + SNB_PBAR23LMT_OFFSET);
	if (!ndev->bar4_split) {
		iowrite64(0, mmio + SNB_PBAR45LMT_OFFSET);
	} else {
		iowrite32(0, mmio + SNB_PBAR4LMT_OFFSET);
		iowrite32(0, mmio + SNB_PBAR5LMT_OFFSET);
	}

	/* set outgoing translation offsets */
	bar_addr = peer_addr->bar2_addr64;
	iowrite64(bar_addr, mmio + SNB_PBAR23XLAT_OFFSET);
	bar_addr = ioread64(mmio + SNB_PBAR23XLAT_OFFSET);
	dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = peer_addr->bar4_addr64;
		iowrite64(bar_addr, mmio + SNB_PBAR45XLAT_OFFSET);
		bar_addr = ioread64(mmio + SNB_PBAR45XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
	} else {
		bar_addr = peer_addr->bar4_addr32;
		iowrite32(bar_addr, mmio + SNB_PBAR4XLAT_OFFSET);
		bar_addr = ioread32(mmio + SNB_PBAR4XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);

		bar_addr = peer_addr->bar5_addr32;
		iowrite32(bar_addr, mmio + SNB_PBAR5XLAT_OFFSET);
		bar_addr = ioread32(mmio + SNB_PBAR5XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
	}

	/* set the translation offset for b2b registers */
	if (b2b_bar == 0)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = peer_addr->bar5_addr32;
	else
		return -EIO;

	/* B2B_XLAT_OFFSET is 64-bit, but can only take 32-bit writes */
	dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
	iowrite32(bar_addr, mmio + SNB_B2B_XLAT_OFFSETL);
	iowrite32(bar_addr >> 32, mmio + SNB_B2B_XLAT_OFFSETU);

	if (b2b_bar) {
		/* map peer ntb mmio config space registers */
		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
					    SNB_B2B_MIN_SIZE);
		if (!ndev->peer_mmio)
			return -EIO;
	}

	return 0;
}

static int snb_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;
	u32 ntb_ctl;

	if (ndev->bar4_split)
		ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
	else
		ndev->mw_count = SNB_MW_COUNT;

	ndev->spad_count = SNB_SPAD_COUNT;
	ndev->db_count = SNB_DB_COUNT;
	ndev->db_link_mask = SNB_DB_LINK_BIT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
			return -EINVAL;
		}

		/* enable link to allow secondary side device to appear */
		ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
		ntb_ctl &= ~NTB_CTL_DISABLE;
		iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

		/* use half the spads for the peer */
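		/* e.g. 16 total scratchpads split into 8 local + 8 peer */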
		ndev->spad_count >>= 1;
		ndev->self_reg = &snb_pri_reg;
		ndev->peer_reg = &snb_sec_reg;
		ndev->xlat_reg = &snb_sec_xlat;
		break;

	case NTB_TOPO_SEC:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
			return -EINVAL;
		}
		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &snb_sec_reg;
		ndev->peer_reg = &snb_pri_reg;
		ndev->xlat_reg = &snb_pri_xlat;
		break;

	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &snb_pri_reg;
		ndev->peer_reg = &snb_b2b_reg;
		ndev->xlat_reg = &snb_sec_xlat;

		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			ndev->peer_reg = &snb_pri_reg;

			if (b2b_mw_idx < 0)
				ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
			else
				ndev->b2b_idx = b2b_mw_idx;
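			/* a negative b2b_mw_idx counts from the end, e.g.
			 * -1 selects the last window, idx mw_count - 1
			 */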

			dev_dbg(ndev_dev(ndev),
				"setting up b2b mw idx %d means %d\n",
				b2b_mw_idx, ndev->b2b_idx);

		} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
			dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
			ndev->db_count -= 1;
		}

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = snb_setup_b2b_mw(ndev,
					      &snb_b2b_dsd_addr,
					      &snb_b2b_usd_addr);
		} else {
			rc = snb_setup_b2b_mw(ndev,
					      &snb_b2b_usd_addr,
					      &snb_b2b_dsd_addr);
		}
		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
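		/* (in b2b nothing enumerates the secondary side, so enable it
		 * here through the shadowed PCI command register)
		 */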
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + SNB_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

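	/* e.g. db_count == 15 gives a valid mask of 0x7fff */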
	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

static int snb_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc, mem;

	pdev = ndev_pdev(ndev);

	switch (pdev->device) {
	/* There is a Xeon hardware erratum related to writes to SDOORBELL or
	 * B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
	 * which may hang the system.  To work around this, use the second
	 * memory window to access the interrupt and scratch pad registers on
	 * the remote system.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
		ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* There is a hardware erratum related to accessing any register in
	 * SB01BASE in the presence of bidirectional traffic crossing the NTB.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
		ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* HW erratum on bit 14 of the b2bdoorbell register.  Writes will not
	 * be mirrored to the remote system.  Shrink the number of bits by
	 * one, since bit 14 is the last bit.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
		ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
		break;
	}


	ndev->reg = &snb_reg;

	rc = pci_read_config_byte(pdev, SNB_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = snb_ppd_topo(ndev, ppd);
	dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		ndev->bar4_split = snb_ppd_bar4_split(ndev, ppd);
		dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
			ppd, ndev->bar4_split);
	} else {
		/* This is how the transparent (secondary) side figures out
		 * whether split BAR is in use: there is no way for the hw on
		 * the transparent side to know, so it cannot set the PPD.
		 */
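		/* with split BAR the device exposes four memory BARs
		 * (0, 2, 4, 5) rather than three (0, 2, 45), hence the
		 * count of HSX_SPLIT_BAR_MW_COUNT + 1
		 */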
		mem = pci_select_bars(pdev, IORESOURCE_MEM);
		ndev->bar4_split = hweight32(mem) ==
			HSX_SPLIT_BAR_MW_COUNT + 1;
		dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
			mem, ndev->bar4_split);
	}

	rc = snb_init_ntb(ndev);
	if (rc)
		return rc;

	return snb_init_isr(ndev);
}

static void snb_deinit_dev(struct intel_ntb_dev *ndev)
{
	snb_deinit_isr(ndev);
}

static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_mmio;
	}
	ndev->peer_mmio = ndev->self_mmio;

	return 0;

err_mmio:
err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev_pdev(ndev);

	if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
		pci_iounmap(pdev, ndev->peer_mmio);
	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &intel_ntb_ops;

	ndev->b2b_off = 0;
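	/* INT_MAX is a sentinel: no mw is dedicated to b2b yet */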
	ndev->b2b_idx = INT_MAX;

	ndev->bar4_split = 0;

	ndev->mw_count = 0;
	ndev->spad_count = 0;
	ndev->db_count = 0;
	ndev->db_vec_count = 0;
	ndev->db_vec_shift = 0;

	ndev->ntb_ctl = 0;
	ndev->lnk_sta = 0;

	ndev->db_valid_mask = 0;
	ndev->db_link_mask = 0;
	ndev->db_mask = 0;

	spin_lock_init(&ndev->db_mask_lock);
}

static int intel_ntb_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct intel_ntb_dev *ndev;
	int rc;

	if (pdev_is_bwd(pdev)) {
		ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = bwd_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else if (pdev_is_snb(pdev)) {
		ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = snb_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else {
		rc = -EINVAL;
		goto err_ndev;
	}

	ndev_reset_unsafe_flags(ndev);

	ndev->reg->poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	if (pdev_is_bwd(pdev))
		bwd_deinit_dev(ndev);
	else if (pdev_is_snb(pdev))
		snb_deinit_dev(ndev);
err_init_dev:
	intel_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}

static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
	struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	if (pdev_is_bwd(pdev))
		bwd_deinit_dev(ndev);
	else if (pdev_is_snb(pdev))
		snb_deinit_dev(ndev);
	intel_ntb_deinit_pci(ndev);
	kfree(ndev);
}

static const struct intel_ntb_reg bwd_reg = {
	.poll_link		= bwd_poll_link,
	.link_is_up		= bwd_link_is_up,
	.db_ioread		= bwd_db_ioread,
	.db_iowrite		= bwd_db_iowrite,
	.db_size		= sizeof(u64),
	.ntb_ctl		= BWD_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4},
};

static const struct intel_ntb_alt_reg bwd_pri_reg = {
	.db_bell		= BWD_PDOORBELL_OFFSET,
	.db_mask		= BWD_PDBMSK_OFFSET,
	.spad			= BWD_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg bwd_b2b_reg = {
	.db_bell		= BWD_B2B_DOORBELL_OFFSET,
	.spad			= BWD_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg bwd_sec_xlat = {
	/* FIXME : .bar0_base	= BWD_SBAR0BASE_OFFSET, */
	/* FIXME : .bar2_limit	= BWD_SBAR2LMT_OFFSET, */
	.bar2_xlat		= BWD_SBAR2XLAT_OFFSET,
};

static const struct intel_ntb_reg snb_reg = {
	.poll_link		= snb_poll_link,
	.link_is_up		= snb_link_is_up,
	.db_ioread		= snb_db_ioread,
	.db_iowrite		= snb_db_iowrite,
	.db_size		= sizeof(u32),
	.ntb_ctl		= SNB_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4, 5},
};

static const struct intel_ntb_alt_reg snb_pri_reg = {
	.db_bell		= SNB_PDOORBELL_OFFSET,
	.db_mask		= SNB_PDBMSK_OFFSET,
	.spad			= SNB_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg snb_sec_reg = {
	.db_bell		= SNB_SDOORBELL_OFFSET,
	.db_mask		= SNB_SDBMSK_OFFSET,
	/* second half of the scratchpads */
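	/* (4 bytes per spad, so half the count is count << 1 bytes in) */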
	.spad			= SNB_SPAD_OFFSET + (SNB_SPAD_COUNT << 1),
};

static const struct intel_ntb_alt_reg snb_b2b_reg = {
	.db_bell		= SNB_B2B_DOORBELL_OFFSET,
	.spad			= SNB_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg snb_pri_xlat = {
	/* Note: no primary .bar0_base is visible to the secondary side.
	 *
	 * The secondary side cannot read the base addresses stored in the
	 * primary BARs.  The base address is necessary to set the limit
	 * register to any value other than zero, which means unlimited.
	 *
	 * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
	 * window by setting the limit equal to base, nor can it limit the
	 * size of the memory window by setting the limit to base + size.
	 */
	.bar2_limit		= SNB_PBAR23LMT_OFFSET,
	.bar2_xlat		= SNB_PBAR23XLAT_OFFSET,
};

static const struct intel_ntb_xlat_reg snb_sec_xlat = {
	.bar0_base		= SNB_SBAR0BASE_OFFSET,
	.bar2_limit		= SNB_SBAR23LMT_OFFSET,
	.bar2_xlat		= SNB_SBAR23XLAT_OFFSET,
};

static const struct intel_b2b_addr snb_b2b_usd_addr = {
	.bar2_addr64		= SNB_B2B_BAR2_USD_ADDR64,
	.bar4_addr64		= SNB_B2B_BAR4_USD_ADDR64,
	.bar4_addr32		= SNB_B2B_BAR4_USD_ADDR32,
	.bar5_addr32		= SNB_B2B_BAR5_USD_ADDR32,
};

static const struct intel_b2b_addr snb_b2b_dsd_addr = {
	.bar2_addr64		= SNB_B2B_BAR2_DSD_ADDR64,
	.bar4_addr64		= SNB_B2B_BAR4_DSD_ADDR64,
	.bar4_addr32		= SNB_B2B_BAR4_DSD_ADDR32,
	.bar5_addr32		= SNB_B2B_BAR5_DSD_ADDR32,
};

/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_range		= intel_ntb_mw_get_range,
	.mw_set_trans		= intel_ntb_mw_set_trans,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb_link_enable,
	.link_disable		= intel_ntb_link_disable,
	.db_is_unsafe		= intel_ntb_db_is_unsafe,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb_db_read,
	.db_clear		= intel_ntb_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb_peer_db_addr,
	.peer_db_set		= intel_ntb_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};

static const struct file_operations intel_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

static const struct pci_device_id intel_ntb_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
	{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);

static struct pci_driver intel_ntb_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = intel_ntb_pci_tbl,
	.probe = intel_ntb_pci_probe,
	.remove = intel_ntb_pci_remove,
};

static int __init intel_ntb_pci_driver_init(void)
{
	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&intel_ntb_pci_driver);
}
module_init(intel_ntb_pci_driver_init);

static void __exit intel_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&intel_ntb_pci_driver);

	debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);