Merge tag 'overflow-v4.18-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 13 Jun 2018 01:28:00 +0000 (18:28 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 13 Jun 2018 01:28:00 +0000 (18:28 -0700)
Pull more overflow updates from Kees Cook:
 "The rest of the overflow changes for v4.18-rc1.

  This includes the explicit overflow fixes from Silvio, further
  struct_size() conversions from Matthew, and a bug fix from Dan.

  But the bulk of it is the treewide conversions to use either the
  2-factor argument allocators (e.g. kmalloc(a * b, ...) into
  kmalloc_array(a, b, ...)) or the array_size() macros (e.g. vmalloc(a *
  b) into vmalloc(array_size(a, b))).

  Coccinelle was fighting me on several fronts, so I've done a bunch of
  manual whitespace updates in the patches as well.

  Summary:

   - Error path bug fix for overflow tests (Dan)

   - Additional struct_size() conversions (Matthew, Kees)

   - Explicitly reported overflow fixes (Silvio, Kees)

   - Add missing kvcalloc() function (Kees)

   - Treewide conversions of allocators to use either 2-factor argument
     variant when available, or array_size() and array3_size() as needed
     (Kees)"

* tag 'overflow-v4.18-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (26 commits)
  treewide: Use array_size in f2fs_kvzalloc()
  treewide: Use array_size() in f2fs_kzalloc()
  treewide: Use array_size() in f2fs_kmalloc()
  treewide: Use array_size() in sock_kmalloc()
  treewide: Use array_size() in kvzalloc_node()
  treewide: Use array_size() in vzalloc_node()
  treewide: Use array_size() in vzalloc()
  treewide: Use array_size() in vmalloc()
  treewide: devm_kzalloc() -> devm_kcalloc()
  treewide: devm_kmalloc() -> devm_kmalloc_array()
  treewide: kvzalloc() -> kvcalloc()
  treewide: kvmalloc() -> kvmalloc_array()
  treewide: kzalloc_node() -> kcalloc_node()
  treewide: kzalloc() -> kcalloc()
  treewide: kmalloc() -> kmalloc_array()
  mm: Introduce kvcalloc()
  video: uvesafb: Fix integer overflow in allocation
  UBIFS: Fix potential integer overflow in allocation
  leds: Use struct_size() in allocation
  Convert intel uncore to struct_size
  ...

27 files changed:
1  2 
arch/arm/mach-omap2/hsmmc.c
arch/arm/mach-omap2/omap_device.c
arch/powerpc/kvm/book3s_hv.c
arch/s390/kvm/kvm-s390.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/svm.c
arch/x86/kvm/x86.c
drivers/firmware/ti_sci.c
drivers/md/dm-crypt.c
drivers/md/dm-region-hash.c
drivers/md/dm-thin.c
drivers/ntb/hw/intel/ntb_hw_gen1.c
drivers/ntb/ntb_transport.c
drivers/of/platform.c
drivers/pci/controller/dwc/pci-dra7xx.c
drivers/pci/controller/dwc/pcie-designware-ep.c
drivers/pci/controller/pcie-cadence-ep.c
drivers/pci/controller/pcie-rockchip-ep.c
drivers/soc/mediatek/mtk-scpsys.c
drivers/thermal/int340x_thermal/int340x_thermal_zone.c
drivers/thermal/of-thermal.c
drivers/thermal/tegra/soctherm.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfsd/nfs4state.c
fs/nfsd/nfscache.c
net/sunrpc/auth_gss/gss_rpc_upcall.c
virt/kvm/kvm_main.c

index 0103548b0b155c0587e05cf998808b4c09df473c,9344035d537f05ae9597ff3728e3f6106a6aa23b..af545193f6736f474d2c77b62c3abc46fc7c494f
@@@ -18,6 -18,7 +18,6 @@@
  
  #include "soc.h"
  #include "omap_device.h"
 -#include "omap-pm.h"
  
  #include "hsmmc.h"
  #include "control.h"
@@@ -34,7 -35,7 +34,7 @@@ static int __init omap_hsmmc_pdata_init
  {
        char *hc_name;
  
-       hc_name = kzalloc(sizeof(char) * (HSMMC_NAME_LEN + 1), GFP_KERNEL);
+       hc_name = kzalloc(HSMMC_NAME_LEN + 1, GFP_KERNEL);
        if (!hc_name) {
                kfree(hc_name);
                return -ENOMEM;
index ac219b9e6a4c80eb27289e90f231746edf4f8175,06b6bca3a1799dbf4cc43408071b9c207a0a987e..41c7b905980a9e7a37a512cf26c06a428b4e9ca6
@@@ -143,7 -143,7 +143,7 @@@ static int omap_device_build_from_dt(st
        struct resource res;
        const char *oh_name;
        int oh_cnt, i, ret = 0;
 -      bool device_active = false;
 +      bool device_active = false, skip_pm_domain = false;
  
        oh_cnt = of_property_count_strings(node, "ti,hwmods");
        if (oh_cnt <= 0) {
                return -ENODEV;
        }
  
 +      /* SDMA still needs special handling for omap_device_build() */
 +      ret = of_property_read_string_index(node, "ti,hwmods", 0, &oh_name);
 +      if (!ret && (!strncmp("dma_system", oh_name, 10) ||
 +                   !strncmp("dma", oh_name, 3)))
 +              skip_pm_domain = true;
 +
        /* Use ti-sysc driver instead of omap_device? */
 -      if (!omap_hwmod_parse_module_range(NULL, node, &res))
 +      if (!skip_pm_domain &&
 +          !omap_hwmod_parse_module_range(NULL, node, &res))
                return -ENODEV;
  
-       hwmods = kzalloc(sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL);
+       hwmods = kcalloc(oh_cnt, sizeof(struct omap_hwmod *), GFP_KERNEL);
        if (!hwmods) {
                ret = -ENOMEM;
                goto odbfd_exit;
                        r->name = dev_name(&pdev->dev);
        }
  
 -      dev_pm_domain_set(&pdev->dev, &omap_device_pm_domain);
 -
 -      if (device_active) {
 -              omap_device_enable(pdev);
 -              pm_runtime_set_active(&pdev->dev);
 +      if (!skip_pm_domain) {
 +              dev_pm_domain_set(&pdev->dev, &omap_device_pm_domain);
 +              if (device_active) {
 +                      omap_device_enable(pdev);
 +                      pm_runtime_set_active(&pdev->dev);
 +              }
        }
  
  odbfd_exit1:
@@@ -413,7 -405,7 +413,7 @@@ omap_device_copy_resources(struct omap_
                goto error;
        }
  
-       res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
+       res = kcalloc(2, sizeof(*res), GFP_KERNEL);
        if (!res)
                return -ENOMEM;
  
index 69895597736ab60a63ce840269e9889dd0bf24b0,746645cd2ba71428c344c0e9dc51e8a9c0068ca8..8858ab8b6ca42d79f8616b2d16d75e8561854ab7
@@@ -3548,7 -3548,7 +3548,7 @@@ static void kvmppc_core_free_memslot_hv
  static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
                                         unsigned long npages)
  {
-       slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
+       slot->arch.rmap = vzalloc(array_size(npages, sizeof(*slot->arch.rmap)));
        if (!slot->arch.rmap)
                return -ENOMEM;
  
@@@ -3955,7 -3955,8 +3955,7 @@@ static int kvmppc_core_init_vm_hv(struc
         */
        snprintf(buf, sizeof(buf), "vm%d", current->pid);
        kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
 -      if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
 -              kvmppc_mmu_debugfs_init(kvm);
 +      kvmppc_mmu_debugfs_init(kvm);
  
        return 0;
  }
diff --combined arch/s390/kvm/kvm-s390.c
index 7142508ca6e1ce940a1a95d5b00fd731c27b830a,3f6625c64341c773b49e0734b5088c2f195a5f99..3b7a5151b6a5effa6dabc5aa95ac027a4af53198
@@@ -791,21 -791,11 +791,21 @@@ static int kvm_s390_set_mem_control(str
  
  static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
  
 -static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
 +void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
  {
        struct kvm_vcpu *vcpu;
        int i;
  
 +      kvm_s390_vcpu_block_all(kvm);
 +
 +      kvm_for_each_vcpu(i, vcpu, kvm)
 +              kvm_s390_vcpu_crypto_setup(vcpu);
 +
 +      kvm_s390_vcpu_unblock_all(kvm);
 +}
 +
 +static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
 +{
        if (!test_kvm_facility(kvm, 76))
                return -EINVAL;
  
                return -ENXIO;
        }
  
 -      kvm_for_each_vcpu(i, vcpu, kvm) {
 -              kvm_s390_vcpu_crypto_setup(vcpu);
 -              exit_sie(vcpu);
 -      }
 +      kvm_s390_vcpu_crypto_reset_all(kvm);
        mutex_unlock(&kvm->lock);
        return 0;
  }
@@@ -1040,8 -1033,8 +1040,8 @@@ static int kvm_s390_set_tod(struct kvm 
        return ret;
  }
  
 -static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
 -                                      struct kvm_s390_vm_tod_clock *gtod)
 +static void kvm_s390_get_tod_clock(struct kvm *kvm,
 +                                 struct kvm_s390_vm_tod_clock *gtod)
  {
        struct kvm_s390_tod_clock_ext htod;
  
        get_tod_clock_ext((char *)&htod);
  
        gtod->tod = htod.tod + kvm->arch.epoch;
 -      gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
 -
 -      if (gtod->tod < htod.tod)
 -              gtod->epoch_idx += 1;
 +      gtod->epoch_idx = 0;
 +      if (test_kvm_facility(kvm, 139)) {
 +              gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
 +              if (gtod->tod < htod.tod)
 +                      gtod->epoch_idx += 1;
 +      }
  
        preempt_enable();
  }
@@@ -1065,7 -1056,12 +1065,7 @@@ static int kvm_s390_get_tod_ext(struct 
        struct kvm_s390_vm_tod_clock gtod;
  
        memset(&gtod, 0, sizeof(gtod));
 -
 -      if (test_kvm_facility(kvm, 139))
 -              kvm_s390_get_tod_clock_ext(kvm, &gtod);
 -      else
 -              gtod.tod = kvm_s390_get_tod_clock_fast(kvm);
 -
 +      kvm_s390_get_tod_clock(kvm, &gtod);
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;
  
@@@ -1497,7 -1493,7 +1497,7 @@@ static long kvm_s390_get_skeys(struct k
                return -EINVAL;
  
        /* Is this guest using storage keys? */
 -      if (!mm_use_skey(current->mm))
 +      if (!mm_uses_skeys(current->mm))
                return KVM_S390_GET_SKEYS_NONE;
  
        /* Enforce sane limit on memory allocation */
@@@ -1729,7 -1725,7 +1729,7 @@@ static int kvm_s390_set_cmma_bits(struc
        if (args->count == 0)
                return 0;
  
-       bits = vmalloc(sizeof(*bits) * args->count);
+       bits = vmalloc(array_size(sizeof(*bits), args->count));
        if (!bits)
                return -ENOMEM;
  
@@@ -1986,10 -1982,10 +1986,10 @@@ int kvm_arch_init_vm(struct kvm *kvm, u
  
        rc = -ENOMEM;
  
 -      kvm->arch.use_esca = 0; /* start with basic SCA */
        if (!sclp.has_64bscao)
                alloc_flags |= GFP_DMA;
        rwlock_init(&kvm->arch.sca_lock);
 +      /* start with basic SCA */
        kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
        if (!kvm->arch.sca)
                goto out_err;
        kvm_s390_crypto_init(kvm);
  
        mutex_init(&kvm->arch.float_int.ais_lock);
 -      kvm->arch.float_int.simm = 0;
 -      kvm->arch.float_int.nimm = 0;
        spin_lock_init(&kvm->arch.float_int.lock);
        for (i = 0; i < FIRQ_LIST_COUNT; i++)
                INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
                kvm->arch.gmap->pfault_enabled = 0;
        }
  
 -      kvm->arch.css_support = 0;
 -      kvm->arch.use_irqchip = 0;
        kvm->arch.use_pfmfi = sclp.has_pfmfi;
 -      kvm->arch.epoch = 0;
 -
 +      kvm->arch.use_skf = sclp.has_skey;
        spin_lock_init(&kvm->arch.start_stop_lock);
        kvm_s390_vsie_init(kvm);
        kvm_s390_gisa_init(kvm);
@@@ -2432,12 -2433,8 +2432,12 @@@ static void kvm_s390_vcpu_initial_reset
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
 -      vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
 -      vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
 +      vcpu->arch.sie_block->gcr[0]  = CR0_UNUSED_56 |
 +                                      CR0_INTERRUPT_KEY_SUBMASK |
 +                                      CR0_MEASUREMENT_ALERT_SUBMASK;
 +      vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
 +                                      CR14_UNUSED_33 |
 +                                      CR14_EXTERNAL_DAMAGE_SUBMASK;
        /* make sure the new fpc will be lazily loaded */
        save_fpu_regs();
        current->thread.fpu.fpc = 0;
@@@ -3195,7 -3192,7 +3195,7 @@@ static int kvm_arch_setup_async_pf(stru
                return 0;
        if (kvm_s390_vcpu_has_irq(vcpu, 0))
                return 0;
 -      if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
 +      if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
                return 0;
        if (!vcpu->arch.gmap->pfault_enabled)
                return 0;
@@@ -3993,7 -3990,7 +3993,7 @@@ long kvm_arch_vcpu_ioctl(struct file *f
        return r;
  }
  
 -int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
  {
  #ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
diff --combined arch/x86/kvm/cpuid.c
index 5720e78b2f7b52fa9a05bc52064d4fb8cac04c6c,812cada68e0f44f07e27914d2f32e3288c6315b5..7e042e3d47fd5a007cd70ad20d2f5d6ee525c28e
@@@ -203,8 -203,9 +203,9 @@@ int kvm_vcpu_ioctl_set_cpuid(struct kvm
                goto out;
        r = -ENOMEM;
        if (cpuid->nent) {
-               cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
-                                       cpuid->nent);
+               cpuid_entries =
+                       vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
+                                          cpuid->nent));
                if (!cpuid_entries)
                        goto out;
                r = -EFAULT;
@@@ -404,8 -405,7 +405,8 @@@ static inline int __do_cpuid_ent(struc
        const u32 kvm_cpuid_7_0_ecx_x86_features =
                F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
 -              F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG);
 +              F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
 +              F(CLDEMOTE);
  
        /* cpuid 7.0.edx*/
        const u32 kvm_cpuid_7_0_edx_x86_features =
@@@ -785,7 -785,8 +786,8 @@@ int kvm_dev_ioctl_get_cpuid(struct kvm_
                return -EINVAL;
  
        r = -ENOMEM;
-       cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
+       cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
+                                          cpuid->nent));
        if (!cpuid_entries)
                goto out;
  
diff --combined arch/x86/kvm/svm.c
index 695b0bd02220378493dd3ea3f9edf6959343121f,e831e6d3b70e15b60b73a0fb437f7ee7ed8c9e48..f059a73f0fd088fcec5f1c6cee05cf4bb9dcc9a8
@@@ -1001,7 -1001,9 +1001,9 @@@ static int svm_cpu_init(int cpu
  
        if (svm_sev_enabled()) {
                r = -ENOMEM;
-               sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL);
+               sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
+                                             sizeof(void *),
+                                             GFP_KERNEL);
                if (!sd->sev_vmcbs)
                        goto err_1;
        }
@@@ -1768,10 -1770,7 +1770,10 @@@ static struct page **sev_pin_memory(str
        unsigned long npages, npinned, size;
        unsigned long locked, lock_limit;
        struct page **pages;
 -      int first, last;
 +      unsigned long first, last;
 +
 +      if (ulen == 0 || uaddr + ulen < uaddr)
 +              return NULL;
  
        /* Calculate number of pages. */
        first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
@@@ -1858,13 -1857,13 +1860,13 @@@ static void __unregister_enc_region_loc
  
  static struct kvm *svm_vm_alloc(void)
  {
 -      struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
 +      struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
        return &kvm_svm->kvm;
  }
  
  static void svm_vm_free(struct kvm *kvm)
  {
 -      kfree(to_kvm_svm(kvm));
 +      vfree(to_kvm_svm(kvm));
  }
  
  static void sev_vm_destroy(struct kvm *kvm)
@@@ -5065,7 -5064,7 +5067,7 @@@ static void update_cr8_intercept(struc
                set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
  }
  
 -static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 +static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
  {
        return;
  }
@@@ -6952,9 -6951,6 +6954,9 @@@ static int svm_register_enc_region(stru
        if (!sev_guest(kvm))
                return -ENOTTY;
  
 +      if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
 +              return -EINVAL;
 +
        region = kzalloc(sizeof(*region), GFP_KERNEL);
        if (!region)
                return -ENOMEM;
@@@ -7106,7 -7102,7 +7108,7 @@@ static struct kvm_x86_ops svm_x86_ops _
        .enable_nmi_window = enable_nmi_window,
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,
 -      .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
 +      .set_virtual_apic_mode = svm_set_virtual_apic_mode,
        .get_enable_apicv = svm_get_enable_apicv,
        .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
        .load_eoi_exitmap = svm_load_eoi_exitmap,
diff --combined arch/x86/kvm/x86.c
index cc8c8be1e92db9d309acd9367af4cf08218aba4c,31853061ed4f6bd6f2aeebf62813455bce0220b0..6bcecc325e7ef9654e898982cb5ddb2064fa736b
@@@ -138,7 -138,6 +138,7 @@@ module_param(tsc_tolerance_ppm, uint, S
  /* lapic timer advance (tscdeadline mode only) in nanoseconds */
  unsigned int __read_mostly lapic_timer_advance_ns = 0;
  module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
 +EXPORT_SYMBOL_GPL(lapic_timer_advance_ns);
  
  static bool __read_mostly vector_hashing = true;
  module_param(vector_hashing, bool, S_IRUGO);
@@@ -319,27 -318,23 +319,27 @@@ u64 kvm_get_apic_base(struct kvm_vcpu *
  }
  EXPORT_SYMBOL_GPL(kvm_get_apic_base);
  
 +enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
 +{
 +      return kvm_apic_mode(kvm_get_apic_base(vcpu));
 +}
 +EXPORT_SYMBOL_GPL(kvm_get_apic_mode);
 +
  int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  {
 -      u64 old_state = vcpu->arch.apic_base &
 -              (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
 -      u64 new_state = msr_info->data &
 -              (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
 +      enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
 +      enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
        u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
                (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
  
 -      if ((msr_info->data & reserved_bits) || new_state == X2APIC_ENABLE)
 -              return 1;
 -      if (!msr_info->host_initiated &&
 -          ((new_state == MSR_IA32_APICBASE_ENABLE &&
 -            old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
 -           (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
 -            old_state == 0)))
 +      if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
                return 1;
 +      if (!msr_info->host_initiated) {
 +              if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
 +                      return 1;
 +              if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
 +                      return 1;
 +      }
  
        kvm_lapic_set_base(vcpu, msr_info->data);
        return 0;
@@@ -861,7 -856,7 +861,7 @@@ int kvm_set_cr3(struct kvm_vcpu *vcpu, 
        }
  
        if (is_long_mode(vcpu) &&
 -          (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62)))
 +          (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
                return 1;
        else if (is_pae(vcpu) && is_paging(vcpu) &&
                   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
@@@ -1766,7 -1761,7 +1766,7 @@@ static int do_monotonic_boot(s64 *t, u6
        return mode;
  }
  
 -static int do_realtime(struct timespec *ts, u64 *tsc_timestamp)
 +static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
  {
        struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
        unsigned long seq;
@@@ -1799,7 -1794,7 +1799,7 @@@ static bool kvm_get_time_and_clockread(
  }
  
  /* returns true if host is using TSC based clocksource */
 -static bool kvm_get_walltime_and_clockread(struct timespec *ts,
 +static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
                                           u64 *tsc_timestamp)
  {
        /* checked again under seqlock below */
@@@ -2873,7 -2868,6 +2873,7 @@@ int kvm_vm_ioctl_check_extension(struc
        case KVM_CAP_HYPERV_SYNIC2:
        case KVM_CAP_HYPERV_VP_INDEX:
        case KVM_CAP_HYPERV_EVENTFD:
 +      case KVM_CAP_HYPERV_TLBFLUSH:
        case KVM_CAP_PCI_SEGMENT:
        case KVM_CAP_DEBUGREGS:
        case KVM_CAP_X86_ROBUST_SINGLESTEP:
                r = KVM_CLOCK_TSC_STABLE;
                break;
        case KVM_CAP_X86_DISABLE_EXITS:
 -              r |=  KVM_X86_DISABLE_EXITS_HTL | KVM_X86_DISABLE_EXITS_PAUSE;
 +              r |=  KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE;
                if(kvm_can_mwait_in_guest())
                        r |= KVM_X86_DISABLE_EXITS_MWAIT;
                break;
@@@ -3968,7 -3962,7 +3968,7 @@@ out_nofree
        return r;
  }
  
 -int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
  {
        return VM_FAULT_SIGBUS;
  }
@@@ -4254,7 -4248,7 +4254,7 @@@ split_irqchip_unlock
                if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
                        kvm_can_mwait_in_guest())
                        kvm->arch.mwait_in_guest = true;
 -              if (cap->args[0] & KVM_X86_DISABLE_EXITS_HTL)
 +              if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
                        kvm->arch.hlt_in_guest = true;
                if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
                        kvm->arch.pause_in_guest = true;
@@@ -4793,10 -4787,11 +4793,10 @@@ static int kvm_fetch_guest_virt(struct 
        return X86EMUL_CONTINUE;
  }
  
 -int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
 +int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
                               gva_t addr, void *val, unsigned int bytes,
                               struct x86_exception *exception)
  {
 -      struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
  }
  EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
  
 -static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
 -                                    gva_t addr, void *val, unsigned int bytes,
 -                                    struct x86_exception *exception)
 +static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
 +                           gva_t addr, void *val, unsigned int bytes,
 +                           struct x86_exception *exception, bool system)
  {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 -      return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
 +      u32 access = 0;
 +
 +      if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
 +              access |= PFERR_USER_MASK;
 +
 +      return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
  }
  
  static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
        return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
  }
  
 -int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
 -                                     gva_t addr, void *val,
 -                                     unsigned int bytes,
 -                                     struct x86_exception *exception)
 +static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 +                                    struct kvm_vcpu *vcpu, u32 access,
 +                                    struct x86_exception *exception)
  {
 -      struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        void *data = val;
        int r = X86EMUL_CONTINUE;
  
        while (bytes) {
                gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
 -                                                           PFERR_WRITE_MASK,
 +                                                           access,
                                                             exception);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
  out:
        return r;
  }
 +
 +static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
 +                            unsigned int bytes, struct x86_exception *exception,
 +                            bool system)
 +{
 +      struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 +      u32 access = PFERR_WRITE_MASK;
 +
 +      if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
 +              access |= PFERR_USER_MASK;
 +
 +      return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
 +                                         access, exception);
 +}
 +
 +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
 +                              unsigned int bytes, struct x86_exception *exception)
 +{
 +      return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
 +                                         PFERR_WRITE_MASK, exception);
 +}
  EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
  
  int handle_ud(struct kvm_vcpu *vcpu)
        struct x86_exception e;
  
        if (force_emulation_prefix &&
 -          kvm_read_guest_virt(&vcpu->arch.emulate_ctxt,
 -                              kvm_get_linear_rip(vcpu), sig, sizeof(sig), &e) == 0 &&
 +          kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
 +                              sig, sizeof(sig), &e) == 0 &&
            memcmp(sig, "\xf\xbkvm", sizeof(sig)) == 0) {
                kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
                emul_type = 0;
@@@ -5629,8 -5600,8 +5629,8 @@@ static int emulator_pre_leave_smm(struc
  static const struct x86_emulate_ops emulate_ops = {
        .read_gpr            = emulator_read_gpr,
        .write_gpr           = emulator_write_gpr,
 -      .read_std            = kvm_read_guest_virt_system,
 -      .write_std           = kvm_write_guest_virt_system,
 +      .read_std            = emulator_read_std,
 +      .write_std           = emulator_write_std,
        .read_phys           = kvm_read_guest_phys_system,
        .fetch               = kvm_fetch_guest_virt,
        .read_emulated       = emulator_read_emulated,
@@@ -6646,7 -6617,7 +6646,7 @@@ static int kvm_pv_clock_pairing(struct 
                                unsigned long clock_type)
  {
        struct kvm_clock_pairing clock_pairing;
 -      struct timespec ts;
 +      struct timespec64 ts;
        u64 cycle;
        int ret;
  
@@@ -8900,13 -8871,14 +8900,14 @@@ int kvm_arch_create_memslot(struct kvm 
                                      slot->base_gfn, level) + 1;
  
                slot->arch.rmap[i] =
-                       kvzalloc(lpages * sizeof(*slot->arch.rmap[i]), GFP_KERNEL);
+                       kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
+                                GFP_KERNEL);
                if (!slot->arch.rmap[i])
                        goto out_free;
                if (i == 0)
                        continue;
  
-               linfo = kvzalloc(lpages * sizeof(*linfo), GFP_KERNEL);
+               linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL);
                if (!linfo)
                        goto out_free;
  
index b74a533ef35b33301f9bef606cbec38ead2f9a38,a7d9a2046352cb4c41660d66af975ae6c28c2276..7fa744793bc5c900eb5d6998848aba233bd1875f
@@@ -1,9 -1,17 +1,9 @@@
 +// SPDX-License-Identifier: GPL-2.0
  /*
   * Texas Instruments System Control Interface Protocol Driver
   *
   * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
   *    Nishanth Menon
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
 - *
 - * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 - * kind, whether express or implied; without even the implied warranty
 - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 - * GNU General Public License for more details.
   */
  
  #define pr_fmt(fmt) "%s: " fmt, __func__
@@@ -1854,9 -1862,9 +1854,9 @@@ static int ti_sci_probe(struct platform
        if (!minfo->xfer_block)
                return -ENOMEM;
  
-       minfo->xfer_alloc_table = devm_kzalloc(dev,
-                                              BITS_TO_LONGS(desc->max_msgs)
-                                              sizeof(unsigned long),
+       minfo->xfer_alloc_table = devm_kcalloc(dev,
+                                              BITS_TO_LONGS(desc->max_msgs),
+                                              sizeof(unsigned long),
                                               GFP_KERNEL);
        if (!minfo->xfer_alloc_table)
                return -ENOMEM;
diff --combined drivers/md/dm-crypt.c
index 4939fbc34ff294d2b4bc6bc650306b2ae7837f63,57ca92dc0c3ea1cb42254f5541f10bf5e45e86c8..b61b069c33afbafd17af1951f0f5d8fee4ddf9d0
@@@ -139,13 -139,25 +139,13 @@@ struct crypt_config 
        struct dm_dev *dev;
        sector_t start;
  
 -      /*
 -       * pool for per bio private data, crypto requests,
 -       * encryption requeusts/buffer pages and integrity tags
 -       */
 -      mempool_t req_pool;
 -      mempool_t page_pool;
 -      mempool_t tag_pool;
 -      unsigned tag_pool_max_sectors;
 -
        struct percpu_counter n_allocated_pages;
  
 -      struct bio_set bs;
 -      struct mutex bio_alloc_lock;
 -
        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;
  
 -      struct task_struct *write_thread;
        wait_queue_head_t write_thread_wait;
 +      struct task_struct *write_thread;
        struct rb_root write_tree;
  
        char *cipher;
        unsigned int integrity_iv_size;
        unsigned int on_disk_tag_size;
  
 +      /*
 +       * pool for per bio private data, crypto requests,
 +       * encryption requeusts/buffer pages and integrity tags
 +       */
 +      unsigned tag_pool_max_sectors;
 +      mempool_t tag_pool;
 +      mempool_t req_pool;
 +      mempool_t page_pool;
 +
 +      struct bio_set bs;
 +      struct mutex bio_alloc_lock;
 +
        u8 *authenc_key; /* space for keys in authenc() format (if used) */
        u8 key[0];
  };
@@@ -1878,8 -1878,9 +1878,9 @@@ static int crypt_alloc_tfms_skcipher(st
        unsigned i;
        int err;
  
-       cc->cipher_tfm.tfms = kzalloc(cc->tfms_count *
-                                     sizeof(struct crypto_skcipher *), GFP_KERNEL);
+       cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
+                                     sizeof(struct crypto_skcipher *),
+                                     GFP_KERNEL);
        if (!cc->cipher_tfm.tfms)
                return -ENOMEM;
  
index c832ec398f02851b9860c427b2991e96605499e3,bc7795095dd9b91d225fb10767ad3d8b0b081926..1f760451e6f48fa06e5c217f2224d78d96c7b612
@@@ -63,28 -63,27 +63,28 @@@ struct dm_region_hash 
  
        /* hash table */
        rwlock_t hash_lock;
 -      mempool_t region_pool;
        unsigned mask;
        unsigned nr_buckets;
        unsigned prime;
        unsigned shift;
        struct list_head *buckets;
  
 +      /*
 +       * If there was a flush failure no regions can be marked clean.
 +       */
 +      int flush_failure;
 +
        unsigned max_recovery; /* Max # of regions to recover in parallel */
  
        spinlock_t region_lock;
        atomic_t recovery_in_flight;
 -      struct semaphore recovery_count;
        struct list_head clean_regions;
        struct list_head quiesced_regions;
        struct list_head recovered_regions;
        struct list_head failed_recovered_regions;
 +      struct semaphore recovery_count;
  
 -      /*
 -       * If there was a flush failure no regions can be marked clean.
 -       */
 -      int flush_failure;
 +      mempool_t region_pool;
  
        void *context;
        sector_t target_begin;
@@@ -203,7 -202,7 +203,7 @@@ struct dm_region_hash *dm_region_hash_c
        rh->shift = RH_HASH_SHIFT;
        rh->prime = RH_HASH_MULT;
  
-       rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
+       rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
        if (!rh->buckets) {
                DMERR("unable to allocate region hash bucket memory");
                kfree(rh);
diff --combined drivers/md/dm-thin.c
index 6cf9c9364103437bfe6a065b1669d39cc9ec1098,a91332557bc8c115ec54ef852ee19aec79d898c5..7945238df1c0a67a8e525697f0e419c7594ed1ad
@@@ -240,9 -240,9 +240,9 @@@ struct pool 
        struct dm_bio_prison *prison;
        struct dm_kcopyd_client *copier;
  
 +      struct work_struct worker;
        struct workqueue_struct *wq;
        struct throttle throttle;
 -      struct work_struct worker;
        struct delayed_work waker;
        struct delayed_work no_space_timeout;
  
        struct dm_deferred_set *all_io_ds;
  
        struct dm_thin_new_mapping *next_mapping;
 -      mempool_t mapping_pool;
  
        process_bio_fn process_bio;
        process_bio_fn process_discard;
        process_mapping_fn process_prepared_discard_pt2;
  
        struct dm_bio_prison_cell **cell_sort_array;
 +
 +      mempool_t mapping_pool;
  };
  
  static enum pool_mode get_pool_mode(struct pool *pool);
@@@ -2940,7 -2939,9 +2940,9 @@@ static struct pool *pool_create(struct 
                goto bad_mapping_pool;
        }
  
-       pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
+       pool->cell_sort_array =
+               vmalloc(array_size(CELL_SORT_ARRAY_SIZE,
+                                  sizeof(*pool->cell_sort_array)));
        if (!pool->cell_sort_array) {
                *error = "Error allocating cell sort array";
                err_p = ERR_PTR(-ENOMEM);
index ffdee98e8ece12da2fe5a75f4d6a897bf1c637f8,0000000000000000000000000000000000000000..6aa57322727916bd5bc1c8e5ab13f286f8fd1b1d
mode 100644,000000..100644
--- /dev/null
@@@ -1,2070 -1,0 +1,2070 @@@
-       ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
 +/*
 + * This file is provided under a dual BSD/GPLv2 license.  When using or
 + *   redistributing this file, you may do so under either license.
 + *
 + *   GPL LICENSE SUMMARY
 + *
 + *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 + *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 + *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
 + *
 + *   This program is free software; you can redistribute it and/or modify
 + *   it under the terms of version 2 of the GNU General Public License as
 + *   published by the Free Software Foundation.
 + *
 + *   BSD LICENSE
 + *
 + *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 + *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 + *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
 + *
 + *   Redistribution and use in source and binary forms, with or without
 + *   modification, are permitted provided that the following conditions
 + *   are met:
 + *
 + *     * Redistributions of source code must retain the above copyright
 + *       notice, this list of conditions and the following disclaimer.
 + *     * Redistributions in binary form must reproduce the above copyright
 + *       notice, this list of conditions and the following disclaimer in
 + *       the documentation and/or other materials provided with the
 + *       distribution.
 + *     * Neither the name of Intel Corporation nor the names of its
 + *       contributors may be used to endorse or promote products derived
 + *       from this software without specific prior written permission.
 + *
 + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 + *
 + * Intel PCIe NTB Linux driver
 + */
 +
 +#include <linux/debugfs.h>
 +#include <linux/delay.h>
 +#include <linux/init.h>
 +#include <linux/interrupt.h>
 +#include <linux/module.h>
 +#include <linux/pci.h>
 +#include <linux/random.h>
 +#include <linux/slab.h>
 +#include <linux/ntb.h>
 +
 +#include "ntb_hw_intel.h"
 +#include "ntb_hw_gen1.h"
 +#include "ntb_hw_gen3.h"
 +
 +#define NTB_NAME      "ntb_hw_intel"
 +#define NTB_DESC      "Intel(R) PCI-E Non-Transparent Bridge Driver"
 +#define NTB_VER               "2.0"
 +
 +MODULE_DESCRIPTION(NTB_DESC);
 +MODULE_VERSION(NTB_VER);
 +MODULE_LICENSE("Dual BSD/GPL");
 +MODULE_AUTHOR("Intel Corporation");
 +
 +#define bar0_off(base, bar) ((base) + ((bar) << 2))
 +#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
 +
 +static const struct intel_ntb_reg xeon_reg;
 +static const struct intel_ntb_alt_reg xeon_pri_reg;
 +static const struct intel_ntb_alt_reg xeon_sec_reg;
 +static const struct intel_ntb_alt_reg xeon_b2b_reg;
 +static const struct intel_ntb_xlat_reg xeon_pri_xlat;
 +static const struct intel_ntb_xlat_reg xeon_sec_xlat;
 +static const struct ntb_dev_ops intel_ntb_ops;
 +
 +static const struct file_operations intel_ntb_debugfs_info;
 +static struct dentry *debugfs_dir;
 +
 +static int b2b_mw_idx = -1;
 +module_param(b2b_mw_idx, int, 0644);
 +MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
 +               "value of zero or positive starts from first mw idx, and a "
 +               "negative value starts from last mw idx.  Both sides MUST "
 +               "set the same value here!");
 +
 +static unsigned int b2b_mw_share;
 +module_param(b2b_mw_share, uint, 0644);
 +MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
 +               "ntb so that the peer ntb only occupies the first half of "
 +               "the mw, so the second half can still be used as a mw.  Both "
 +               "sides MUST set the same value here!");
 +
 +module_param_named(xeon_b2b_usd_bar2_addr64,
 +                 xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
 +MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
 +               "XEON B2B USD BAR 2 64-bit address");
 +
 +module_param_named(xeon_b2b_usd_bar4_addr64,
 +                 xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
 +MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
 +               "XEON B2B USD BAR 4 64-bit address");
 +
 +module_param_named(xeon_b2b_usd_bar4_addr32,
 +                 xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
 +MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
 +               "XEON B2B USD split-BAR 4 32-bit address");
 +
 +module_param_named(xeon_b2b_usd_bar5_addr32,
 +                 xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
 +MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
 +               "XEON B2B USD split-BAR 5 32-bit address");
 +
 +module_param_named(xeon_b2b_dsd_bar2_addr64,
 +                 xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
 +MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
 +               "XEON B2B DSD BAR 2 64-bit address");
 +
 +module_param_named(xeon_b2b_dsd_bar4_addr64,
 +                 xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
 +MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
 +               "XEON B2B DSD BAR 4 64-bit address");
 +
 +module_param_named(xeon_b2b_dsd_bar4_addr32,
 +                 xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
 +MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
 +               "XEON B2B DSD split-BAR 4 32-bit address");
 +
 +module_param_named(xeon_b2b_dsd_bar5_addr32,
 +                 xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
 +MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
 +               "XEON B2B DSD split-BAR 5 32-bit address");
 +
 +
 +static int xeon_init_isr(struct intel_ntb_dev *ndev);
 +
 +static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
 +{
 +      ndev->unsafe_flags = 0;
 +      ndev->unsafe_flags_ignore = 0;
 +
 +      /* Only B2B has a workaround to avoid SDOORBELL */
 +      if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
 +              if (!ntb_topo_is_b2b(ndev->ntb.topo))
 +                      ndev->unsafe_flags |= NTB_UNSAFE_DB;
 +
 +      /* No low level workaround to avoid SB01BASE */
 +      if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
 +              ndev->unsafe_flags |= NTB_UNSAFE_DB;
 +              ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
 +      }
 +}
 +
 +static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
 +                               unsigned long flag)
 +{
 +      return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
 +}
 +
 +static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
 +                                   unsigned long flag)
 +{
 +      flag &= ndev->unsafe_flags;
 +      ndev->unsafe_flags_ignore |= flag;
 +
 +      return !!flag;
 +}
 +
 +int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
 +{
 +      if (idx < 0 || idx >= ndev->mw_count)
 +              return -EINVAL;
 +      return ndev->reg->mw_bar[idx];
 +}
 +
 +static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
 +                             phys_addr_t *db_addr, resource_size_t *db_size,
 +                             phys_addr_t reg_addr, unsigned long reg)
 +{
 +      if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
 +              pr_warn_once("%s: NTB unsafe doorbell access", __func__);
 +
 +      if (db_addr) {
 +              *db_addr = reg_addr + reg;
 +              dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr);
 +      }
 +
 +      if (db_size) {
 +              *db_size = ndev->reg->db_size;
 +              dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
 +      }
 +
 +      return 0;
 +}
 +
 +u64 ndev_db_read(struct intel_ntb_dev *ndev,
 +                             void __iomem *mmio)
 +{
 +      if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
 +              pr_warn_once("%s: NTB unsafe doorbell access", __func__);
 +
 +      return ndev->reg->db_ioread(mmio);
 +}
 +
 +int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
 +                              void __iomem *mmio)
 +{
 +      if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
 +              pr_warn_once("%s: NTB unsafe doorbell access", __func__);
 +
 +      if (db_bits & ~ndev->db_valid_mask)
 +              return -EINVAL;
 +
 +      ndev->reg->db_iowrite(db_bits, mmio);
 +
 +      return 0;
 +}
 +
 +static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
 +                                 void __iomem *mmio)
 +{
 +      unsigned long irqflags;
 +
 +      if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
 +              pr_warn_once("%s: NTB unsafe doorbell access", __func__);
 +
 +      if (db_bits & ~ndev->db_valid_mask)
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
 +      {
 +              ndev->db_mask |= db_bits;
 +              ndev->reg->db_iowrite(ndev->db_mask, mmio);
 +      }
 +      spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
 +
 +      return 0;
 +}
 +
 +static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
 +                                   void __iomem *mmio)
 +{
 +      unsigned long irqflags;
 +
 +      if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
 +              pr_warn_once("%s: NTB unsafe doorbell access", __func__);
 +
 +      if (db_bits & ~ndev->db_valid_mask)
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
 +      {
 +              ndev->db_mask &= ~db_bits;
 +              ndev->reg->db_iowrite(ndev->db_mask, mmio);
 +      }
 +      spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
 +
 +      return 0;
 +}
 +
 +static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
 +{
 +      u64 shift, mask;
 +
 +      shift = ndev->db_vec_shift;
 +      mask = BIT_ULL(shift) - 1;
 +
 +      return mask << (shift * db_vector);
 +}
 +
 +static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
 +                               phys_addr_t *spad_addr, phys_addr_t reg_addr,
 +                               unsigned long reg)
 +{
 +      if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
 +              pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
 +
 +      if (idx < 0 || idx >= ndev->spad_count)
 +              return -EINVAL;
 +
 +      if (spad_addr) {
 +              *spad_addr = reg_addr + reg + (idx << 2);
 +              dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n",
 +                      *spad_addr);
 +      }
 +
 +      return 0;
 +}
 +
 +static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
 +                               void __iomem *mmio)
 +{
 +      if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
 +              pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
 +
 +      if (idx < 0 || idx >= ndev->spad_count)
 +              return 0;
 +
 +      return ioread32(mmio + (idx << 2));
 +}
 +
 +static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
 +                                void __iomem *mmio)
 +{
 +      if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
 +              pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
 +
 +      if (idx < 0 || idx >= ndev->spad_count)
 +              return -EINVAL;
 +
 +      iowrite32(val, mmio + (idx << 2));
 +
 +      return 0;
 +}
 +
 +static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
 +{
 +      u64 vec_mask;
 +
 +      vec_mask = ndev_vec_mask(ndev, vec);
 +
 +      if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
 +              vec_mask |= ndev->db_link_mask;
 +
 +      dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask);
 +
 +      ndev->last_ts = jiffies;
 +
 +      if (vec_mask & ndev->db_link_mask) {
 +              if (ndev->reg->poll_link(ndev))
 +                      ntb_link_event(&ndev->ntb);
 +      }
 +
 +      if (vec_mask & ndev->db_valid_mask)
 +              ntb_db_event(&ndev->ntb, vec);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +static irqreturn_t ndev_vec_isr(int irq, void *dev)
 +{
 +      struct intel_ntb_vec *nvec = dev;
 +
 +      dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d  nvec->num: %d\n",
 +              irq, nvec->num);
 +
 +      return ndev_interrupt(nvec->ndev, nvec->num);
 +}
 +
 +static irqreturn_t ndev_irq_isr(int irq, void *dev)
 +{
 +      struct intel_ntb_dev *ndev = dev;
 +
 +      return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
 +}
 +
 +int ndev_init_isr(struct intel_ntb_dev *ndev,
 +                       int msix_min, int msix_max,
 +                       int msix_shift, int total_shift)
 +{
 +      struct pci_dev *pdev;
 +      int rc, i, msix_count, node;
 +
 +      pdev = ndev->ntb.pdev;
 +
 +      node = dev_to_node(&pdev->dev);
 +
 +      /* Mask all doorbell interrupts */
 +      ndev->db_mask = ndev->db_valid_mask;
 +      ndev->reg->db_iowrite(ndev->db_mask,
 +                            ndev->self_mmio +
 +                            ndev->self_reg->db_mask);
 +
 +      /* Try to set up msix irq */
 +
-       ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
++      ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
 +                               GFP_KERNEL, node);
 +      if (!ndev->vec)
 +              goto err_msix_vec_alloc;
 +
++      ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
 +                                GFP_KERNEL, node);
 +      if (!ndev->msix)
 +              goto err_msix_alloc;
 +
 +      for (i = 0; i < msix_max; ++i)
 +              ndev->msix[i].entry = i;
 +
 +      msix_count = pci_enable_msix_range(pdev, ndev->msix,
 +                                         msix_min, msix_max);
 +      if (msix_count < 0)
 +              goto err_msix_enable;
 +
 +      for (i = 0; i < msix_count; ++i) {
 +              ndev->vec[i].ndev = ndev;
 +              ndev->vec[i].num = i;
 +              rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
 +                               "ndev_vec_isr", &ndev->vec[i]);
 +              if (rc)
 +                      goto err_msix_request;
 +      }
 +
 +      dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count);
 +      ndev->db_vec_count = msix_count;
 +      ndev->db_vec_shift = msix_shift;
 +      return 0;
 +
 +err_msix_request:
 +      while (i-- > 0)
 +              free_irq(ndev->msix[i].vector, &ndev->vec[i]);
 +      pci_disable_msix(pdev);
 +err_msix_enable:
 +      kfree(ndev->msix);
 +err_msix_alloc:
 +      kfree(ndev->vec);
 +err_msix_vec_alloc:
 +      ndev->msix = NULL;
 +      ndev->vec = NULL;
 +
 +      /* Try to set up msi irq */
 +
 +      rc = pci_enable_msi(pdev);
 +      if (rc)
 +              goto err_msi_enable;
 +
 +      rc = request_irq(pdev->irq, ndev_irq_isr, 0,
 +                       "ndev_irq_isr", ndev);
 +      if (rc)
 +              goto err_msi_request;
 +
 +      dev_dbg(&pdev->dev, "Using msi interrupts\n");
 +      ndev->db_vec_count = 1;
 +      ndev->db_vec_shift = total_shift;
 +      return 0;
 +
 +err_msi_request:
 +      pci_disable_msi(pdev);
 +err_msi_enable:
 +
 +      /* Try to set up intx irq */
 +
 +      pci_intx(pdev, 1);
 +
 +      rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
 +                       "ndev_irq_isr", ndev);
 +      if (rc)
 +              goto err_intx_request;
 +
 +      dev_dbg(&pdev->dev, "Using intx interrupts\n");
 +      ndev->db_vec_count = 1;
 +      ndev->db_vec_shift = total_shift;
 +      return 0;
 +
 +err_intx_request:
 +      return rc;
 +}
 +
 +static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
 +{
 +      struct pci_dev *pdev;
 +      int i;
 +
 +      pdev = ndev->ntb.pdev;
 +
 +      /* Mask all doorbell interrupts */
 +      ndev->db_mask = ndev->db_valid_mask;
 +      ndev->reg->db_iowrite(ndev->db_mask,
 +                            ndev->self_mmio +
 +                            ndev->self_reg->db_mask);
 +
 +      if (ndev->msix) {
 +              i = ndev->db_vec_count;
 +              while (i--)
 +                      free_irq(ndev->msix[i].vector, &ndev->vec[i]);
 +              pci_disable_msix(pdev);
 +              kfree(ndev->msix);
 +              kfree(ndev->vec);
 +      } else {
 +              free_irq(pdev->irq, ndev);
 +              if (pci_dev_msi_enabled(pdev))
 +                      pci_disable_msi(pdev);
 +      }
 +}
 +
 +static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
 +                                   size_t count, loff_t *offp)
 +{
 +      struct intel_ntb_dev *ndev;
 +      struct pci_dev *pdev;
 +      void __iomem *mmio;
 +      char *buf;
 +      size_t buf_size;
 +      ssize_t ret, off;
 +      union { u64 v64; u32 v32; u16 v16; u8 v8; } u;
 +
 +      ndev = filp->private_data;
 +      pdev = ndev->ntb.pdev;
 +      mmio = ndev->self_mmio;
 +
 +      buf_size = min(count, 0x800ul);
 +
 +      buf = kmalloc(buf_size, GFP_KERNEL);
 +      if (!buf)
 +              return -ENOMEM;
 +
 +      off = 0;
 +
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "NTB Device Information:\n");
 +
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "Connection Topology -\t%s\n",
 +                       ntb_topo_string(ndev->ntb.topo));
 +
 +      if (ndev->b2b_idx != UINT_MAX) {
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
 +      }
 +
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "BAR4 Split -\t\t%s\n",
 +                       ndev->bar4_split ? "yes" : "no");
 +
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
 +
 +      if (!ndev->reg->link_is_up(ndev)) {
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "Link Status -\t\tDown\n");
 +      } else {
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "Link Status -\t\tUp\n");
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "Link Speed -\t\tPCI-E Gen %u\n",
 +                               NTB_LNK_STA_SPEED(ndev->lnk_sta));
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "Link Width -\t\tx%u\n",
 +                               NTB_LNK_STA_WIDTH(ndev->lnk_sta));
 +      }
 +
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "Memory Window Count -\t%u\n", ndev->mw_count);
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "Scratchpad Count -\t%u\n", ndev->spad_count);
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "Doorbell Count -\t%u\n", ndev->db_count);
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
 +
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
 +
 +      u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "Doorbell Mask -\t\t%#llx\n", u.v64);
 +
 +      u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "Doorbell Bell -\t\t%#llx\n", u.v64);
 +
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "\nNTB Window Size:\n");
 +
 +      pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "PBAR23SZ %hhu\n", u.v8);
 +      if (!ndev->bar4_split) {
 +              pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "PBAR45SZ %hhu\n", u.v8);
 +      } else {
 +              pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "PBAR4SZ %hhu\n", u.v8);
 +              pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "PBAR5SZ %hhu\n", u.v8);
 +      }
 +
 +      pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "SBAR23SZ %hhu\n", u.v8);
 +      if (!ndev->bar4_split) {
 +              pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "SBAR45SZ %hhu\n", u.v8);
 +      } else {
 +              pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "SBAR4SZ %hhu\n", u.v8);
 +              pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "SBAR5SZ %hhu\n", u.v8);
 +      }
 +
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "\nNTB Incoming XLAT:\n");
 +
 +      u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "XLAT23 -\t\t%#018llx\n", u.v64);
 +
 +      if (ndev->bar4_split) {
 +              u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "XLAT4 -\t\t\t%#06x\n", u.v32);
 +
 +              u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "XLAT5 -\t\t\t%#06x\n", u.v32);
 +      } else {
 +              u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "XLAT45 -\t\t%#018llx\n", u.v64);
 +      }
 +
 +      u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
 +      off += scnprintf(buf + off, buf_size - off,
 +                       "LMT23 -\t\t\t%#018llx\n", u.v64);
 +
 +      if (ndev->bar4_split) {
 +              u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "LMT4 -\t\t\t%#06x\n", u.v32);
 +              u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "LMT5 -\t\t\t%#06x\n", u.v32);
 +      } else {
 +              u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "LMT45 -\t\t\t%#018llx\n", u.v64);
 +      }
 +
 +      if (pdev_is_gen1(pdev)) {
 +              if (ntb_topo_is_b2b(ndev->ntb.topo)) {
 +                      off += scnprintf(buf + off, buf_size - off,
 +                                       "\nNTB Outgoing B2B XLAT:\n");
 +
 +                      u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
 +                      off += scnprintf(buf + off, buf_size - off,
 +                                       "B2B XLAT23 -\t\t%#018llx\n", u.v64);
 +
 +                      if (ndev->bar4_split) {
 +                              u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
 +                              off += scnprintf(buf + off, buf_size - off,
 +                                               "B2B XLAT4 -\t\t%#06x\n",
 +                                               u.v32);
 +                              u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
 +                              off += scnprintf(buf + off, buf_size - off,
 +                                               "B2B XLAT5 -\t\t%#06x\n",
 +                                               u.v32);
 +                      } else {
 +                              u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
 +                              off += scnprintf(buf + off, buf_size - off,
 +                                               "B2B XLAT45 -\t\t%#018llx\n",
 +                                               u.v64);
 +                      }
 +
 +                      u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
 +                      off += scnprintf(buf + off, buf_size - off,
 +                                       "B2B LMT23 -\t\t%#018llx\n", u.v64);
 +
 +                      if (ndev->bar4_split) {
 +                              u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
 +                              off += scnprintf(buf + off, buf_size - off,
 +                                               "B2B LMT4 -\t\t%#06x\n",
 +                                               u.v32);
 +                              u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
 +                              off += scnprintf(buf + off, buf_size - off,
 +                                               "B2B LMT5 -\t\t%#06x\n",
 +                                               u.v32);
 +                      } else {
 +                              u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
 +                              off += scnprintf(buf + off, buf_size - off,
 +                                               "B2B LMT45 -\t\t%#018llx\n",
 +                                               u.v64);
 +                      }
 +
 +                      off += scnprintf(buf + off, buf_size - off,
 +                                       "\nNTB Secondary BAR:\n");
 +
 +                      u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
 +                      off += scnprintf(buf + off, buf_size - off,
 +                                       "SBAR01 -\t\t%#018llx\n", u.v64);
 +
 +                      u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
 +                      off += scnprintf(buf + off, buf_size - off,
 +                                       "SBAR23 -\t\t%#018llx\n", u.v64);
 +
 +                      if (ndev->bar4_split) {
 +                              u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
 +                              off += scnprintf(buf + off, buf_size - off,
 +                                               "SBAR4 -\t\t\t%#06x\n", u.v32);
 +                              u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
 +                              off += scnprintf(buf + off, buf_size - off,
 +                                               "SBAR5 -\t\t\t%#06x\n", u.v32);
 +                      } else {
 +                              u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
 +                              off += scnprintf(buf + off, buf_size - off,
 +                                               "SBAR45 -\t\t%#018llx\n",
 +                                               u.v64);
 +                      }
 +              }
 +
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "\nXEON NTB Statistics:\n");
 +
 +              u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "Upstream Memory Miss -\t%u\n", u.v16);
 +
 +              off += scnprintf(buf + off, buf_size - off,
 +                               "\nXEON NTB Hardware Errors:\n");
 +
 +              if (!pci_read_config_word(pdev,
 +                                        XEON_DEVSTS_OFFSET, &u.v16))
 +                      off += scnprintf(buf + off, buf_size - off,
 +                                       "DEVSTS -\t\t%#06x\n", u.v16);
 +
 +              if (!pci_read_config_word(pdev,
 +                                        XEON_LINK_STATUS_OFFSET, &u.v16))
 +                      off += scnprintf(buf + off, buf_size - off,
 +                                       "LNKSTS -\t\t%#06x\n", u.v16);
 +
 +              if (!pci_read_config_dword(pdev,
 +                                         XEON_UNCERRSTS_OFFSET, &u.v32))
 +                      off += scnprintf(buf + off, buf_size - off,
 +                                       "UNCERRSTS -\t\t%#06x\n", u.v32);
 +
 +              if (!pci_read_config_dword(pdev,
 +                                         XEON_CORERRSTS_OFFSET, &u.v32))
 +                      off += scnprintf(buf + off, buf_size - off,
 +                                       "CORERRSTS -\t\t%#06x\n", u.v32);
 +      }
 +
 +      ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
 +      kfree(buf);
 +      return ret;
 +}
 +
+/* Dispatch the debugfs "info" read to the generation-specific dump. */
+static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
+                               size_t count, loff_t *offp)
+{
+      struct intel_ntb_dev *ndev = filp->private_data;
+      struct pci_dev *pdev = ndev->ntb.pdev;
+
+      if (pdev_is_gen1(pdev))
+              return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
+      if (pdev_is_gen3(pdev))
+              return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
+
+      return -ENXIO;
+}
 +
+/* Create this device's debugfs directory and "info" file under the
+ * driver-level debugfs root, if that root exists.
+ */
+static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
+{
+      /* No driver debugfs root: nothing to create. */
+      if (!debugfs_dir) {
+              ndev->debugfs_dir = NULL;
+              ndev->debugfs_info = NULL;
+              return;
+      }
+
+      ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->ntb.pdev),
+                                             debugfs_dir);
+      if (!ndev->debugfs_dir) {
+              ndev->debugfs_info = NULL;
+              return;
+      }
+
+      ndev->debugfs_info = debugfs_create_file("info", S_IRUSR,
+                                               ndev->debugfs_dir, ndev,
+                                               &intel_ntb_debugfs_info);
+}
 +
+/* Remove this device's debugfs directory and everything below it. */
+static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
+{
+      debugfs_remove_recursive(ndev->debugfs_dir);
+}
 +
+/* Number of inbound memory windows toward the (only) default peer. */
+int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      /* Only the default peer exists on this hardware. */
+      if (pidx != NTB_DEF_PEER_IDX)
+              return -EINVAL;
+
+      return ndev->mw_count;
+}
 +
+/* Report alignment and size constraints for inbound memory window @idx:
+ * the translated address must be aligned to the backing bar's size, any
+ * size granularity is accepted, and the maximum size excludes the region
+ * reserved for the b2b window when it shares this bar.
+ */
+int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
+                         resource_size_t *addr_align,
+                         resource_size_t *size_align,
+                         resource_size_t *size_max)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+      resource_size_t bar_size, mw_size;
+      int bar;
+
+      if (pidx != NTB_DEF_PEER_IDX)
+              return -EINVAL;
+
+      /* when the b2b window consumed a whole bar, window indices shift up */
+      if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+              idx += 1;
+
+      bar = ndev_mw_to_bar(ndev, idx);
+      if (bar < 0)
+              return bar;
+
+      bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+      /* the b2b window shares its bar; subtract the reserved portion */
+      if (idx == ndev->b2b_idx)
+              mw_size = bar_size - ndev->b2b_off;
+      else
+              mw_size = bar_size;
+
+      /* hardware requires the xlat address be aligned to the bar size */
+      if (addr_align)
+              *addr_align = pci_resource_len(ndev->ntb.pdev, bar);
+
+      if (size_align)
+              *size_align = 1;
+
+      if (size_max)
+              *size_max = mw_size;
+
+      return 0;
+}
 +
+/* Program inbound memory window @idx to translate to DMA address @addr for
+ * @size bytes.  Each register write is read back and verified; on mismatch
+ * the registers are restored and -EIO is returned.  Bar 2 (and bar 4 when
+ * not split) uses the 64-bit register forms; split bars 4/5 use 32-bit
+ * forms and therefore require a 32-bit address range.
+ */
+static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
+                                dma_addr_t addr, resource_size_t size)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+      unsigned long base_reg, xlat_reg, limit_reg;
+      resource_size_t bar_size, mw_size;
+      void __iomem *mmio;
+      u64 base, limit, reg_val;
+      int bar;
+
+      if (pidx != NTB_DEF_PEER_IDX)
+              return -EINVAL;
+
+      /* when the b2b window consumed a whole bar, window indices shift up */
+      if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+              idx += 1;
+
+      bar = ndev_mw_to_bar(ndev, idx);
+      if (bar < 0)
+              return bar;
+
+      bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+      /* the b2b window shares its bar; subtract the reserved portion */
+      if (idx == ndev->b2b_idx)
+              mw_size = bar_size - ndev->b2b_off;
+      else
+              mw_size = bar_size;
+
+      /* hardware requires that addr is aligned to bar size */
+      if (addr & (bar_size - 1))
+              return -EINVAL;
+
+      /* make sure the range fits in the usable mw size */
+      if (size > mw_size)
+              return -EINVAL;
+
+      mmio = ndev->self_mmio;
+      base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
+      xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
+      limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);
+
+      if (bar < 4 || !ndev->bar4_split) {
+              base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;
+
+              /* Set the limit if supported, if size is not mw_size */
+              if (limit_reg && size != mw_size)
+                      limit = base + size;
+              else
+                      limit = 0;
+
+              /* set and verify setting the translation address */
+              iowrite64(addr, mmio + xlat_reg);
+              reg_val = ioread64(mmio + xlat_reg);
+              if (reg_val != addr) {
+                      iowrite64(0, mmio + xlat_reg);
+                      return -EIO;
+              }
+
+              /* set and verify setting the limit */
+              iowrite64(limit, mmio + limit_reg);
+              reg_val = ioread64(mmio + limit_reg);
+              if (reg_val != limit) {
+                      /* roll back: restore the limit and zero the xlat */
+                      iowrite64(base, mmio + limit_reg);
+                      iowrite64(0, mmio + xlat_reg);
+                      return -EIO;
+              }
+      } else {
+              /* split bar addr range must all be 32 bit */
+              if (addr & (~0ull << 32))
+                      return -EINVAL;
+              if ((addr + size) & (~0ull << 32))
+                      return -EINVAL;
+
+              base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;
+
+              /* Set the limit if supported, if size is not mw_size */
+              if (limit_reg && size != mw_size)
+                      limit = base + size;
+              else
+                      limit = 0;
+
+              /* set and verify setting the translation address */
+              iowrite32(addr, mmio + xlat_reg);
+              reg_val = ioread32(mmio + xlat_reg);
+              if (reg_val != addr) {
+                      iowrite32(0, mmio + xlat_reg);
+                      return -EIO;
+              }
+
+              /* set and verify setting the limit */
+              iowrite32(limit, mmio + limit_reg);
+              reg_val = ioread32(mmio + limit_reg);
+              if (reg_val != limit) {
+                      /* roll back: restore the limit and zero the xlat */
+                      iowrite32(base, mmio + limit_reg);
+                      iowrite32(0, mmio + xlat_reg);
+                      return -EIO;
+              }
+      }
+
+      return 0;
+}
 +
+/* Return 1 and fill in the trained speed/width if the link is up,
+ * otherwise return 0 with speed/width reported as NONE.
+ */
+u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed,
+                       enum ntb_width *width)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      if (!ndev->reg->link_is_up(ndev)) {
+              /* TODO MAYBE: is it possible to observe the link speed and
+               * width while link is training? */
+              if (speed)
+                      *speed = NTB_SPEED_NONE;
+              if (width)
+                      *width = NTB_WIDTH_NONE;
+              return 0;
+      }
+
+      if (speed)
+              *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
+      if (width)
+              *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
+      return 1;
+}
 +
+/* Enable the NTB link: clear the disable and config-lock control bits and
+ * enable bar snooping in both directions.  Non-AUTO speed/width hints are
+ * only logged and ignored.  Not permitted on the secondary side.
+ */
+static int intel_ntb_link_enable(struct ntb_dev *ntb,
+                               enum ntb_speed max_speed,
+                               enum ntb_width max_width)
+{
+      struct intel_ntb_dev *ndev;
+      u32 ntb_ctl;
+
+      ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+      if (ndev->ntb.topo == NTB_TOPO_SEC)
+              return -EINVAL;
+
+      dev_dbg(&ntb->pdev->dev,
+              "Enabling link with max_speed %d max_width %d\n",
+              max_speed, max_width);
+      if (max_speed != NTB_SPEED_AUTO)
+              dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
+      if (max_width != NTB_WIDTH_AUTO)
+              dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
+
+      ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+      ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
+      ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
+      ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
+      if (ndev->bar4_split)
+              ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
+      iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+      return 0;
+}
 +
+/* Bring the NTB link down: disable bar snooping in both directions and set
+ * the disable and config-lock control bits.  Not permitted on the
+ * secondary side.
+ */
+int intel_ntb_link_disable(struct ntb_dev *ntb)
+{
+      struct intel_ntb_dev *ndev;
+      u32 ntb_cntl;
+
+      ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+      if (ndev->ntb.topo == NTB_TOPO_SEC)
+              return -EINVAL;
+
+      dev_dbg(&ntb->pdev->dev, "Disabling link\n");
+
+      /* Bring NTB link down */
+      ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+      ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
+      ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
+      if (ndev->bar4_split)
+              ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
+      ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
+      iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+      return 0;
+}
 +
+int intel_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      /* Numbers of inbound and outbound memory windows match */
+      return ndev->mw_count;
+}
 +
+/* Report the physical base and size of outbound (peer) memory window @idx,
+ * accounting for the portion of the bar reserved for the b2b window.
+ */
+int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
+                             phys_addr_t *base, resource_size_t *size)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+      resource_size_t b2b_skip;
+      int bar;
+
+      /* when the b2b window consumed a whole bar, window indices shift up */
+      if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+              idx += 1;
+
+      bar = ndev_mw_to_bar(ndev, idx);
+      if (bar < 0)
+              return bar;
+
+      /* the b2b window shares its bar; skip the reserved portion */
+      b2b_skip = (idx == ndev->b2b_idx) ? ndev->b2b_off : 0;
+
+      if (base)
+              *base = pci_resource_start(ndev->ntb.pdev, bar) + b2b_skip;
+
+      if (size)
+              *size = pci_resource_len(ndev->ntb.pdev, bar) - b2b_skip;
+
+      return 0;
+}
 +
+/* Nonzero if unsafe doorbell access is permitted on this device. */
+static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
+{
+      return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
+}
 +
+/* Bitmask of the doorbell bits supported by this device. */
+u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+      return ntb_ndev(ntb)->db_valid_mask;
+}
 +
+/* Number of interrupt vectors serving doorbell bits. */
+int intel_ntb_db_vector_count(struct ntb_dev *ntb)
+{
+      return ntb_ndev(ntb)->db_vec_count;
+}
 +
+/* Return the doorbell bits served by interrupt vector @db_vector, or 0 for
+ * an out-of-range vector.
+ */
+u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      /* Valid vectors are 0 .. db_vec_count - 1.  The previous check used
+       * '>' and so passed db_vector == db_vec_count, an out-of-range
+       * vector, through to ndev_vec_mask().
+       */
+      if (db_vector < 0 || db_vector >= ndev->db_vec_count)
+              return 0;
+
+      return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
+}
 +
+/* Read the pending doorbell bits from the local bell register. */
+static u64 intel_ntb_db_read(struct ntb_dev *ntb)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      return ndev_db_read(ndev,
+                          ndev->self_mmio +
+                          ndev->self_reg->db_bell);
+}
 +
+/* Clear the given doorbell bits by writing them to the local bell register. */
+static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      return ndev_db_write(ndev, db_bits,
+                           ndev->self_mmio +
+                           ndev->self_reg->db_bell);
+}
 +
+/* Mask (disable interrupts for) the given doorbell bits. */
+int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      return ndev_db_set_mask(ndev, db_bits,
+                              ndev->self_mmio +
+                              ndev->self_reg->db_mask);
+}
 +
+/* Unmask (re-enable interrupts for) the given doorbell bits. */
+int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      return ndev_db_clear_mask(ndev, db_bits,
+                                ndev->self_mmio +
+                                ndev->self_reg->db_mask);
+}
 +
+/* Look up the physical address (and register size) of the peer's doorbell
+ * bell register, for clients that ring it with direct writes.
+ */
+int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+                         resource_size_t *db_size)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
+                          ndev->peer_reg->db_bell);
+}
 +
+/* Ring the peer's doorbell by writing db_bits to the peer bell register. */
+static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      return ndev_db_write(ndev, db_bits,
+                           ndev->peer_mmio +
+                           ndev->peer_reg->db_bell);
+}
 +
+/* Nonzero if unsafe scratchpad access is permitted on this device. */
+int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
+{
+      return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
+}
 +
+/* Number of scratchpad registers available to this side. */
+int intel_ntb_spad_count(struct ntb_dev *ntb)
+{
+      return ntb_ndev(ntb)->spad_count;
+}
 +
+/* Read local scratchpad register @idx. */
+u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      return ndev_spad_read(ndev, idx,
+                            ndev->self_mmio +
+                            ndev->self_reg->spad);
+}
 +
+/* Write @val to local scratchpad register @idx. */
+int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      return ndev_spad_write(ndev, idx, val,
+                             ndev->self_mmio +
+                             ndev->self_reg->spad);
+}
 +
+/* Look up the physical address of peer scratchpad @sidx.  @pidx is unused:
+ * this hardware only has the single default peer.
+ */
+int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
+                           phys_addr_t *spad_addr)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr,
+                            ndev->peer_reg->spad);
+}
 +
+/* Read peer scratchpad register @sidx (@pidx unused; single peer only). */
+u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      return ndev_spad_read(ndev, sidx,
+                            ndev->peer_mmio +
+                            ndev->peer_reg->spad);
+}
 +
+/* Write @val to peer scratchpad register @sidx (@pidx unused). */
+int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
+                            u32 val)
+{
+      struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+      return ndev_spad_write(ndev, sidx, val,
+                             ndev->peer_mmio +
+                             ndev->peer_reg->spad);
+}
 +
+/* Xeon doorbell registers are 16 bits wide; widen the value to u64. */
+static u64 xeon_db_ioread(void __iomem *mmio)
+{
+      return (u64)ioread16(mmio);
+}
 +
+/* Write the low 16 bits of @bits to the 16-bit Xeon doorbell register. */
+static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
+{
+      iowrite16((u16)bits, mmio);
+}
 +
+/* Poll for a link status change: clear the link-status doorbell bit, read
+ * the link status from PCI config space, and return 1 iff the cached
+ * lnk_sta changed (0 on config-read failure or no change).
+ */
+static int xeon_poll_link(struct intel_ntb_dev *ndev)
+{
+      u16 reg_val;
+      int rc;
+
+      ndev->reg->db_iowrite(ndev->db_link_mask,
+                            ndev->self_mmio +
+                            ndev->self_reg->db_bell);
+
+      rc = pci_read_config_word(ndev->ntb.pdev,
+                                XEON_LINK_STATUS_OFFSET, &reg_val);
+      if (rc)
+              return 0;
+
+      if (reg_val == ndev->lnk_sta)
+              return 0;
+
+      ndev->lnk_sta = reg_val;
+
+      return 1;
+}
 +
+/* The secondary side always reports the link as up; otherwise report the
+ * active bit of the cached link status.
+ */
+int xeon_link_is_up(struct intel_ntb_dev *ndev)
+{
+      return (ndev->ntb.topo == NTB_TOPO_SEC) ?
+              1 : NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
+}
 +
+/* Decode the topology field of the PPD register into the generic enum. */
+enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
+{
+      switch (ppd & XEON_PPD_TOPO_MASK) {
+      case XEON_PPD_TOPO_B2B_USD:
+              return NTB_TOPO_B2B_USD;
+      case XEON_PPD_TOPO_B2B_DSD:
+              return NTB_TOPO_B2B_DSD;
+      case XEON_PPD_TOPO_PRI_USD:
+      case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
+              return NTB_TOPO_PRI;
+      case XEON_PPD_TOPO_SEC_USD:
+      case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
+              return NTB_TOPO_SEC;
+      default:
+              return NTB_TOPO_NONE;
+      }
+}
 +
+/* Nonzero if the PPD register says bar 4 is split into bars 4 and 5. */
+static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
+{
+      if (!(ppd & XEON_PPD_SPLIT_BAR_MASK))
+              return 0;
+
+      dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
+      return 1;
+}
 +
+/* Set up interrupts using the Xeon MSI-X vector count and doorbell bit
+ * layout constants.
+ */
+static int xeon_init_isr(struct intel_ntb_dev *ndev)
+{
+      return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
+                           XEON_DB_MSIX_VECTOR_COUNT,
+                           XEON_DB_MSIX_VECTOR_SHIFT,
+                           XEON_DB_TOTAL_SHIFT);
+}
 +
+/* Release the interrupt resources acquired by xeon_init_isr(). */
+static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
+{
+      ndev_deinit_isr(ndev);
+}
 +
+/* Configure the b2b window used to reach the peer's NTB registers: choose
+ * which bar (if any) backs it, shrink or disable the matching secondary
+ * bar, program secondary bar bases and limits, set the outgoing
+ * translations, and finally map the peer mmio through the chosen bar.
+ */
+static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
+                           const struct intel_b2b_addr *addr,
+                           const struct intel_b2b_addr *peer_addr)
+{
+      struct pci_dev *pdev;
+      void __iomem *mmio;
+      resource_size_t bar_size;
+      phys_addr_t bar_addr;
+      int b2b_bar;
+      u8 bar_sz;
+
+      pdev = ndev->ntb.pdev;
+      mmio = ndev->self_mmio;
+
+      /* b2b_idx == UINT_MAX means no b2b memory window was requested */
+      if (ndev->b2b_idx == UINT_MAX) {
+              dev_dbg(&pdev->dev, "not using b2b mw\n");
+              b2b_bar = 0;
+              ndev->b2b_off = 0;
+      } else {
+              b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
+              if (b2b_bar < 0)
+                      return -EIO;
+
+              dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
+
+              bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
+
+              dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
+
+              if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
+                      dev_dbg(&pdev->dev, "b2b using first half of bar\n");
+                      ndev->b2b_off = bar_size >> 1;
+              } else if (XEON_B2B_MIN_SIZE <= bar_size) {
+                      dev_dbg(&pdev->dev, "b2b using whole bar\n");
+                      ndev->b2b_off = 0;
+                      --ndev->mw_count;
+              } else {
+                      dev_dbg(&pdev->dev, "b2b bar size is too small\n");
+                      return -EIO;
+              }
+      }
+
+      /* Reset the secondary bar sizes to match the primary bar sizes,
+       * except disable or halve the size of the b2b secondary bar.
+       *
+       * Note: code for each specific bar size register, because the register
+       * offsets are not in a consistent order (bar5sz comes after ppd, odd).
+       */
+      pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
+      dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
+      if (b2b_bar == 2) {
+              /* sizes are powers of two: -1 halves, 0 disables the bar */
+              if (ndev->b2b_off)
+                      bar_sz -= 1;
+              else
+                      bar_sz = 0;
+      }
+      pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
+      pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
+      dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);
+
+      if (!ndev->bar4_split) {
+              pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
+              dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
+              if (b2b_bar == 4) {
+                      if (ndev->b2b_off)
+                              bar_sz -= 1;
+                      else
+                              bar_sz = 0;
+              }
+              pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
+              pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
+              dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
+      } else {
+              pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
+              dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
+              if (b2b_bar == 4) {
+                      if (ndev->b2b_off)
+                              bar_sz -= 1;
+                      else
+                              bar_sz = 0;
+              }
+              pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
+              pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
+              dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);
+
+              pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
+              dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
+              if (b2b_bar == 5) {
+                      if (ndev->b2b_off)
+                              bar_sz -= 1;
+                      else
+                              bar_sz = 0;
+              }
+              pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
+              pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
+              dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
+      }
+
+      /* SBAR01 hit by first part of the b2b bar */
+      if (b2b_bar == 0)
+              bar_addr = addr->bar0_addr;
+      else if (b2b_bar == 2)
+              bar_addr = addr->bar2_addr64;
+      else if (b2b_bar == 4 && !ndev->bar4_split)
+              bar_addr = addr->bar4_addr64;
+      else if (b2b_bar == 4)
+              bar_addr = addr->bar4_addr32;
+      else if (b2b_bar == 5)
+              bar_addr = addr->bar5_addr32;
+      else
+              return -EIO;
+
+      dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
+      iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
+
+      /* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
+       * The b2b bar is either disabled above, or configured half-size, and
+       * it starts at the PBAR xlat + offset.
+       */
+
+      bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+      iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
+      bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
+      dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);
+
+      if (!ndev->bar4_split) {
+              bar_addr = addr->bar4_addr64 +
+                      (b2b_bar == 4 ? ndev->b2b_off : 0);
+              iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
+              bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
+              dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
+      } else {
+              bar_addr = addr->bar4_addr32 +
+                      (b2b_bar == 4 ? ndev->b2b_off : 0);
+              iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
+              bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
+              dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);
+
+              bar_addr = addr->bar5_addr32 +
+                      (b2b_bar == 5 ? ndev->b2b_off : 0);
+              iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
+              bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
+              dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
+      }
+
+      /* setup incoming bar limits == base addrs (zero length windows) */
+
+      bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+      iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
+      bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
+      dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);
+
+      if (!ndev->bar4_split) {
+              bar_addr = addr->bar4_addr64 +
+                      (b2b_bar == 4 ? ndev->b2b_off : 0);
+              iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
+              bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
+              dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
+      } else {
+              bar_addr = addr->bar4_addr32 +
+                      (b2b_bar == 4 ? ndev->b2b_off : 0);
+              iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
+              bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
+              dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);
+
+              bar_addr = addr->bar5_addr32 +
+                      (b2b_bar == 5 ? ndev->b2b_off : 0);
+              iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
+              bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
+              dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr);
+      }
+
+      /* zero incoming translation addrs */
+      iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);
+
+      if (!ndev->bar4_split) {
+              iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
+      } else {
+              iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
+              iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
+      }
+
+      /* zero outgoing translation limits (whole bar size windows) */
+      iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
+      if (!ndev->bar4_split) {
+              iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
+      } else {
+              iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
+              iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
+      }
+
+      /* set outgoing translation offsets */
+      bar_addr = peer_addr->bar2_addr64;
+      iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
+      bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
+      dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);
+
+      if (!ndev->bar4_split) {
+              bar_addr = peer_addr->bar4_addr64;
+              iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
+              bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
+              dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
+      } else {
+              bar_addr = peer_addr->bar4_addr32;
+              iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
+              bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
+              dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);
+
+              bar_addr = peer_addr->bar5_addr32;
+              iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
+              bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
+              dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
+      }
+
+      /* set the translation offset for b2b registers */
+      if (b2b_bar == 0)
+              bar_addr = peer_addr->bar0_addr;
+      else if (b2b_bar == 2)
+              bar_addr = peer_addr->bar2_addr64;
+      else if (b2b_bar == 4 && !ndev->bar4_split)
+              bar_addr = peer_addr->bar4_addr64;
+      else if (b2b_bar == 4)
+              bar_addr = peer_addr->bar4_addr32;
+      else if (b2b_bar == 5)
+              bar_addr = peer_addr->bar5_addr32;
+      else
+              return -EIO;
+
+      /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
+      dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
+      iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
+      iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
+
+      if (b2b_bar) {
+              /* map peer ntb mmio config space registers */
+              ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
+                                          XEON_B2B_MIN_SIZE);
+              if (!ndev->peer_mmio)
+                      return -EIO;
+
+              ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
+      }
+
+      return 0;
+}
 +
+/* Set the memory window, scratchpad and doorbell counts for this Xeon
+ * device, select the register sets matching its topology, perform b2b
+ * window setup for B2B topologies, and initialize the doorbell mask
+ * register with the full valid mask.
+ */
+static int xeon_init_ntb(struct intel_ntb_dev *ndev)
+{
+      struct device *dev = &ndev->ntb.pdev->dev;
+      int rc;
+      u32 ntb_ctl;
+
+      if (ndev->bar4_split)
+              ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
+      else
+              ndev->mw_count = XEON_MW_COUNT;
+
+      ndev->spad_count = XEON_SPAD_COUNT;
+      ndev->db_count = XEON_DB_COUNT;
+      ndev->db_link_mask = XEON_DB_LINK_BIT;
+
+      switch (ndev->ntb.topo) {
+      case NTB_TOPO_PRI:
+              /* the SDOORBELL_LOCKUP errata rules out PRI/SEC configs */
+              if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+                      dev_err(dev, "NTB Primary config disabled\n");
+                      return -EINVAL;
+              }
+
+              /* enable link to allow secondary side device to appear */
+              ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+              ntb_ctl &= ~NTB_CTL_DISABLE;
+              iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+              /* use half the spads for the peer */
+              ndev->spad_count >>= 1;
+              ndev->self_reg = &xeon_pri_reg;
+              ndev->peer_reg = &xeon_sec_reg;
+              ndev->xlat_reg = &xeon_sec_xlat;
+              break;
+
+      case NTB_TOPO_SEC:
+              if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+                      dev_err(dev, "NTB Secondary config disabled\n");
+                      return -EINVAL;
+              }
+              /* use half the spads for the peer */
+              ndev->spad_count >>= 1;
+              ndev->self_reg = &xeon_sec_reg;
+              ndev->peer_reg = &xeon_pri_reg;
+              ndev->xlat_reg = &xeon_pri_xlat;
+              break;
+
+      case NTB_TOPO_B2B_USD:
+      case NTB_TOPO_B2B_DSD:
+              ndev->self_reg = &xeon_pri_reg;
+              ndev->peer_reg = &xeon_b2b_reg;
+              ndev->xlat_reg = &xeon_sec_xlat;
+
+              /* with the SDOORBELL_LOCKUP errata, reach the peer's primary
+               * registers through a memory window instead of SDOORBELL
+               */
+              if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+                      ndev->peer_reg = &xeon_pri_reg;
+
+                      /* negative b2b_mw_idx counts from the last window */
+                      if (b2b_mw_idx < 0)
+                              ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
+                      else
+                              ndev->b2b_idx = b2b_mw_idx;
+
+                      if (ndev->b2b_idx >= ndev->mw_count) {
+                              dev_dbg(dev,
+                                      "b2b_mw_idx %d invalid for mw_count %u\n",
+                                      b2b_mw_idx, ndev->mw_count);
+                              return -EINVAL;
+                      }
+
+                      dev_dbg(dev, "setting up b2b mw idx %d means %d\n",
+                              b2b_mw_idx, ndev->b2b_idx);
+
+              } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
+                      dev_warn(dev, "Reduce doorbell count by 1\n");
+                      ndev->db_count -= 1;
+              }
+
+              if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
+                      rc = xeon_setup_b2b_mw(ndev,
+                                             &xeon_b2b_dsd_addr,
+                                             &xeon_b2b_usd_addr);
+              } else {
+                      rc = xeon_setup_b2b_mw(ndev,
+                                             &xeon_b2b_usd_addr,
+                                             &xeon_b2b_dsd_addr);
+              }
+              if (rc)
+                      return rc;
+
+              /* Enable Bus Master and Memory Space on the secondary side */
+              iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
+                        ndev->self_mmio + XEON_SPCICMD_OFFSET);
+
+              break;
+
+      default:
+              return -EINVAL;
+      }
+
+      ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+      /* initialize the doorbell mask register with the full valid mask */
+      ndev->reg->db_iowrite(ndev->db_valid_mask,
+                            ndev->self_mmio +
+                            ndev->self_reg->db_mask);
+
+      return 0;
+}
 +
 +static int xeon_init_dev(struct intel_ntb_dev *ndev)
 +{
 +      struct pci_dev *pdev;
 +      u8 ppd;
 +      int rc, mem;
 +
 +      pdev = ndev->ntb.pdev;
 +
 +      switch (pdev->device) {
 +      /* There is a Xeon hardware errata related to writes to SDOORBELL or
 +       * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
 +       * which may hang the system.  To workaround this use the second memory
 +       * window to access the interrupt and scratch pad registers on the
 +       * remote system.
 +       */
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
 +              ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
 +              break;
 +      }
 +
 +      switch (pdev->device) {
 +      /* There is a hardware errata related to accessing any register in
 +       * SB01BASE in the presence of bidirectional traffic crossing the NTB.
 +       */
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
 +              ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
 +              break;
 +      }
 +
 +      switch (pdev->device) {
 +      /* HW Errata on bit 14 of b2bdoorbell register.  Writes will not be
 +       * mirrored to the remote system.  Shrink the number of bits by one,
 +       * since bit 14 is the last bit.
 +       */
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
 +      case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
 +      case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
 +      case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
 +              ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
 +              break;
 +      }
 +
 +      ndev->reg = &xeon_reg;
 +
 +      rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
 +      if (rc)
 +              return -EIO;
 +
 +      ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
 +      dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
 +              ntb_topo_string(ndev->ntb.topo));
 +      if (ndev->ntb.topo == NTB_TOPO_NONE)
 +              return -EINVAL;
 +
 +      if (ndev->ntb.topo != NTB_TOPO_SEC) {
 +              ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
 +              dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n",
 +                      ppd, ndev->bar4_split);
 +      } else {
 +              /* This is a way for transparent BAR to figure out if we are
 +               * doing split BAR or not. There is no way for the hw on the
 +               * transparent side to know and set the PPD.
 +               */
 +              mem = pci_select_bars(pdev, IORESOURCE_MEM);
 +              ndev->bar4_split = hweight32(mem) ==
 +                      HSX_SPLIT_BAR_MW_COUNT + 1;
 +              dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n",
 +                      mem, ndev->bar4_split);
 +      }
 +
 +      rc = xeon_init_ntb(ndev);
 +      if (rc)
 +              return rc;
 +
 +      return xeon_init_isr(ndev);
 +}
 +
 +static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
 +{
 +      xeon_deinit_isr(ndev);
 +}
 +
 +static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
 +{
 +      int rc;
 +
 +      pci_set_drvdata(pdev, ndev);
 +
 +      rc = pci_enable_device(pdev);
 +      if (rc)
 +              goto err_pci_enable;
 +
 +      rc = pci_request_regions(pdev, NTB_NAME);
 +      if (rc)
 +              goto err_pci_regions;
 +
 +      pci_set_master(pdev);
 +
 +      rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 +      if (rc) {
 +              rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 +              if (rc)
 +                      goto err_dma_mask;
 +              dev_warn(&pdev->dev, "Cannot DMA highmem\n");
 +      }
 +
 +      rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 +      if (rc) {
 +              rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 +              if (rc)
 +                      goto err_dma_mask;
 +              dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
 +      }
 +      rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
 +                                        dma_get_mask(&pdev->dev));
 +      if (rc)
 +              goto err_dma_mask;
 +
 +      ndev->self_mmio = pci_iomap(pdev, 0, 0);
 +      if (!ndev->self_mmio) {
 +              rc = -EIO;
 +              goto err_mmio;
 +      }
 +      ndev->peer_mmio = ndev->self_mmio;
 +      ndev->peer_addr = pci_resource_start(pdev, 0);
 +
 +      return 0;
 +
 +err_mmio:
 +err_dma_mask:
 +      pci_clear_master(pdev);
 +      pci_release_regions(pdev);
 +err_pci_regions:
 +      pci_disable_device(pdev);
 +err_pci_enable:
 +      pci_set_drvdata(pdev, NULL);
 +      return rc;
 +}
 +
 +static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
 +{
 +      struct pci_dev *pdev = ndev->ntb.pdev;
 +
 +      if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
 +              pci_iounmap(pdev, ndev->peer_mmio);
 +      pci_iounmap(pdev, ndev->self_mmio);
 +
 +      pci_clear_master(pdev);
 +      pci_release_regions(pdev);
 +      pci_disable_device(pdev);
 +      pci_set_drvdata(pdev, NULL);
 +}
 +
 +static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
 +                                  struct pci_dev *pdev)
 +{
 +      ndev->ntb.pdev = pdev;
 +      ndev->ntb.topo = NTB_TOPO_NONE;
 +      ndev->ntb.ops = &intel_ntb_ops;
 +
 +      ndev->b2b_off = 0;
 +      ndev->b2b_idx = UINT_MAX;
 +
 +      ndev->bar4_split = 0;
 +
 +      ndev->mw_count = 0;
 +      ndev->spad_count = 0;
 +      ndev->db_count = 0;
 +      ndev->db_vec_count = 0;
 +      ndev->db_vec_shift = 0;
 +
 +      ndev->ntb_ctl = 0;
 +      ndev->lnk_sta = 0;
 +
 +      ndev->db_valid_mask = 0;
 +      ndev->db_link_mask = 0;
 +      ndev->db_mask = 0;
 +
 +      spin_lock_init(&ndev->db_mask_lock);
 +}
 +
 +static int intel_ntb_pci_probe(struct pci_dev *pdev,
 +                             const struct pci_device_id *id)
 +{
 +      struct intel_ntb_dev *ndev;
 +      int rc, node;
 +
 +      node = dev_to_node(&pdev->dev);
 +
 +      if (pdev_is_gen1(pdev)) {
 +              ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
 +              if (!ndev) {
 +                      rc = -ENOMEM;
 +                      goto err_ndev;
 +              }
 +
 +              ndev_init_struct(ndev, pdev);
 +
 +              rc = intel_ntb_init_pci(ndev, pdev);
 +              if (rc)
 +                      goto err_init_pci;
 +
 +              rc = xeon_init_dev(ndev);
 +              if (rc)
 +                      goto err_init_dev;
 +
 +      } else if (pdev_is_gen3(pdev)) {
 +              ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
 +              if (!ndev) {
 +                      rc = -ENOMEM;
 +                      goto err_ndev;
 +              }
 +
 +              ndev_init_struct(ndev, pdev);
 +              ndev->ntb.ops = &intel_ntb3_ops;
 +
 +              rc = intel_ntb_init_pci(ndev, pdev);
 +              if (rc)
 +                      goto err_init_pci;
 +
 +              rc = gen3_init_dev(ndev);
 +              if (rc)
 +                      goto err_init_dev;
 +
 +      } else {
 +              rc = -EINVAL;
 +              goto err_ndev;
 +      }
 +
 +      ndev_reset_unsafe_flags(ndev);
 +
 +      ndev->reg->poll_link(ndev);
 +
 +      ndev_init_debugfs(ndev);
 +
 +      rc = ntb_register_device(&ndev->ntb);
 +      if (rc)
 +              goto err_register;
 +
 +      dev_info(&pdev->dev, "NTB device registered.\n");
 +
 +      return 0;
 +
 +err_register:
 +      ndev_deinit_debugfs(ndev);
 +      if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
 +              xeon_deinit_dev(ndev);
 +err_init_dev:
 +      intel_ntb_deinit_pci(ndev);
 +err_init_pci:
 +      kfree(ndev);
 +err_ndev:
 +      return rc;
 +}
 +
 +static void intel_ntb_pci_remove(struct pci_dev *pdev)
 +{
 +      struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
 +
 +      ntb_unregister_device(&ndev->ntb);
 +      ndev_deinit_debugfs(ndev);
 +      if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
 +              xeon_deinit_dev(ndev);
 +      intel_ntb_deinit_pci(ndev);
 +      kfree(ndev);
 +}
 +
 +static const struct intel_ntb_reg xeon_reg = {
 +      .poll_link              = xeon_poll_link,
 +      .link_is_up             = xeon_link_is_up,
 +      .db_ioread              = xeon_db_ioread,
 +      .db_iowrite             = xeon_db_iowrite,
 +      .db_size                = sizeof(u32),
 +      .ntb_ctl                = XEON_NTBCNTL_OFFSET,
 +      .mw_bar                 = {2, 4, 5},
 +};
 +
 +static const struct intel_ntb_alt_reg xeon_pri_reg = {
 +      .db_bell                = XEON_PDOORBELL_OFFSET,
 +      .db_mask                = XEON_PDBMSK_OFFSET,
 +      .spad                   = XEON_SPAD_OFFSET,
 +};
 +
 +static const struct intel_ntb_alt_reg xeon_sec_reg = {
 +      .db_bell                = XEON_SDOORBELL_OFFSET,
 +      .db_mask                = XEON_SDBMSK_OFFSET,
 +      /* second half of the scratchpads */
 +      .spad                   = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
 +};
 +
 +static const struct intel_ntb_alt_reg xeon_b2b_reg = {
 +      .db_bell                = XEON_B2B_DOORBELL_OFFSET,
 +      .spad                   = XEON_B2B_SPAD_OFFSET,
 +};
 +
 +static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
 +      /* Note: no primary .bar0_base visible to the secondary side.
 +       *
 +       * The secondary side cannot get the base address stored in primary
 +       * bars.  The base address is necessary to set the limit register to
 +       * any value other than zero, or unlimited.
 +       *
 +       * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
 +       * window by setting the limit equal to base, nor can it limit the size
 +       * of the memory window by setting the limit to base + size.
 +       */
 +      .bar2_limit             = XEON_PBAR23LMT_OFFSET,
 +      .bar2_xlat              = XEON_PBAR23XLAT_OFFSET,
 +};
 +
 +static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
 +      .bar0_base              = XEON_SBAR0BASE_OFFSET,
 +      .bar2_limit             = XEON_SBAR23LMT_OFFSET,
 +      .bar2_xlat              = XEON_SBAR23XLAT_OFFSET,
 +};
 +
 +struct intel_b2b_addr xeon_b2b_usd_addr = {
 +      .bar2_addr64            = XEON_B2B_BAR2_ADDR64,
 +      .bar4_addr64            = XEON_B2B_BAR4_ADDR64,
 +      .bar4_addr32            = XEON_B2B_BAR4_ADDR32,
 +      .bar5_addr32            = XEON_B2B_BAR5_ADDR32,
 +};
 +
 +struct intel_b2b_addr xeon_b2b_dsd_addr = {
 +      .bar2_addr64            = XEON_B2B_BAR2_ADDR64,
 +      .bar4_addr64            = XEON_B2B_BAR4_ADDR64,
 +      .bar4_addr32            = XEON_B2B_BAR4_ADDR32,
 +      .bar5_addr32            = XEON_B2B_BAR5_ADDR32,
 +};
 +
 +/* operations for primary side of local ntb */
 +static const struct ntb_dev_ops intel_ntb_ops = {
 +      .mw_count               = intel_ntb_mw_count,
 +      .mw_get_align           = intel_ntb_mw_get_align,
 +      .mw_set_trans           = intel_ntb_mw_set_trans,
 +      .peer_mw_count          = intel_ntb_peer_mw_count,
 +      .peer_mw_get_addr       = intel_ntb_peer_mw_get_addr,
 +      .link_is_up             = intel_ntb_link_is_up,
 +      .link_enable            = intel_ntb_link_enable,
 +      .link_disable           = intel_ntb_link_disable,
 +      .db_is_unsafe           = intel_ntb_db_is_unsafe,
 +      .db_valid_mask          = intel_ntb_db_valid_mask,
 +      .db_vector_count        = intel_ntb_db_vector_count,
 +      .db_vector_mask         = intel_ntb_db_vector_mask,
 +      .db_read                = intel_ntb_db_read,
 +      .db_clear               = intel_ntb_db_clear,
 +      .db_set_mask            = intel_ntb_db_set_mask,
 +      .db_clear_mask          = intel_ntb_db_clear_mask,
 +      .peer_db_addr           = intel_ntb_peer_db_addr,
 +      .peer_db_set            = intel_ntb_peer_db_set,
 +      .spad_is_unsafe         = intel_ntb_spad_is_unsafe,
 +      .spad_count             = intel_ntb_spad_count,
 +      .spad_read              = intel_ntb_spad_read,
 +      .spad_write             = intel_ntb_spad_write,
 +      .peer_spad_addr         = intel_ntb_peer_spad_addr,
 +      .peer_spad_read         = intel_ntb_peer_spad_read,
 +      .peer_spad_write        = intel_ntb_peer_spad_write,
 +};
 +
 +static const struct file_operations intel_ntb_debugfs_info = {
 +      .owner = THIS_MODULE,
 +      .open = simple_open,
 +      .read = ndev_debugfs_read,
 +};
 +
 +static const struct pci_device_id intel_ntb_pci_tbl[] = {
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
 +      {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
 +      {0}
 +};
 +MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
 +
 +static struct pci_driver intel_ntb_pci_driver = {
 +      .name = KBUILD_MODNAME,
 +      .id_table = intel_ntb_pci_tbl,
 +      .probe = intel_ntb_pci_probe,
 +      .remove = intel_ntb_pci_remove,
 +};
 +
 +static int __init intel_ntb_pci_driver_init(void)
 +{
 +      pr_info("%s %s\n", NTB_DESC, NTB_VER);
 +
 +      if (debugfs_initialized())
 +              debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
 +
 +      return pci_register_driver(&intel_ntb_pci_driver);
 +}
 +module_init(intel_ntb_pci_driver_init);
 +
 +static void __exit intel_ntb_pci_driver_exit(void)
 +{
 +      pci_unregister_driver(&intel_ntb_pci_driver);
 +
 +      debugfs_remove_recursive(debugfs_dir);
 +}
 +module_exit(intel_ntb_pci_driver_exit);
index 8145be34328b78bf9b59272d974b34fa98ddae28,504bdcc57ae843cc24ea909911b69d81772e485b..9398959664769b5f6cd3c79e4914199d1cf35f0d
@@@ -637,7 -637,7 +637,7 @@@ static int ntb_transport_setup_qp_mw(st
         */
        node = dev_to_node(&ndev->dev);
        for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
 -              entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
 +              entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
                if (!entry)
                        return -ENOMEM;
  
@@@ -1102,7 -1102,7 +1102,7 @@@ static int ntb_transport_probe(struct n
        max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
        nt->mw_count = min(mw_count, max_mw_count_for_spads);
  
-       nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
+       nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec),
                                  GFP_KERNEL, node);
        if (!nt->mw_vec) {
                rc = -ENOMEM;
        nt->qp_bitmap = qp_bitmap;
        nt->qp_bitmap_free = qp_bitmap;
  
-       nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
+       nt->qp_vec = kcalloc_node(qp_count, sizeof(*nt->qp_vec),
                                  GFP_KERNEL, node);
        if (!nt->qp_vec) {
                rc = -ENOMEM;
@@@ -1828,7 -1828,7 +1828,7 @@@ ntb_transport_create_queue(void *data, 
                qp->rx_dma_chan ? "DMA" : "CPU");
  
        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
 -              entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
 +              entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
                if (!entry)
                        goto err1;
  
        qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;
  
        for (i = 0; i < qp->tx_max_entry; i++) {
 -              entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
 +              entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node);
                if (!entry)
                        goto err2;
  
diff --combined drivers/of/platform.c
index 59731a950c1f01262c2820c7f7f82f334b0f4762,14cc962e0eec990e7d8b5a813ba5ad9bc5a1b6c9..6925d993e1f0f41b4894303fe77b3cf29729f595
@@@ -129,7 -129,7 +129,7 @@@ struct platform_device *of_device_alloc
  
        /* Populate the resource table */
        if (num_irq || num_reg) {
-               res = kzalloc(sizeof(*res) * (num_irq + num_reg), GFP_KERNEL);
+               res = kcalloc(num_irq + num_reg, sizeof(*res), GFP_KERNEL);
                if (!res) {
                        platform_device_put(dev);
                        return NULL;
@@@ -505,7 -505,6 +505,7 @@@ EXPORT_SYMBOL_GPL(of_platform_default_p
  #ifndef CONFIG_PPC
  static const struct of_device_id reserved_mem_matches[] = {
        { .compatible = "qcom,rmtfs-mem" },
 +      { .compatible = "qcom,cmd-db" },
        { .compatible = "ramoops" },
        {}
  };
index cfaeef81d868a22eac96046b2c4e6ab49f68494e,0000000000000000000000000000000000000000..345aab56ce8bb1c7434a00c0d81d14ce74fbebc4
mode 100644,000000..100644
--- /dev/null
@@@ -1,846 -1,0 +1,846 @@@
-       phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
 +// SPDX-License-Identifier: GPL-2.0
 +/*
 + * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 + *
 + * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 + *
 + * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 + */
 +
 +#include <linux/delay.h>
 +#include <linux/device.h>
 +#include <linux/err.h>
 +#include <linux/interrupt.h>
 +#include <linux/irq.h>
 +#include <linux/irqdomain.h>
 +#include <linux/kernel.h>
 +#include <linux/init.h>
 +#include <linux/of_device.h>
 +#include <linux/of_gpio.h>
 +#include <linux/of_pci.h>
 +#include <linux/pci.h>
 +#include <linux/phy/phy.h>
 +#include <linux/platform_device.h>
 +#include <linux/pm_runtime.h>
 +#include <linux/resource.h>
 +#include <linux/types.h>
 +#include <linux/mfd/syscon.h>
 +#include <linux/regmap.h>
 +
 +#include "../../pci.h"
 +#include "pcie-designware.h"
 +
 +/* PCIe controller wrapper DRA7XX configuration registers */
 +
 +#define       PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN             0x0024
 +#define       PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN         0x0028
 +#define       ERR_SYS                                         BIT(0)
 +#define       ERR_FATAL                                       BIT(1)
 +#define       ERR_NONFATAL                                    BIT(2)
 +#define       ERR_COR                                         BIT(3)
 +#define       ERR_AXI                                         BIT(4)
 +#define       ERR_ECRC                                        BIT(5)
 +#define       PME_TURN_OFF                                    BIT(8)
 +#define       PME_TO_ACK                                      BIT(9)
 +#define       PM_PME                                          BIT(10)
 +#define       LINK_REQ_RST                                    BIT(11)
 +#define       LINK_UP_EVT                                     BIT(12)
 +#define       CFG_BME_EVT                                     BIT(13)
 +#define       CFG_MSE_EVT                                     BIT(14)
 +#define       INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
 +                      ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
 +                      LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
 +
 +#define       PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI              0x0034
 +#define       PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI          0x0038
 +#define       INTA                                            BIT(0)
 +#define       INTB                                            BIT(1)
 +#define       INTC                                            BIT(2)
 +#define       INTD                                            BIT(3)
 +#define       MSI                                             BIT(4)
 +#define       LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
 +
 +#define       PCIECTRL_TI_CONF_DEVICE_TYPE                    0x0100
 +#define       DEVICE_TYPE_EP                                  0x0
 +#define       DEVICE_TYPE_LEG_EP                              0x1
 +#define       DEVICE_TYPE_RC                                  0x4
 +
 +#define       PCIECTRL_DRA7XX_CONF_DEVICE_CMD                 0x0104
 +#define       LTSSM_EN                                        0x1
 +
 +#define       PCIECTRL_DRA7XX_CONF_PHY_CS                     0x010C
 +#define       LINK_UP                                         BIT(16)
 +#define       DRA7XX_CPU_TO_BUS_ADDR                          0x0FFFFFFF
 +
 +#define EXP_CAP_ID_OFFSET                             0x70
 +
 +#define       PCIECTRL_TI_CONF_INTX_ASSERT                    0x0124
 +#define       PCIECTRL_TI_CONF_INTX_DEASSERT                  0x0128
 +
 +#define       PCIECTRL_TI_CONF_MSI_XMT                        0x012c
 +#define MSI_REQ_GRANT                                 BIT(0)
 +#define MSI_VECTOR_SHIFT                              7
 +
 +struct dra7xx_pcie {
 +      struct dw_pcie          *pci;
 +      void __iomem            *base;          /* DT ti_conf */
 +      int                     phy_count;      /* DT phy-names count */
 +      struct phy              **phy;
 +      int                     link_gen;
 +      struct irq_domain       *irq_domain;
 +      enum dw_pcie_device_mode mode;
 +};
 +
 +struct dra7xx_pcie_of_data {
 +      enum dw_pcie_device_mode mode;
 +};
 +
 +#define to_dra7xx_pcie(x)     dev_get_drvdata((x)->dev)
 +
 +static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
 +{
 +      return readl(pcie->base + offset);
 +}
 +
 +static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
 +                                    u32 value)
 +{
 +      writel(value, pcie->base + offset);
 +}
 +
 +static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
 +{
 +      return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
 +}
 +
 +static int dra7xx_pcie_link_up(struct dw_pcie *pci)
 +{
 +      struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 +      u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
 +
 +      return !!(reg & LINK_UP);
 +}
 +
 +static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
 +{
 +      struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 +      u32 reg;
 +
 +      reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
 +      reg &= ~LTSSM_EN;
 +      dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
 +}
 +
 +static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
 +{
 +      struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 +      struct device *dev = pci->dev;
 +      u32 reg;
 +      u32 exp_cap_off = EXP_CAP_ID_OFFSET;
 +
 +      if (dw_pcie_link_up(pci)) {
 +              dev_err(dev, "link is already up\n");
 +              return 0;
 +      }
 +
 +      if (dra7xx->link_gen == 1) {
 +              dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
 +                           4, &reg);
 +              if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
 +                      reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
 +                      reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
 +                      dw_pcie_write(pci->dbi_base + exp_cap_off +
 +                                    PCI_EXP_LNKCAP, 4, reg);
 +              }
 +
 +              dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
 +                           2, &reg);
 +              if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
 +                      reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
 +                      reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
 +                      dw_pcie_write(pci->dbi_base + exp_cap_off +
 +                                    PCI_EXP_LNKCTL2, 2, reg);
 +              }
 +      }
 +
 +      reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
 +      reg |= LTSSM_EN;
 +      dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
 +
 +      return 0;
 +}
 +
 +static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
 +{
 +      dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
 +                         LEG_EP_INTERRUPTS | MSI);
 +
 +      dra7xx_pcie_writel(dra7xx,
 +                         PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
 +                         MSI | LEG_EP_INTERRUPTS);
 +}
 +
 +static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
 +{
 +      dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
 +                         INTERRUPTS);
 +      dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
 +                         INTERRUPTS);
 +}
 +
 +static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
 +{
 +      dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
 +      dra7xx_pcie_enable_msi_interrupts(dra7xx);
 +}
 +
 +static int dra7xx_pcie_host_init(struct pcie_port *pp)
 +{
 +      struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 +      struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 +
 +      dw_pcie_setup_rc(pp);
 +
 +      dra7xx_pcie_establish_link(pci);
 +      dw_pcie_wait_for_link(pci);
 +      dw_pcie_msi_init(pp);
 +      dra7xx_pcie_enable_interrupts(dra7xx);
 +
 +      return 0;
 +}
 +
 +static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
 +      .host_init = dra7xx_pcie_host_init,
 +};
 +
 +static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
 +                              irq_hw_number_t hwirq)
 +{
 +      irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
 +      irq_set_chip_data(irq, domain->host_data);
 +
 +      return 0;
 +}
 +
 +static const struct irq_domain_ops intx_domain_ops = {
 +      .map = dra7xx_pcie_intx_map,
 +      .xlate = pci_irqd_intx_xlate,
 +};
 +
 +static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
 +{
 +      struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 +      struct device *dev = pci->dev;
 +      struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 +      struct device_node *node = dev->of_node;
 +      struct device_node *pcie_intc_node =  of_get_next_child(node, NULL);
 +
 +      if (!pcie_intc_node) {
 +              dev_err(dev, "No PCIe Intc node found\n");
 +              return -ENODEV;
 +      }
 +
 +      dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
 +                                                 &intx_domain_ops, pp);
 +      if (!dra7xx->irq_domain) {
 +              dev_err(dev, "Failed to get a INTx IRQ domain\n");
 +              return -ENODEV;
 +      }
 +
 +      return 0;
 +}
 +
 +static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
 +{
 +      struct dra7xx_pcie *dra7xx = arg;
 +      struct dw_pcie *pci = dra7xx->pci;
 +      struct pcie_port *pp = &pci->pp;
 +      unsigned long reg;
 +      u32 virq, bit;
 +
 +      reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
 +
 +      switch (reg) {
 +      case MSI:
 +              dw_handle_msi_irq(pp);
 +              break;
 +      case INTA:
 +      case INTB:
 +      case INTC:
 +      case INTD:
 +              for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
 +                      virq = irq_find_mapping(dra7xx->irq_domain, bit);
 +                      if (virq)
 +                              generic_handle_irq(virq);
 +              }
 +              break;
 +      }
 +
 +      dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 +{
 +      struct dra7xx_pcie *dra7xx = arg;
 +      struct dw_pcie *pci = dra7xx->pci;
 +      struct device *dev = pci->dev;
 +      struct dw_pcie_ep *ep = &pci->ep;
 +      u32 reg;
 +
 +      reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
 +
 +      if (reg & ERR_SYS)
 +              dev_dbg(dev, "System Error\n");
 +
 +      if (reg & ERR_FATAL)
 +              dev_dbg(dev, "Fatal Error\n");
 +
 +      if (reg & ERR_NONFATAL)
 +              dev_dbg(dev, "Non Fatal Error\n");
 +
 +      if (reg & ERR_COR)
 +              dev_dbg(dev, "Correctable Error\n");
 +
 +      if (reg & ERR_AXI)
 +              dev_dbg(dev, "AXI tag lookup fatal Error\n");
 +
 +      if (reg & ERR_ECRC)
 +              dev_dbg(dev, "ECRC Error\n");
 +
 +      if (reg & PME_TURN_OFF)
 +              dev_dbg(dev,
 +                      "Power Management Event Turn-Off message received\n");
 +
 +      if (reg & PME_TO_ACK)
 +              dev_dbg(dev,
 +                      "Power Management Turn-Off Ack message received\n");
 +
 +      if (reg & PM_PME)
 +              dev_dbg(dev, "PM Power Management Event message received\n");
 +
 +      if (reg & LINK_REQ_RST)
 +              dev_dbg(dev, "Link Request Reset\n");
 +
 +      if (reg & LINK_UP_EVT) {
 +              if (dra7xx->mode == DW_PCIE_EP_TYPE)
 +                      dw_pcie_ep_linkup(ep);
 +              dev_dbg(dev, "Link-up state change\n");
 +      }
 +
 +      if (reg & CFG_BME_EVT)
 +              dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
 +
 +      if (reg & CFG_MSE_EVT)
 +              dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
 +
 +      dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
 +{
 +      struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 +      struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 +      enum pci_barno bar;
 +
 +      for (bar = BAR_0; bar <= BAR_5; bar++)
 +              dw_pcie_ep_reset_bar(pci, bar);
 +
 +      dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
 +}
 +
 +static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
 +{
 +      dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
 +      mdelay(1);
 +      dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
 +}
 +
 +static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
 +                                    u8 interrupt_num)
 +{
 +      u32 reg;
 +
 +      reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
 +      reg |= MSI_REQ_GRANT;
 +      dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
 +}
 +
 +static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
 +                               enum pci_epc_irq_type type, u8 interrupt_num)
 +{
 +      struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 +      struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 +
 +      switch (type) {
 +      case PCI_EPC_IRQ_LEGACY:
 +              dra7xx_pcie_raise_legacy_irq(dra7xx);
 +              break;
 +      case PCI_EPC_IRQ_MSI:
 +              dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
 +              break;
 +      default:
 +              dev_err(pci->dev, "UNKNOWN IRQ type\n");
 +      }
 +
 +      return 0;
 +}
 +
 +static struct dw_pcie_ep_ops pcie_ep_ops = {
 +      .ep_init = dra7xx_pcie_ep_init,
 +      .raise_irq = dra7xx_pcie_raise_irq,
 +};
 +
 +static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
 +                                   struct platform_device *pdev)
 +{
 +      int ret;
 +      struct dw_pcie_ep *ep;
 +      struct resource *res;
 +      struct device *dev = &pdev->dev;
 +      struct dw_pcie *pci = dra7xx->pci;
 +
 +      ep = &pci->ep;
 +      ep->ops = &pcie_ep_ops;
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
 +      pci->dbi_base = devm_ioremap_resource(dev, res);
 +      if (IS_ERR(pci->dbi_base))
 +              return PTR_ERR(pci->dbi_base);
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
 +      pci->dbi_base2 = devm_ioremap_resource(dev, res);
 +      if (IS_ERR(pci->dbi_base2))
 +              return PTR_ERR(pci->dbi_base2);
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
 +      if (!res)
 +              return -EINVAL;
 +
 +      ep->phys_base = res->start;
 +      ep->addr_size = resource_size(res);
 +
 +      ret = dw_pcie_ep_init(ep);
 +      if (ret) {
 +              dev_err(dev, "failed to initialize endpoint\n");
 +              return ret;
 +      }
 +
 +      return 0;
 +}
 +
/*
 * Configure host (RC) mode: request the MSI interrupt, build the legacy
 * IRQ domain, map the RC DBI registers and hand over to the DesignWare
 * host core.  All resources are devm-managed.
 */
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct resource *res;

	/* IRQ index 1 is the MSI interrupt; index 0 (main) is taken in probe. */
	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return pp->irq;
	}

	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       "dra7-pcie-msi", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	/* Legacy INTx domain must exist before the host core enumerates. */
	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
 +
/* Core callbacks shared by RC and EP modes (link control, address fixup). */
static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};
 +
 +static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
 +{
 +      int phy_count = dra7xx->phy_count;
 +
 +      while (phy_count--) {
 +              phy_power_off(dra7xx->phy[phy_count]);
 +              phy_exit(dra7xx->phy[phy_count]);
 +      }
 +}
 +
 +static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
 +{
 +      int phy_count = dra7xx->phy_count;
 +      int ret;
 +      int i;
 +
 +      for (i = 0; i < phy_count; i++) {
 +              ret = phy_init(dra7xx->phy[i]);
 +              if (ret < 0)
 +                      goto err_phy;
 +
 +              ret = phy_power_on(dra7xx->phy[i]);
 +              if (ret < 0) {
 +                      phy_exit(dra7xx->phy[i]);
 +                      goto err_phy;
 +              }
 +      }
 +
 +      return 0;
 +
 +err_phy:
 +      while (--i >= 0) {
 +              phy_power_off(dra7xx->phy[i]);
 +              phy_exit(dra7xx->phy[i]);
 +      }
 +
 +      return ret;
 +}
 +
/* Per-compatible mode selection: RC vs EP, looked up in probe. */
static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{},
};
 +
 +/*
 + * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 + * @dra7xx: the dra7xx device where the workaround should be applied
 + *
 + * Access to the PCIe slave port that are not 32-bit aligned will result
 + * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
 + * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
 + * 0x3.
 + *
 + * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 + */
 +static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
 +{
 +      int ret;
 +      struct device_node *np = dev->of_node;
 +      struct of_phandle_args args;
 +      struct regmap *regmap;
 +
 +      regmap = syscon_regmap_lookup_by_phandle(np,
 +                                               "ti,syscon-unaligned-access");
 +      if (IS_ERR(regmap)) {
 +              dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
 +              return -EINVAL;
 +      }
 +
 +      ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
 +                                             2, 0, &args);
 +      if (ret) {
 +              dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
 +              return ret;
 +      }
 +
 +      ret = regmap_update_bits(regmap, args.args[0], args.args[1],
 +                               args.args[1]);
 +      if (ret)
 +              dev_err(dev, "failed to enable unaligned access\n");
 +
 +      of_node_put(args.np);
 +
 +      return ret;
 +}
 +
 +static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 +{
 +      u32 reg;
 +      int ret;
 +      int irq;
 +      int i;
 +      int phy_count;
 +      struct phy **phy;
 +      struct device_link **link;
 +      void __iomem *base;
 +      struct resource *res;
 +      struct dw_pcie *pci;
 +      struct dra7xx_pcie *dra7xx;
 +      struct device *dev = &pdev->dev;
 +      struct device_node *np = dev->of_node;
 +      char name[10];
 +      struct gpio_desc *reset;
 +      const struct of_device_id *match;
 +      const struct dra7xx_pcie_of_data *data;
 +      enum dw_pcie_device_mode mode;
 +
 +      match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
 +      if (!match)
 +              return -EINVAL;
 +
 +      data = (struct dra7xx_pcie_of_data *)match->data;
 +      mode = (enum dw_pcie_device_mode)data->mode;
 +
 +      dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
 +      if (!dra7xx)
 +              return -ENOMEM;
 +
 +      pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
 +      if (!pci)
 +              return -ENOMEM;
 +
 +      pci->dev = dev;
 +      pci->ops = &dw_pcie_ops;
 +
 +      irq = platform_get_irq(pdev, 0);
 +      if (irq < 0) {
 +              dev_err(dev, "missing IRQ resource: %d\n", irq);
 +              return irq;
 +      }
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
 +      base = devm_ioremap_nocache(dev, res->start, resource_size(res));
 +      if (!base)
 +              return -ENOMEM;
 +
 +      phy_count = of_property_count_strings(np, "phy-names");
 +      if (phy_count < 0) {
 +              dev_err(dev, "unable to find the strings\n");
 +              return phy_count;
 +      }
 +
-       link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
++      phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
 +      if (!phy)
 +              return -ENOMEM;
 +
++      link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
 +      if (!link)
 +              return -ENOMEM;
 +
 +      for (i = 0; i < phy_count; i++) {
 +              snprintf(name, sizeof(name), "pcie-phy%d", i);
 +              phy[i] = devm_phy_get(dev, name);
 +              if (IS_ERR(phy[i]))
 +                      return PTR_ERR(phy[i]);
 +
 +              link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
 +              if (!link[i]) {
 +                      ret = -EINVAL;
 +                      goto err_link;
 +              }
 +      }
 +
 +      dra7xx->base = base;
 +      dra7xx->phy = phy;
 +      dra7xx->pci = pci;
 +      dra7xx->phy_count = phy_count;
 +
 +      ret = dra7xx_pcie_enable_phy(dra7xx);
 +      if (ret) {
 +              dev_err(dev, "failed to enable phy\n");
 +              return ret;
 +      }
 +
 +      platform_set_drvdata(pdev, dra7xx);
 +
 +      pm_runtime_enable(dev);
 +      ret = pm_runtime_get_sync(dev);
 +      if (ret < 0) {
 +              dev_err(dev, "pm_runtime_get_sync failed\n");
 +              goto err_get_sync;
 +      }
 +
 +      reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
 +      if (IS_ERR(reset)) {
 +              ret = PTR_ERR(reset);
 +              dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
 +              goto err_gpio;
 +      }
 +
 +      reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
 +      reg &= ~LTSSM_EN;
 +      dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
 +
 +      dra7xx->link_gen = of_pci_get_max_link_speed(np);
 +      if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
 +              dra7xx->link_gen = 2;
 +
 +      switch (mode) {
 +      case DW_PCIE_RC_TYPE:
 +              if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
 +                      ret = -ENODEV;
 +                      goto err_gpio;
 +              }
 +
 +              dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
 +                                 DEVICE_TYPE_RC);
 +              ret = dra7xx_add_pcie_port(dra7xx, pdev);
 +              if (ret < 0)
 +                      goto err_gpio;
 +              break;
 +      case DW_PCIE_EP_TYPE:
 +              if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
 +                      ret = -ENODEV;
 +                      goto err_gpio;
 +              }
 +
 +              dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
 +                                 DEVICE_TYPE_EP);
 +
 +              ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
 +              if (ret)
 +                      goto err_gpio;
 +
 +              ret = dra7xx_add_pcie_ep(dra7xx, pdev);
 +              if (ret < 0)
 +                      goto err_gpio;
 +              break;
 +      default:
 +              dev_err(dev, "INVALID device type %d\n", mode);
 +      }
 +      dra7xx->mode = mode;
 +
 +      ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
 +                             IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
 +      if (ret) {
 +              dev_err(dev, "failed to request irq\n");
 +              goto err_gpio;
 +      }
 +
 +      return 0;
 +
 +err_gpio:
 +      pm_runtime_put(dev);
 +
 +err_get_sync:
 +      pm_runtime_disable(dev);
 +      dra7xx_pcie_disable_phy(dra7xx);
 +
 +err_link:
 +      while (--i >= 0)
 +              device_link_del(link[i]);
 +
 +      return ret;
 +}
 +
 +#ifdef CONFIG_PM_SLEEP
 +static int dra7xx_pcie_suspend(struct device *dev)
 +{
 +      struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
 +      struct dw_pcie *pci = dra7xx->pci;
 +      u32 val;
 +
 +      if (dra7xx->mode != DW_PCIE_RC_TYPE)
 +              return 0;
 +
 +      /* clear MSE */
 +      val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
 +      val &= ~PCI_COMMAND_MEMORY;
 +      dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
 +
 +      return 0;
 +}
 +
 +static int dra7xx_pcie_resume(struct device *dev)
 +{
 +      struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
 +      struct dw_pcie *pci = dra7xx->pci;
 +      u32 val;
 +
 +      if (dra7xx->mode != DW_PCIE_RC_TYPE)
 +              return 0;
 +
 +      /* set MSE */
 +      val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
 +      val |= PCI_COMMAND_MEMORY;
 +      dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
 +
 +      return 0;
 +}
 +
/* Late suspend: power down the PHYs once interrupts are quiesced. */
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	dra7xx_pcie_disable_phy(dev_get_drvdata(dev));

	return 0;
}
 +
/* Early resume: re-enable the PHYs before interrupts come back. */
static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret = dra7xx_pcie_enable_phy(dra7xx);

	if (ret)
		dev_err(dev, "failed to enable phy\n");

	return ret;
}
 +#endif
 +
/*
 * .shutdown: stop the link and power off the PHYs so the controller is
 * quiescent across reboot/kexec.
 */
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	/* Balance the pm_runtime_get_sync() taken in probe. */
	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}
 +
/* Regular sleep ops toggle MSE; noirq ops handle the PHYs. */
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

/* Built-in only (no module unload path); probe is __init. */
static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
index 1eec4415a77f05fe1ebfd7593815fee553d85ae6,0000000000000000000000000000000000000000..8650416f6f9e46df6c722be7ae780da0916218d5
mode 100644,000000..100644
--- /dev/null
@@@ -1,422 -1,0 +1,424 @@@
-       ep->ib_window_map = devm_kzalloc(dev, sizeof(long) *
 +// SPDX-License-Identifier: GPL-2.0
 +/**
 + * Synopsys DesignWare PCIe Endpoint controller driver
 + *
 + * Copyright (C) 2017 Texas Instruments
 + * Author: Kishon Vijay Abraham I <kishon@ti.com>
 + */
 +
 +#include <linux/of.h>
 +
 +#include "pcie-designware.h"
 +#include <linux/pci-epc.h>
 +#include <linux/pci-epf.h>
 +
 +void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
 +{
 +      struct pci_epc *epc = ep->epc;
 +
 +      pci_epc_linkup(epc);
 +}
 +
/*
 * Clear a BAR in both the shadow register space (dbi2) and the standard
 * config view (dbi), with read-only write access enabled around the
 * writes.  For a 64-bit BAR the upper half (reg + 4) is cleared too.
 */
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
				   int flags)
{
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writel_dbi2(pci, reg, 0x0);
	dw_pcie_writel_dbi(pci, reg, 0x0);
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
		dw_pcie_writel_dbi(pci, reg + 4, 0x0);
	}
	dw_pcie_dbi_ro_wr_dis(pci);
}
 +
/* Public helper: reset @bar with no flags (lower 32 bits only). */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
	__dw_pcie_ep_reset_bar(pci, bar, 0);
}
 +
/*
 * pci_epc_ops.write_header: program the standard config-space header
 * fields from @hdr into the DBI registers.  The DBI read-only write
 * window must be open for these to take effect.  Always returns 0.
 */
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
				   struct pci_epf_header *hdr)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
	dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
	dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
	/* Class register holds subclass in the low byte, base class high. */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
			   hdr->subclass_code | hdr->baseclass_code << 8);
	dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
			   hdr->cache_line_size);
	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
			   hdr->subsys_vendor_id);
	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
			   hdr->interrupt_pin);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
 +
/*
 * Claim a free inbound iATU window and program it to translate accesses
 * to @bar into @cpu_addr.  Records the window index in bar_to_atu so
 * clear_bar can release it later.  Returns 0 or a negative errno.
 */
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
				  dma_addr_t cpu_addr,
				  enum dw_pcie_as_type as_type)
{
	int ret;
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
	if (free_win >= ep->num_ib_windows) {
		dev_err(pci->dev, "No free inbound window\n");
		return -EINVAL;
	}

	ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
				       as_type);
	if (ret < 0) {
		dev_err(pci->dev, "Failed to program IB window\n");
		return ret;
	}

	/* Mark the window busy only after programming succeeded. */
	ep->bar_to_atu[bar] = free_win;
	set_bit(free_win, ep->ib_window_map);

	return 0;
}
 +
 +static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
 +                                 u64 pci_addr, size_t size)
 +{
 +      u32 free_win;
 +      struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 +
 +      free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
 +      if (free_win >= ep->num_ob_windows) {
 +              dev_err(pci->dev, "No free outbound window\n");
 +              return -EINVAL;
 +      }
 +
 +      dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
 +                                phys_addr, pci_addr, size);
 +
 +      set_bit(free_win, ep->ob_window_map);
 +      ep->outbound_addr[free_win] = phys_addr;
 +
 +      return 0;
 +}
 +
/*
 * pci_epc_ops.clear_bar: reset the BAR registers and release the inbound
 * iATU window that set_bar associated with it.
 */
static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
				 struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	u32 atu_index = ep->bar_to_atu[bar];

	__dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);

	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
	clear_bit(atu_index, ep->ib_window_map);
}
 +
/*
 * pci_epc_ops.set_bar: route @bar to @epf_bar->phys_addr via an inbound
 * iATU window, then program the BAR mask (dbi2) and flags (dbi).  For a
 * 64-bit BAR the upper mask goes in reg + 4.  Returns 0 or a negative
 * errno from the ATU setup.
 */
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
			      struct pci_epf_bar *epf_bar)
{
	int ret;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	int flags = epf_bar->flags;
	enum dw_pcie_as_type as_type;
	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);

	/* PCI_BASE_ADDRESS_SPACE bit: 0 = memory BAR, 1 = I/O BAR. */
	if (!(flags & PCI_BASE_ADDRESS_SPACE))
		as_type = DW_PCIE_AS_MEM;
	else
		as_type = DW_PCIE_AS_IO;

	ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);

	/* dbi2 write sets the BAR size mask; dbi write sets the flags. */
	dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
	dw_pcie_writel_dbi(pci, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
		dw_pcie_writel_dbi(pci, reg + 4, 0);
	}

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
 +
 +static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
 +                            u32 *atu_index)
 +{
 +      u32 index;
 +
 +      for (index = 0; index < ep->num_ob_windows; index++) {
 +              if (ep->outbound_addr[index] != addr)
 +                      continue;
 +              *atu_index = index;
 +              return 0;
 +      }
 +
 +      return -EINVAL;
 +}
 +
 +static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
 +                                phys_addr_t addr)
 +{
 +      int ret;
 +      u32 atu_index;
 +      struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 +
 +      ret = dw_pcie_find_index(ep, addr, &atu_index);
 +      if (ret < 0)
 +              return;
 +
 +      dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
 +      clear_bit(atu_index, ep->ob_window_map);
 +}
 +
 +static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
 +                             phys_addr_t addr,
 +                             u64 pci_addr, size_t size)
 +{
 +      int ret;
 +      struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 +
 +      ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
 +      if (ret) {
 +              dev_err(pci->dev, "Failed to enable address\n");
 +              return ret;
 +      }
 +
 +      return 0;
 +}
 +
/*
 * pci_epc_ops.get_msi: read the Multiple Message Enable field of the MSI
 * capability.  Returns the MME value (log2 of enabled vectors), or
 * -EINVAL when the MSI enable bit is clear.
 */
static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
{
	int val;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
	if (!(val & MSI_CAP_MSI_EN_MASK))
		return -EINVAL;

	val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
	return val;
}
 +
/*
 * pci_epc_ops.set_msi: write @encode_int into the Multiple Message
 * Capable field of the MSI capability (read-modify-write, with DBI RO
 * write access enabled).  Always returns 0.
 */
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int)
{
	int val;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
	val &= ~MSI_CAP_MMC_MASK;
	val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
 +
 +static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
 +                              enum pci_epc_irq_type type, u8 interrupt_num)
 +{
 +      struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 +
 +      if (!ep->ops->raise_irq)
 +              return -EINVAL;
 +
 +      return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
 +}
 +
 +static void dw_pcie_ep_stop(struct pci_epc *epc)
 +{
 +      struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 +
 +      if (!pci->ops->stop_link)
 +              return;
 +
 +      pci->ops->stop_link(pci);
 +}
 +
 +static int dw_pcie_ep_start(struct pci_epc *epc)
 +{
 +      struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 +
 +      if (!pci->ops->start_link)
 +              return -EINVAL;
 +
 +      return pci->ops->start_link(pci);
 +}
 +
/* EPC operations exported to the PCI endpoint framework. */
static const struct pci_epc_ops epc_ops = {
	.write_header		= dw_pcie_ep_write_header,
	.set_bar		= dw_pcie_ep_set_bar,
	.clear_bar		= dw_pcie_ep_clear_bar,
	.map_addr		= dw_pcie_ep_map_addr,
	.unmap_addr		= dw_pcie_ep_unmap_addr,
	.set_msi		= dw_pcie_ep_set_msi,
	.get_msi		= dw_pcie_ep_get_msi,
	.raise_irq		= dw_pcie_ep_raise_irq,
	.start			= dw_pcie_ep_start,
	.stop			= dw_pcie_ep_stop,
};
 +
/*
 * Raise an MSI from the endpoint by reading the host-programmed MSI
 * address/data from the capability, temporarily mapping that PCI address
 * through the reserved msi_mem window, and writing the message data.
 * Returns 0 or a negative errno from the mapping step.
 */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
			     u8 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epc *epc = ep->epc;
	u16 msg_ctrl, msg_data;
	u32 msg_addr_lower, msg_addr_upper;
	u64 msg_addr;
	bool has_upper;
	int ret;

	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
	msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
	/* 64-bit capable MSI puts the data word after the upper address. */
	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
	msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
	if (has_upper) {
		msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
		msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64);
	} else {
		msg_addr_upper = 0;
		msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32);
	}
	msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
				  epc->mem->page_size);
	if (ret)
		return ret;

	/* Vector is folded into the low bits of the message data. */
	writel(msg_data | (interrupt_num - 1), ep->msi_mem);

	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);

	return 0;
}
 +
/*
 * Tear down the endpoint: free the page reserved for MSI generation and
 * release the EPC address-space bookkeeping set up in dw_pcie_ep_init().
 */
void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
			      epc->mem->page_size);

	pci_epc_mem_exit(epc);
}
 +
 +int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 +{
 +      int ret;
 +      void *addr;
 +      struct pci_epc *epc;
 +      struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 +      struct device *dev = pci->dev;
 +      struct device_node *np = dev->of_node;
 +
 +      if (!pci->dbi_base || !pci->dbi_base2) {
 +              dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
 +              return -EINVAL;
 +      }
 +
 +      ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
 +      if (ret < 0) {
 +              dev_err(dev, "Unable to read *num-ib-windows* property\n");
 +              return ret;
 +      }
 +      if (ep->num_ib_windows > MAX_IATU_IN) {
 +              dev_err(dev, "Invalid *num-ib-windows*\n");
 +              return -EINVAL;
 +      }
 +
 +      ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
 +      if (ret < 0) {
 +              dev_err(dev, "Unable to read *num-ob-windows* property\n");
 +              return ret;
 +      }
 +      if (ep->num_ob_windows > MAX_IATU_OUT) {
 +              dev_err(dev, "Invalid *num-ob-windows*\n");
 +              return -EINVAL;
 +      }
 +
-       ep->ob_window_map = devm_kzalloc(dev, sizeof(long) *
++      ep->ib_window_map = devm_kcalloc(dev,
 +                                       BITS_TO_LONGS(ep->num_ib_windows),
++                                       sizeof(long),
 +                                       GFP_KERNEL);
 +      if (!ep->ib_window_map)
 +              return -ENOMEM;
 +
-       addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows,
++      ep->ob_window_map = devm_kcalloc(dev,
 +                                       BITS_TO_LONGS(ep->num_ob_windows),
++                                       sizeof(long),
 +                                       GFP_KERNEL);
 +      if (!ep->ob_window_map)
 +              return -ENOMEM;
 +
++      addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
 +                          GFP_KERNEL);
 +      if (!addr)
 +              return -ENOMEM;
 +      ep->outbound_addr = addr;
 +
 +      if (ep->ops->ep_init)
 +              ep->ops->ep_init(ep);
 +
 +      epc = devm_pci_epc_create(dev, &epc_ops);
 +      if (IS_ERR(epc)) {
 +              dev_err(dev, "Failed to create epc device\n");
 +              return PTR_ERR(epc);
 +      }
 +
 +      ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
 +      if (ret < 0)
 +              epc->max_functions = 1;
 +
 +      ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
 +                               ep->page_size);
 +      if (ret < 0) {
 +              dev_err(dev, "Failed to initialize address space\n");
 +              return ret;
 +      }
 +
 +      ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
 +                                           epc->mem->page_size);
 +      if (!ep->msi_mem) {
 +              dev_err(dev, "Failed to reserve memory for MSI\n");
 +              return -ENOMEM;
 +      }
 +
 +      epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
 +      EPC_FEATURE_SET_BAR(epc->features, BAR_0);
 +
 +      ep->epc = epc;
 +      epc_set_drvdata(epc, ep);
 +      dw_pcie_setup(pci);
 +
 +      return 0;
 +}
index 3d8283e450a910e6e3fa0d3612c98b7f1529abd7,0000000000000000000000000000000000000000..e3fe4124e3afd427a674cddd3f2f3e0b3cf96c3f
mode 100644,000000..100644
--- /dev/null
@@@ -1,549 -1,0 +1,550 @@@
-       ep->ob_addr = devm_kzalloc(dev, ep->max_regions * sizeof(*ep->ob_addr),
 +// SPDX-License-Identifier: GPL-2.0
 +// Copyright (c) 2017 Cadence
 +// Cadence PCIe endpoint controller driver.
 +// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
 +
 +#include <linux/delay.h>
 +#include <linux/kernel.h>
 +#include <linux/of.h>
 +#include <linux/pci-epc.h>
 +#include <linux/platform_device.h>
 +#include <linux/pm_runtime.h>
 +#include <linux/sizes.h>
 +
 +#include "pcie-cadence.h"
 +
 +#define CDNS_PCIE_EP_MIN_APERTURE             128     /* 128 bytes */
 +#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE                0x1
 +#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY      0x3
 +
 +/**
 + * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 + * @pcie: Cadence PCIe controller
 + * @max_regions: maximum number of regions supported by hardware
 + * @ob_region_map: bitmask of mapped outbound regions
 + * @ob_addr: base addresses in the AXI bus where the outbound regions start
 + * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
 + *               dedicated outbound regions is mapped.
 + * @irq_cpu_addr: base address in the CPU space where a write access triggers
 + *              the sending of a memory write (MSI) / normal message (legacy
 + *              IRQ) TLP through the PCIe bus.
 + * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
 + *              dedicated outbound region.
 + * @irq_pci_fn: the latest PCI function that has updated the mapping of
 + *            the MSI/legacy IRQ dedicated outbound region.
 + * @irq_pending: bitmask of asserted legacy IRQs.
 + */
 +struct cdns_pcie_ep {
 +      struct cdns_pcie                pcie;
 +      u32                             max_regions;
 +      unsigned long                   ob_region_map;
 +      phys_addr_t                     *ob_addr;
 +      phys_addr_t                     irq_phys_addr;
 +      void __iomem                    *irq_cpu_addr;
 +      u64                             irq_pci_addr;
 +      u8                              irq_pci_fn;
 +      u8                              irq_pending;
 +};
 +
 +static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
 +                                   struct pci_epf_header *hdr)
 +{
 +      struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct cdns_pcie *pcie = &ep->pcie;
 +
 +      cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
 +      cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
 +      cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
 +      cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
 +                             hdr->subclass_code | hdr->baseclass_code << 8);
 +      cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
 +                             hdr->cache_line_size);
 +      cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
 +      cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);
 +
 +      /*
 +       * Vendor ID can only be modified from function 0, all other functions
 +       * use the same vendor ID as function 0.
 +       */
 +      if (fn == 0) {
 +              /* Update the vendor IDs. */
 +              u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
 +                       CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);
 +
 +              cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
 +      }
 +
 +      return 0;
 +}
 +
 +static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
 +                              struct pci_epf_bar *epf_bar)
 +{
 +      struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct cdns_pcie *pcie = &ep->pcie;
 +      dma_addr_t bar_phys = epf_bar->phys_addr;
 +      enum pci_barno bar = epf_bar->barno;
 +      int flags = epf_bar->flags;
 +      u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
 +      u64 sz;
 +
 +      /* BAR size is 2^(aperture + 7) */
 +      sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
 +      /*
 +       * roundup_pow_of_two() returns an unsigned long, which is not suited
 +       * for 64bit values.
 +       */
 +      sz = 1ULL << fls64(sz - 1);
 +      aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
 +
 +      if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
 +              ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
 +      } else {
 +              bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
 +              bool is_64bits = sz > SZ_2G;
 +
 +              if (is_64bits && (bar & 1))
 +                      return -EINVAL;
 +
 +              if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
 +                      epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
 +
 +              if (is_64bits && is_prefetch)
 +                      ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
 +              else if (is_prefetch)
 +                      ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
 +              else if (is_64bits)
 +                      ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
 +              else
 +                      ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
 +      }
 +
 +      addr0 = lower_32_bits(bar_phys);
 +      addr1 = upper_32_bits(bar_phys);
 +      cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
 +                       addr0);
 +      cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
 +                       addr1);
 +
 +      if (bar < BAR_4) {
 +              reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
 +              b = bar;
 +      } else {
 +              reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
 +              b = bar - BAR_4;
 +      }
 +
 +      cfg = cdns_pcie_readl(pcie, reg);
 +      cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
 +               CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
 +      cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
 +              CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
 +      cdns_pcie_writel(pcie, reg, cfg);
 +
 +      return 0;
 +}
 +
 +static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
 +                                 struct pci_epf_bar *epf_bar)
 +{
 +      struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct cdns_pcie *pcie = &ep->pcie;
 +      enum pci_barno bar = epf_bar->barno;
 +      u32 reg, cfg, b, ctrl;
 +
 +      if (bar < BAR_4) {
 +              reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
 +              b = bar;
 +      } else {
 +              reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
 +              b = bar - BAR_4;
 +      }
 +
 +      ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
 +      cfg = cdns_pcie_readl(pcie, reg);
 +      cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
 +               CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
 +      cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
 +      cdns_pcie_writel(pcie, reg, cfg);
 +
 +      cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
 +      cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
 +}
 +
 +static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
 +                               u64 pci_addr, size_t size)
 +{
 +      struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct cdns_pcie *pcie = &ep->pcie;
 +      u32 r;
 +
 +      r = find_first_zero_bit(&ep->ob_region_map,
 +                              sizeof(ep->ob_region_map) * BITS_PER_LONG);
 +      if (r >= ep->max_regions - 1) {
 +              dev_err(&epc->dev, "no free outbound region\n");
 +              return -EINVAL;
 +      }
 +
 +      cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size);
 +
 +      set_bit(r, &ep->ob_region_map);
 +      ep->ob_addr[r] = addr;
 +
 +      return 0;
 +}
 +
 +static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
 +                                  phys_addr_t addr)
 +{
 +      struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct cdns_pcie *pcie = &ep->pcie;
 +      u32 r;
 +
 +      for (r = 0; r < ep->max_regions - 1; r++)
 +              if (ep->ob_addr[r] == addr)
 +                      break;
 +
 +      if (r == ep->max_regions - 1)
 +              return;
 +
 +      cdns_pcie_reset_outbound_region(pcie, r);
 +
 +      ep->ob_addr[r] = 0;
 +      clear_bit(r, &ep->ob_region_map);
 +}
 +
 +static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
 +{
 +      struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct cdns_pcie *pcie = &ep->pcie;
 +      u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
 +      u16 flags;
 +
 +      /*
 +       * Set the Multiple Message Capable bitfield into the Message Control
 +       * register.
 +       */
 +      flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
 +      flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
 +      flags |= PCI_MSI_FLAGS_64BIT;
 +      flags &= ~PCI_MSI_FLAGS_MASKBIT;
 +      cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);
 +
 +      return 0;
 +}
 +
 +static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
 +{
 +      struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct cdns_pcie *pcie = &ep->pcie;
 +      u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
 +      u16 flags, mmc, mme;
 +
 +      /* Validate that the MSI feature is actually enabled. */
 +      flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
 +      if (!(flags & PCI_MSI_FLAGS_ENABLE))
 +              return -EINVAL;
 +
 +      /*
 +       * Get the Multiple Message Enable bitfield from the Message Control
 +       * register.
 +       */
 +      mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1;
 +      mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
 +
 +      return mme;
 +}
 +
 +static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
 +                                   u8 intx, bool is_asserted)
 +{
 +      struct cdns_pcie *pcie = &ep->pcie;
 +      u32 r = ep->max_regions - 1;
 +      u32 offset;
 +      u16 status;
 +      u8 msg_code;
 +
 +      intx &= 3;
 +
 +      /* Set the outbound region if needed. */
 +      if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
 +                   ep->irq_pci_fn != fn)) {
 +              /* Last region was reserved for IRQ writes. */
 +              cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r,
 +                                                           ep->irq_phys_addr);
 +              ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
 +              ep->irq_pci_fn = fn;
 +      }
 +
 +      if (is_asserted) {
 +              ep->irq_pending |= BIT(intx);
 +              msg_code = MSG_CODE_ASSERT_INTA + intx;
 +      } else {
 +              ep->irq_pending &= ~BIT(intx);
 +              msg_code = MSG_CODE_DEASSERT_INTA + intx;
 +      }
 +
 +      status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
 +      if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
 +              status ^= PCI_STATUS_INTERRUPT;
 +              cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
 +      }
 +
 +      offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
 +               CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
 +               CDNS_PCIE_MSG_NO_DATA;
 +      writel(0, ep->irq_cpu_addr + offset);
 +}
 +
 +static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
 +{
 +      u16 cmd;
 +
 +      cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
 +      if (cmd & PCI_COMMAND_INTX_DISABLE)
 +              return -EINVAL;
 +
 +      cdns_pcie_ep_assert_intx(ep, fn, intx, true);
 +      /*
 +       * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
 +       * from drivers/pci/dwc/pci-dra7xx.c
 +       */
 +      mdelay(1);
 +      cdns_pcie_ep_assert_intx(ep, fn, intx, false);
 +      return 0;
 +}
 +
 +static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
 +                                   u8 interrupt_num)
 +{
 +      struct cdns_pcie *pcie = &ep->pcie;
 +      u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
 +      u16 flags, mme, data, data_mask;
 +      u8 msi_count;
 +      u64 pci_addr, pci_addr_mask = 0xff;
 +
 +      /* Check whether the MSI feature has been enabled by the PCI host. */
 +      flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
 +      if (!(flags & PCI_MSI_FLAGS_ENABLE))
 +              return -EINVAL;
 +
 +      /* Get the number of enabled MSIs */
 +      mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
 +      msi_count = 1 << mme;
 +      if (!interrupt_num || interrupt_num > msi_count)
 +              return -EINVAL;
 +
 +      /* Compute the data value to be written. */
 +      data_mask = msi_count - 1;
 +      data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
 +      data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
 +
 +      /* Get the PCI address where to write the data into. */
 +      pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
 +      pci_addr <<= 32;
 +      pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
 +      pci_addr &= GENMASK_ULL(63, 2);
 +
 +      /* Set the outbound region if needed. */
 +      if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
 +                   ep->irq_pci_fn != fn)) {
 +              /* Last region was reserved for IRQ writes. */
 +              cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1,
 +                                            false,
 +                                            ep->irq_phys_addr,
 +                                            pci_addr & ~pci_addr_mask,
 +                                            pci_addr_mask + 1);
 +              ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
 +              ep->irq_pci_fn = fn;
 +      }
 +      writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
 +
 +      return 0;
 +}
 +
 +static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
 +                                enum pci_epc_irq_type type, u8 interrupt_num)
 +{
 +      struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 +
 +      switch (type) {
 +      case PCI_EPC_IRQ_LEGACY:
 +              return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
 +
 +      case PCI_EPC_IRQ_MSI:
 +              return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
 +
 +      default:
 +              break;
 +      }
 +
 +      return -EINVAL;
 +}
 +
 +static int cdns_pcie_ep_start(struct pci_epc *epc)
 +{
 +      struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct cdns_pcie *pcie = &ep->pcie;
 +      struct pci_epf *epf;
 +      u32 cfg;
 +
 +      /*
 +       * BIT(0) is hardwired to 1, hence function 0 is always enabled
 +       * and can't be disabled anyway.
 +       */
 +      cfg = BIT(0);
 +      list_for_each_entry(epf, &epc->pci_epf, list)
 +              cfg |= BIT(epf->func_no);
 +      cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
 +
 +      /*
 +       * The PCIe links are automatically established by the controller
 +       * once for all at powerup: the software can neither start nor stop
 +       * those links later at runtime.
 +       *
 +       * Then we only have to notify the EP core that our links are already
 +       * established. However we don't call directly pci_epc_linkup() because
 +       * we've already locked the epc->lock.
 +       */
 +      list_for_each_entry(epf, &epc->pci_epf, list)
 +              pci_epf_linkup(epf);
 +
 +      return 0;
 +}
 +
 +static const struct pci_epc_ops cdns_pcie_epc_ops = {
 +      .write_header   = cdns_pcie_ep_write_header,
 +      .set_bar        = cdns_pcie_ep_set_bar,
 +      .clear_bar      = cdns_pcie_ep_clear_bar,
 +      .map_addr       = cdns_pcie_ep_map_addr,
 +      .unmap_addr     = cdns_pcie_ep_unmap_addr,
 +      .set_msi        = cdns_pcie_ep_set_msi,
 +      .get_msi        = cdns_pcie_ep_get_msi,
 +      .raise_irq      = cdns_pcie_ep_raise_irq,
 +      .start          = cdns_pcie_ep_start,
 +};
 +
 +static const struct of_device_id cdns_pcie_ep_of_match[] = {
 +      { .compatible = "cdns,cdns-pcie-ep" },
 +
 +      { },
 +};
 +
 +static int cdns_pcie_ep_probe(struct platform_device *pdev)
 +{
 +      struct device *dev = &pdev->dev;
 +      struct device_node *np = dev->of_node;
 +      struct cdns_pcie_ep *ep;
 +      struct cdns_pcie *pcie;
 +      struct pci_epc *epc;
 +      struct resource *res;
 +      int ret;
 +
 +      ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
 +      if (!ep)
 +              return -ENOMEM;
 +
 +      pcie = &ep->pcie;
 +      pcie->is_rc = false;
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
 +      pcie->reg_base = devm_ioremap_resource(dev, res);
 +      if (IS_ERR(pcie->reg_base)) {
 +              dev_err(dev, "missing \"reg\"\n");
 +              return PTR_ERR(pcie->reg_base);
 +      }
 +
 +      res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
 +      if (!res) {
 +              dev_err(dev, "missing \"mem\"\n");
 +              return -EINVAL;
 +      }
 +      pcie->mem_res = res;
 +
 +      ret = of_property_read_u32(np, "cdns,max-outbound-regions",
 +                                 &ep->max_regions);
 +      if (ret < 0) {
 +              dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
 +              return ret;
 +      }
++      ep->ob_addr = devm_kcalloc(dev,
++                                 ep->max_regions, sizeof(*ep->ob_addr),
 +                                 GFP_KERNEL);
 +      if (!ep->ob_addr)
 +              return -ENOMEM;
 +
 +      pm_runtime_enable(dev);
 +      ret = pm_runtime_get_sync(dev);
 +      if (ret < 0) {
 +              dev_err(dev, "pm_runtime_get_sync() failed\n");
 +              goto err_get_sync;
 +      }
 +
 +      /* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
 +      cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
 +
 +      epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
 +      if (IS_ERR(epc)) {
 +              dev_err(dev, "failed to create epc device\n");
 +              ret = PTR_ERR(epc);
 +              goto err_init;
 +      }
 +
 +      epc_set_drvdata(epc, ep);
 +
 +      if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
 +              epc->max_functions = 1;
 +
 +      ret = pci_epc_mem_init(epc, pcie->mem_res->start,
 +                             resource_size(pcie->mem_res));
 +      if (ret < 0) {
 +              dev_err(dev, "failed to initialize the memory space\n");
 +              goto err_init;
 +      }
 +
 +      ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
 +                                                SZ_128K);
 +      if (!ep->irq_cpu_addr) {
 +              dev_err(dev, "failed to reserve memory space for MSI\n");
 +              ret = -ENOMEM;
 +              goto free_epc_mem;
 +      }
 +      ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
 +
 +      return 0;
 +
 + free_epc_mem:
 +      pci_epc_mem_exit(epc);
 +
 + err_init:
 +      pm_runtime_put_sync(dev);
 +
 + err_get_sync:
 +      pm_runtime_disable(dev);
 +
 +      return ret;
 +}
 +
 +static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
 +{
 +      struct device *dev = &pdev->dev;
 +      int ret;
 +
 +      ret = pm_runtime_put_sync(dev);
 +      if (ret < 0)
 +              dev_dbg(dev, "pm_runtime_put_sync failed\n");
 +
 +      pm_runtime_disable(dev);
 +
 +      /* The PCIe controller can't be disabled. */
 +}
 +
 +static struct platform_driver cdns_pcie_ep_driver = {
 +      .driver = {
 +              .name = "cdns-pcie-ep",
 +              .of_match_table = cdns_pcie_ep_of_match,
 +      },
 +      .probe = cdns_pcie_ep_probe,
 +      .shutdown = cdns_pcie_ep_shutdown,
 +};
 +builtin_platform_driver(cdns_pcie_ep_driver);
index fc267a49a932e4c5e935b549552a4c1400032194,0000000000000000000000000000000000000000..6beba8ed7b84b2f4d450a0f0cee19abb88f9c83b
mode 100644,000000..100644
--- /dev/null
@@@ -1,642 -1,0 +1,642 @@@
-       ep->ob_addr = devm_kzalloc(dev, max_regions * sizeof(*ep->ob_addr),
 +// SPDX-License-Identifier: GPL-2.0+
 +/*
 + * Rockchip AXI PCIe endpoint controller driver
 + *
 + * Copyright (c) 2018 Rockchip, Inc.
 + *
 + * Author: Shawn Lin <shawn.lin@rock-chips.com>
 + *         Simon Xue <xxm@rock-chips.com>
 + */
 +
 +#include <linux/configfs.h>
 +#include <linux/delay.h>
 +#include <linux/kernel.h>
 +#include <linux/of.h>
 +#include <linux/pci-epc.h>
 +#include <linux/platform_device.h>
 +#include <linux/pci-epf.h>
 +#include <linux/sizes.h>
 +
 +#include "pcie-rockchip.h"
 +
 +/**
 + * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
 + * @rockchip: Rockchip PCIe controller
 + * @max_regions: maximum number of regions supported by hardware
 + * @ob_region_map: bitmask of mapped outbound regions
 + * @ob_addr: base addresses in the AXI bus where the outbound regions start
 + * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
 + *               dedicated outbound regions is mapped.
 + * @irq_cpu_addr: base address in the CPU space where a write access triggers
 + *              the sending of a memory write (MSI) / normal message (legacy
 + *              IRQ) TLP through the PCIe bus.
 + * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
 + *              dedicated outbound region.
 + * @irq_pci_fn: the latest PCI function that has updated the mapping of
 + *            the MSI/legacy IRQ dedicated outbound region.
 + * @irq_pending: bitmask of asserted legacy IRQs.
 + */
 +struct rockchip_pcie_ep {
 +      struct rockchip_pcie    rockchip;
 +      struct pci_epc          *epc;
 +      u32                     max_regions;
 +      unsigned long           ob_region_map;
 +      phys_addr_t             *ob_addr;
 +      phys_addr_t             irq_phys_addr;
 +      void __iomem            *irq_cpu_addr;
 +      u64                     irq_pci_addr;
 +      u8                      irq_pci_fn;
 +      u8                      irq_pending;
 +};
 +
 +static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
 +                                        u32 region)
 +{
 +      rockchip_pcie_write(rockchip, 0,
 +                          ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
 +      rockchip_pcie_write(rockchip, 0,
 +                          ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
 +      rockchip_pcie_write(rockchip, 0,
 +                          ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
 +      rockchip_pcie_write(rockchip, 0,
 +                          ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
 +      rockchip_pcie_write(rockchip, 0,
 +                          ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region));
 +      rockchip_pcie_write(rockchip, 0,
 +                          ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region));
 +}
 +
 +static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
 +                                       u32 r, u32 type, u64 cpu_addr,
 +                                       u64 pci_addr, size_t size)
 +{
 +      u64 sz = 1ULL << fls64(size - 1);
 +      int num_pass_bits = ilog2(sz);
 +      u32 addr0, addr1, desc0, desc1;
 +      bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG);
 +
 +      /* The minimal region size is 1MB */
 +      if (num_pass_bits < 8)
 +              num_pass_bits = 8;
 +
 +      cpu_addr -= rockchip->mem_res->start;
 +      addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) &
 +              PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
 +              (lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
 +      addr1 = upper_32_bits(is_nor_msg ? cpu_addr : pci_addr);
 +      desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type;
 +      desc1 = 0;
 +
 +      if (is_nor_msg) {
 +              rockchip_pcie_write(rockchip, 0,
 +                                  ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
 +              rockchip_pcie_write(rockchip, 0,
 +                                  ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
 +              rockchip_pcie_write(rockchip, desc0,
 +                                  ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
 +              rockchip_pcie_write(rockchip, desc1,
 +                                  ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
 +      } else {
 +              /* PCI bus address region */
 +              rockchip_pcie_write(rockchip, addr0,
 +                                  ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
 +              rockchip_pcie_write(rockchip, addr1,
 +                                  ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
 +              rockchip_pcie_write(rockchip, desc0,
 +                                  ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
 +              rockchip_pcie_write(rockchip, desc1,
 +                                  ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
 +
 +              addr0 =
 +                  ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
 +                  (lower_32_bits(cpu_addr) &
 +                   PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
 +              addr1 = upper_32_bits(cpu_addr);
 +      }
 +
 +      /* CPU bus address region */
 +      rockchip_pcie_write(rockchip, addr0,
 +                          ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r));
 +      rockchip_pcie_write(rockchip, addr1,
 +                          ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
 +}
 +
 +static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
 +                                       struct pci_epf_header *hdr)
 +{
 +      struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct rockchip_pcie *rockchip = &ep->rockchip;
 +
 +      /* All functions share the same vendor ID with function 0 */
 +      if (fn == 0) {
 +              u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
 +                             (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
 +
 +              rockchip_pcie_write(rockchip, vid_regs,
 +                                  PCIE_CORE_CONFIG_VENDOR);
 +      }
 +
 +      rockchip_pcie_write(rockchip, hdr->deviceid << 16,
 +                          ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID);
 +
 +      rockchip_pcie_write(rockchip,
 +                          hdr->revid |
 +                          hdr->progif_code << 8 |
 +                          hdr->subclass_code << 16 |
 +                          hdr->baseclass_code << 24,
 +                          ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
 +      rockchip_pcie_write(rockchip, hdr->cache_line_size,
 +                          ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                          PCI_CACHE_LINE_SIZE);
 +      rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
 +                          ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                          PCI_SUBSYSTEM_VENDOR_ID);
 +      rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
 +                          ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                          PCI_INTERRUPT_LINE);
 +
 +      return 0;
 +}
 +
 +static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
 +                                  struct pci_epf_bar *epf_bar)
 +{
 +      struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct rockchip_pcie *rockchip = &ep->rockchip;
 +      dma_addr_t bar_phys = epf_bar->phys_addr;
 +      enum pci_barno bar = epf_bar->barno;
 +      int flags = epf_bar->flags;
 +      u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
 +      u64 sz;
 +
 +      /* BAR size is 2^(aperture + 7) */
 +      sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);
 +
 +      /*
 +       * roundup_pow_of_two() returns an unsigned long, which is not suited
 +       * for 64bit values.
 +       */
 +      sz = 1ULL << fls64(sz - 1);
 +      aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
 +
 +      if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
 +              ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
 +      } else {
 +              bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
 +              bool is_64bits = sz > SZ_2G;
 +
 +              if (is_64bits && (bar & 1))
 +                      return -EINVAL;
 +
 +              if (is_64bits && is_prefetch)
 +                      ctrl =
 +                          ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
 +              else if (is_prefetch)
 +                      ctrl =
 +                          ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
 +              else if (is_64bits)
 +                      ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
 +              else
 +                      ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
 +      }
 +
 +      if (bar < BAR_4) {
 +              reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
 +              b = bar;
 +      } else {
 +              reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
 +              b = bar - BAR_4;
 +      }
 +
 +      addr0 = lower_32_bits(bar_phys);
 +      addr1 = upper_32_bits(bar_phys);
 +
 +      cfg = rockchip_pcie_read(rockchip, reg);
 +      cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
 +               ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
 +      cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
 +              ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
 +
 +      rockchip_pcie_write(rockchip, cfg, reg);
 +      rockchip_pcie_write(rockchip, addr0,
 +                          ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
 +      rockchip_pcie_write(rockchip, addr1,
 +                          ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
 +
 +      return 0;
 +}
 +
 +static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
 +                                     struct pci_epf_bar *epf_bar)
 +{
 +      struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct rockchip_pcie *rockchip = &ep->rockchip;
 +      u32 reg, cfg, b, ctrl;
 +      enum pci_barno bar = epf_bar->barno;
 +
 +      if (bar < BAR_4) {
 +              reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
 +              b = bar;
 +      } else {
 +              reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
 +              b = bar - BAR_4;
 +      }
 +
 +      ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
 +      cfg = rockchip_pcie_read(rockchip, reg);
 +      cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
 +               ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
 +      cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
 +
 +      rockchip_pcie_write(rockchip, cfg, reg);
 +      rockchip_pcie_write(rockchip, 0x0,
 +                          ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
 +      rockchip_pcie_write(rockchip, 0x0,
 +                          ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
 +}
 +
 +static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
 +                                   phys_addr_t addr, u64 pci_addr,
 +                                   size_t size)
 +{
 +      struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct rockchip_pcie *pcie = &ep->rockchip;
 +      u32 r;
 +
 +      r = find_first_zero_bit(&ep->ob_region_map,
 +                              sizeof(ep->ob_region_map) * BITS_PER_LONG);
 +      /*
 +       * Region 0 is reserved for configuration space and shouldn't
 +       * be used elsewhere per TRM, so leave it out.
 +       */
 +      if (r >= ep->max_regions - 1) {
 +              dev_err(&epc->dev, "no free outbound region\n");
 +              return -EINVAL;
 +      }
 +
 +      rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr,
 +                                   pci_addr, size);
 +
 +      set_bit(r, &ep->ob_region_map);
 +      ep->ob_addr[r] = addr;
 +
 +      return 0;
 +}
 +
 +static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
 +                                      phys_addr_t addr)
 +{
 +      struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct rockchip_pcie *rockchip = &ep->rockchip;
 +      u32 r;
 +
 +      for (r = 0; r < ep->max_regions - 1; r++)
 +              if (ep->ob_addr[r] == addr)
 +                      break;
 +
 +      /*
 +       * Region 0 is reserved for configuration space and shouldn't
 +       * be used elsewhere per TRM, so leave it out.
 +       */
 +      if (r == ep->max_regions - 1)
 +              return;
 +
 +      rockchip_pcie_clear_ep_ob_atu(rockchip, r);
 +
 +      ep->ob_addr[r] = 0;
 +      clear_bit(r, &ep->ob_region_map);
 +}
 +
 +static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
 +                                  u8 multi_msg_cap)
 +{
 +      struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct rockchip_pcie *rockchip = &ep->rockchip;
 +      u16 flags;
 +
 +      flags = rockchip_pcie_read(rockchip,
 +                                 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                                 ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
 +      flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
 +      flags |=
 +         ((multi_msg_cap << 1) <<  ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
 +         PCI_MSI_FLAGS_64BIT;
 +      flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
 +      rockchip_pcie_write(rockchip, flags,
 +                          ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                          ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
 +      return 0;
 +}
 +
 +static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
 +{
 +      struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct rockchip_pcie *rockchip = &ep->rockchip;
 +      u16 flags;
 +
 +      flags = rockchip_pcie_read(rockchip,
 +                                 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                                 ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
 +      if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
 +              return -EINVAL;
 +
 +      return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
 +                      ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
 +}
 +
 +static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
 +                                       u8 intx, bool is_asserted)
 +{
 +      struct rockchip_pcie *rockchip = &ep->rockchip;
 +      u32 r = ep->max_regions - 1;
 +      u32 offset;
 +      u16 status;
 +      u8 msg_code;
 +
 +      if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
 +                   ep->irq_pci_fn != fn)) {
 +              rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
 +                                           AXI_WRAPPER_NOR_MSG,
 +                                           ep->irq_phys_addr, 0, 0);
 +              ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR;
 +              ep->irq_pci_fn = fn;
 +      }
 +
 +      intx &= 3;
 +      if (is_asserted) {
 +              ep->irq_pending |= BIT(intx);
 +              msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx;
 +      } else {
 +              ep->irq_pending &= ~BIT(intx);
 +              msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx;
 +      }
 +
 +      status = rockchip_pcie_read(rockchip,
 +                                  ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                                  ROCKCHIP_PCIE_EP_CMD_STATUS);
 +      status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
 +
 +      if ((status != 0) ^ (ep->irq_pending != 0)) {
 +              status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
 +              rockchip_pcie_write(rockchip, status,
 +                                  ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                                  ROCKCHIP_PCIE_EP_CMD_STATUS);
 +      }
 +
 +      offset =
 +         ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) |
 +         ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA;
 +      writel(0, ep->irq_cpu_addr + offset);
 +}
 +
 +static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
 +                                          u8 intx)
 +{
 +      u16 cmd;
 +
 +      cmd = rockchip_pcie_read(&ep->rockchip,
 +                               ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                               ROCKCHIP_PCIE_EP_CMD_STATUS);
 +
 +      if (cmd & PCI_COMMAND_INTX_DISABLE)
 +              return -EINVAL;
 +
 +      /*
 +       * The TRM vaguely states that a delay is required between toggling
 +       * INTx, on the order of a few AHB bus clock cycles. Use a generous
 +       * 1 ms delay here to be safe.
 +       */
 +      rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
 +      mdelay(1);
 +      rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
 +      return 0;
 +}
 +
 +static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
 +                                       u8 interrupt_num)
 +{
 +      struct rockchip_pcie *rockchip = &ep->rockchip;
 +      u16 flags, mme, data, data_mask;
 +      u8 msi_count;
 +      u64 pci_addr, pci_addr_mask = 0xff;
 +
 +      /* Check MSI enable bit */
 +      flags = rockchip_pcie_read(&ep->rockchip,
 +                                 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                                 ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
 +      if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
 +              return -EINVAL;
 +
 +      /* Get MSI numbers from MME */
 +      mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
 +                      ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
 +      msi_count = 1 << mme;
 +      if (!interrupt_num || interrupt_num > msi_count)
 +              return -EINVAL;
 +
 +      /* Set MSI private data */
 +      data_mask = msi_count - 1;
 +      data = rockchip_pcie_read(rockchip,
 +                                ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                                ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
 +                                PCI_MSI_DATA_64);
 +      data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
 +
 +      /* Get MSI PCI address */
 +      pci_addr = rockchip_pcie_read(rockchip,
 +                                    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                                    ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
 +                                    PCI_MSI_ADDRESS_HI);
 +      pci_addr <<= 32;
 +      pci_addr |= rockchip_pcie_read(rockchip,
 +                                     ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
 +                                     ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
 +                                     PCI_MSI_ADDRESS_LO);
 +      pci_addr &= GENMASK_ULL(63, 2);
 +
 +      /* Set the outbound region if needed. */
 +      if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
 +                   ep->irq_pci_fn != fn)) {
 +              rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1,
 +                                           AXI_WRAPPER_MEM_WRITE,
 +                                           ep->irq_phys_addr,
 +                                           pci_addr & ~pci_addr_mask,
 +                                           pci_addr_mask + 1);
 +              ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
 +              ep->irq_pci_fn = fn;
 +      }
 +
 +      writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
 +      return 0;
 +}
 +
 +static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
 +                                    enum pci_epc_irq_type type,
 +                                    u8 interrupt_num)
 +{
 +      struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
 +
 +      switch (type) {
 +      case PCI_EPC_IRQ_LEGACY:
 +              return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
 +      case PCI_EPC_IRQ_MSI:
 +              return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
 +      default:
 +              return -EINVAL;
 +      }
 +}
 +
 +static int rockchip_pcie_ep_start(struct pci_epc *epc)
 +{
 +      struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
 +      struct rockchip_pcie *rockchip = &ep->rockchip;
 +      struct pci_epf *epf;
 +      u32 cfg;
 +
 +      cfg = BIT(0);
 +      list_for_each_entry(epf, &epc->pci_epf, list)
 +              cfg |= BIT(epf->func_no);
 +
 +      rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
 +
 +      list_for_each_entry(epf, &epc->pci_epf, list)
 +              pci_epf_linkup(epf);
 +
 +      return 0;
 +}
 +
 +static const struct pci_epc_ops rockchip_pcie_epc_ops = {
 +      .write_header   = rockchip_pcie_ep_write_header,
 +      .set_bar        = rockchip_pcie_ep_set_bar,
 +      .clear_bar      = rockchip_pcie_ep_clear_bar,
 +      .map_addr       = rockchip_pcie_ep_map_addr,
 +      .unmap_addr     = rockchip_pcie_ep_unmap_addr,
 +      .set_msi        = rockchip_pcie_ep_set_msi,
 +      .get_msi        = rockchip_pcie_ep_get_msi,
 +      .raise_irq      = rockchip_pcie_ep_raise_irq,
 +      .start          = rockchip_pcie_ep_start,
 +};
 +
 +static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
 +                                   struct rockchip_pcie_ep *ep)
 +{
 +      struct device *dev = rockchip->dev;
 +      int err;
 +
 +      err = rockchip_pcie_parse_dt(rockchip);
 +      if (err)
 +              return err;
 +
 +      err = rockchip_pcie_get_phys(rockchip);
 +      if (err)
 +              return err;
 +
 +      err = of_property_read_u32(dev->of_node,
 +                                 "rockchip,max-outbound-regions",
 +                                 &ep->max_regions);
 +      if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
 +              ep->max_regions = MAX_REGION_LIMIT;
 +
 +      err = of_property_read_u8(dev->of_node, "max-functions",
 +                                &ep->epc->max_functions);
 +      if (err < 0)
 +              ep->epc->max_functions = 1;
 +
 +      return 0;
 +}
 +
 +static const struct of_device_id rockchip_pcie_ep_of_match[] = {
 +      { .compatible = "rockchip,rk3399-pcie-ep"},
 +      {},
 +};
 +
 +static int rockchip_pcie_ep_probe(struct platform_device *pdev)
 +{
 +      struct device *dev = &pdev->dev;
 +      struct rockchip_pcie_ep *ep;
 +      struct rockchip_pcie *rockchip;
 +      struct pci_epc *epc;
 +      size_t max_regions;
 +      int err;
 +
 +      ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
 +      if (!ep)
 +              return -ENOMEM;
 +
 +      rockchip = &ep->rockchip;
 +      rockchip->is_rc = false;
 +      rockchip->dev = dev;
 +
 +      epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
 +      if (IS_ERR(epc)) {
 +              dev_err(dev, "failed to create epc device\n");
 +              return PTR_ERR(epc);
 +      }
 +
 +      ep->epc = epc;
 +      epc_set_drvdata(epc, ep);
 +
 +      err = rockchip_pcie_parse_ep_dt(rockchip, ep);
 +      if (err)
 +              return err;
 +
 +      err = rockchip_pcie_enable_clocks(rockchip);
 +      if (err)
 +              return err;
 +
 +      err = rockchip_pcie_init_port(rockchip);
 +      if (err)
 +              goto err_disable_clocks;
 +
 +      /* Establish the link automatically */
 +      rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
 +                          PCIE_CLIENT_CONFIG);
 +
 +      max_regions = ep->max_regions;
++      ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr),
 +                                 GFP_KERNEL);
 +
 +      if (!ep->ob_addr) {
 +              err = -ENOMEM;
 +              goto err_uninit_port;
 +      }
 +
 +      /* Only enable function 0 by default */
 +      rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
 +
 +      err = pci_epc_mem_init(epc, rockchip->mem_res->start,
 +                             resource_size(rockchip->mem_res));
 +      if (err < 0) {
 +              dev_err(dev, "failed to initialize the memory space\n");
 +              goto err_uninit_port;
 +      }
 +
 +      ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
 +                                                SZ_128K);
 +      if (!ep->irq_cpu_addr) {
 +              dev_err(dev, "failed to reserve memory space for MSI\n");
 +              err = -ENOMEM;
 +              goto err_epc_mem_exit;
 +      }
 +
 +      ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
 +
 +      return 0;
 +err_epc_mem_exit:
 +      pci_epc_mem_exit(epc);
 +err_uninit_port:
 +      rockchip_pcie_deinit_phys(rockchip);
 +err_disable_clocks:
 +      rockchip_pcie_disable_clocks(rockchip);
 +      return err;
 +}
 +
 +static struct platform_driver rockchip_pcie_ep_driver = {
 +      .driver = {
 +              .name = "rockchip-pcie-ep",
 +              .of_match_table = rockchip_pcie_ep_of_match,
 +      },
 +      .probe = rockchip_pcie_ep_probe,
 +};
 +
 +builtin_platform_driver(rockchip_pcie_ep_driver);
index 128e3dd3186dc646a160932b679a2d898ab4a4e2,d041d9852b23e4903d91cf604e77e5809c896656..5b24bb4bfbf6608084ccf5607c4649d27c33ea14
@@@ -13,7 -13,6 +13,7 @@@
  #include <linux/clk.h>
  #include <linux/init.h>
  #include <linux/io.h>
 +#include <linux/iopoll.h>
  #include <linux/mfd/syscon.h>
  #include <linux/of_device.h>
  #include <linux/platform_device.h>
  #include <dt-bindings/power/mt7623a-power.h>
  #include <dt-bindings/power/mt8173-power.h>
  
 +#define MTK_POLL_DELAY_US   10
 +#define MTK_POLL_TIMEOUT    (jiffies_to_usecs(HZ))
 +
 +#define MTK_SCPD_ACTIVE_WAKEUP                BIT(0)
 +#define MTK_SCPD_FWAIT_SRAM           BIT(1)
 +#define MTK_SCPD_CAPS(_scpd, _x)      ((_scpd)->data->caps & (_x))
 +
  #define SPM_VDE_PWR_CON                       0x0210
  #define SPM_MFG_PWR_CON                       0x0214
  #define SPM_VEN_PWR_CON                       0x0230
@@@ -124,7 -116,7 +124,7 @@@ struct scp_domain_data 
        u32 sram_pdn_ack_bits;
        u32 bus_prot_mask;
        enum clk_id clk_id[MAX_CLKS];
 -      bool active_wakeup;
 +      u8 caps;
  };
  
  struct scp;
@@@ -192,10 -184,12 +192,10 @@@ static int scpsys_power_on(struct gener
  {
        struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
        struct scp *scp = scpd->scp;
 -      unsigned long timeout;
 -      bool expired;
        void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
 -      u32 sram_pdn_ack = scpd->data->sram_pdn_ack_bits;
 +      u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
        u32 val;
 -      int ret;
 +      int ret, tmp;
        int i;
  
        if (scpd->supply) {
        writel(val, ctl_addr);
  
        /* wait until PWR_ACK = 1 */
 -      timeout = jiffies + HZ;
 -      expired = false;
 -      while (1) {
 -              ret = scpsys_domain_is_on(scpd);
 -              if (ret > 0)
 -                      break;
 -
 -              if (expired) {
 -                      ret = -ETIMEDOUT;
 -                      goto err_pwr_ack;
 -              }
 -
 -              cpu_relax();
 -
 -              if (time_after(jiffies, timeout))
 -                      expired = true;
 -      }
 +      ret = readx_poll_timeout(scpsys_domain_is_on, scpd, tmp, tmp > 0,
 +                               MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
 +      if (ret < 0)
 +              goto err_pwr_ack;
  
        val &= ~PWR_CLK_DIS_BIT;
        writel(val, ctl_addr);
        val &= ~scpd->data->sram_pdn_bits;
        writel(val, ctl_addr);
  
 -      /* wait until SRAM_PDN_ACK all 0 */
 -      timeout = jiffies + HZ;
 -      expired = false;
 -      while (sram_pdn_ack && (readl(ctl_addr) & sram_pdn_ack)) {
 +      /* Either wait until SRAM_PDN_ACK all 0 or have a force wait */
 +      if (MTK_SCPD_CAPS(scpd, MTK_SCPD_FWAIT_SRAM)) {
 +              /*
 +               * Currently, MTK_SCPD_FWAIT_SRAM is necessary only for
 +               * MT7622_POWER_DOMAIN_WB and thus just a trivial setup is
 +               * applied here.
 +               */
 +              usleep_range(12000, 12100);
  
 -              if (expired) {
 -                      ret = -ETIMEDOUT;
 +      } else {
 +              ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == 0,
 +                                       MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
 +              if (ret < 0)
                        goto err_pwr_ack;
 -              }
 -
 -              cpu_relax();
 -
 -              if (time_after(jiffies, timeout))
 -                      expired = true;
        }
  
        if (scpd->data->bus_prot_mask) {
@@@ -282,10 -289,12 +282,10 @@@ static int scpsys_power_off(struct gene
  {
        struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
        struct scp *scp = scpd->scp;
 -      unsigned long timeout;
 -      bool expired;
        void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
        u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
        u32 val;
 -      int ret;
 +      int ret, tmp;
        int i;
  
        if (scpd->data->bus_prot_mask) {
        writel(val, ctl_addr);
  
        /* wait until SRAM_PDN_ACK all 1 */
 -      timeout = jiffies + HZ;
 -      expired = false;
 -      while (pdn_ack && (readl(ctl_addr) & pdn_ack) != pdn_ack) {
 -              if (expired) {
 -                      ret = -ETIMEDOUT;
 -                      goto out;
 -              }
 -
 -              cpu_relax();
 -
 -              if (time_after(jiffies, timeout))
 -                      expired = true;
 -      }
 +      ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == pdn_ack,
 +                               MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
 +      if (ret < 0)
 +              goto out;
  
        val |= PWR_ISO_BIT;
        writel(val, ctl_addr);
        writel(val, ctl_addr);
  
        /* wait until PWR_ACK = 0 */
 -      timeout = jiffies + HZ;
 -      expired = false;
 -      while (1) {
 -              ret = scpsys_domain_is_on(scpd);
 -              if (ret == 0)
 -                      break;
 -
 -              if (expired) {
 -                      ret = -ETIMEDOUT;
 -                      goto out;
 -              }
 -
 -              cpu_relax();
 -
 -              if (time_after(jiffies, timeout))
 -                      expired = true;
 -      }
 +      ret = readx_poll_timeout(scpsys_domain_is_on, scpd, tmp, tmp == 0,
 +                               MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
 +      if (ret < 0)
 +              goto out;
  
        for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++)
                clk_disable_unprepare(scpd->clk[i]);
@@@ -376,15 -407,15 +376,15 @@@ static struct scp *init_scp(struct plat
        if (IS_ERR(scp->base))
                return ERR_CAST(scp->base);
  
-       scp->domains = devm_kzalloc(&pdev->dev,
-                               sizeof(*scp->domains) * num, GFP_KERNEL);
+       scp->domains = devm_kcalloc(&pdev->dev,
+                               num, sizeof(*scp->domains), GFP_KERNEL);
        if (!scp->domains)
                return ERR_PTR(-ENOMEM);
  
        pd_data = &scp->pd_data;
  
-       pd_data->domains = devm_kzalloc(&pdev->dev,
-                       sizeof(*pd_data->domains) * num, GFP_KERNEL);
+       pd_data->domains = devm_kcalloc(&pdev->dev,
+                       num, sizeof(*pd_data->domains), GFP_KERNEL);
        if (!pd_data->domains)
                return ERR_PTR(-ENOMEM);
  
                genpd->name = data->name;
                genpd->power_off = scpsys_power_off;
                genpd->power_on = scpsys_power_on;
 -              if (scpd->data->active_wakeup)
 +              if (MTK_SCPD_CAPS(scpd, MTK_SCPD_ACTIVE_WAKEUP))
                        genpd->flags |= GENPD_FLAG_ACTIVE_WAKEUP;
        }
  
@@@ -491,7 -522,7 +491,7 @@@ static const struct scp_domain_data scp
                .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_CONN_M |
                                 MT2701_TOP_AXI_PROT_EN_CONN_S,
                .clk_id = {CLK_NONE},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2701_POWER_DOMAIN_DISP] = {
                .name = "disp",
                .sram_pdn_bits = GENMASK(11, 8),
                .clk_id = {CLK_MM},
                .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_MM_M0,
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2701_POWER_DOMAIN_MFG] = {
                .name = "mfg",
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = GENMASK(12, 12),
                .clk_id = {CLK_MFG},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2701_POWER_DOMAIN_VDEC] = {
                .name = "vdec",
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = GENMASK(12, 12),
                .clk_id = {CLK_MM},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2701_POWER_DOMAIN_ISP] = {
                .name = "isp",
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = GENMASK(13, 12),
                .clk_id = {CLK_MM},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2701_POWER_DOMAIN_BDP] = {
                .name = "bdp",
                .ctl_offs = SPM_BDP_PWR_CON,
                .sram_pdn_bits = GENMASK(11, 8),
                .clk_id = {CLK_NONE},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2701_POWER_DOMAIN_ETH] = {
                .name = "eth",
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = GENMASK(15, 12),
                .clk_id = {CLK_ETHIF},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2701_POWER_DOMAIN_HIF] = {
                .name = "hif",
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = GENMASK(15, 12),
                .clk_id = {CLK_ETHIF},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2701_POWER_DOMAIN_IFR_MSC] = {
                .name = "ifr_msc",
                .sta_mask = PWR_STATUS_IFR_MSC,
                .ctl_offs = SPM_IFR_MSC_PWR_CON,
                .clk_id = {CLK_NONE},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
  };
  
@@@ -575,7 -606,7 +575,7 @@@ static const struct scp_domain_data scp
                .sram_pdn_bits = GENMASK(8, 8),
                .sram_pdn_ack_bits = GENMASK(12, 12),
                .clk_id = {CLK_MM},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2712_POWER_DOMAIN_VDEC] = {
                .name = "vdec",
                .sram_pdn_bits = GENMASK(8, 8),
                .sram_pdn_ack_bits = GENMASK(12, 12),
                .clk_id = {CLK_MM, CLK_VDEC},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2712_POWER_DOMAIN_VENC] = {
                .name = "venc",
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = GENMASK(15, 12),
                .clk_id = {CLK_MM, CLK_VENC, CLK_JPGDEC},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2712_POWER_DOMAIN_ISP] = {
                .name = "isp",
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = GENMASK(13, 12),
                .clk_id = {CLK_MM},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2712_POWER_DOMAIN_AUDIO] = {
                .name = "audio",
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = GENMASK(15, 12),
                .clk_id = {CLK_AUDIO},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2712_POWER_DOMAIN_USB] = {
                .name = "usb",
                .sram_pdn_bits = GENMASK(10, 8),
                .sram_pdn_ack_bits = GENMASK(14, 12),
                .clk_id = {CLK_NONE},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2712_POWER_DOMAIN_USB2] = {
                .name = "usb2",
                .sram_pdn_bits = GENMASK(10, 8),
                .sram_pdn_ack_bits = GENMASK(14, 12),
                .clk_id = {CLK_NONE},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2712_POWER_DOMAIN_MFG] = {
                .name = "mfg",
                .sram_pdn_ack_bits = GENMASK(16, 16),
                .clk_id = {CLK_MFG},
                .bus_prot_mask = BIT(14) | BIT(21) | BIT(23),
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2712_POWER_DOMAIN_MFG_SC1] = {
                .name = "mfg_sc1",
                .sram_pdn_bits = GENMASK(8, 8),
                .sram_pdn_ack_bits = GENMASK(16, 16),
                .clk_id = {CLK_NONE},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2712_POWER_DOMAIN_MFG_SC2] = {
                .name = "mfg_sc2",
                .sram_pdn_bits = GENMASK(8, 8),
                .sram_pdn_ack_bits = GENMASK(16, 16),
                .clk_id = {CLK_NONE},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT2712_POWER_DOMAIN_MFG_SC3] = {
                .name = "mfg_sc3",
                .sram_pdn_bits = GENMASK(8, 8),
                .sram_pdn_ack_bits = GENMASK(16, 16),
                .clk_id = {CLK_NONE},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
  };
  
@@@ -766,7 -797,7 +766,7 @@@ static const struct scp_domain_data scp
                .sram_pdn_ack_bits = GENMASK(15, 12),
                .clk_id = {CLK_NONE},
                .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_ETHSYS,
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT7622_POWER_DOMAIN_HIF0] = {
                .name = "hif0",
                .sram_pdn_ack_bits = GENMASK(15, 12),
                .clk_id = {CLK_HIFSEL},
                .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_HIF0,
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT7622_POWER_DOMAIN_HIF1] = {
                .name = "hif1",
                .sram_pdn_ack_bits = GENMASK(15, 12),
                .clk_id = {CLK_HIFSEL},
                .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_HIF1,
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT7622_POWER_DOMAIN_WB] = {
                .name = "wb",
                .sram_pdn_ack_bits = 0,
                .clk_id = {CLK_NONE},
                .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_WB,
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP | MTK_SCPD_FWAIT_SRAM,
        },
  };
  
@@@ -812,7 -843,7 +812,7 @@@ static const struct scp_domain_data scp
                .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_CONN_M |
                                 MT2701_TOP_AXI_PROT_EN_CONN_S,
                .clk_id = {CLK_NONE},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT7623A_POWER_DOMAIN_ETH] = {
                .name = "eth",
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = GENMASK(15, 12),
                .clk_id = {CLK_ETHIF},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT7623A_POWER_DOMAIN_HIF] = {
                .name = "hif",
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = GENMASK(15, 12),
                .clk_id = {CLK_ETHIF},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT7623A_POWER_DOMAIN_IFR_MSC] = {
                .name = "ifr_msc",
                .sta_mask = PWR_STATUS_IFR_MSC,
                .ctl_offs = SPM_IFR_MSC_PWR_CON,
                .clk_id = {CLK_NONE},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
  };
  
@@@ -903,7 -934,7 +903,7 @@@ static const struct scp_domain_data scp
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = GENMASK(15, 12),
                .clk_id = {CLK_NONE},
 -              .active_wakeup = true,
 +              .caps = MTK_SCPD_ACTIVE_WAKEUP,
        },
        [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
                .name = "mfg_async",
@@@ -1036,13 -1067,15 +1036,13 @@@ static const struct of_device_id of_scp
  
  static int scpsys_probe(struct platform_device *pdev)
  {
 -      const struct of_device_id *match;
        const struct scp_subdomain *sd;
        const struct scp_soc_data *soc;
        struct scp *scp;
        struct genpd_onecell_data *pd_data;
        int i, ret;
  
 -      match = of_match_device(of_scpsys_match_tbl, &pdev->dev);
 -      soc = (const struct scp_soc_data *)match->data;
 +      soc = of_device_get_match_data(&pdev->dev);
  
        scp = init_scp(pdev, soc->domains, soc->num_domains, &soc->regs,
                        soc->bus_prot_reg_update);
index dfdf6dbc2ddcd67cc07868f5bdd0ec209386f20a,953c83967ceb533984e650660c06faeabbcac1e1..9ec27ac1856bf3b4c68b40427744ecc5af18fc9a
@@@ -147,9 -147,9 +147,9 @@@ static int int340x_thermal_get_trip_hys
  
        status = acpi_evaluate_integer(d->adev->handle, "GTSH", NULL, &hyst);
        if (ACPI_FAILURE(status))
 -              return -EIO;
 -
 -      *temp = hyst * 100;
 +              *temp = 0;
 +      else
 +              *temp = hyst * 100;
  
        return 0;
  }
@@@ -239,9 -239,10 +239,10 @@@ struct int34x_thermal_zone *int340x_the
        if (ACPI_FAILURE(status))
                trip_cnt = 0;
        else {
-               int34x_thermal_zone->aux_trips = kzalloc(
-                               sizeof(*int34x_thermal_zone->aux_trips) *
-                               trip_cnt, GFP_KERNEL);
+               int34x_thermal_zone->aux_trips =
+                       kcalloc(trip_cnt,
+                               sizeof(*int34x_thermal_zone->aux_trips),
+                               GFP_KERNEL);
                if (!int34x_thermal_zone->aux_trips) {
                        ret = -ENOMEM;
                        goto err_trip_alloc;
index eea2fce82bf7f3237c1d74d54ee2d60338e5fa37,5798420ac29cbf241aad267a063b1a582d11ced8..977a8307fbb1a4e0d66cb1c619834472bfe0d1ea
@@@ -1,9 -1,26 +1,9 @@@
 +// SPDX-License-Identifier: GPL-2.0
  /*
   *  of-thermal.c - Generic Thermal Management device tree support.
   *
   *  Copyright (C) 2013 Texas Instruments
   *  Copyright (C) 2013 Eduardo Valentin <eduardo.valentin@ti.com>
 - *
 - *
 - *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 - *
 - *  This program is free software; you can redistribute it and/or modify
 - *  it under the terms of the GNU General Public License as published by
 - *  the Free Software Foundation; version 2 of the License.
 - *
 - *  This program is distributed in the hope that it will be useful, but
 - *  WITHOUT ANY WARRANTY; without even the implied warranty of
 - *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 - *  General Public License for more details.
 - *
 - *  You should have received a copy of the GNU General Public License along
 - *  with this program; if not, write to the Free Software Foundation, Inc.,
 - *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 - *
 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   */
  #include <linux/thermal.h>
  #include <linux/slab.h>
@@@ -853,7 -870,7 +853,7 @@@ __init *thermal_of_build_thermal_zone(s
        if (tz->ntrips == 0) /* must have at least one child */
                goto finish;
  
-       tz->trips = kzalloc(tz->ntrips * sizeof(*tz->trips), GFP_KERNEL);
+       tz->trips = kcalloc(tz->ntrips, sizeof(*tz->trips), GFP_KERNEL);
        if (!tz->trips) {
                ret = -ENOMEM;
                goto free_tz;
        if (tz->num_tbps == 0)
                goto finish;
  
-       tz->tbps = kzalloc(tz->num_tbps * sizeof(*tz->tbps), GFP_KERNEL);
+       tz->tbps = kcalloc(tz->num_tbps, sizeof(*tz->tbps), GFP_KERNEL);
        if (!tz->tbps) {
                ret = -ENOMEM;
                goto free_trips;
index 207a7983864391121f376ecdebf8a269aa7ce45f,e1fc2b06f3432379fcedcfe8f07b17718597a506..ed28110a3535e6780eb62b842cd620616834121f
@@@ -240,6 -240,31 +240,6 @@@ struct tegra_soctherm 
        struct dentry *debugfs_dir;
  };
  
 -/**
 - * clk_writel() - writes a value to a CAR register
 - * @ts: pointer to a struct tegra_soctherm
 - * @v: the value to write
 - * @reg: the register offset
 - *
 - * Writes @v to @reg.  No return value.
 - */
 -static inline void clk_writel(struct tegra_soctherm *ts, u32 value, u32 reg)
 -{
 -      writel(value, (ts->clk_regs + reg));
 -}
 -
 -/**
 - * clk_readl() - reads specified register from CAR IP block
 - * @ts: pointer to a struct tegra_soctherm
 - * @reg: register address to be read
 - *
 - * Return: the value of the register
 - */
 -static inline u32 clk_readl(struct tegra_soctherm *ts, u32 reg)
 -{
 -      return readl(ts->clk_regs + reg);
 -}
 -
  /**
   * ccroc_writel() - writes a value to a CCROC register
   * @ts: pointer to a struct tegra_soctherm
@@@ -901,7 -926,7 +901,7 @@@ static int throt_set_cdev_state(struct 
        return 0;
  }
  
 -static struct thermal_cooling_device_ops throt_cooling_ops = {
 +static const struct thermal_cooling_device_ops throt_cooling_ops = {
        .get_max_state = throt_get_cdev_max_state,
        .get_cur_state = throt_get_cdev_cur_state,
        .set_cur_state = throt_set_cdev_state,
@@@ -1182,9 -1207,9 +1182,9 @@@ static void tegra_soctherm_throttle(str
        } else {
                writel(v, ts->regs + THROT_GLOBAL_CFG);
  
 -              v = clk_readl(ts, CAR_SUPER_CCLKG_DIVIDER);
 +              v = readl(ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER);
                v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
 -              clk_writel(ts, v, CAR_SUPER_CCLKG_DIVIDER);
 +              writel(v, ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER);
        }
  
        /* initialize stats collection */
@@@ -1318,8 -1343,8 +1318,8 @@@ static int tegra_soctherm_probe(struct 
                return PTR_ERR(tegra->clock_soctherm);
        }
  
-       tegra->calib = devm_kzalloc(&pdev->dev,
-                                   sizeof(u32) * soc->num_tsensors,
+       tegra->calib = devm_kcalloc(&pdev->dev,
+                                   soc->num_tsensors, sizeof(u32),
                                    GFP_KERNEL);
        if (!tegra->calib)
                return -ENOMEM;
                        return err;
        }
  
-       tegra->thermctl_tzs = devm_kzalloc(&pdev->dev,
-                                          sizeof(*z) * soc->num_ttgs,
+       tegra->thermctl_tzs = devm_kcalloc(&pdev->dev,
+                                          soc->num_ttgs, sizeof(*z),
                                           GFP_KERNEL);
        if (!tegra->thermctl_tzs)
                return -ENOMEM;
index 3ae038d9c2926db2207756d5ce7bbf0428eb16ab,956f2782602682e40937af60d2cc4cd2deb7e1b3..d4a07acad5989e1374f879f2cc46c284f9aa8c4f
@@@ -461,7 -461,7 +461,7 @@@ ff_layout_alloc_lseg(struct pnfs_layout
                fh_count = be32_to_cpup(p);
  
                fls->mirror_array[i]->fh_versions =
-                       kzalloc(fh_count * sizeof(struct nfs_fh),
+                       kcalloc(fh_count, sizeof(struct nfs_fh),
                                gfp_flags);
                if (fls->mirror_array[i]->fh_versions == NULL) {
                        rc = -ENOMEM;
@@@ -2347,7 -2347,6 +2347,7 @@@ static struct pnfs_layoutdriver_type fl
        .id                     = LAYOUT_FLEX_FILES,
        .name                   = "LAYOUT_FLEX_FILES",
        .owner                  = THIS_MODULE,
 +      .flags                  = PNFS_LAYOUTGET_ON_OPEN,
        .set_layoutdriver       = ff_layout_set_layoutdriver,
        .alloc_layout_hdr       = ff_layout_alloc_layout_hdr,
        .free_layout_hdr        = ff_layout_free_layout_hdr,
diff --combined fs/nfsd/nfs4state.c
index 3b40d1b57613b5d57c861bfb7866ea9bbb244127,39370a503a6313233bf75732519ab9c76bb652ec..857141446d6b378cbdfcb09c8ec5b2df5c319d6d
@@@ -1807,8 -1807,9 +1807,9 @@@ static struct nfs4_client *alloc_client
        clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
        if (clp->cl_name.data == NULL)
                goto err_no_name;
-       clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
-                       OWNER_HASH_SIZE, GFP_KERNEL);
+       clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
+                                                sizeof(struct list_head),
+                                                GFP_KERNEL);
        if (!clp->cl_ownerstr_hashtbl)
                goto err_no_hashtbl;
        for (i = 0; i < OWNER_HASH_SIZE; i++)
@@@ -4378,11 -4379,8 +4379,11 @@@ nfs4_set_delegation(struct nfs4_client 
        spin_unlock(&state_lock);
  
        if (status)
 -              destroy_unhashed_deleg(dp);
 +              goto out_unlock;
 +
        return dp;
 +out_unlock:
 +      vfs_setlease(fp->fi_deleg_file, F_UNLCK, NULL, (void **)&dp);
  out_clnt_odstate:
        put_clnt_odstate(dp->dl_clnt_odstate);
  out_stid:
@@@ -7096,16 -7094,19 +7097,19 @@@ static int nfs4_state_create_net(struc
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        int i;
  
-       nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
-                       CLIENT_HASH_SIZE, GFP_KERNEL);
+       nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
+                                           sizeof(struct list_head),
+                                           GFP_KERNEL);
        if (!nn->conf_id_hashtbl)
                goto err;
-       nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
-                       CLIENT_HASH_SIZE, GFP_KERNEL);
+       nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
+                                             sizeof(struct list_head),
+                                             GFP_KERNEL);
        if (!nn->unconf_id_hashtbl)
                goto err_unconf_id;
-       nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
-                       SESSION_HASH_SIZE, GFP_KERNEL);
+       nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
+                                             sizeof(struct list_head),
+                                             GFP_KERNEL);
        if (!nn->sessionid_hashtbl)
                goto err_sessionid;
  
diff --combined fs/nfsd/nfscache.c
index 637f87c39183e9f1cc0608f515ccda2d6b35274b,223b3b2dff87a989b7443b7570cdb12f035681e7..dbdeb9d6af0392e3017b1ed73feebc993397c269
@@@ -177,7 -177,8 +177,8 @@@ int nfsd_reply_cache_init(void
  
        drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
        if (!drc_hashtbl) {
-               drc_hashtbl = vzalloc(hashsize * sizeof(*drc_hashtbl));
+               drc_hashtbl = vzalloc(array_size(hashsize,
+                                                sizeof(*drc_hashtbl)));
                if (!drc_hashtbl)
                        goto out_nomem;
        }
@@@ -394,6 -395,7 +395,6 @@@ nfsd_cache_lookup(struct svc_rqst *rqst
        __wsum                  csum;
        u32 hash = nfsd_cache_hash(xid);
        struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
 -      unsigned long           age;
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;
  
  found_entry:
        nfsdstats.rchits++;
        /* We found a matching entry which is either in progress or done. */
 -      age = jiffies - rp->c_timestamp;
        lru_put_end(b, rp);
  
        rtn = RC_DROPIT;
 -      /* Request being processed or excessive rexmits */
 -      if (rp->c_state == RC_INPROG || age < RC_DELAY)
 +      /* Request being processed */
 +      if (rp->c_state == RC_INPROG)
                goto out;
  
        /* From the hall of fame of impractical attacks:
index d98e2b610ce829f532286a2c2dc7a25852893863,d58bd058b09ba7898b0d14d09a4124a99382eb86..1c7c49dbf8ba6a837658aef0a72dc0b0e58df128
@@@ -224,7 -224,7 +224,7 @@@ static void gssp_free_receive_pages(str
  static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
  {
        arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE);
-       arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL);
+       arg->pages = kcalloc(arg->npages, sizeof(struct page *), GFP_KERNEL);
        /*
         * XXX: actual pages are allocated by xdr layer in
         * xdr_partial_copy_from_skb.
@@@ -298,11 -298,9 +298,11 @@@ int gssp_accept_sec_context_upcall(stru
        if (res.context_handle) {
                data->out_handle = rctxh.exported_context_token;
                data->mech_oid.len = rctxh.mech.len;
 -              if (rctxh.mech.data)
 +              if (rctxh.mech.data) {
                        memcpy(data->mech_oid.data, rctxh.mech.data,
                                                data->mech_oid.len);
 +                      kfree(rctxh.mech.data);
 +              }
                client_name = rctxh.src_name.display_name;
        }
  
diff --combined virt/kvm/kvm_main.c
index aa7da1d8ece2784a94dd15dcec6588b0e8a57466,828ec2ca9b31464938700da9c7927fbd686a57a1..ada21f47f22b5a902e81572ba94efb16a2a7bccb
@@@ -203,47 -203,29 +203,47 @@@ static inline bool kvm_kick_many_cpus(c
        return true;
  }
  
 -bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 +bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 +                               unsigned long *vcpu_bitmap, cpumask_var_t tmp)
  {
        int i, cpu, me;
 -      cpumask_var_t cpus;
 -      bool called;
        struct kvm_vcpu *vcpu;
 -
 -      zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 +      bool called;
  
        me = get_cpu();
 +
        kvm_for_each_vcpu(i, vcpu, kvm) {
 +              if (!test_bit(i, vcpu_bitmap))
 +                      continue;
 +
                kvm_make_request(req, vcpu);
                cpu = vcpu->cpu;
  
                if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
                        continue;
  
 -              if (cpus != NULL && cpu != -1 && cpu != me &&
 +              if (tmp != NULL && cpu != -1 && cpu != me &&
                    kvm_request_needs_ipi(vcpu, req))
 -                      __cpumask_set_cpu(cpu, cpus);
 +                      __cpumask_set_cpu(cpu, tmp);
        }
 -      called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
 +
 +      called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
        put_cpu();
 +
 +      return called;
 +}
 +
 +bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 +{
 +      cpumask_var_t cpus;
 +      bool called;
 +      static unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]
 +              = {[0 ... BITS_TO_LONGS(KVM_MAX_VCPUS)-1] = ULONG_MAX};
 +
 +      zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 +
 +      called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap, cpus);
 +
        free_cpumask_var(cpus);
        return called;
  }
@@@ -590,7 -572,10 +590,7 @@@ static int kvm_create_vm_debugfs(struc
                return 0;
  
        snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
 -      kvm->debugfs_dentry = debugfs_create_dir(dir_name,
 -                                               kvm_debugfs_dir);
 -      if (!kvm->debugfs_dentry)
 -              return -ENOMEM;
 +      kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);
  
        kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
                                         sizeof(*kvm->debugfs_stat_data),
                stat_data->kvm = kvm;
                stat_data->offset = p->offset;
                kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
 -              if (!debugfs_create_file(p->name, 0644,
 -                                       kvm->debugfs_dentry,
 -                                       stat_data,
 -                                       stat_fops_per_vm[p->kind]))
 -                      return -ENOMEM;
 +              debugfs_create_file(p->name, 0644, kvm->debugfs_dentry,
 +                                  stat_data, stat_fops_per_vm[p->kind]);
        }
        return 0;
  }
@@@ -2352,7 -2340,7 +2352,7 @@@ void kvm_vcpu_on_spin(struct kvm_vcpu *
  }
  EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
  
 -static int kvm_vcpu_fault(struct vm_fault *vmf)
 +static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
  {
        struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
        struct page *page;
@@@ -2562,13 -2550,8 +2562,13 @@@ static long kvm_vcpu_ioctl(struct file 
                oldpid = rcu_access_pointer(vcpu->pid);
                if (unlikely(oldpid != current->pids[PIDTYPE_PID].pid)) {
                        /* The thread running this VCPU changed. */
 -                      struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
 +                      struct pid *newpid;
 +
 +                      r = kvm_arch_vcpu_run_pid_change(vcpu);
 +                      if (r)
 +                              break;
  
 +                      newpid = get_task_pid(current, PIDTYPE_PID);
                        rcu_assign_pointer(vcpu->pid, newpid);
                        if (oldpid)
                                synchronize_rcu();
@@@ -3076,7 -3059,8 +3076,8 @@@ static long kvm_vm_ioctl(struct file *f
                        goto out;
                if (routing.nr) {
                        r = -ENOMEM;
-                       entries = vmalloc(routing.nr * sizeof(*entries));
+                       entries = vmalloc(array_size(sizeof(*entries),
+                                                    routing.nr));
                        if (!entries)
                                goto out;
                        r = -EFAULT;
@@@ -3913,18 -3897,29 +3914,18 @@@ static void kvm_uevent_notify_change(un
        kfree(env);
  }
  
 -static int kvm_init_debug(void)
 +static void kvm_init_debug(void)
  {
 -      int r = -EEXIST;
        struct kvm_stats_debugfs_item *p;
  
        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
 -      if (kvm_debugfs_dir == NULL)
 -              goto out;
  
        kvm_debugfs_num_entries = 0;
        for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
 -              if (!debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
 -                                       (void *)(long)p->offset,
 -                                       stat_fops[p->kind]))
 -                      goto out_dir;
 +              debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
 +                                  (void *)(long)p->offset,
 +                                  stat_fops[p->kind]);
        }
 -
 -      return 0;
 -
 -out_dir:
 -      debugfs_remove_recursive(kvm_debugfs_dir);
 -out:
 -      return r;
  }
  
  static int kvm_suspend(void)
@@@ -4052,13 -4047,20 +4053,13 @@@ int kvm_init(void *opaque, unsigned vcp
        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;
  
 -      r = kvm_init_debug();
 -      if (r) {
 -              pr_err("kvm: create debugfs files failed\n");
 -              goto out_undebugfs;
 -      }
 +      kvm_init_debug();
  
        r = kvm_vfio_ops_init();
        WARN_ON(r);
  
        return 0;
  
 -out_undebugfs:
 -      unregister_syscore_ops(&kvm_syscore_ops);
 -      misc_deregister(&kvm_dev);
  out_unreg:
        kvm_async_pf_deinit();
  out_free: