Merge tag 'stable/for-linus-3.17-b-rc4-arm-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 13 Sep 2014 00:45:27 +0000 (17:45 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 13 Sep 2014 00:45:27 +0000 (17:45 -0700)
Pull Xen ARM bugfix from Stefano Stabellini:
 "The patches fix the "xen_add_mach_to_phys_entry: cannot add" bug that
  has been affecting xen on arm and arm64 guests since 3.16.  They
  require a few hypervisor side changes that just went in xen-unstable.

  A couple of days ago David sent out a pull request with a few other
  Xen fixes (it is already in master).  Sorry we didn't synchronize
  better among ourselves"

* tag 'stable/for-linus-3.17-b-rc4-arm-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/arm: remove mach_to_phys rbtree
  xen/arm: reimplement xen_dma_unmap_page & friends
  xen/arm: introduce XENFEAT_grant_map_identity

arch/arm/include/asm/xen/page-coherent.h
arch/arm/include/asm/xen/page.h
arch/arm/xen/Makefile
arch/arm/xen/enlighten.c
arch/arm/xen/mm32.c [new file with mode: 0644]
arch/arm/xen/p2m.c
include/xen/interface/features.h

diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 1109017499e52f05e92ca7056831323f4ed330e9..e8275ea88e8806d79b784deef60b57c6cf76a8ad 100644
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
        __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
-               struct dma_attrs *attrs)
-{
-       if (__generic_dma_ops(hwdev)->unmap_page)
-               __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
+               struct dma_attrs *attrs);
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
-               __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       if (__generic_dma_ops(hwdev)->sync_single_for_device)
-               __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
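
The hunk above demotes the DMA helpers from header inlines to plain declarations; their definitions move out of line (into the new arch/arm/xen/mm32.c below) so they can perform cache maintenance even for pages the kernel cannot address through a struct page. The guard the inlines applied, calling a backend hook only when the backend implements it, survives unchanged in the new definitions. A minimal standalone sketch of that dispatch pattern, with hypothetical names standing in for the kernel's dma_map_ops machinery:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-in for the kernel's dma_map_ops table. */
    struct dma_ops {
            void (*unmap_page)(void *handle, size_t size);
    };

    static void arm_unmap_page(void *handle, size_t size)
    {
            printf("unmap %p (%zu bytes)\n", handle, size);
    }

    /* Invoke the hook only if the backend provides one. */
    static void guarded_unmap(const struct dma_ops *ops, void *handle, size_t size)
    {
            if (ops->unmap_page)
                    ops->unmap_page(handle, size);
    }

    int main(void)
    {
            struct dma_ops coherent    = { .unmap_page = NULL };           /* no-op */
            struct dma_ops noncoherent = { .unmap_page = arm_unmap_page };

            guarded_unmap(&coherent, (void *)0x1000, 4096);
            guarded_unmap(&noncoherent, (void *)0x1000, 4096);
            return 0;
    }
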
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index ded062f9b358038c05fa074816706c40619e6408..135c24a5ba262b76529fa1c3dbe088d098de84a5 100644
@@ -33,7 +33,6 @@ typedef struct xpaddr {
 #define INVALID_P2M_ENTRY      (~0UL)
 
 unsigned long __pfn_to_mfn(unsigned long pfn);
-unsigned long __mfn_to_pfn(unsigned long mfn);
 extern struct rb_root phys_to_mach;
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
-       unsigned long pfn;
-
-       if (phys_to_mach.rb_node != NULL) {
-               pfn = __mfn_to_pfn(mfn);
-               if (pfn != INVALID_P2M_ENTRY)
-                       return pfn;
-       }
-
        return mfn;
 }
 
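
With the mach_to_phys tree gone, mfn_to_pfn() degenerates to the identity function. That is only safe because Xen now maps grant references at pfn == mfn (the XENFEAT_grant_map_identity feature introduced at the end of this series), so the reverse translation has nothing left to look up. A trivially runnable restatement of the new contract:

    #include <assert.h>

    /* After this series the reverse translation is the identity map. */
    static unsigned long mfn_to_pfn(unsigned long mfn)
    {
            return mfn;
    }

    int main(void)
    {
            /* With XENFEAT_grant_map_identity, a granted page's machine
             * frame number doubles as its pseudo-physical frame number. */
            assert(mfn_to_pfn(0x89abcUL) == 0x89abcUL);
            return 0;
    }
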
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 12969523414cf2d0b972bdfb8b3ff50b9c781c8f..1f85bfe6b4704d97e999b748f46a61a362f20d66 100644
@@ -1 +1 @@
-obj-y          := enlighten.o hypercall.o grant-table.o p2m.o mm.o
+obj-y          := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 98544c5f86e99710a7845e7a55648cdf73c14599..0e15f011f9c86895db17d57e0e1e999164d0312c 100644
@@ -260,6 +260,12 @@ static int __init xen_guest_init(void)
        xen_domain_type = XEN_HVM_DOMAIN;
 
        xen_setup_features();
+
+       if (!xen_feature(XENFEAT_grant_map_identity)) {
+               pr_warn("Please upgrade your Xen.\n"
+                               "If your platform has any non-coherent DMA devices, they won't work properly.\n");
+       }
+
        if (xen_feature(XENFEAT_dom0))
                xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
        else
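
Note that the new check only warns at boot: without the feature, coherent devices keep working and only non-coherent DMA devices misbehave later, as the message says. Incidentally, the pr_warn() relies on C's adjacent-string-literal concatenation, so the single call emits two lines, as this standalone equivalent shows:

    #include <stdio.h>

    int main(void)
    {
            /* Adjacent string literals concatenate into one format string;
             * the embedded \n splits the output into two lines. */
            printf("Please upgrade your Xen.\n"
                   "If your platform has any non-coherent DMA devices, "
                   "they won't work properly.\n");
            return 0;
    }
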
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
new file mode 100644
index 0000000..3b99860
--- /dev/null
+++ b/arch/arm/xen/mm32.c
@@ -0,0 +1,202 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+#include <xen/features.h>
+
+static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
+static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
+
+static int alloc_xen_mm32_scratch_page(int cpu)
+{
+       struct page *page;
+       unsigned long virt;
+       pmd_t *pmdp;
+       pte_t *ptep;
+
+       if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
+               return 0;
+
+       page = alloc_page(GFP_KERNEL);
+       if (page == NULL) {
+               pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
+               return -ENOMEM;
+       }
+
+       virt = (unsigned long)__va(page_to_phys(page));
+       pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+       ptep = pte_offset_kernel(pmdp, virt);
+
+       per_cpu(xen_mm32_scratch_virt, cpu) = virt;
+       per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
+
+       return 0;
+}
+
+static int xen_mm32_cpu_notify(struct notifier_block *self,
+                                   unsigned long action, void *hcpu)
+{
+       int cpu = (long)hcpu;
+       switch (action) {
+       case CPU_UP_PREPARE:
+               if (alloc_xen_mm32_scratch_page(cpu))
+                       return NOTIFY_BAD;
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block xen_mm32_cpu_notifier = {
+       .notifier_call  = xen_mm32_cpu_notify,
+};
+
+static void* xen_mm32_remap_page(dma_addr_t handle)
+{
+       unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
+       pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
+
+       *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
+       local_flush_tlb_kernel_page(virt);
+
+       return (void*)virt;
+}
+
+static void xen_mm32_unmap(void *vaddr)
+{
+       put_cpu_var(xen_mm32_scratch_virt);
+}
+
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+       size_t size, enum dma_data_direction dir,
+       void (*op)(const void *, size_t, int))
+{
+       unsigned long pfn;
+       size_t left = size;
+
+       pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+       offset %= PAGE_SIZE;
+
+       do {
+               size_t len = left;
+               void *vaddr;
+
+               if (!pfn_valid(pfn))
+               {
+                       /* Cannot map the page, we don't know its physical address.
+                        * Return and hope for the best */
+                       if (!xen_feature(XENFEAT_grant_map_identity))
+                               return;
+                       vaddr = xen_mm32_remap_page(handle) + offset;
+                       op(vaddr, len, dir);
+                       xen_mm32_unmap(vaddr - offset);
+               } else {
+                       struct page *page = pfn_to_page(pfn);
+
+                       if (PageHighMem(page)) {
+                               if (len + offset > PAGE_SIZE)
+                                       len = PAGE_SIZE - offset;
+
+                               if (cache_is_vipt_nonaliasing()) {
+                                       vaddr = kmap_atomic(page);
+                                       op(vaddr + offset, len, dir);
+                                       kunmap_atomic(vaddr);
+                               } else {
+                                       vaddr = kmap_high_get(page);
+                                       if (vaddr) {
+                                               op(vaddr + offset, len, dir);
+                                               kunmap_high(page);
+                                       }
+                               }
+                       } else {
+                               vaddr = page_address(page) + offset;
+                               op(vaddr, len, dir);
+                       }
+               }
+
+               offset = 0;
+               pfn++;
+               left -= len;
+       } while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir)
+{
+       /* Cannot use __dma_page_dev_to_cpu because we don't have a
+        * struct page for handle */
+
+       if (dir != DMA_TO_DEVICE)
+               outer_inv_range(handle, handle + size);
+
+       dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir)
+{
+
+       dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
+
+       if (dir == DMA_FROM_DEVICE) {
+               outer_inv_range(handle, handle + size);
+       } else {
+               outer_clean_range(handle, handle + size);
+       }
+}
+
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+
+{
+       if (!__generic_dma_ops(hwdev)->unmap_page)
+               return;
+       if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+               return;
+
+       __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
+               return;
+       __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       if (!__generic_dma_ops(hwdev)->sync_single_for_device)
+               return;
+       __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+int __init xen_mm32_init(void)
+{
+       int cpu;
+
+       if (!xen_initial_domain())
+               return 0;
+
+       register_cpu_notifier(&xen_mm32_cpu_notifier);
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               if (alloc_xen_mm32_scratch_page(cpu)) {
+                       put_online_cpus();
+                       unregister_cpu_notifier(&xen_mm32_cpu_notifier);
+                       return -ENOMEM;
+               }
+       }
+       put_online_cpus();
+
+       return 0;
+}
+arch_initcall(xen_mm32_init);
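
Two things are worth calling out in the new file. First, the scratch mapping: each CPU reserves one kernel page at init time and keeps a pointer to its PTE; xen_mm32_remap_page() then points that PTE at an arbitrary machine frame and flushes the local TLB entry, giving a temporary kernel mapping for frames the guest has no struct page for. Second, dma_cache_maint() walks the buffer page by page, since such a mapping is only ever valid one page at a time. A runnable user-space sketch of that chunking arithmetic (simplified: it clamps every chunk to the page boundary, whereas the kernel code above only clamps in the highmem branch and passes the whole remainder through contiguous lowmem mappings):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Split (handle, offset, size) into per-page (pfn, offset, len) chunks,
     * mirroring the loop structure of dma_cache_maint(). */
    static void walk(unsigned long handle, unsigned long offset, size_t size)
    {
            unsigned long pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
            size_t left = size;

            offset %= PAGE_SIZE;
            do {
                    size_t len = left;

                    if (len + offset > PAGE_SIZE)   /* clamp to page boundary */
                            len = PAGE_SIZE - offset;
                    printf("pfn=%#lx offset=%#lx len=%zu\n", pfn, offset, len);
                    offset = 0;
                    pfn++;
                    left -= len;
            } while (left);
    }

    int main(void)
    {
            /* An 8000-byte buffer starting 512 bytes into a page spans
             * three page-sized chunks: 3584 + 4096 + 320 bytes. */
            walk(0x40000000UL, 512, 8000);
            return 0;
    }
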
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 97baf44278171680a0932f949c22e8e26c54926f..05485777625474e835fc41a5d57fb494ca778552 100644
@@ -21,14 +21,12 @@ struct xen_p2m_entry {
        unsigned long pfn;
        unsigned long mfn;
        unsigned long nr_pages;
-       struct rb_node rbnode_mach;
        struct rb_node rbnode_phys;
 };
 
 static rwlock_t p2m_lock;
 struct rb_root phys_to_mach = RB_ROOT;
 EXPORT_SYMBOL_GPL(phys_to_mach);
-static struct rb_root mach_to_phys = RB_ROOT;
 
 static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
 {
@@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
                parent = *link;
                entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
 
-               if (new->mfn == entry->mfn)
-                       goto err_out;
                if (new->pfn == entry->pfn)
                        goto err_out;
 
@@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(__pfn_to_mfn);
 
-static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
-{
-       struct rb_node **link = &mach_to_phys.rb_node;
-       struct rb_node *parent = NULL;
-       struct xen_p2m_entry *entry;
-       int rc = 0;
-
-       while (*link) {
-               parent = *link;
-               entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
-
-               if (new->mfn == entry->mfn)
-                       goto err_out;
-               if (new->pfn == entry->pfn)
-                       goto err_out;
-
-               if (new->mfn < entry->mfn)
-                       link = &(*link)->rb_left;
-               else
-                       link = &(*link)->rb_right;
-       }
-       rb_link_node(&new->rbnode_mach, parent, link);
-       rb_insert_color(&new->rbnode_mach, &mach_to_phys);
-       goto out;
-
-err_out:
-       rc = -EINVAL;
-       pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
-                       __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
-out:
-       return rc;
-}
-
-unsigned long __mfn_to_pfn(unsigned long mfn)
-{
-       struct rb_node *n = mach_to_phys.rb_node;
-       struct xen_p2m_entry *entry;
-       unsigned long irqflags;
-
-       read_lock_irqsave(&p2m_lock, irqflags);
-       while (n) {
-               entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
-               if (entry->mfn <= mfn &&
-                               entry->mfn + entry->nr_pages > mfn) {
-                       read_unlock_irqrestore(&p2m_lock, irqflags);
-                       return entry->pfn + (mfn - entry->mfn);
-               }
-               if (mfn < entry->mfn)
-                       n = n->rb_left;
-               else
-                       n = n->rb_right;
-       }
-       read_unlock_irqrestore(&p2m_lock, irqflags);
-
-       return INVALID_P2M_ENTRY;
-}
-EXPORT_SYMBOL_GPL(__mfn_to_pfn);
-
 int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                            struct gnttab_map_grant_ref *kmap_ops,
                            struct page **pages, unsigned int count)
@@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
                        p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
                        if (p2m_entry->pfn <= pfn &&
                                        p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
-                               rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
                                rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
                                write_unlock_irqrestore(&p2m_lock, irqflags);
                                kfree(p2m_entry);
@@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
        p2m_entry->mfn = mfn;
 
        write_lock_irqsave(&p2m_lock, irqflags);
-       if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
-               (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+       if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) {
                write_unlock_irqrestore(&p2m_lock, irqflags);
                return false;
        }
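
Besides dropping the second tree, the last hunk above also fixes the parenthesization of the error check: in the old form, '<' binds tighter than '=', so rc was assigned the boolean result of the comparison (0 or 1) rather than the callee's return code. A standalone illustration of the difference:

    #include <stdio.h>

    static int fails(void)
    {
            return -22;     /* -EINVAL */
    }

    int main(void)
    {
            int rc;

            /* Old form: '<' binds tighter than '=', so rc gets the
             * comparison result, not the error code. */
            if ((rc = fails() < 0))
                    printf("old: rc = %d\n", rc);   /* old: rc = 1 */

            /* New form: rc gets the return value itself. */
            if ((rc = fails()) < 0)
                    printf("new: rc = %d\n", rc);   /* new: rc = -22 */

            return 0;
    }
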
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 131a6ccdba25693e6899b7813d99a6934e846ce4..14334d0161d5db26746af589920d10757209a5fb 100644
@@ -53,6 +53,9 @@
 /* operation as Dom0 is supported */
 #define XENFEAT_dom0                      11
 
+/* Xen also maps grant references at pfn = mfn */
+#define XENFEAT_grant_map_identity        12
+
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */
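
Guests discover feature bits via the XENVER_get_features hypercall, which returns 32-bit submap words; the new flag is bit 12 of submap 0 (XENFEAT_NR_SUBMAPS stays at 1). A minimal sketch of the bit test, with a hypothetical helper name and a hand-rolled submap word standing in for the hypercall result:

    #include <stdint.h>
    #include <stdio.h>

    #define XENFEAT_grant_map_identity 12

    /* Hypothetical helper: test one feature bit in a fetched submap word. */
    static int submap_has(uint32_t submap, int feature)
    {
            return (submap >> feature) & 1;
    }

    int main(void)
    {
            uint32_t submap = UINT32_C(1) << XENFEAT_grant_map_identity;

            if (submap_has(submap, XENFEAT_grant_map_identity))
                    printf("grant references are mapped at pfn == mfn\n");
            return 0;
    }
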