Merge tag 'stable/for-linus-4.1-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-block.git] arch/x86/xen/mmu.c
index 65083ad63b6f7bcfbdf49a83d46669b745831791..dd151b2045b0e395d58a6b8f5b2f8bac7292dc05 100644
@@ -2436,99 +2436,11 @@ void __init xen_hvm_init_mmu_ops(void)
 }
 #endif
 
-#ifdef CONFIG_XEN_PVH
-/*
- * Map foreign gfn (fgfn), to local pfn (lpfn). This for the user
- * space creating new guest on pvh dom0 and needing to map domU pages.
- */
-static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn,
-                           unsigned int domid)
-{
-       int rc, err = 0;
-       xen_pfn_t gpfn = lpfn;
-       xen_ulong_t idx = fgfn;
-
-       struct xen_add_to_physmap_range xatp = {
-               .domid = DOMID_SELF,
-               .foreign_domid = domid,
-               .size = 1,
-               .space = XENMAPSPACE_gmfn_foreign,
-       };
-       set_xen_guest_handle(xatp.idxs, &idx);
-       set_xen_guest_handle(xatp.gpfns, &gpfn);
-       set_xen_guest_handle(xatp.errs, &err);
-
-       rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
-       if (rc < 0)
-               return rc;
-       return err;
-}
-
-static int xlate_remove_from_p2m(unsigned long spfn, int count)
-{
-       struct xen_remove_from_physmap xrp;
-       int i, rc;
-
-       for (i = 0; i < count; i++) {
-               xrp.domid = DOMID_SELF;
-               xrp.gpfn = spfn+i;
-               rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
-               if (rc)
-                       break;
-       }
-       return rc;
-}
-
-struct xlate_remap_data {
-       unsigned long fgfn; /* foreign domain's gfn */
-       pgprot_t prot;
-       domid_t  domid;
-       int index;
-       struct page **pages;
-};
-
-static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
-                           void *data)
-{
-       int rc;
-       struct xlate_remap_data *remap = data;
-       unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
-       pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
-
-       rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
-       if (rc)
-               return rc;
-       native_set_pte(ptep, pteval);
-
-       return 0;
-}
-
-static int xlate_remap_gfn_range(struct vm_area_struct *vma,
-                                unsigned long addr, unsigned long mfn,
-                                int nr, pgprot_t prot, unsigned domid,
-                                struct page **pages)
-{
-       int err;
-       struct xlate_remap_data pvhdata;
-
-       BUG_ON(!pages);
-
-       pvhdata.fgfn = mfn;
-       pvhdata.prot = prot;
-       pvhdata.domid = domid;
-       pvhdata.index = 0;
-       pvhdata.pages = pages;
-       err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
-                                 xlate_map_pte_fn, &pvhdata);
-       flush_tlb_all();
-       return err;
-}
-#endif
-
 #define REMAP_BATCH_SIZE 16
 
 struct remap_data {
-       unsigned long mfn;
+       xen_pfn_t *mfn;
+       bool contiguous;
        pgprot_t prot;
        struct mmu_update *mmu_update;
 };
@@ -2537,7 +2449,14 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
                                 unsigned long addr, void *data)
 {
        struct remap_data *rmd = data;
-       pte_t pte = pte_mkspecial(mfn_pte(rmd->mfn++, rmd->prot));
+       pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
+
+       /* If we have a contiguous range, just update the mfn itself,
+          else update the pointer to be the "next mfn". */
+       if (rmd->contiguous)
+               (*rmd->mfn)++;
+       else
+               rmd->mfn++;
 
        rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
        rmd->mmu_update->val = pte_val_ma(pte);
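The hunk above makes struct remap_data carry a frame pointer plus a contiguous flag, so the same pte walk serves both a single base frame (bump the value) and an array of frames (bump the cursor). Below is a stand-alone sketch of just that advance logic; it is not part of the patch, and frame_t, next_frame() and struct toy_remap_data are invented names standing in for xen_pfn_t and the kernel structures.

/* Toy model of the two advance modes in remap_area_mfn_pte_fn(): the
 * real code runs inside the kernel page-table walk; this sketch only
 * shows how contiguous mode yields consecutive frames while array mode
 * yields one frame per entry. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long frame_t;

struct toy_remap_data {
	frame_t *mfn;     /* value (contiguous) or array cursor */
	bool contiguous;
};

static frame_t next_frame(struct toy_remap_data *rmd)
{
	frame_t cur = *rmd->mfn;

	if (rmd->contiguous)
		(*rmd->mfn)++;  /* single base frame: bump the value */
	else
		rmd->mfn++;     /* array of frames: bump the cursor */
	return cur;
}

int main(void)
{
	frame_t base = 100;
	frame_t array[] = { 7, 42, 13 };
	struct toy_remap_data contig = { .mfn = &base, .contiguous = true };
	struct toy_remap_data scatter = { .mfn = array, .contiguous = false };

	for (int i = 0; i < 3; i++)
		printf("contiguous: %lu  array: %lu\n",
		       next_frame(&contig), next_frame(&scatter));
	return 0;
}

The contiguous column prints 100, 101, 102 while the array column prints 7, 42, 13, which is exactly the distinction the contiguous flag encodes.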
@@ -2546,26 +2465,26 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
        return 0;
 }
 
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
-                              unsigned long addr,
-                              xen_pfn_t mfn, int nr,
-                              pgprot_t prot, unsigned domid,
-                              struct page **pages)
-
+static int do_remap_mfn(struct vm_area_struct *vma,
+                       unsigned long addr,
+                       xen_pfn_t *mfn, int nr,
+                       int *err_ptr, pgprot_t prot,
+                       unsigned domid,
+                       struct page **pages)
 {
+       int err = 0;
        struct remap_data rmd;
        struct mmu_update mmu_update[REMAP_BATCH_SIZE];
-       int batch;
        unsigned long range;
-       int err = 0;
+       int mapped = 0;
 
        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
        if (xen_feature(XENFEAT_auto_translated_physmap)) {
 #ifdef CONFIG_XEN_PVH
                /* We need to update the local page tables and the xen HAP */
-               return xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
-                                            domid, pages);
+               return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+                                                prot, domid, pages);
 #else
                return -EINVAL;
 #endif
@@ -2573,9 +2492,15 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 
        rmd.mfn = mfn;
        rmd.prot = prot;
+       /* We use err_ptr to indicate whether we are doing a contiguous
+        * mapping or a discontiguous mapping. */
+       rmd.contiguous = !err_ptr;
 
        while (nr) {
-               batch = min(REMAP_BATCH_SIZE, nr);
+               int index = 0;
+               int done = 0;
+               int batch = min(REMAP_BATCH_SIZE, nr);
+               int batch_left = batch;
                range = (unsigned long)batch << PAGE_SHIFT;
 
                rmd.mmu_update = mmu_update;
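For reference, the batching arithmetic set up here simply walks the region REMAP_BATCH_SIZE pages at a time, advancing the virtual address by batch << PAGE_SHIFT per round. The following minimal, stand-alone illustration is not kernel code; REMAP_BATCH_SIZE comes from this file, PAGE_SHIFT is hard-coded to the x86 value of 12, and nr = 37 and the start address are arbitrary example values.

#include <stdio.h>

#define REMAP_BATCH_SIZE 16
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long addr = 0x7f0000000000UL;
	int nr = 37;	/* example: 37 pages -> batches of 16, 16, 5 */

	while (nr) {
		int batch = nr < REMAP_BATCH_SIZE ? nr : REMAP_BATCH_SIZE;
		unsigned long range = (unsigned long)batch << PAGE_SHIFT;

		printf("map %2d pages at 0x%lx\n", batch, addr);
		nr -= batch;
		addr += range;
	}
	return 0;
}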
@@ -2584,23 +2509,72 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                if (err)
                        goto out;
 
-               err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
-               if (err < 0)
-                       goto out;
+               /* We record the error for each page that fails, but
+                * continue mapping until the whole set is done. */
+               do {
+                       int i;
+
+                       err = HYPERVISOR_mmu_update(&mmu_update[index],
+                                                   batch_left, &done, domid);
+
+                       /*
+                        * @err_ptr may be the same buffer as @mfn, so
+                        * only clear it after each chunk of @mfn is
+                        * used.
+                        */
+                       if (err_ptr) {
+                               for (i = index; i < index + done; i++)
+                                       err_ptr[i] = 0;
+                       }
+                       if (err < 0) {
+                               if (!err_ptr)
+                                       goto out;
+                               err_ptr[i] = err;
+                               done++; /* Skip failed frame. */
+                       } else
+                               mapped += done;
+                       batch_left -= done;
+                       index += done;
+               } while (batch_left);
 
                nr -= batch;
                addr += range;
+               if (err_ptr)
+                       err_ptr += batch;
        }
-
-       err = 0;
 out:
 
        xen_flush_tlb_all();
 
-       return err;
+       return err < 0 ? err : mapped;
+}
+
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+                              unsigned long addr,
+                              xen_pfn_t mfn, int nr,
+                              pgprot_t prot, unsigned domid,
+                              struct page **pages)
+{
+       return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages);
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
 
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+                              unsigned long addr,
+                              xen_pfn_t *mfn, int nr,
+                              int *err_ptr, pgprot_t prot,
+                              unsigned domid, struct page **pages)
+{
+       /* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
+        * and it is otherwise quite hard to detect later what the actual
+        * cause of "wrong memory was mapped in" was.
+        */
+       BUG_ON(err_ptr == NULL);
+       return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages);
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+
+
 /* Returns: 0 success */
 int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
                               int numpgs, struct page **pages)
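The do/while loop added in this hunk keeps issuing the batch after a failure: entries the hypercall completed get err_ptr[i] = 0, the failing entry records the negative error and is skipped, and, in the loop as written, only chunks that complete without error add to the mapped count that do_remap_mfn() ultimately returns. Below is a stand-alone model of that bookkeeping only; fake_mmu_update(), FAKE_EINVAL and the frame values are all invented for illustration and do not correspond to real hypercall behaviour.

#include <stdio.h>

#define FAKE_EINVAL 22

/* Fails on frame value 0; reports via *done how many entries it
 * processed before stopping, like the hypercall's done argument. */
static int fake_mmu_update(const unsigned long *frames, int count, int *done)
{
	for (*done = 0; *done < count; (*done)++) {
		if (frames[*done] == 0)
			return -FAKE_EINVAL;
	}
	return 0;
}

int main(void)
{
	unsigned long frames[] = { 10, 11, 0, 13, 14 };   /* frame 0 is bad */
	int err_ptr[5];
	int batch_left = 5, index = 0, mapped = 0, err;

	do {
		int done = 0;

		err = fake_mmu_update(&frames[index], batch_left, &done);
		for (int i = 0; i < done; i++)
			err_ptr[index + i] = 0;   /* completed entries */
		if (err < 0) {
			err_ptr[index + done] = err;
			done++;                   /* skip the failed frame */
		} else {
			mapped += done;
		}
		batch_left -= done;
		index += done;
	} while (batch_left);

	printf("mapped %d of 5\n", mapped);
	for (int i = 0; i < 5; i++)
		printf("err_ptr[%d] = %d\n", i, err_ptr[i]);
	return 0;
}

With the bad frame in the middle, the run ends with mapped == 2 and err_ptr == {0, 0, -22, 0, 0}: callers of xen_remap_domain_mfn_array() get a per-frame error array plus the count of mapped frames, while xen_remap_domain_mfn_range() callers (err_ptr == NULL) still bail out on the first error as before.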
@@ -2609,22 +2583,7 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
                return 0;
 
 #ifdef CONFIG_XEN_PVH
-       while (numpgs--) {
-               /*
-                * The mmu has already cleaned up the process mmu
-                * resources at this point (lookup_address will return
-                * NULL).
-                */
-               unsigned long pfn = page_to_pfn(pages[numpgs]);
-
-               xlate_remove_from_p2m(pfn, 1);
-       }
-       /*
-        * We don't need to flush tlbs because as part of
-        * xlate_remove_from_p2m, the hypervisor will do tlb flushes
-        * after removing the p2m entries from the EPT/NPT
-        */
-       return 0;
+       return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
 #else
        return -EINVAL;
 #endif