/* arch/x86/xen/mmu.c */
#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);
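
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * helper showing how a reservation change would be bracketed by
 * xen_reservation_lock. XENMEM_decrease_reservation and the reservation
 * struct come from xen/interface/memory.h (already included above); the
 * function name is made up for this example.
 */
#if 0	/* example sketch */
static void example_decrease_reservation(struct xen_memory_reservation *r)
{
	unsigned long flags;

	spin_lock_irqsave(&xen_reservation_lock, flags);
	HYPERVISOR_memory_op(XENMEM_decrease_reservation, r);
	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
#endif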

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * If the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup.
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* Otherwise we have to do a (slower) full page-table walk. */
	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
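
/*
 * Illustrative sketch, not part of the original file: a vmalloc'd buffer
 * fails virt_addr_valid(), so plain virt_to_machine() must not be used on
 * it; arbitrary_virt_to_machine() handles both linear-mapped and
 * page-table-mapped addresses. The helper name is made up.
 */
#if 0	/* example sketch */
static phys_addr_t example_machine_addr_of(void *buf)
{
	/* buf may come from kmalloc (linear map) or vmalloc (full walk). */
	return arbitrary_virt_to_machine(buf).maddr;
}
#endif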

void xen_flush_tlb_all(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_all(0);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_ALL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
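
/*
 * Illustrative sketch, not part of the original file: the same
 * xen_mc_entry()/xen_mc_issue() multicall pattern can batch other mmuext
 * operations, e.g. a local single-address flush via MMUEXT_INVLPG_LOCAL.
 * The function name is made up for this example.
 */
#if 0	/* example sketch */
static void example_flush_tlb_one(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	/* The flush may be deferred if we are inside a lazy-MMU section. */
	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
#endif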

#define REMAP_BATCH_SIZE 16

struct remap_data {
	xen_pfn_t *mfn;
	bool contiguous;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));

	/*
	 * If we have a contiguous range, just update the mfn itself,
	 * else update the pointer to be "next mfn".
	 */
	if (rmd->contiguous)
		(*rmd->mfn)++;
	else
		rmd->mfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

static int do_remap_gfn(struct vm_area_struct *vma,
			unsigned long addr,
			xen_pfn_t *gfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned domid,
			struct page **pages)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	rmd.mfn = gfn;
	rmd.prot = prot;
	/*
	 * We use err_ptr to indicate whether we are doing a contiguous
	 * mapping or a discontiguous one.
	 */
	rmd.contiguous = !err_ptr;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		/*
		 * We record the error for each page that gives an error,
		 * but continue mapping until the whole set is done.
		 */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @gfn, so
			 * only clear it after each chunk of @gfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				/* i == index + done here: the frame that failed. */
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
		cond_resched();
	}
out:

	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}

int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t gfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
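
/*
 * Note (added commentary, not in the original file): the _range variant
 * above maps nr consecutive frames starting from a single gfn, while the
 * _array variant below takes one gfn per frame plus a per-frame error slot.
 */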

int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid, struct page **pages)
{
	/*
	 * We BUG_ON because it's a programmer error to pass a NULL err_ptr,
	 * and the consequences later are quite hard to trace back to the
	 * actual cause of "wrong memory was mapped in".
	 */
	BUG_ON(err_ptr == NULL);
	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
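
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller in the style of the privcmd driver, mapping two foreign frames
 * into a VM_PFNMAP|VM_IO vma and checking the per-frame error array. The
 * function name and frame numbers are made up.
 */
#if 0	/* example sketch */
static int example_map_two_frames(struct vm_area_struct *vma,
				  unsigned long addr, unsigned domid)
{
	xen_pfn_t gfn[2] = { 0x1000, 0x2000 };	/* arbitrary frames */
	int err[2];
	int mapped;

	mapped = xen_remap_domain_gfn_array(vma, addr, gfn, 2, err,
					    vma->vm_page_prot, domid, NULL);
	if (mapped < 0)
		return mapped;	/* no frame could be processed at all */

	/* Individual frames may still have failed. */
	return err[0] ?: err[1];
}
#endif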

/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages)
{
	/* Nothing to unmap for PV guests, or when no pages were supplied. */
	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/* Unmapping for auto-translated guests is not supported here. */
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);