MIPS: KVM: Fix mapped fault broken commpage handling
[linux-2.6-block.git] arch/mips/kvm/mmu.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <asm/mmu_context.h>

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        int cpu = smp_processor_id();

        return vcpu->arch.guest_kernel_asid[cpu] &
                        cpu_asid_mask(&cpu_data[cpu]);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        int cpu = smp_processor_id();

        return vcpu->arch.guest_user_asid[cpu] &
                        cpu_asid_mask(&cpu_data[cpu]);
}

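/*
 * Example (illustrative annotation, not in the original source): the cached
 * per-CPU guest ASIDs above carry generation bits in their upper bits, so on
 * a core with an 8-bit hardware ASID field (cpu_asid_mask() == 0xff) a cached
 * value of 0x101 yields hardware ASID 0x01 for the EntryHi write.
 */
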
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
        int srcu_idx, err = 0;
        kvm_pfn_t pfn;

        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
                return 0;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = gfn_to_pfn(kvm, gfn);

        if (is_error_pfn(pfn)) {
                kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
                err = -EFAULT;
                goto out;
        }

        kvm->arch.guest_pmap[gfn] = pfn;
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}

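/*
 * Annotation (not in the original source): guest_pmap is a lazily filled
 * gfn -> pfn cache. kvm_mips_map_page() returns immediately for an
 * already-mapped gfn and only takes the SRCU read lock (required by
 * gfn_to_pfn() to walk the memslots safely) on the first fault for a gfn.
 */
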
/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
                                                    unsigned long gva)
{
        gfn_t gfn;
        unsigned long offset = gva & ~PAGE_MASK;
        struct kvm *kvm = vcpu->kvm;

        if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
                kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
                        __builtin_return_address(0), gva);
                return KVM_INVALID_PAGE;
        }

        gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
                        gva);
                return KVM_INVALID_PAGE;
        }

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return KVM_INVALID_ADDR;

        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

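/*
 * Worked example (illustrative, not in the original source): with 4 KiB
 * pages, a KSEG0 gva whose guest page number is 3 and whose page offset is
 * 0x230 resolves via guest_pmap[3]; if that pfn is 0x12345, the host PA is
 * (0x12345 << 12) + 0x230 = 0x12345230.
 */
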
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu)
{
        gfn_t gfn;
        kvm_pfn_t pfn0, pfn1;
        unsigned long vaddr = 0;
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        const int flush_dcache_mask = 0;
        int ret;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }
        vaddr = badvaddr & (PAGE_MASK << 1);

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return -1;

        if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
                return -1;

        pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
        pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
                ENTRYLO_D | ENTRYLO_V;
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
                ENTRYLO_D | ENTRYLO_V;

        preempt_disable();
        entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
        ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                      flush_dcache_mask);
        preempt_enable();

        return ret;
}

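/*
 * Worked example (illustrative, not in the original source): a MIPS TLB
 * entry maps an even/odd page pair, so for a faulting gfn of 3 the code
 * above also maps gfn ^ 0x1 == 2, then fills pfn0/pfn1 from gfn & ~0x1 == 2
 * and gfn | 0x1 == 3; vaddr = badvaddr & (PAGE_MASK << 1) aligns EntryHi to
 * the double-page VPN2 boundary.
 */
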
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                         struct kvm_mips_tlb *tlb)
{
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        kvm_pfn_t pfn0, pfn1;
        long tlb_lo[2];
        int ret;

        tlb_lo[0] = tlb->tlb_lo[0];
        tlb_lo[1] = tlb->tlb_lo[1];

        /*
         * The commpage address must not be mapped to anything else if the
         * guest TLB contains entries nearby, or commpage accesses will break.
         */
        if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
                        VPN2_MASK & (PAGE_MASK << 1)))
                tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;

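        /*
         * Annotation (not in the original source): the XOR/mask above is
         * true exactly when tlb_hi and KVM_GUEST_COMMPAGE_ADDR share the
         * same double-page VPN2 region, i.e. the guest entry covers the
         * commpage; the even/odd tlb_lo half selected by bit 0 of the
         * commpage's page number is then zeroed, leaving it invalid
         * (ENTRYLO_V clear) so it cannot shadow the host commpage mapping.
         */
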
        if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[0])
                                >> PAGE_SHIFT) < 0)
                return -1;

        if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb_lo[1])
                                >> PAGE_SHIFT) < 0)
                return -1;

        pfn0 = kvm->arch.guest_pmap[
                mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT];
        pfn1 = kvm->arch.guest_pmap[
                mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT];

        /* Get attributes from the Guest TLB */
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
                (tlb_lo[0] & ENTRYLO_D) |
                (tlb_lo[0] & ENTRYLO_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
                (tlb_lo[1] & ENTRYLO_D) |
                (tlb_lo[1] & ENTRYLO_V);

        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo[0], tlb->tlb_lo[1]);

        preempt_disable();
        entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
                                               kvm_mips_get_kernel_asid(vcpu) :
                                               kvm_mips_get_user_asid(vcpu));
        ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                      tlb->tlb_mask);
        preempt_enable();

        return ret;
}

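/*
 * Annotation (not in the original source): for mapped segments the host TLB
 * entry takes its physical address from the pre-translated pfns, inherits
 * Dirty (write-enable) and Valid from the guest's tlb_lo words, but forces
 * the cache coherency attribute to the host's _page_cachable_default rather
 * than trusting the guest's.
 */
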
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                             struct kvm_vcpu *vcpu)
{
        unsigned long asid = asid_cache(cpu);

        asid += cpu_asid_inc();
        if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();

                kvm_local_flush_tlb_all();      /* start new asid cycle */

                if (!asid)      /* fix version if needed */
                        asid = asid_first_version(cpu);
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

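/*
 * Worked example (illustrative, not in the original source): with an 8-bit
 * ASID field and cpu_asid_inc() == 1, a cached value of 0x1ff increments to
 * 0x200; its low byte is zero, so the TLB is flushed and 0x200 starts a new
 * ASID generation (generation 2 in the upper bits).
 */
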
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:       Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
        if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
                hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
        unsigned long flags;
        int newasid = 0;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        /* Allocate new kernel and user ASIDs if needed */

        local_irq_save(flags);

        if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
                                                asid_version_mask(cpu)) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                        vcpu->arch.guest_kernel_mm.context.asid[cpu];
                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                        vcpu->arch.guest_user_mm.context.asid[cpu];
                newasid++;

                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
                kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
                          cpu, vcpu->arch.guest_kernel_asid[cpu]);
                kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n",
                          cpu, vcpu->arch.guest_user_asid[cpu]);
        }

        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
                          vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
                /*
                 * Migrate the timer interrupt to the current CPU so that it
                 * always interrupts the guest and synchronously triggers a
                 * guest timer interrupt.
                 */
                kvm_mips_migrate_count(vcpu);
        }

        if (!newasid) {
                /*
                 * If we were preempted while the guest was executing, reload
                 * the pre-empted ASID.
                 */
                if (current->flags & PF_VCPU) {
                        write_c0_entryhi(vcpu->arch.preempt_entryhi &
                                         asid_mask);
                        ehb();
                }
        } else {
                /* New ASIDs were allocated for the VM */

                /*
                 * Were we in guest context? If so, the pre-empted ASID is no
                 * longer valid; set it to what it should be based on the
                 * guest's mode (kernel/user).
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
                                write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
                                                 asid_mask);
                        else
                                write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
                                                 asid_mask);
                        ehb();
                }
        }

        /* restore guest state to registers */
        kvm_mips_callbacks->vcpu_set_regs(vcpu);

        local_irq_restore(flags);
}

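/*
 * Annotation (not in the original source): kvm_arch_vcpu_load() treats the
 * guest ASIDs as stale when their generation bits (asid_version_mask) differ
 * from this CPU's asid_cache, e.g. after an ASID-generation rollover or when
 * the VCPU has never run on this CPU in the current generation.
 */
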
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        int cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();

        vcpu->arch.preempt_entryhi = read_c0_entryhi();
        vcpu->arch.last_sched_cpu = cpu;

        /* save guest state in registers */
        kvm_mips_callbacks->vcpu_get_regs(vcpu);

        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             asid_version_mask(cpu))) {
                kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
                          cpu_context(cpu, current->mm));
                drop_mmu_context(current->mm, cpu);
        }
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        ehb();

        local_irq_restore(flags);
}

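/*
 * Annotation (not in the original source): kvm_arch_vcpu_put() snapshots the
 * live EntryHi (which may hold a guest ASID) into preempt_entryhi for
 * kvm_arch_vcpu_load() to restore, then switches EntryHi back to the host
 * task's ASID, renewing the host context first if its generation has gone
 * stale.
 */
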
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long paddr, flags, vpn2, asid;
        unsigned long va = (unsigned long)opc;
        void *vaddr;
        u32 inst;
        int index;

        if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
            KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
                local_irq_save(flags);
                index = kvm_mips_host_tlb_lookup(vcpu, va);
                if (index >= 0) {
                        inst = *(opc);
                } else {
                        vpn2 = va & VPN2_MASK;
                        asid = kvm_read_c0_guest_entryhi(cop0) &
                                KVM_ENTRYHI_ASID;
                        index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
                        if (index < 0) {
                                kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
                                        __func__, opc, vcpu, read_c0_entryhi());
                                kvm_mips_dump_host_tlbs();
                                kvm_mips_dump_guest_tlbs(vcpu);
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
                        kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                                        &vcpu->arch.guest_tlb[index]);
                        inst = *(opc);
                }
                local_irq_restore(flags);
        } else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
                paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
                vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
                vaddr += paddr & ~PAGE_MASK;
                inst = *(u32 *)vaddr;
                kunmap_atomic(vaddr);
        } else {
                kvm_err("%s: illegal address: %p\n", __func__, opc);
                return KVM_INVALID_INST;
        }

        return inst;
}
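
/*
 * Annotation (not in the original source): the KSEG0 path in kvm_get_inst()
 * reads the instruction through a temporary kmap_atomic() mapping so it also
 * works when the backing page lives in highmem; passing the offset-adjusted
 * address to kunmap_atomic() is fine because it resolves the containing
 * page internally.
 */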