KVM: PPC: Allow use of small pages to back Book3S HV guests
arch/powerpc/kvm/book3s_hv.c
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	local_paca->kvm_hstate.kvm_vcpu = vcpu;
	local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->shared_proc = 1;
	vpa->yield_count = 1;
}

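/*
 * Handle the H_REGISTER_VPA hcall.  The subfunction code is taken from
 * bits 16:18 (IBM numbering) of the flags argument, hence the shift by
 * 63 - 18 below.  Subfunctions 1, 2 and 3 register the VPA, the
 * dispatch trace log (DTL) and the SLB shadow buffer respectively;
 * 5, 6 and 7 unregister them; 0 and 4 are reserved (see PAPR for the
 * authoritative encoding).
 */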
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err = H_PARAMETER;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	flags >>= 63 - 18;
	flags &= 7;
	if (flags == 0 || flags == 4)
		return H_PARAMETER;
	if (flags < 4) {
		if (vpa & 0x7f)
			return H_PARAMETER;
		if (flags >= 2 && !tvcpu->arch.vpa)
			return H_RESOURCE;
		/* registering new area; convert logical addr to real */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (flags <= 1)
			len = *(unsigned short *)(va + 4);
		else
			len = *(unsigned int *)(va + 4);
		if (len > nb)
			goto out_unpin;
		switch (flags) {
		case 1:		/* register VPA */
			if (len < 640)
				goto out_unpin;
			if (tvcpu->arch.vpa)
				kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
			tvcpu->arch.vpa = va;
			init_vpa(vcpu, va);
			break;
		case 2:		/* register DTL */
			if (len < 48)
				goto out_unpin;
			len -= len % 48;
			if (tvcpu->arch.dtl)
				kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
			tvcpu->arch.dtl = va;
			tvcpu->arch.dtl_end = va + len;
			break;
		case 3:		/* register SLB shadow buffer */
			if (len < 16)
				goto out_unpin;
			if (tvcpu->arch.slb_shadow)
				kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
			tvcpu->arch.slb_shadow = va;
			break;
		}
	} else {
		switch (flags) {
		case 5:		/* unregister VPA */
			if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
				return H_RESOURCE;
			if (!tvcpu->arch.vpa)
				break;
			kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
			tvcpu->arch.vpa = NULL;
			break;
		case 6:		/* unregister DTL */
			if (!tvcpu->arch.dtl)
				break;
			kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
			tvcpu->arch.dtl = NULL;
			break;
		case 7:		/* unregister SLB shadow buffer */
			if (!tvcpu->arch.slb_shadow)
				break;
			kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
			tvcpu->arch.slb_shadow = NULL;
			break;
		}
	}
	return H_SUCCESS;

 out_unpin:
	kvmppc_unpin_guest_page(kvm, va);
	return err;
}

int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;

	switch (req) {
	case H_ENTER:
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		/* wake the *target* vcpu if it has ceded */
		if (tvcpu->arch.ceded) {
			if (waitqueue_active(&tvcpu->wq)) {
				wake_up_interruptible(&tvcpu->wq);
				tvcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest does a bad real-mode access,
	 * as we have enabled VRMA (virtualized real mode area) mode in the
	 * LPCR.  We just generate an appropriate DSI/ISI to the guest.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.shregs.dar = vcpu->arch.fault_dar;
		kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE,
					0x08000000);
		r = RESUME_GUEST;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
		       vcpu->arch.trap, kvmppc_get_pc(vcpu),
		       vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i;

	/* clear the whole structure first, so the pvr isn't wiped */
	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

int kvmppc_core_check_processor_compat(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;
	return -EIO;
}

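/*
 * vcpu IDs are mapped onto virtual cores in groups of threads_per_core,
 * which is simply the id / threads_per_core computation below: with
 * four threads per core, vcpu IDs 0-3 share vcore 0, IDs 4-7 share
 * vcore 1, and so on.
 */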
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.last_cpu = -1;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);

	kvmppc_mmu_book3s_hv_init(vcpu);

	/*
	 * We consider the vcpu stopped until we see the first run ioctl for it.
	 */
	vcpu->arch.state = KVMPPC_VCPU_STOPPED;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
		}
		kvm->arch.vcores[core] = vcore;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto uninit_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kfree(vcpu);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.dtl)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl);
	if (vcpu->arch.slb_shadow)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow);
	if (vcpu->arch.vpa)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

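/*
 * Arm a host hrtimer for the moment the guest decrementer would fire.
 * The computation below converts timebase ticks to nanoseconds as
 * dec_nsec = ticks * NSEC_PER_SEC / tb_ticks_per_sec; for example,
 * with the typical 512 MHz timebase, 512 ticks become 1000 ns.
 */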
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		   / tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	--vc->n_runnable;
	++vc->n_busy;
	/* decrement the physical thread id of each following vcpu */
	v = vcpu;
	list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
		--v->arch.ptid;
	list_del(&vcpu->arch.run_list);
}

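/*
 * ptid is the vcpu's thread number within its virtual core.  Thread 0
 * runs in the context of the task that called KVM_RUN; the nonzero
 * ptids map onto the (offline) secondary hardware threads of the
 * physical core, which are started via the cpu_start flag in their
 * paca and an IPI (the xics_wake_cpu() call below).
 */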
static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		tpaca->cpu_start = 0x80;
		wmb();
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}

static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;
	return 1;
}

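/*
 * A note on vc->entry_exit_count (sampled via VCORE_EXIT_COUNT() in
 * kvmppc_run_vcpu() below): as best can be told from the assembly
 * entry/exit path, the low byte counts threads that have entered the
 * guest and the next byte counts threads that have exited, so both
 * can be checked with a single load.
 */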
/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static int kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid;

	/* don't start if any threads have a signal pending */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (signal_pending(vcpu->arch.run_task))
			return 0;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		return 0;		/* nothing to run */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_RUNNING;
	vc->in_guest = 0;
	vc->pcpu = smp_processor_id();
	vc->napping_threads = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		kvmppc_start_thread(vcpu);

	preempt_disable();
	spin_unlock(&vc->lock);

	kvm_guest_enter();
	__kvmppc_vcore_entry(NULL, vcpu0);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
						 vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

	spin_lock(&vc->lock);
 out:
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}

	return 1;
}

/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);
	struct kvm_vcpu *v;
	int all_idle = 1;

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
		if (!v->arch.ceded || v->arch.pending_exceptions) {
			all_idle = 0;
			break;
		}
	}
	if (all_idle)
		schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}

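/*
 * How vcpu tasks and vcores interact here: every vcpu has its own host
 * task, but only one task per virtual core actually enters the guest,
 * running all runnable vcpus of that core via kvmppc_run_core().  The
 * remaining vcpu tasks sleep in kvmppc_wait_for_exec() until their
 * vcpu needs service in the host.
 */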
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	int prev_state;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	prev_state = vcpu->arch.state;
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (prev_state == KVMPPC_VCPU_STOPPED) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_start_thread(vcpu);
		}

	} else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST)
		--vc->n_busy;

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
			n_ceded += v->arch.ceded;
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);

		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
	}

	if (signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING ||
		    vc->vcore_state == VCORE_EXITING) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
			spin_lock(&vc->lock);
		}
		if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
			kvmppc_remove_runnable(vc, vcpu);
			vcpu->stat.signal_exits++;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			vcpu->arch.ret = -EINTR;
		}
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}

int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* On the first time here, set up VRMA or RMA */
	if (!vcpu->kvm->arch.rma_setup_done) {
		r = kvmppc_hv_setup_rma(vcpu);
		if (r)
			return r;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_prepare_to_enter(vcpu);
		}
	} while (r == RESUME_GUEST);
	return r;
}

static long kvmppc_stt_npages(unsigned long window_size)
{
	return ALIGN((window_size >> SPAPR_TCE_SHIFT)
		     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

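/*
 * Worked example for kvmppc_stt_npages() above: window_size >>
 * SPAPR_TCE_SHIFT is the number of TCE entries, each a u64.  Assuming
 * 4 kB TCE pages (SPAPR_TCE_SHIFT == 12) and 4 kB host pages, a 2 GB
 * DMA window has 512K entries, i.e. 4 MB of table, i.e. 1024 pages.
 */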
static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
{
	struct kvm *kvm = stt->kvm;
	int i;

	mutex_lock(&kvm->lock);
	list_del(&stt->list);
	for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
		__free_page(stt->pages[i]);
	kfree(stt);
	mutex_unlock(&kvm->lock);

	kvm_put_kvm(kvm);
}

static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;

	release_spapr_tce_table(stt);
	return 0;
}

static struct file_operations kvm_spapr_tce_fops = {
	.mmap = kvm_spapr_tce_mmap,
	.release = kvm_spapr_tce_release,
};

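/*
 * Lifecycle of a TCE table, as implemented below: the ioctl hands
 * userspace an anonymous-inode fd; mmap() of that fd maps the table
 * pages via kvm_spapr_tce_fault(); the final close of the fd reaches
 * kvm_spapr_tce_release(), which frees the table and drops the
 * reference on the VM.
 */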
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	long npages;
	int ret = -ENOMEM;
	int i;

	/* Check this LIOBN hasn't been previously allocated */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == args->liobn)
			return -EBUSY;
	}

	npages = kvmppc_stt_npages(args->window_size);

	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail;

	stt->liobn = args->liobn;
	stt->window_size = args->window_size;
	stt->kvm = kvm;

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	kvm_get_kvm(kvm);

	mutex_lock(&kvm->lock);
	list_add(&stt->list, &kvm->arch.spapr_tce_tables);

	mutex_unlock(&kvm->lock);

	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				stt, O_RDWR);

fail:
	if (stt) {
		for (i = 0; i < npages; i++)
			if (stt->pages[i])
				__free_page(stt->pages[i]);

		kfree(stt);
	}
	return ret;
}

/*
 * Work out RMLS (real mode limit selector) field value for a given
 * RMA size.  Assumes POWER7 or PPC970.
 */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}

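/*
 * Example: a 128 MB RMA yields RMLS = 7; kvmppc_hv_setup_rma() below
 * inserts that value into the RMLS field of the LPCR on POWER7, or
 * into the split RMLS field of HID4 on PPC970.
 */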
static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_rma_info *ri = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= ri->npages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};

static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_rma_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static struct file_operations kvm_rma_fops = {
	.mmap = kvm_rma_mmap,
	.release = kvm_rma_release,
};

long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
	struct kvmppc_rma_info *ri;
	long fd;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
	if (fd < 0)
		kvm_release_rma(ri);
	else
		ret->rma_size = ri->npages << PAGE_SHIFT;
	return fd;
}

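/*
 * Expected userspace usage (per the KVM API documentation for
 * KVM_ALLOCATE_RMA): mmap() the returned fd to map the RMA into the
 * process, then register that memory at guest physical address 0 with
 * KVM_SET_USER_MEMORY_REGION; kvmppc_hv_setup_rma() below recognizes
 * such a mapping by its vm_file.
 */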
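/*
 * Encode a base page size as SLB_VSID bits: 4 kB maps to 0, 64 kB to
 * SLB_VSID_L | SLB_VSID_LP_01, and anything larger (16 MB here) to
 * SLB_VSID_L alone.
 */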
static unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}

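/*
 * Each memslot gets a slot_phys array with one unsigned long per guest
 * page.  The entries are filled in elsewhere, at page-fault time,
 * under slot_phys_lock; an entry holds the real address of the backing
 * page plus low-order flag bits such as KVMPPC_GOT_PAGE (see
 * unpin_slot() below).
 */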
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	unsigned long npages;
	unsigned long *phys;

	/* Allocate a slot_phys array */
	npages = mem->memory_size >> PAGE_SHIFT;
	phys = kvm->arch.slot_phys[mem->slot];
	if (!phys) {
		phys = vzalloc(npages * sizeof(unsigned long));
		if (!phys)
			return -ENOMEM;
		kvm->arch.slot_phys[mem->slot] = phys;
		kvm->arch.slot_npages[mem->slot] = npages;
	}

	return 0;
}

static void unpin_slot(struct kvm *kvm, int slot_id)
{
	unsigned long *physp;
	unsigned long j, npages, pfn;
	struct page *page;

	physp = kvm->arch.slot_phys[slot_id];
	npages = kvm->arch.slot_npages[slot_id];
	if (physp) {
		spin_lock(&kvm->arch.slot_phys_lock);
		for (j = 0; j < npages; j++) {
			if (!(physp[j] & KVMPPC_GOT_PAGE))
				continue;
			pfn = physp[j] >> PAGE_SHIFT;
			page = pfn_to_page(pfn);
			if (PageHuge(page))
				page = compound_head(page);
			SetPageDirty(page);
			put_page(page);
		}
		kvm->arch.slot_phys[slot_id] = NULL;
		spin_unlock(&kvm->arch.slot_phys_lock);
		vfree(physp);
	}
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}

static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_rma_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr, senc;
	unsigned long psize, porder;
	unsigned long rma_size;
	long rmls;
	unsigned long *physp;
	unsigned long i, npages;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Look up the memslot for guest physical address 0 */
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	/* Is this one of our preallocated RMAs? */
	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
	    hva == vma->vm_start)
		ri = vma->vm_file->private_data;

	up_read(&current->mm->mmap_sem);

	if (!ri) {
		/* On POWER7, use VRMA; on PPC970, give up */
		err = -EPERM;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("KVM: CPU requires an RMO\n");
			goto out;
		}

		/* We can handle 4k, 64k or 16M pages in the VRMA */
		err = -EINVAL;
		if (!(psize == 0x1000 || psize == 0x10000 ||
		      psize == 0x1000000))
			goto out;

		/* Update VRMASD field in the LPCR */
		senc = slb_pgsize_encoding(psize);
		lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
		lpcr |= senc << (LPCR_VRMASD_SH - 4);
		kvm->arch.lpcr = lpcr;

		/* Create HPTEs in the hash page table for the VRMA */
		kvmppc_map_vrma(vcpu, memslot, porder);

	} else {
		/* Set up to use an RMO region */
		rma_size = ri->npages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
		rmls = lpcr_rmls(rma_size);
		err = -EINVAL;
		if (rmls < 0) {
			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
			goto out;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;

		/* Update LPCR and RMOR */
		lpcr = kvm->arch.lpcr;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr &= ~((1ul << HID4_RMLS0_SH) |
				  (3ul << HID4_RMLS2_SH));
			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
			lpcr |= rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
		}
		kvm->arch.lpcr = lpcr;
		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

		/* Initialize phys addrs of pages in RMO */
		npages = ri->npages;
		porder = __ilog2(npages);
		physp = kvm->arch.slot_phys[memslot->id];
		spin_lock(&kvm->arch.slot_phys_lock);
		for (i = 0; i < npages; ++i)
			physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
		spin_unlock(&kvm->arch.slot_phys_lock);
	}

	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = 1;
	err = 0;
 out:
	mutex_unlock(&kvm->lock);
	return err;

 up_out:
	up_read(&current->mm->mmap_sem);
	goto out;
}

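/*
 * LPCR setup note for the POWER7 path below: VPM0 | VRMA_L put the
 * guest's real-mode accesses through the virtualized real mode area
 * (VRMA) with a large-page SLB encoding, HDICE enables the hypervisor
 * decrementer, and DPFD sets the default prefetch depth.  On PPC970
 * there is no LPCR as such; HID4 carries the LPID fields instead.
 */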
int kvmppc_core_init_vm(struct kvm *kvm)
{
	long r;
	unsigned long lpcr;

	/* Allocate hashed page table */
	r = kvmppc_alloc_hpt(kvm);
	if (r)
		return r;

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	kvm->arch.rma = NULL;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		unsigned long lpid = kvm->arch.lpid;
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VRMA_L;
	}
	kvm->arch.lpcr = lpcr;

	spin_lock_init(&kvm->arch.slot_phys_lock);
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	unsigned long i;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		unpin_slot(kvm, i);

	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}

/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	return EMULATE_FAIL;
}

static int kvmppc_book3s_hv_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;

	r = kvmppc_mmu_hv_init();

	return r;
}

static void kvmppc_book3s_hv_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);