KVM: ARM: Hypervisor initialization
arch/arm/kvm/arm.c
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/unified.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/cputype.h>
#include <asm/tlbflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#ifdef REQUIRES_VIRT
/* Allow the assembler to accept Virtualization Extensions instructions */
__asm__(".arch_extension        virt");
#endif

/* Per-CPU page used as the Hyp-mode stack */
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
/* Per-CPU host VFP state, mapped into Hyp mode at init time */
static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
/* The HYP stub's default exception vectors, saved at init time */
static unsigned long hyp_default_vectors;

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        if (type)
                return -EINVAL;

        return 0;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        int i;

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
        case KVM_CAP_ONE_REG:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        case KVM_CAP_NR_VCPUS:
                r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        default:
                r = 0;
                break;
        }
        return r;
}
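
/*
 * Userspace reaches the capability checks above through the generic
 * KVM_CHECK_EXTENSION ioctl on the /dev/kvm fd. A minimal sketch
 * (hypothetical userspace code, not part of this file):
 *
 *      int kvm_fd = open("/dev/kvm", O_RDWR);
 *      int has_one_reg = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ONE_REG);
 */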

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        int err;
        struct kvm_vcpu *vcpu;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_vcpu;

        return vcpu;
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
        unsigned long implementor = read_cpuid_implementor();
        unsigned long part_number = read_cpuid_part_number();

        if (implementor != ARM_CPU_IMP_ARM)
                return -EINVAL;

        switch (part_number) {
        case ARM_CPU_PART_CORTEX_A15:
                return KVM_ARM_TARGET_CORTEX_A15;
        default:
                return -EINVAL;
        }
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return 0;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_ARM_VCPU_INIT: {
                struct kvm_vcpu_init init;

                if (copy_from_user(&init, argp, sizeof(init)))
                        return -EFAULT;

                return kvm_vcpu_set_target(vcpu, &init);
        }
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                if (copy_from_user(&reg, argp, sizeof(reg)))
                        return -EFAULT;
                if (ioctl == KVM_SET_ONE_REG)
                        return kvm_arm_set_reg(vcpu, &reg);
                else
                        return kvm_arm_get_reg(vcpu, &reg);
        }
        case KVM_GET_REG_LIST: {
                struct kvm_reg_list __user *user_list = argp;
                struct kvm_reg_list reg_list;
                unsigned n;

                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
                        return -EFAULT;
                n = reg_list.n;
                reg_list.n = kvm_arm_num_regs(vcpu);
                if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
                        return -EFAULT;
                if (n < reg_list.n)
                        return -E2BIG;
                return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
        }
        default:
                return -EINVAL;
        }
}
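
/*
 * A typical userspace flow against the vcpu ioctls above (a hedged
 * sketch, not part of this file): the vcpu is first initialized for a
 * supported target, then the register list is fetched with the usual
 * two-call pattern, where the first call with n == 0 fails with E2BIG
 * but writes back the required count:
 *
 *      struct kvm_vcpu_init init = { .target = KVM_ARM_TARGET_CORTEX_A15 };
 *      struct kvm_reg_list probe = { .n = 0 };
 *      struct kvm_reg_list *list;
 *
 *      ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *      ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);    - fails, sets probe.n
 *      list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *      list->n = probe.n;
 *      ioctl(vcpu_fd, KVM_GET_REG_LIST, list);      - fills list->reg[]
 */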

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -EINVAL;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

static void cpu_init_hyp_mode(void *vector)
{
        unsigned long long pgd_ptr;
        unsigned long pgd_low, pgd_high;
        unsigned long hyp_stack_ptr;
        unsigned long stack_page;
        unsigned long vector_ptr;

        /* Switch from the HYP stub to our own HYP init vector */
        __hyp_set_vectors((unsigned long)vector);

        pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
        pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
        pgd_high = (pgd_ptr >> 32ULL);
        stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
        hyp_stack_ptr = stack_page + PAGE_SIZE;
        vector_ptr = (unsigned long)__kvm_hyp_vector;

        /*
         * Call the initialization code and switch to the full-blown
         * HYP code. The init code doesn't need to preserve these
         * registers as r0-r3 and r12 are caller-saved according to
         * the AAPCS.
         * Note that we slightly misuse the prototype by casting
         * pgd_low to a void *.
         */
        kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
}
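
/*
 * For illustration (an inference from the AAPCS, not stated in this
 * file): the four arguments above reach the init code as
 *
 *      r0 = pgd_low            low word of the HYP translation table base
 *      r1 = pgd_high           high word of the HYP translation table base
 *      r2 = hyp_stack_ptr      top of this CPU's Hyp stack page
 *      r3 = vector_ptr         address of the full HYP vectors
 *
 * which is what allows __kvm_hyp_init to set up its own page tables,
 * stack and exception vectors before ordinary Hyp calls are made.
 */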

/**
 * init_hyp_mode - initialize Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
        phys_addr_t init_phys_addr;
        int cpu;
        int err = 0;

        /*
         * Allocate the Hyp PGD and set up the Hyp identity mapping
         */
        err = kvm_mmu_init();
        if (err)
                goto out_err;

        /*
         * It is probably enough to obtain the default vectors on one
         * CPU; they are unlikely to differ on the others.
         */
        hyp_default_vectors = __hyp_get_vectors();

        /*
         * Allocate stack pages for Hypervisor-mode
         */
        for_each_possible_cpu(cpu) {
                unsigned long stack_page;

                stack_page = __get_free_page(GFP_KERNEL);
                if (!stack_page) {
                        err = -ENOMEM;
                        goto out_free_stack_pages;
                }

                per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
        }

        /*
         * Execute the init code on each CPU.
         *
         * Note: The stack is not mapped yet, so don't do anything other
         * than initializing the hypervisor mode on each CPU, using a
         * local stack space for temporary storage.
         */
        init_phys_addr = virt_to_phys(__kvm_hyp_init);
        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu, cpu_init_hyp_mode,
                                         (void *)(long)init_phys_addr, 1);
        }

        /*
         * Unmap the identity mapping
         */
        kvm_clear_hyp_idmap();

        /*
         * Map the Hyp code that is called directly from the host
         */
        err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
        if (err) {
                kvm_err("Cannot map world-switch code\n");
                goto out_free_mappings;
        }

        /*
         * Map the Hyp stack pages
         */
        for_each_possible_cpu(cpu) {
                char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);

                err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
                if (err) {
                        kvm_err("Cannot map hyp stack\n");
                        goto out_free_mappings;
                }
        }

        /*
         * Map the host VFP structures
         */
        kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
        if (!kvm_host_vfp_state) {
                err = -ENOMEM;
                kvm_err("Cannot allocate host VFP state\n");
                goto out_free_mappings;
        }

        for_each_possible_cpu(cpu) {
                struct vfp_hard_struct *vfp;

                vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
                err = create_hyp_mappings(vfp, vfp + 1);
                if (err) {
                        kvm_err("Cannot map host VFP state: %d\n", err);
                        goto out_free_vfp;
                }
        }

        kvm_info("Hyp mode initialized successfully\n");
        return 0;
out_free_vfp:
        free_percpu(kvm_host_vfp_state);
out_free_mappings:
        free_hyp_pmds();
out_free_stack_pages:
        for_each_possible_cpu(cpu)
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
        kvm_err("error initializing Hyp mode: %d\n", err);
        return err;
}

/**
 * kvm_arch_init - initialize Hyp-mode and memory mappings on all CPUs
 * @opaque: unused by the ARM implementation
 */
int kvm_arch_init(void *opaque)
{
        int err;

        if (!is_hyp_mode_available()) {
                kvm_err("HYP mode not available\n");
                return -ENODEV;
        }

        if (kvm_target_cpu() < 0) {
                kvm_err("Target CPU not supported!\n");
                return -ENODEV;
        }

        err = init_hyp_mode();
        if (err)
                goto out_err;

        return 0;
out_err:
        return err;
}

/* NOP: Compiling as a module is not supported */
void kvm_arch_exit(void)
{
}

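/*
 * Since KVM/ARM only supports being built into the kernel (see
 * kvm_arch_exit above), module_init() below reduces to an ordinary
 * initcall run at boot.
 */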
static int arm_init(void)
{
        int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        return rc;
}

module_init(arm_init);