Commit | Line | Data |
---|---|---|
342cd0ab CD |
1 | /* |
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License, version 2, as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | |
17 | */ | |
18 | ||
19 | #ifndef __ARM_KVM_MMU_H__ | |
20 | #define __ARM_KVM_MMU_H__ | |
21 | ||
5a677ce0 MZ |
22 | #include <asm/memory.h> |
23 | #include <asm/page.h> | |
c62ee2b2 | 24 | |
/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define HYP_PAGE_OFFSET_MASK	UL(~0)
#define HYP_PAGE_OFFSET		PAGE_OFFSET
/* Kernel VA == HYP VA on 32-bit ARM, so the translation is the identity. */
#define KERN_TO_HYP(kva)	(kva)

/*
 * Our virtual mapping for the boot-time MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the vectors
 * page, where no kernel data will ever be shared with HYP.
 */
#define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE)
40 | #ifndef __ASSEMBLY__ | |
41 | ||
42 | #include <asm/cacheflush.h> | |
43 | #include <asm/pgalloc.h> | |
44 | ||
/* HYP mapping management: map kernel/IO ranges into the HYP page tables. */
int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

/* Stage-2 (guest-physical) page-table management. */
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size);

/* Handle a guest stage-2 abort (page fault, MMIO access, etc.). */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

/* Physical addresses of the HYP translation tables and idmap vector. */
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
c62ee2b2 MZ |
65 | static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) |
66 | { | |
0963e5d0 | 67 | *pte = new_pte; |
c62ee2b2 MZ |
68 | /* |
69 | * flush_pmd_entry just takes a void pointer and cleans the necessary | |
70 | * cache entries, so we can reuse the function for ptes. | |
71 | */ | |
72 | flush_pmd_entry(pte); | |
73 | } | |
74 | ||
94f8e641 CD |
75 | static inline bool kvm_is_write_fault(unsigned long hsr) |
76 | { | |
77 | unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; | |
78 | if (hsr_ec == HSR_EC_IABT) | |
79 | return false; | |
80 | else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR)) | |
81 | return false; | |
82 | else | |
83 | return true; | |
84 | } | |
85 | ||
/* Clean the entire stage-2 pgd to the PoC so table walks see fresh entries. */
static inline void kvm_clean_pgd(pgd_t *pgd)
{
	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}
90 | ||
/* Clean a single pmd entry so the hardware walker observes the update. */
static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
	clean_pmd_entry(pmd);
}
95 | ||
/* Clean a freshly allocated pte table so the walker sees zeroed entries. */
static inline void kvm_clean_pte(pte_t *pte)
{
	clean_pte_table(pte);
}
100 | ||
/* Upgrade a stage-2 pte to read/write; caller is responsible for cleaning. */
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= L_PTE_S2_RDWR;
}
105 | ||
106 | struct kvm; | |
107 | ||
108 | static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) | |
109 | { | |
110 | /* | |
111 | * If we are going to insert an instruction page and the icache is | |
112 | * either VIPT or PIPT, there is a potential problem where the host | |
113 | * (or another VM) may have used the same page as this guest, and we | |
114 | * read incorrect data from the icache. If we're using a PIPT cache, | |
115 | * we can invalidate just that page, but if we are using a VIPT cache | |
116 | * we need to invalidate the entire icache - damn shame - as written | |
117 | * in the ARM ARM (DDI 0406C.b - Page B3-1393). | |
118 | * | |
119 | * VIVT caches are tagged using both the ASID and the VMID and doesn't | |
120 | * need any kind of flushing (DDI 0406C.b - Page B3-1392). | |
121 | */ | |
122 | if (icache_is_pipt()) { | |
123 | unsigned long hva = gfn_to_hva(kvm, gfn); | |
124 | __cpuc_coherent_user_range(hva, hva + PAGE_SIZE); | |
125 | } else if (!icache_is_vivt_asid_tagged()) { | |
126 | /* any kind of VIPT cache */ | |
127 | __flush_icache_all(); | |
128 | } | |
129 | } | |
130 | ||
/* Clean+invalidate a data range to the Point of Coherency. */
#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
133 | #endif /* !__ASSEMBLY__ */ | |
134 | ||
342cd0ab | 135 | #endif /* __ARM_KVM_MMU_H__ */ |