arm/arm64: KVM: Remove unreferenced S2_PGD_ORDER
arch/arm64/include/asm/kvm_mmu.h
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>

/*
 * As we only have the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
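/*
 * Worked example (illustration only, assuming VA_BITS == 39, so
 * PAGE_OFFSET == 0xffffffc000000000):
 *
 *	HYP_PAGE_OFFSET_MASK == 0x0000007fffffffff
 *	HYP_PAGE_OFFSET      == 0x0000004000000000
 *	kernel VA 0xffffffc000100000 masks to HYP VA 0x0000004000100000
 */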

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
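/*
 * For illustration (configuration-dependent): with VA_BITS == 39 and
 * 4KB pages, TRAMPOLINE_VA == 0x0000007ffffff000, i.e. the last
 * possible HYP page.
 */
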
/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD and potentially the PUD, which are
 * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
 * tables use one level of tables less than the kernel).
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES	1
#else
#define KVM_MMU_CACHE_MIN_PAGES	2
#endif
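/*
 * Sketch of why (illustration only, 40-bit IPA): with 64KB pages the
 * Stage-2 walk has two levels, so at most one table (the pte level)
 * may need allocating beyond the pre-allocated ones; with 4KB pages it
 * has three, so a pmd and a pte table may be needed.
 */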

#ifdef __ASSEMBLY__

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
.endm

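/*
 * Illustrative use (hypothetical register contents, assuming
 * VA_BITS == 39):
 *
 *	kern_hyp_va x0	// x0: 0xffffffc000100000 -> 0x0000004000100000
 *
 * This is the assembly counterpart of KERN_TO_HYP() below.
 */
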
#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)

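/*
 * Sketch of the arithmetic (illustration only, assuming VA_BITS == 39,
 * PAGE_OFFSET == 0xffffffc000000000, HYP_PAGE_OFFSET ==
 * 0x0000004000000000):
 *
 *	KERN_TO_HYP(0xffffffc000100000) == 0x0000004000100000
 *
 * For a valid kernel VA this matches the kern_hyp_va masking above.
 */
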
/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)	set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)	set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}

#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

/*
 * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can
 * address the entire IPA input range with a single pgd entry. Note that
 * in this case, the pgd is actually not used by the MMU for Stage-2
 * translations, but is merely a fake pgd used as a data structure for
 * the kernel pgtable macros to work.
 */
#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT	0
#else
#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)

#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))

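/*
 * Worked example (illustration only, figures depend on the kernel
 * configuration): with 4KB pages and VA_BITS == 39 (3 levels,
 * PGDIR_SHIFT == 30), PTRS_PER_S2_PGD_SHIFT == 40 - 30 == 10, giving a
 * 1024-entry Stage-2 pgd; with 4KB pages and VA_BITS == 48 (4 levels,
 * PGDIR_SHIFT == 39), it is only 1 << (40 - 39) == 2 entries.
 */
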
/*
 * If we are concatenating first level stage-2 page tables, we would have
 * at most 16 pointers in the fake PGD, because that's what the
 * architecture allows. In this case, (4 - CONFIG_PGTABLE_LEVELS)
 * represents the first level for the host, and we add 1 to go to the next
 * level (which uses concatenation) for the stage-2 tables.
 */
#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL	(4 - CONFIG_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL	(0)
#endif

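/*
 * For example (illustration only): with 64KB pages and VA_BITS == 42
 * (2 levels), PTRS_PER_S2_PGD == 2048 > 16, so KVM_PREALLOC_LEVEL == 0
 * and the pgd is real. With 4KB pages and VA_BITS == 48 (4 levels),
 * PTRS_PER_S2_PGD == 2 <= 16, so KVM_PREALLOC_LEVEL == 1: the pgd is
 * fake and the hardware walks from the pre-allocated, concatenated pud
 * level, which is what kvm_get_hwpgd() below returns.
 */
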
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
	if (KVM_PREALLOC_LEVEL > 0)
		return PTRS_PER_S2_PGD * PAGE_SIZE;
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}

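/*
 * Size sketch (illustration only): with 4KB pages and VA_BITS == 39,
 * KVM_PREALLOC_LEVEL == 0 and the real pgd takes 1024 * sizeof(pgd_t)
 * == 8KB; with 4KB pages and VA_BITS == 48, each of the 2 fake pgd
 * entries points to a page of concatenated tables, i.e. 2 * PAGE_SIZE.
 */
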
/*
 * A stage2 table page's refcount is raised once per entry it contains,
 * so a count of 1 (the allocation itself) means the table is empty.
 */
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

/*
 * For stage-2 tables (kvm != NULL), tables at levels up to and
 * including KVM_PREALLOC_LEVEL are pre-allocated and must never be
 * reported empty, so that they are not freed while the VM is running.
 */
#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2) must be set */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */