/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39-bit VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */

#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
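
/*
 * Worked example (illustrative, not from the original source): with
 * VA_BITS == 39, HYP_PAGE_OFFSET_HIGH_MASK is (1UL << 39) - 1 ==
 * 0x7fffffffff, so the kernel VA 0xffffffc012345678 masks down to the
 * HYP VA 0x4012345678. Applying HYP_PAGE_OFFSET_LOW_MASK on top would
 * additionally clear bit 38, selecting the bottom half of the range.
 */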

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * This generates the following sequences:
 * - High mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		nop
 * - Low mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
 * - VHE:
 *		nop
 *		nop
 *
 * The "low mask" version works because the low mask is a strict
 * subset of the high mask, so the first "and" is redundant but
 * harmless. This should be completely invisible on any viable CPU.
 */
.macro kern_hyp_va reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else_nop_endif
alternative_if ARM64_HYP_OFFSET_LOW
	and	\reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
alternative_else_nop_endif
.endm
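
/*
 * Illustrative use of the macro above (hypothetical, not from the
 * original source): x0 holds a kernel VA and is rewritten in place:
 *
 *	mov	x0, x1			// x1: some kernel VA
 *	kern_hyp_va x0			// x0 now holds the HYP alias
 */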

#else

#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
	asm volatile(ALTERNATIVE("nop",
				 "and %0, %0, %1",
				 ARM64_HYP_OFFSET_LOW)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
	return v;
}

#define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
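
/*
 * Illustrative use (hypothetical caller, not part of this file):
 *
 *	struct kvm_cpu_context *hyp_ctxt = kern_hyp_va(host_ctxt);
 *
 * The typeof() cast above preserves the pointer type of the argument,
 * so call sites need no explicit casting.
 */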

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
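
/*
 * For scale (a note added here, not from the original source): a
 * 40-bit IPA gives each guest 2^40 bytes (1TB) of guest physical
 * address space; KVM_PHYS_MASK extracts the offset of a guest
 * physical address within that space.
 */
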
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~PTE_S2_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_S2_XN;
	return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
	pteval_t old_pteval, pteval;

	pteval = READ_ONCE(pte_val(*ptep));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		/* Retry if the PTE changed under us between read and update */
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *ptep)
{
	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *ptep)
{
	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
{
	kvm_set_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
{
	return kvm_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
{
	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);

	/*
	 * Page table pages take one page reference per live entry, so
	 * a count of one (the allocation itself) means the table is
	 * empty.
	 */
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif
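
/*
 * When the PMD or PUD level is folded, the intermediate table is part
 * of the level above and is never freed on its own, hence the
 * constant 0 in the folded variants above.
 */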

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* SCTLR_EL1: bit 0 is M (MMU enable), bit 2 is C (D-cache enable) */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}
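
/*
 * The three helpers above clean and invalidate the data cache to the
 * Point of Coherency, so that a guest accessing these pages with its
 * MMU and caches disabled still observes up-to-date memory.
 */
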
#define kvm_virt_to_phys(x)		__pa_symbol(x)

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap_level();
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return idmap_ptrs_per_pgd;
}

/*
 * Can't use pgd_populate here, because the extended idmap adds an extra
 * level above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the
 * extended idmap), and pgd_populate is only available if
 * CONFIG_PGTABLE_LEVELS == 4.
 */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;
	u64 pgd_addr;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
	merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}
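
/*
 * Worked example (illustrative, not from the original source): with
 * VA_BITS == 48 and hyp_idmap_start == 0x0001800000000000, idmap_idx
 * is 0x0001800000000000 >> 48 == 1, i.e. the second entry of the
 * extended-level table, well clear of entry 0 used for the HYP map.
 */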

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	/* A VMIDBits field value of 2 means 16-bit VMIDs (ARMv8.1-VMID16) */
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu.h>

static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kvm_ksym_ref(__kvm_hyp_vector);

	if (data->fn) {
		/* Each hardening slot holds a full 2K exception vector table */
		vect = __bp_harden_hyp_vecs_start +
		       data->hyp_vectors_slot * SZ_2K;

		if (!has_vhe())
			vect = lm_alias(vect);
	}

	return vect;
}

static inline int kvm_map_vectors(void)
{
	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
				   PAGE_HYP_EXEC);
}

#else
static inline void *kvm_get_hyp_vector(void)
{
	return kvm_ksym_ref(__kvm_hyp_vector);
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */