/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>
/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 *
 * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
 * macros (the entire kernel runs at EL2).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
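/*
 * For example (illustrative, assuming a 39-bit VA_BITS configuration):
 *
 *	HYP_PAGE_OFFSET_MASK == (1UL << 39) - 1 == 0x0000007fffffffff
 *	PAGE_OFFSET          == 0xffffffc000000000
 *	HYP_PAGE_OFFSET      == 0x0000004000000000
 *
 * so the kernel VA 0xffffffc000123000 becomes the HYP VA
 * 0x0000004000123000 simply by clearing the top bits.
 */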
/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD and potentially the PUD which are
 * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
 * tables use one level of tables less than the kernel).
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES	1
#else
#define KVM_MMU_CACHE_MIN_PAGES	2
#endif
#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
alternative_else
	nop
alternative_endif
.endm
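/*
 * Illustrative use from HYP assembly (hypothetical call site):
 *
 *	ldr	x0, =some_kernel_symbol
 *	kern_hyp_va	x0	// x0 now holds the HYP alias of that VA
 */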
#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
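/*
 * For an address in the kernel linear map, KERN_TO_HYP() computes the same
 * HYP VA as the kern_hyp_va masking performed by the assembly macro above.
 */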
/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}
#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)
/*
 * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, a single
 * pgd entry can address the entire IPA input range, so only one pgd
 * entry is needed.  Note that in this case, the pgd is actually not used
 * by the MMU for Stage-2 translations, but is merely a fake pgd used as
 * a data structure for the kernel pgtable macros to work.
 */
#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT	0
#else
#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)

#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
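/*
 * For example (illustrative configurations):
 *
 *	4K pages, 39-bit VAs (3 levels): PGDIR_SHIFT == 30, so
 *	PTRS_PER_S2_PGD_SHIFT == 40 - 30 == 10 and the stage-2 pgd
 *	has 1024 entries.
 *
 *	4K pages, 48-bit VAs (4 levels): PGDIR_SHIFT == 39, so the
 *	stage-2 pgd only needs two entries.
 */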
/*
 * If we are concatenating first level stage-2 page tables, we would have
 * at most 16 pointers in the fake PGD, because that's what the
 * architecture allows.  In this case, (4 - CONFIG_PGTABLE_LEVELS)
 * represents the first level for the host, and we add 1 to go to the next
 * level (which uses concatenation) for the stage-2 tables.
 */
#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL	(4 - CONFIG_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL	(0)
#endif
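/*
 * For example (illustrative): with 4K pages and 48-bit VAs
 * (CONFIG_PGTABLE_LEVELS == 4), PTRS_PER_S2_PGD == 2 <= 16, so
 * KVM_PREALLOC_LEVEL == 1: the fake pgd and the concatenated first
 * hardware level below it are pre-allocated.  With 4K pages and
 * 39-bit VAs, PTRS_PER_S2_PGD == 1024 > 16, KVM_PREALLOC_LEVEL == 0,
 * and the pgd handed to the hardware is the real first level.
 */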
/*
 * Return the first level of the stage-2 page table that is actually
 * walked by the hardware, skipping any pre-allocated fake levels.
 */
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}
static inline unsigned int kvm_get_hwpgd_size(void)
{
	/*
	 * When levels are pre-allocated, the concatenated first hardware
	 * level needs one full page per fake pgd entry.
	 */
	if (KVM_PREALLOC_LEVEL > 0)
		return PTRS_PER_S2_PGD * PAGE_SIZE;
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);

	/*
	 * The refcount of a page table page is raised once per mapped entry,
	 * so a count of one means the table holds no entries.
	 */
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
/*
 * Pre-allocated table levels must not be freed, so they are never reported
 * as empty for a VM's stage-2 tables (kvm != NULL).
 */
#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif
struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2) must be set. */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}
#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	/* An ID_AA64MMFR1_EL1.VMIDBits field value of 2 means 16-bit VMIDs. */
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */