arm/arm64: KVM: Use set/way op trapping to track the state of the caches
arch/arm/include/asm/kvm_mmu.h
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define HYP_PAGE_OFFSET_MASK	UL(~0)
#define HYP_PAGE_OFFSET		PAGE_OFFSET
#define KERN_TO_HYP(kva)	(kva)

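/*
 * Illustrative sketch (not part of the original header): on 32-bit ARM
 * the HYP VA is simply the kernel VA, so the translation is a no-op:
 *
 *	unsigned long kva = (unsigned long)some_kernel_object;
 *	unsigned long hyp_va = KERN_TO_HYP(kva);	/- == kva here -/
 *
 * The arm64 counterpart of this macro performs a real translation into
 * the HYP VA range, which is why callers must always go through
 * KERN_TO_HYP rather than using the kernel VA directly.
 */
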
/*
 * Our virtual mapping for the boot-time MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the vectors
 * page, where no kernel data will ever be shared with HYP.
 */
#define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels, i.e. the worst-case number of table pages that may have to be
 * allocated while mapping a single guest page.
 */
#define KVM_MMU_CACHE_MIN_PAGES	2

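/*
 * A minimal sketch (assumed from the stage2 fault path, not part of this
 * header) of how the constant is consumed: before walking the stage2
 * tables, the fault handler tops up the per-vcpu table-page cache so the
 * map step cannot fail on allocation, roughly:
 *
 *	ret = mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache,
 *				     KVM_MMU_CACHE_MIN_PAGES, KVM_NR_MEM_OBJS);
 *	if (ret)
 *		return ret;
 */
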
#ifndef __ASSEMBLY__

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);
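
/*
 * A hypothetical usage sketch (assumed, not in this header): mapping a
 * pass-through MMIO range, such as a GIC virtual CPU interface, into the
 * guest's IPA space; the base/size identifiers are illustrative:
 *
 *	ret = kvm_phys_addr_ioremap(kvm, guest_ipa, gicv_phys_base,
 *				    gicv_size, true);
 */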

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
	*pmd = new_pmd;
	flush_pmd_entry(pmd);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	*pte = new_pte;
	/*
	 * flush_pmd_entry just takes a void pointer and cleans the necessary
	 * cache entries, so we can reuse the function for ptes.
	 */
	flush_pmd_entry(pte);
}

static inline void kvm_clean_pgd(pgd_t *pgd)
{
	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}

static inline void kvm_clean_pmd(pmd_t *pmd)
{
	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
}

static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
	clean_pmd_entry(pmd);
}

static inline void kvm_clean_pte(pte_t *pte)
{
	clean_pte_table(pte);
}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= L_PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= L_PMD_S2_RDWR;
}

/* Open coded p*d_addr_end that can deal with 64bit addresses */
#define kvm_pgd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

#define kvm_pud_addr_end(addr, end)	(end)

#define kvm_pmd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

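/*
 * A minimal sketch (assumed, not part of this header) of the walk loop
 * these helpers are written for; a hypothetical unmap path would step
 * through the stage2 pgd like this (unmap_puds is illustrative):
 *
 *	phys_addr_t addr = start, next;
 *	do {
 *		next = kvm_pgd_addr_end(addr, end);
 *		if (!pgd_none(*pgd))
 *			unmap_puds(kvm, pgd, addr, next);
 *	} while (pgd++, addr = next, addr != end);
 *
 * The open coding matters because addr and end are 64bit IPAs, while
 * the generic pgd_addr_end() truncates them to unsigned long on
 * 32-bit ARM.
 */
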
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) (0)

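/*
 * Illustrative note (assumed from the stage2 code, not this header):
 * table pages are reference counted, with one reference taken per live
 * entry, so page_count() == 1 means only the allocation reference is
 * left and the table is empty. A hypothetical pte-unmap loop would pair
 * each cleared entry with a put_page() and then test the table:
 *
 *	kvm_set_pte(pte, __pte(0));
 *	put_page(virt_to_page(pte));
 *	...
 *	if (kvm_pte_table_empty(kvm, start_pte))
 *		clear_pmd_entry(kvm, pmd, start_addr);	/- frees the table -/
 */
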
#define KVM_PREALLOC_LEVEL	0

/*
 * With KVM_PREALLOC_LEVEL == 0 the hardware walks the stage2 pgd
 * directly, so no intermediate levels need to be preallocated and
 * these are no-op stubs on 32-bit ARM.
 */
static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
{
	return 0;
}

static inline void kvm_free_hwpgd(struct kvm *kvm) { }

static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	return kvm->arch.pgd;
}

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* SCTLR bit 0 is M (MMU enable), bit 2 is C (data cache enable) */
	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}

static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
					     unsigned long size,
					     bool ipa_uncached)
{
	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc((void *)hva, size);

	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
	 * (or another VM) may have used the same page as this guest, and we
	 * read incorrect data from the icache. If we're using a PIPT cache,
	 * we can invalidate just that page, but if we are using a VIPT cache
	 * we need to invalidate the entire icache - damn shame - as written
	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and do not
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 */
	if (icache_is_pipt()) {
		__cpuc_coherent_user_range(hva, hva + size);
	} else if (!icache_is_vivt_asid_tagged()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

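/*
 * A minimal usage sketch (assumed, not in this header): the stage2
 * fault handler is expected to make a newly mapped page coherent before
 * installing its pte, roughly (the flag name is illustrative):
 *
 *	coherent_cache_guest_page(vcpu, hva, PAGE_SIZE, fault_ipa_uncached);
 *	kvm_set_pte(pte, new_pte);
 *
 * so that a guest running with its caches off still sees the page
 * contents at the point of coherency.
 */
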
#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

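/*
 * Context sketch for the commit subject (hedged, based on the commit
 * message rather than this header): set/way cache ops cannot be
 * virtualized sanely, so they are trapped and the handlers above turn
 * the guest's intent into something safe, roughly:
 *
 *	- trapped DC CSW/CISW/ISW          -> kvm_set_way_flush(vcpu)
 *	- trapped SCTLR write flipping C   -> kvm_toggle_cache(vcpu, was_enabled)
 *
 * i.e. a flush of the whole VM's address space instead of a literal
 * per-set/way operation, tracking cache state across the toggle.
 */
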
#endif	/* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */