Commit | Line | Data
---|---|---
d94d71cb | 1 | // SPDX-License-Identifier: GPL-2.0-only
749cf76c CD | 2 | /*
3 |  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4 |  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
749cf76c | 5 |  */
342cd0ab CD | 6 |
7 | #include <linux/mman.h>
8 | #include <linux/kvm_host.h>
9 | #include <linux/io.h>
ad361f09 | 10 | #include <linux/hugetlb.h>
196f878a | 11 | #include <linux/sched/signal.h>
45e96ea6 | 12 | #include <trace/events/kvm.h>
342cd0ab | 13 | #include <asm/pgalloc.h>
94f8e641 | 14 | #include <asm/cacheflush.h>
342cd0ab CD | 15 | #include <asm/kvm_arm.h>
16 | #include <asm/kvm_mmu.h>
0db5e022 | 17 | #include <asm/kvm_ras.h>
d5d8184d | 18 | #include <asm/kvm_asm.h>
94f8e641 | 19 | #include <asm/kvm_emulate.h>
1e947bad | 20 | #include <asm/virt.h>
d5d8184d CD | 21 |
22 | #include "trace.h"
342cd0ab | 23 |
5a677ce0 | 24 | static pgd_t *boot_hyp_pgd;
2fb41059 | 25 | static pgd_t *hyp_pgd;
e4c5a685 | 26 | static pgd_t *merged_hyp_pgd;
342cd0ab CD | 27 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
28 |
5a677ce0 MZ | 29 | static unsigned long hyp_idmap_start;
30 | static unsigned long hyp_idmap_end;
31 | static phys_addr_t hyp_idmap_vector;
32 |
e3f019b3 MZ | 33 | static unsigned long io_map_base;
34 |
38f791a4 | 35 | #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
5d4e08c4 | 36 |
15a49a44 MS | 37 | #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
38 | #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
39 |
6d674e28 MZ | 40 | static bool is_iomap(unsigned long flags)
41 | {
42 |         return flags & KVM_S2PTE_FLAG_IS_IOMAP;
43 | }
44 |
15a49a44 MS | 45 | static bool memslot_is_logging(struct kvm_memory_slot *memslot)
46 | {
15a49a44 | 47 |         return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
7276030a MS | 48 | }
49 |
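A memslot is treated as dirty-logging when userspace enabled KVM_MEM_LOG_DIRTY_PAGES on a writable slot, which is what allocates dirty_bitmap. A minimal sketch of how a fault path might branch on this helper (the force_pte variable is illustrative, not taken from this listing):

        if (memslot_is_logging(memslot)) {
                /* Map single pages so each dirty page can be tracked in the bitmap. */
                force_pte = true;
                flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
        }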
50 | /**
51 |  * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
52 |  * @kvm: pointer to kvm structure.
53 |  *
54 |  * Interface to HYP function to flush all VM TLB entries
55 |  */
56 | void kvm_flush_remote_tlbs(struct kvm *kvm)
57 | {
58 |         kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
15a49a44 | 59 | }
ad361f09 | 60 |
48762767 | 61 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
d5d8184d | 62 | {
8684e701 | 63 |         kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
d5d8184d CD | 64 | }
65 |
363ef89f MZ | 66 | /*
67 |  * D-Cache management functions. They take the page table entries by
68 |  * value, as they are flushing the cache using the kernel mapping (or
69 |  * kmap on 32bit).
70 |  */
71 | static void kvm_flush_dcache_pte(pte_t pte)
72 | {
73 |         __kvm_flush_dcache_pte(pte);
74 | }
75 |
76 | static void kvm_flush_dcache_pmd(pmd_t pmd)
77 | {
78 |         __kvm_flush_dcache_pmd(pmd);
79 | }
80 |
81 | static void kvm_flush_dcache_pud(pud_t pud)
82 | {
83 |         __kvm_flush_dcache_pud(pud);
84 | }
85 |
e6fab544 AB | 86 | static bool kvm_is_device_pfn(unsigned long pfn)
87 | {
88 |         return !pfn_valid(pfn);
89 | }
90 |
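A pfn for which pfn_valid() fails has no struct page backing it, so it is assumed to be device memory rather than RAM; such pages get device attributes and are skipped by the dcache-flush paths below. A minimal sketch of the distinction (the mem_type variable is illustrative, not from this listing):

        pgprot_t mem_type = PAGE_S2;
        if (kvm_is_device_pfn(pfn))
                mem_type = PAGE_S2_DEVICE;      /* non-cacheable; no dcache maintenance needed */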
15a49a44 MS | 91 | /**
92 |  * stage2_dissolve_pmd() - clear and flush huge PMD entry
93 |  * @kvm: pointer to kvm structure.
94 |  * @addr: IPA
95 |  * @pmd: pmd pointer for IPA
96 |  *
8324c3d5 | 97 |  * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
15a49a44 MS | 98 |  */
99 | static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
100 | {
bbb3b6b3 | 101 |         if (!pmd_thp_or_huge(*pmd))
15a49a44 MS | 102 |                 return;
103 |
104 |         pmd_clear(pmd);
105 |         kvm_tlb_flush_vmid_ipa(kvm, addr);
106 |         put_page(virt_to_page(pmd));
107 | }
108 |
b8e0ba7c PA |
109 | /** |
110 | * stage2_dissolve_pud() - clear and flush huge PUD entry | |
111 | * @kvm: pointer to kvm structure. | |
112 | * @addr: IPA | |
113 | * @pud: pud pointer for IPA | |
114 | * | |
8324c3d5 | 115 | * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs. |
b8e0ba7c PA |
116 | */ |
117 | static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) | |
118 | { | |
119 | if (!stage2_pud_huge(kvm, *pudp)) | |
120 | return; | |
121 | ||
122 | stage2_pud_clear(kvm, pudp); | |
123 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
124 | put_page(virt_to_page(pudp)); | |
125 | } | |
126 | ||
7a1c831e | 127 | static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) |
979acd5e | 128 | { |
e9f63768 | 129 | p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL); |
e55cac5b | 130 | stage2_pgd_clear(kvm, pgd); |
4f853a71 | 131 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
e9f63768 | 132 | stage2_p4d_free(kvm, p4d_table); |
4f853a71 | 133 | put_page(virt_to_page(pgd)); |
979acd5e MZ |
134 | } |
135 | ||
e9f63768 MR |
136 | static void clear_stage2_p4d_entry(struct kvm *kvm, p4d_t *p4d, phys_addr_t addr) |
137 | { | |
138 | pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0); | |
139 | stage2_p4d_clear(kvm, p4d); | |
140 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
141 | stage2_pud_free(kvm, pud_table); | |
142 | put_page(virt_to_page(p4d)); | |
143 | } | |
144 | ||
7a1c831e | 145 | static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) |
342cd0ab | 146 | { |
e55cac5b SP |
147 | pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0); |
148 | VM_BUG_ON(stage2_pud_huge(kvm, *pud)); | |
149 | stage2_pud_clear(kvm, pud); | |
4f853a71 | 150 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
e55cac5b | 151 | stage2_pmd_free(kvm, pmd_table); |
4f728276 MZ |
152 | put_page(virt_to_page(pud)); |
153 | } | |
342cd0ab | 154 | |
7a1c831e | 155 | static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) |
4f728276 | 156 | { |
4f853a71 | 157 | pte_t *pte_table = pte_offset_kernel(pmd, 0); |
bbb3b6b3 | 158 | VM_BUG_ON(pmd_thp_or_huge(*pmd)); |
4f853a71 CD |
159 | pmd_clear(pmd); |
160 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
14b94d07 | 161 | free_page((unsigned long)pte_table); |
4f728276 MZ |
162 | put_page(virt_to_page(pmd)); |
163 | } | |
164 | ||
88dc25e8 MZ |
165 | static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte) |
166 | { | |
167 | WRITE_ONCE(*ptep, new_pte); | |
168 | dsb(ishst); | |
169 | } | |
170 | ||
171 | static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd) | |
172 | { | |
173 | WRITE_ONCE(*pmdp, new_pmd); | |
174 | dsb(ishst); | |
175 | } | |
176 | ||
0db9dd8a MZ |
177 | static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep) |
178 | { | |
179 | kvm_set_pmd(pmdp, kvm_mk_pmd(ptep)); | |
180 | } | |
181 | ||
182 | static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp) | |
183 | { | |
184 | WRITE_ONCE(*pudp, kvm_mk_pud(pmdp)); | |
185 | dsb(ishst); | |
186 | } | |
187 | ||
e9f63768 | 188 | static inline void kvm_p4d_populate(p4d_t *p4dp, pud_t *pudp) |
0db9dd8a | 189 | { |
e9f63768 | 190 | WRITE_ONCE(*p4dp, kvm_mk_p4d(pudp)); |
0db9dd8a MZ |
191 | dsb(ishst); |
192 | } | |
193 | ||
e9f63768 MR |
194 | static inline void kvm_pgd_populate(pgd_t *pgdp, p4d_t *p4dp) |
195 | { | |
196 | #ifndef __PAGETABLE_P4D_FOLDED | |
197 | WRITE_ONCE(*pgdp, kvm_mk_pgd(p4dp)); | |
198 | dsb(ishst); | |
199 | #endif | |
200 | } | |
201 | ||
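Each of the populate helpers above follows the same publication pattern: a single-copy-atomic store of the new table entry followed by dsb(ishst), so the hardware table walker can never observe a half-written descriptor and the update is ordered before any later page-table or TLB work. Reduced to its essence (entryp/new_entry are placeholders):

        WRITE_ONCE(*entryp, new_entry); /* atomically publish the new descriptor */
        dsb(ishst);                     /* order it before subsequent table/TLB maintenance */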
363ef89f MZ |
202 | /* |
203 | * Unmapping vs dcache management: | |
204 | * | |
205 | * If a guest maps certain memory pages as uncached, all writes will | |
206 | * bypass the data cache and go directly to RAM. However, the CPUs | |
207 | * can still speculate reads (not writes) and fill cache lines with | |
208 | * data. | |
209 | * | |
210 | * Those cache lines will be *clean* cache lines though, so a | |
211 | * clean+invalidate operation is equivalent to an invalidate | |
212 | * operation, because no cache lines are marked dirty. | |
213 | * | |
214 | * Those clean cache lines could be filled prior to an uncached write | |
215 | * by the guest, and the cache coherent IO subsystem would therefore | |
216 | * end up writing old data to disk. | |
217 | * | |
218 | * This is why right after unmapping a page/section and invalidating | |
219 | * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure | |
220 | * the IO subsystem will never hit in the cache. | |
e48d53a9 MZ |
221 | * |
222 | * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as | |
223 | * we then fully enforce cacheability of RAM, no matter what the guest | |
224 | * does. | |
363ef89f | 225 | */ |
7a1c831e | 226 | static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd, |
4f853a71 | 227 | phys_addr_t addr, phys_addr_t end) |
4f728276 | 228 | { |
4f853a71 CD |
229 | phys_addr_t start_addr = addr; |
230 | pte_t *pte, *start_pte; | |
231 | ||
232 | start_pte = pte = pte_offset_kernel(pmd, addr); | |
233 | do { | |
234 | if (!pte_none(*pte)) { | |
363ef89f MZ |
235 | pte_t old_pte = *pte; |
236 | ||
4f853a71 | 237 | kvm_set_pte(pte, __pte(0)); |
4f853a71 | 238 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
363ef89f MZ |
239 | |
240 | /* No need to invalidate the cache for device mappings */ | |
0de58f85 | 241 | if (!kvm_is_device_pfn(pte_pfn(old_pte))) |
363ef89f MZ |
242 | kvm_flush_dcache_pte(old_pte); |
243 | ||
244 | put_page(virt_to_page(pte)); | |
4f853a71 CD |
245 | } |
246 | } while (pte++, addr += PAGE_SIZE, addr != end); | |
247 | ||
e55cac5b | 248 | if (stage2_pte_table_empty(kvm, start_pte)) |
7a1c831e | 249 | clear_stage2_pmd_entry(kvm, pmd, start_addr); |
342cd0ab CD |
250 | } |
251 | ||
7a1c831e | 252 | static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud, |
4f853a71 | 253 | phys_addr_t addr, phys_addr_t end) |
000d3996 | 254 | { |
4f853a71 CD |
255 | phys_addr_t next, start_addr = addr; |
256 | pmd_t *pmd, *start_pmd; | |
000d3996 | 257 | |
e55cac5b | 258 | start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr); |
4f853a71 | 259 | do { |
e55cac5b | 260 | next = stage2_pmd_addr_end(kvm, addr, end); |
4f853a71 | 261 | if (!pmd_none(*pmd)) { |
bbb3b6b3 | 262 | if (pmd_thp_or_huge(*pmd)) { |
363ef89f MZ |
263 | pmd_t old_pmd = *pmd; |
264 | ||
4f853a71 CD |
265 | pmd_clear(pmd); |
266 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
363ef89f MZ |
267 | |
268 | kvm_flush_dcache_pmd(old_pmd); | |
269 | ||
4f853a71 CD |
270 | put_page(virt_to_page(pmd)); |
271 | } else { | |
7a1c831e | 272 | unmap_stage2_ptes(kvm, pmd, addr, next); |
4f853a71 | 273 | } |
ad361f09 | 274 | } |
4f853a71 | 275 | } while (pmd++, addr = next, addr != end); |
ad361f09 | 276 | |
e55cac5b | 277 | if (stage2_pmd_table_empty(kvm, start_pmd)) |
7a1c831e | 278 | clear_stage2_pud_entry(kvm, pud, start_addr); |
4f853a71 | 279 | } |
000d3996 | 280 | |
e9f63768 | 281 | static void unmap_stage2_puds(struct kvm *kvm, p4d_t *p4d, |
4f853a71 CD |
282 | phys_addr_t addr, phys_addr_t end) |
283 | { | |
284 | phys_addr_t next, start_addr = addr; | |
285 | pud_t *pud, *start_pud; | |
4f728276 | 286 | |
e9f63768 | 287 | start_pud = pud = stage2_pud_offset(kvm, p4d, addr); |
4f853a71 | 288 | do { |
e55cac5b SP |
289 | next = stage2_pud_addr_end(kvm, addr, end); |
290 | if (!stage2_pud_none(kvm, *pud)) { | |
291 | if (stage2_pud_huge(kvm, *pud)) { | |
363ef89f MZ |
292 | pud_t old_pud = *pud; |
293 | ||
e55cac5b | 294 | stage2_pud_clear(kvm, pud); |
4f853a71 | 295 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
363ef89f | 296 | kvm_flush_dcache_pud(old_pud); |
4f853a71 CD |
297 | put_page(virt_to_page(pud)); |
298 | } else { | |
7a1c831e | 299 | unmap_stage2_pmds(kvm, pud, addr, next); |
4f728276 MZ |
300 | } |
301 | } | |
4f853a71 | 302 | } while (pud++, addr = next, addr != end); |
4f728276 | 303 | |
e55cac5b | 304 | if (stage2_pud_table_empty(kvm, start_pud)) |
e9f63768 MR |
305 | clear_stage2_p4d_entry(kvm, p4d, start_addr); |
306 | } | |
307 | ||
308 | static void unmap_stage2_p4ds(struct kvm *kvm, pgd_t *pgd, | |
309 | phys_addr_t addr, phys_addr_t end) | |
310 | { | |
311 | phys_addr_t next, start_addr = addr; | |
312 | p4d_t *p4d, *start_p4d; | |
313 | ||
314 | start_p4d = p4d = stage2_p4d_offset(kvm, pgd, addr); | |
315 | do { | |
316 | next = stage2_p4d_addr_end(kvm, addr, end); | |
317 | if (!stage2_p4d_none(kvm, *p4d)) | |
318 | unmap_stage2_puds(kvm, p4d, addr, next); | |
319 | } while (p4d++, addr = next, addr != end); | |
320 | ||
321 | if (stage2_p4d_table_empty(kvm, start_p4d)) | |
7a1c831e | 322 | clear_stage2_pgd_entry(kvm, pgd, start_addr); |
4f853a71 CD |
323 | } |
324 | ||
7a1c831e SP |
325 | /** |
326 | * unmap_stage2_range -- Clear stage2 page table entries to unmap a range | |
327 | * @kvm: The VM pointer | |
328 | * @start: The intermediate physical base address of the range to unmap | |
329 | * @size: The size of the area to unmap | |
330 | * | |
331 | * Clear a range of stage-2 mappings, lowering the various ref-counts. Must | |
332 | * be called while holding mmu_lock (unless for freeing the stage2 pgd before | |
333 | * destroying the VM), otherwise another faulting VCPU may come in and mess | |
334 | * with things behind our backs. | |
335 | */ | |
336 | static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) | |
4f853a71 CD |
337 | { |
338 | pgd_t *pgd; | |
339 | phys_addr_t addr = start, end = start + size; | |
340 | phys_addr_t next; | |
341 | ||
8b3405e3 | 342 | assert_spin_locked(&kvm->mmu_lock); |
47a91b72 JH |
343 | WARN_ON(size & ~PAGE_MASK); |
344 | ||
e55cac5b | 345 | pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr); |
4f853a71 | 346 | do { |
0c428a6a SP |
347 | /* |
348 | * Make sure the page table is still active, as another thread | |
349 | * could have possibly freed the page table, while we released | |
350 | * the lock. | |
351 | */ | |
352 | if (!READ_ONCE(kvm->arch.pgd)) | |
353 | break; | |
e55cac5b SP |
354 | next = stage2_pgd_addr_end(kvm, addr, end); |
355 | if (!stage2_pgd_none(kvm, *pgd)) | |
e9f63768 | 356 | unmap_stage2_p4ds(kvm, pgd, addr, next); |
8b3405e3 SP |
357 | /* |
358 | * If the range is too large, release the kvm->mmu_lock | |
359 | * to prevent starvation and lockup detector warnings. | |
360 | */ | |
361 | if (next != end) | |
362 | cond_resched_lock(&kvm->mmu_lock); | |
4f853a71 | 363 | } while (pgd++, addr = next, addr != end); |
000d3996 MZ |
364 | } |
365 | ||
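As the comment above notes, unmap_stage2_range() must run under kvm->mmu_lock; a minimal sketch of the calling pattern used by the memslot teardown paths later in this file (gpa and size come from the slot being unmapped):

        spin_lock(&kvm->mmu_lock);
        unmap_stage2_range(kvm, gpa, size);
        spin_unlock(&kvm->mmu_lock);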
9d218a1f MZ |
366 | static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, |
367 | phys_addr_t addr, phys_addr_t end) | |
368 | { | |
369 | pte_t *pte; | |
370 | ||
371 | pte = pte_offset_kernel(pmd, addr); | |
372 | do { | |
0de58f85 | 373 | if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte))) |
363ef89f | 374 | kvm_flush_dcache_pte(*pte); |
9d218a1f MZ |
375 | } while (pte++, addr += PAGE_SIZE, addr != end); |
376 | } | |
377 | ||
378 | static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, | |
379 | phys_addr_t addr, phys_addr_t end) | |
380 | { | |
381 | pmd_t *pmd; | |
382 | phys_addr_t next; | |
383 | ||
e55cac5b | 384 | pmd = stage2_pmd_offset(kvm, pud, addr); |
9d218a1f | 385 | do { |
e55cac5b | 386 | next = stage2_pmd_addr_end(kvm, addr, end); |
9d218a1f | 387 | if (!pmd_none(*pmd)) { |
bbb3b6b3 | 388 | if (pmd_thp_or_huge(*pmd)) |
363ef89f MZ |
389 | kvm_flush_dcache_pmd(*pmd); |
390 | else | |
9d218a1f | 391 | stage2_flush_ptes(kvm, pmd, addr, next); |
9d218a1f MZ |
392 | } |
393 | } while (pmd++, addr = next, addr != end); | |
394 | } | |
395 | ||
e9f63768 | 396 | static void stage2_flush_puds(struct kvm *kvm, p4d_t *p4d, |
9d218a1f MZ |
397 | phys_addr_t addr, phys_addr_t end) |
398 | { | |
399 | pud_t *pud; | |
400 | phys_addr_t next; | |
401 | ||
e9f63768 | 402 | pud = stage2_pud_offset(kvm, p4d, addr); |
9d218a1f | 403 | do { |
e55cac5b SP |
404 | next = stage2_pud_addr_end(kvm, addr, end); |
405 | if (!stage2_pud_none(kvm, *pud)) { | |
406 | if (stage2_pud_huge(kvm, *pud)) | |
363ef89f MZ |
407 | kvm_flush_dcache_pud(*pud); |
408 | else | |
9d218a1f | 409 | stage2_flush_pmds(kvm, pud, addr, next); |
9d218a1f MZ |
410 | } |
411 | } while (pud++, addr = next, addr != end); | |
412 | } | |
413 | ||
e9f63768 MR |
414 | static void stage2_flush_p4ds(struct kvm *kvm, pgd_t *pgd, |
415 | phys_addr_t addr, phys_addr_t end) | |
416 | { | |
417 | p4d_t *p4d; | |
418 | phys_addr_t next; | |
419 | ||
420 | p4d = stage2_p4d_offset(kvm, pgd, addr); | |
421 | do { | |
422 | next = stage2_p4d_addr_end(kvm, addr, end); | |
423 | if (!stage2_p4d_none(kvm, *p4d)) | |
424 | stage2_flush_puds(kvm, p4d, addr, next); | |
425 | } while (p4d++, addr = next, addr != end); | |
426 | } | |
427 | ||
9d218a1f MZ |
428 | static void stage2_flush_memslot(struct kvm *kvm, |
429 | struct kvm_memory_slot *memslot) | |
430 | { | |
431 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | |
432 | phys_addr_t end = addr + PAGE_SIZE * memslot->npages; | |
433 | phys_addr_t next; | |
434 | pgd_t *pgd; | |
435 | ||
e55cac5b | 436 | pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr); |
9d218a1f | 437 | do { |
e55cac5b SP |
438 | next = stage2_pgd_addr_end(kvm, addr, end); |
439 | if (!stage2_pgd_none(kvm, *pgd)) | |
e9f63768 | 440 | stage2_flush_p4ds(kvm, pgd, addr, next); |
48c963e3 JY |
441 | |
442 | if (next != end) | |
443 | cond_resched_lock(&kvm->mmu_lock); | |
9d218a1f MZ |
444 | } while (pgd++, addr = next, addr != end); |
445 | } | |
446 | ||
447 | /** | |
448 | * stage2_flush_vm - Invalidate cache for pages mapped in stage 2 | |
449 | * @kvm: The struct kvm pointer | |
450 | * | |
451 | * Go through the stage 2 page tables and invalidate any cache lines | |
452 | * backing memory already mapped to the VM. | |
453 | */ | |
3c1e7165 | 454 | static void stage2_flush_vm(struct kvm *kvm) |
9d218a1f MZ |
455 | { |
456 | struct kvm_memslots *slots; | |
457 | struct kvm_memory_slot *memslot; | |
458 | int idx; | |
459 | ||
460 | idx = srcu_read_lock(&kvm->srcu); | |
461 | spin_lock(&kvm->mmu_lock); | |
462 | ||
463 | slots = kvm_memslots(kvm); | |
464 | kvm_for_each_memslot(memslot, slots) | |
465 | stage2_flush_memslot(kvm, memslot); | |
466 | ||
467 | spin_unlock(&kvm->mmu_lock); | |
468 | srcu_read_unlock(&kvm->srcu, idx); | |
469 | } | |
470 | ||
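stage2_flush_vm() takes both locks protecting what it walks: the SRCU read lock for the memslot array, then mmu_lock for the stage-2 tables. The nesting used here is:

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        /* ... walk memslots and stage-2 tables ... */
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);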
64f32497 SP |
471 | static void clear_hyp_pgd_entry(pgd_t *pgd) |
472 | { | |
e9f63768 | 473 | p4d_t *p4d_table __maybe_unused = p4d_offset(pgd, 0UL); |
64f32497 | 474 | pgd_clear(pgd); |
e9f63768 | 475 | p4d_free(NULL, p4d_table); |
64f32497 SP |
476 | put_page(virt_to_page(pgd)); |
477 | } | |
478 | ||
e9f63768 MR |
479 | static void clear_hyp_p4d_entry(p4d_t *p4d) |
480 | { | |
481 | pud_t *pud_table __maybe_unused = pud_offset(p4d, 0UL); | |
482 | VM_BUG_ON(p4d_huge(*p4d)); | |
483 | p4d_clear(p4d); | |
484 | pud_free(NULL, pud_table); | |
485 | put_page(virt_to_page(p4d)); | |
486 | } | |
487 | ||
64f32497 SP |
488 | static void clear_hyp_pud_entry(pud_t *pud) |
489 | { | |
490 | pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0); | |
491 | VM_BUG_ON(pud_huge(*pud)); | |
492 | pud_clear(pud); | |
493 | pmd_free(NULL, pmd_table); | |
494 | put_page(virt_to_page(pud)); | |
495 | } | |
496 | ||
497 | static void clear_hyp_pmd_entry(pmd_t *pmd) | |
498 | { | |
499 | pte_t *pte_table = pte_offset_kernel(pmd, 0); | |
500 | VM_BUG_ON(pmd_thp_or_huge(*pmd)); | |
501 | pmd_clear(pmd); | |
502 | pte_free_kernel(NULL, pte_table); | |
503 | put_page(virt_to_page(pmd)); | |
504 | } | |
505 | ||
506 | static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) | |
507 | { | |
508 | pte_t *pte, *start_pte; | |
509 | ||
510 | start_pte = pte = pte_offset_kernel(pmd, addr); | |
511 | do { | |
512 | if (!pte_none(*pte)) { | |
513 | kvm_set_pte(pte, __pte(0)); | |
514 | put_page(virt_to_page(pte)); | |
515 | } | |
516 | } while (pte++, addr += PAGE_SIZE, addr != end); | |
517 | ||
518 | if (hyp_pte_table_empty(start_pte)) | |
519 | clear_hyp_pmd_entry(pmd); | |
520 | } | |
521 | ||
522 | static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) | |
523 | { | |
524 | phys_addr_t next; | |
525 | pmd_t *pmd, *start_pmd; | |
526 | ||
527 | start_pmd = pmd = pmd_offset(pud, addr); | |
528 | do { | |
529 | next = pmd_addr_end(addr, end); | |
530 | /* Hyp doesn't use huge pmds */ | |
531 | if (!pmd_none(*pmd)) | |
532 | unmap_hyp_ptes(pmd, addr, next); | |
533 | } while (pmd++, addr = next, addr != end); | |
534 | ||
535 | if (hyp_pmd_table_empty(start_pmd)) | |
536 | clear_hyp_pud_entry(pud); | |
537 | } | |
538 | ||
e9f63768 | 539 | static void unmap_hyp_puds(p4d_t *p4d, phys_addr_t addr, phys_addr_t end) |
64f32497 SP |
540 | { |
541 | phys_addr_t next; | |
542 | pud_t *pud, *start_pud; | |
543 | ||
e9f63768 | 544 | start_pud = pud = pud_offset(p4d, addr); |
64f32497 SP |
545 | do { |
546 | next = pud_addr_end(addr, end); | |
547 | /* Hyp doesn't use huge puds */ | |
548 | if (!pud_none(*pud)) | |
549 | unmap_hyp_pmds(pud, addr, next); | |
550 | } while (pud++, addr = next, addr != end); | |
551 | ||
552 | if (hyp_pud_table_empty(start_pud)) | |
e9f63768 MR |
553 | clear_hyp_p4d_entry(p4d); |
554 | } | |
555 | ||
556 | static void unmap_hyp_p4ds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end) | |
557 | { | |
558 | phys_addr_t next; | |
559 | p4d_t *p4d, *start_p4d; | |
560 | ||
561 | start_p4d = p4d = p4d_offset(pgd, addr); | |
562 | do { | |
563 | next = p4d_addr_end(addr, end); | |
564 | /* Hyp doesn't use huge p4ds */ | |
565 | if (!p4d_none(*p4d)) | |
566 | unmap_hyp_puds(p4d, addr, next); | |
567 | } while (p4d++, addr = next, addr != end); | |
568 | ||
569 | if (hyp_p4d_table_empty(start_p4d)) | |
64f32497 SP |
570 | clear_hyp_pgd_entry(pgd); |
571 | } | |
572 | ||
3ddd4556 MZ |
573 | static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd) |
574 | { | |
575 | return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1); | |
576 | } | |
577 | ||
578 | static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd, | |
579 | phys_addr_t start, u64 size) | |
64f32497 SP |
580 | { |
581 | pgd_t *pgd; | |
582 | phys_addr_t addr = start, end = start + size; | |
583 | phys_addr_t next; | |
584 | ||
585 | /* | |
586 | * We don't unmap anything from HYP, except at the hyp tear down. | |
587 | * Hence, we don't have to invalidate the TLBs here. | |
588 | */ | |
3ddd4556 | 589 | pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd); |
64f32497 SP |
590 | do { |
591 | next = pgd_addr_end(addr, end); | |
592 | if (!pgd_none(*pgd)) | |
e9f63768 | 593 | unmap_hyp_p4ds(pgd, addr, next); |
64f32497 SP |
594 | } while (pgd++, addr = next, addr != end); |
595 | } | |
596 | ||
3ddd4556 MZ |
597 | static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size) |
598 | { | |
599 | __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size); | |
600 | } | |
601 | ||
602 | static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size) | |
603 | { | |
604 | __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size); | |
605 | } | |
606 | ||
342cd0ab | 607 | /** |
4f728276 | 608 | * free_hyp_pgds - free Hyp-mode page tables |
342cd0ab | 609 | * |
5a677ce0 MZ |
610 | * Assumes hyp_pgd is a page table used strictly in Hyp-mode and |
611 | * therefore contains either mappings in the kernel memory area (above | |
e3f019b3 | 612 | * PAGE_OFFSET), or device mappings in the idmap range. |
5a677ce0 | 613 | * |
e3f019b3 MZ |
614 | * boot_hyp_pgd should only map the idmap range, and is only used in |
615 | * the extended idmap case. | |
342cd0ab | 616 | */ |
4f728276 | 617 | void free_hyp_pgds(void) |
342cd0ab | 618 | { |
e3f019b3 MZ |
619 | pgd_t *id_pgd; |
620 | ||
d157f4a5 | 621 | mutex_lock(&kvm_hyp_pgd_mutex); |
5a677ce0 | 622 | |
e3f019b3 MZ |
623 | id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd; |
624 | ||
625 | if (id_pgd) { | |
626 | /* In case we never called hyp_mmu_init() */ | |
627 | if (!io_map_base) | |
628 | io_map_base = hyp_idmap_start; | |
629 | unmap_hyp_idmap_range(id_pgd, io_map_base, | |
630 | hyp_idmap_start + PAGE_SIZE - io_map_base); | |
631 | } | |
632 | ||
26781f9c | 633 | if (boot_hyp_pgd) { |
26781f9c MZ |
634 | free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order); |
635 | boot_hyp_pgd = NULL; | |
636 | } | |
637 | ||
4f728276 | 638 | if (hyp_pgd) { |
7839c672 MZ |
639 | unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET), |
640 | (uintptr_t)high_memory - PAGE_OFFSET); | |
d4cb9df5 | 641 | |
38f791a4 | 642 | free_pages((unsigned long)hyp_pgd, hyp_pgd_order); |
d157f4a5 | 643 | hyp_pgd = NULL; |
4f728276 | 644 | } |
e4c5a685 AB |
645 | if (merged_hyp_pgd) { |
646 | clear_page(merged_hyp_pgd); | |
647 | free_page((unsigned long)merged_hyp_pgd); | |
648 | merged_hyp_pgd = NULL; | |
649 | } | |
4f728276 | 650 | |
342cd0ab CD |
651 | mutex_unlock(&kvm_hyp_pgd_mutex); |
652 | } | |
653 | ||
654 | static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, | |
6060df84 MZ |
655 | unsigned long end, unsigned long pfn, |
656 | pgprot_t prot) | |
342cd0ab CD |
657 | { |
658 | pte_t *pte; | |
659 | unsigned long addr; | |
342cd0ab | 660 | |
3562c76d MZ |
661 | addr = start; |
662 | do { | |
6060df84 | 663 | pte = pte_offset_kernel(pmd, addr); |
f8df7338 | 664 | kvm_set_pte(pte, kvm_pfn_pte(pfn, prot)); |
4f728276 | 665 | get_page(virt_to_page(pte)); |
6060df84 | 666 | pfn++; |
3562c76d | 667 | } while (addr += PAGE_SIZE, addr != end); |
342cd0ab CD |
668 | } |
669 | ||
670 | static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, | |
6060df84 MZ |
671 | unsigned long end, unsigned long pfn, |
672 | pgprot_t prot) | |
342cd0ab CD |
673 | { |
674 | pmd_t *pmd; | |
675 | pte_t *pte; | |
676 | unsigned long addr, next; | |
677 | ||
3562c76d MZ |
678 | addr = start; |
679 | do { | |
6060df84 | 680 | pmd = pmd_offset(pud, addr); |
342cd0ab CD |
681 | |
682 | BUG_ON(pmd_sect(*pmd)); | |
683 | ||
684 | if (pmd_none(*pmd)) { | |
4cf58924 | 685 | pte = pte_alloc_one_kernel(NULL); |
342cd0ab CD |
686 | if (!pte) { |
687 | kvm_err("Cannot allocate Hyp pte\n"); | |
688 | return -ENOMEM; | |
689 | } | |
0db9dd8a | 690 | kvm_pmd_populate(pmd, pte); |
4f728276 | 691 | get_page(virt_to_page(pmd)); |
342cd0ab CD |
692 | } |
693 | ||
694 | next = pmd_addr_end(addr, end); | |
695 | ||
6060df84 MZ |
696 | create_hyp_pte_mappings(pmd, addr, next, pfn, prot); |
697 | pfn += (next - addr) >> PAGE_SHIFT; | |
3562c76d | 698 | } while (addr = next, addr != end); |
342cd0ab CD |
699 | |
700 | return 0; | |
701 | } | |
702 | ||
e9f63768 | 703 | static int create_hyp_pud_mappings(p4d_t *p4d, unsigned long start, |
38f791a4 CD |
704 | unsigned long end, unsigned long pfn, |
705 | pgprot_t prot) | |
706 | { | |
707 | pud_t *pud; | |
708 | pmd_t *pmd; | |
709 | unsigned long addr, next; | |
710 | int ret; | |
711 | ||
712 | addr = start; | |
713 | do { | |
e9f63768 | 714 | pud = pud_offset(p4d, addr); |
38f791a4 CD |
715 | |
716 | if (pud_none_or_clear_bad(pud)) { | |
717 | pmd = pmd_alloc_one(NULL, addr); | |
718 | if (!pmd) { | |
719 | kvm_err("Cannot allocate Hyp pmd\n"); | |
720 | return -ENOMEM; | |
721 | } | |
0db9dd8a | 722 | kvm_pud_populate(pud, pmd); |
38f791a4 | 723 | get_page(virt_to_page(pud)); |
38f791a4 CD |
724 | } |
725 | ||
726 | next = pud_addr_end(addr, end); | |
727 | ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot); | |
728 | if (ret) | |
729 | return ret; | |
730 | pfn += (next - addr) >> PAGE_SHIFT; | |
731 | } while (addr = next, addr != end); | |
732 | ||
733 | return 0; | |
734 | } | |
735 | ||
e9f63768 MR |
736 | static int create_hyp_p4d_mappings(pgd_t *pgd, unsigned long start, |
737 | unsigned long end, unsigned long pfn, | |
738 | pgprot_t prot) | |
739 | { | |
740 | p4d_t *p4d; | |
741 | pud_t *pud; | |
742 | unsigned long addr, next; | |
743 | int ret; | |
744 | ||
745 | addr = start; | |
746 | do { | |
747 | p4d = p4d_offset(pgd, addr); | |
748 | ||
749 | if (p4d_none(*p4d)) { | |
750 | pud = pud_alloc_one(NULL, addr); | |
751 | if (!pud) { | |
752 | kvm_err("Cannot allocate Hyp pud\n"); | |
753 | return -ENOMEM; | |
754 | } | |
755 | kvm_p4d_populate(p4d, pud); | |
756 | get_page(virt_to_page(p4d)); | |
757 | } | |
758 | ||
759 | next = p4d_addr_end(addr, end); | |
760 | ret = create_hyp_pud_mappings(p4d, addr, next, pfn, prot); | |
761 | if (ret) | |
762 | return ret; | |
763 | pfn += (next - addr) >> PAGE_SHIFT; | |
764 | } while (addr = next, addr != end); | |
765 | ||
766 | return 0; | |
767 | } | |
768 | ||
98732d1b | 769 | static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd, |
6060df84 MZ |
770 | unsigned long start, unsigned long end, |
771 | unsigned long pfn, pgprot_t prot) | |
342cd0ab | 772 | { |
342cd0ab | 773 | pgd_t *pgd; |
e9f63768 | 774 | p4d_t *p4d; |
342cd0ab CD |
775 | unsigned long addr, next; |
776 | int err = 0; | |
777 | ||
342cd0ab | 778 | mutex_lock(&kvm_hyp_pgd_mutex); |
3562c76d MZ |
779 | addr = start & PAGE_MASK; |
780 | end = PAGE_ALIGN(end); | |
781 | do { | |
3ddd4556 | 782 | pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd); |
342cd0ab | 783 | |
38f791a4 | 784 | if (pgd_none(*pgd)) { |
e9f63768 MR |
785 | p4d = p4d_alloc_one(NULL, addr); |
786 | if (!p4d) { | |
787 | kvm_err("Cannot allocate Hyp p4d\n"); | |
342cd0ab CD |
788 | err = -ENOMEM; |
789 | goto out; | |
790 | } | |
e9f63768 | 791 | kvm_pgd_populate(pgd, p4d); |
38f791a4 | 792 | get_page(virt_to_page(pgd)); |
342cd0ab CD |
793 | } |
794 | ||
795 | next = pgd_addr_end(addr, end); | |
e9f63768 | 796 | err = create_hyp_p4d_mappings(pgd, addr, next, pfn, prot); |
342cd0ab CD |
797 | if (err) |
798 | goto out; | |
6060df84 | 799 | pfn += (next - addr) >> PAGE_SHIFT; |
3562c76d | 800 | } while (addr = next, addr != end); |
342cd0ab CD |
801 | out: |
802 | mutex_unlock(&kvm_hyp_pgd_mutex); | |
803 | return err; | |
804 | } | |
805 | ||
40c2729b CD |
806 | static phys_addr_t kvm_kaddr_to_phys(void *kaddr) |
807 | { | |
808 | if (!is_vmalloc_addr(kaddr)) { | |
809 | BUG_ON(!virt_addr_valid(kaddr)); | |
810 | return __pa(kaddr); | |
811 | } else { | |
812 | return page_to_phys(vmalloc_to_page(kaddr)) + | |
813 | offset_in_page(kaddr); | |
814 | } | |
815 | } | |
816 | ||
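kvm_kaddr_to_phys() exists because __pa() is only meaningful for linear-map addresses; anything in the vmalloc area must be resolved page by page via vmalloc_to_page(). An illustrative call, assuming a hypothetical buffer that may have come from either kmalloc() or vmalloc():

        phys_addr_t pa = kvm_kaddr_to_phys(buf + offset);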
342cd0ab | 817 | /** |
06e8c3b0 | 818 | * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode |
342cd0ab CD |
819 | * @from: The virtual kernel start address of the range |
820 | * @to: The virtual kernel end address of the range (exclusive) | |
c8dddecd | 821 | * @prot: The protection to be applied to this range |
342cd0ab | 822 | * |
06e8c3b0 MZ |
823 | * The same virtual address as the kernel virtual address is also used |
824 | * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying | |
825 | * physical pages. | |
342cd0ab | 826 | */ |
c8dddecd | 827 | int create_hyp_mappings(void *from, void *to, pgprot_t prot) |
342cd0ab | 828 | { |
40c2729b CD |
829 | phys_addr_t phys_addr; |
830 | unsigned long virt_addr; | |
6c41a413 MZ |
831 | unsigned long start = kern_hyp_va((unsigned long)from); |
832 | unsigned long end = kern_hyp_va((unsigned long)to); | |
6060df84 | 833 | |
1e947bad MZ |
834 | if (is_kernel_in_hyp_mode()) |
835 | return 0; | |
836 | ||
40c2729b CD |
837 | start = start & PAGE_MASK; |
838 | end = PAGE_ALIGN(end); | |
6060df84 | 839 | |
40c2729b CD |
840 | for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) { |
841 | int err; | |
6060df84 | 842 | |
40c2729b | 843 | phys_addr = kvm_kaddr_to_phys(from + virt_addr - start); |
98732d1b KM |
844 | err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD, |
845 | virt_addr, virt_addr + PAGE_SIZE, | |
40c2729b | 846 | __phys_to_pfn(phys_addr), |
c8dddecd | 847 | prot); |
40c2729b CD |
848 | if (err) |
849 | return err; | |
850 | } | |
851 | ||
852 | return 0; | |
342cd0ab CD |
853 | } |
854 | ||
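A typical caller of create_hyp_mappings() hands the start and end kernel addresses of an object together with the HYP protection to apply; a minimal usage sketch (the object is hypothetical, not from this file):

        err = create_hyp_mappings(obj, obj + sizeof(*obj), PAGE_HYP);
        if (err)
                return err;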
dc2e4633 MZ |
855 | static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size, |
856 | unsigned long *haddr, pgprot_t prot) | |
342cd0ab | 857 | { |
e3f019b3 MZ |
858 | pgd_t *pgd = hyp_pgd; |
859 | unsigned long base; | |
860 | int ret = 0; | |
6060df84 | 861 | |
e3f019b3 | 862 | mutex_lock(&kvm_hyp_pgd_mutex); |
6060df84 | 863 | |
e3f019b3 | 864 | /* |
656012c7 | 865 | * This assumes that we have enough space below the idmap |
e3f019b3 MZ |
866 | * page to allocate our VAs. If not, the check below will |
867 | * kick. A potential alternative would be to detect that | |
868 | * overflow and switch to an allocation above the idmap. | |
869 | * | |
870 | * The allocated size is always a multiple of PAGE_SIZE. | |
871 | */ | |
872 | size = PAGE_ALIGN(size + offset_in_page(phys_addr)); | |
873 | base = io_map_base - size; | |
1bb32a44 | 874 | |
e3f019b3 MZ |
875 | /* |
876 | * Verify that BIT(VA_BITS - 1) hasn't been flipped by | |
877 | * allocating the new area, as it would indicate we've | |
878 | * overflowed the idmap/IO address range. | |
879 | */ | |
880 | if ((base ^ io_map_base) & BIT(VA_BITS - 1)) | |
881 | ret = -ENOMEM; | |
882 | else | |
883 | io_map_base = base; | |
884 | ||
885 | mutex_unlock(&kvm_hyp_pgd_mutex); | |
886 | ||
887 | if (ret) | |
888 | goto out; | |
889 | ||
890 | if (__kvm_cpu_uses_extended_idmap()) | |
891 | pgd = boot_hyp_pgd; | |
892 | ||
893 | ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(), | |
894 | base, base + size, | |
dc2e4633 | 895 | __phys_to_pfn(phys_addr), prot); |
e3f019b3 MZ |
896 | if (ret) |
897 | goto out; | |
898 | ||
dc2e4633 | 899 | *haddr = base + offset_in_page(phys_addr); |
e3f019b3 MZ |
900 | |
901 | out: | |
dc2e4633 MZ |
902 | return ret; |
903 | } | |
904 | ||
905 | /** | |
906 | * create_hyp_io_mappings - Map IO into both kernel and HYP | |
907 | * @phys_addr: The physical start address which gets mapped | |
908 | * @size: Size of the region being mapped | |
909 | * @kaddr: Kernel VA for this mapping | |
910 | * @haddr: HYP VA for this mapping | |
911 | */ | |
912 | int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, | |
913 | void __iomem **kaddr, | |
914 | void __iomem **haddr) | |
915 | { | |
916 | unsigned long addr; | |
917 | int ret; | |
918 | ||
919 | *kaddr = ioremap(phys_addr, size); | |
920 | if (!*kaddr) | |
921 | return -ENOMEM; | |
922 | ||
923 | if (is_kernel_in_hyp_mode()) { | |
924 | *haddr = *kaddr; | |
925 | return 0; | |
926 | } | |
927 | ||
928 | ret = __create_hyp_private_mapping(phys_addr, size, | |
929 | &addr, PAGE_HYP_DEVICE); | |
1bb32a44 MZ |
930 | if (ret) { |
931 | iounmap(*kaddr); | |
932 | *kaddr = NULL; | |
dc2e4633 MZ |
933 | *haddr = NULL; |
934 | return ret; | |
935 | } | |
936 | ||
937 | *haddr = (void __iomem *)addr; | |
938 | return 0; | |
939 | } | |
940 | ||
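A minimal usage sketch of create_hyp_io_mappings() for a hypothetical device region (the physical address and size are illustrative):

        void __iomem *kaddr, *haddr;
        int ret;

        ret = create_hyp_io_mappings(dev_phys, SZ_8K, &kaddr, &haddr);
        if (ret)
                return ret;
        /* kaddr is usable from the kernel, haddr from HYP. */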
941 | /** | |
942 | * create_hyp_exec_mappings - Map an executable range into HYP | |
943 | * @phys_addr: The physical start address which gets mapped | |
944 | * @size: Size of the region being mapped | |
945 | * @haddr: HYP VA for this mapping | |
946 | */ | |
947 | int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, | |
948 | void **haddr) | |
949 | { | |
950 | unsigned long addr; | |
951 | int ret; | |
952 | ||
953 | BUG_ON(is_kernel_in_hyp_mode()); | |
954 | ||
955 | ret = __create_hyp_private_mapping(phys_addr, size, | |
956 | &addr, PAGE_HYP_EXEC); | |
957 | if (ret) { | |
958 | *haddr = NULL; | |
1bb32a44 MZ |
959 | return ret; |
960 | } | |
961 | ||
dc2e4633 | 962 | *haddr = (void *)addr; |
1bb32a44 | 963 | return 0; |
342cd0ab CD |
964 | } |
965 | ||
d5d8184d CD |
966 | /** |
967 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. | |
968 | * @kvm: The KVM struct pointer for the VM. | |
969 | * | |
8324c3d5 ZY |
970 | * Allocates only the stage-2 HW PGD level table(s) of size defined by |
971 | * stage2_pgd_size(kvm). | |
d5d8184d CD |
972 | * |
973 | * Note we don't need locking here as this is only called when the VM is | |
974 | * created, which can only be done once. | |
975 | */ | |
976 | int kvm_alloc_stage2_pgd(struct kvm *kvm) | |
977 | { | |
e329fb75 | 978 | phys_addr_t pgd_phys; |
d5d8184d CD |
979 | pgd_t *pgd; |
980 | ||
981 | if (kvm->arch.pgd != NULL) { | |
982 | kvm_err("kvm_arch already initialized?\n"); | |
983 | return -EINVAL; | |
984 | } | |
985 | ||
9163ee23 | 986 | /* Allocate the HW PGD, making sure that each page gets its own refcount */ |
e55cac5b | 987 | pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO); |
9163ee23 | 988 | if (!pgd) |
a987370f MZ |
989 | return -ENOMEM; |
990 | ||
e329fb75 CD |
991 | pgd_phys = virt_to_phys(pgd); |
992 | if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm))) | |
993 | return -EINVAL; | |
994 | ||
d5d8184d | 995 | kvm->arch.pgd = pgd; |
e329fb75 | 996 | kvm->arch.pgd_phys = pgd_phys; |
d5d8184d CD |
997 | return 0; |
998 | } | |
999 | ||
957db105 CD |
1000 | static void stage2_unmap_memslot(struct kvm *kvm, |
1001 | struct kvm_memory_slot *memslot) | |
1002 | { | |
1003 | hva_t hva = memslot->userspace_addr; | |
1004 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | |
1005 | phys_addr_t size = PAGE_SIZE * memslot->npages; | |
1006 | hva_t reg_end = hva + size; | |
1007 | ||
1008 | /* | |
1009 | * A memory region could potentially cover multiple VMAs, and any holes | |
1010 | * between them, so iterate over all of them to find out if we should | |
1011 | * unmap any of them. | |
1012 | * | |
1013 | * +--------------------------------------------+ | |
1014 | * +---------------+----------------+ +----------------+ | |
1015 | * | : VMA 1 | VMA 2 | | VMA 3 : | | |
1016 | * +---------------+----------------+ +----------------+ | |
1017 | * | memory region | | |
1018 | * +--------------------------------------------+ | |
1019 | */ | |
1020 | do { | |
1021 | struct vm_area_struct *vma = find_vma(current->mm, hva); | |
1022 | hva_t vm_start, vm_end; | |
1023 | ||
1024 | if (!vma || vma->vm_start >= reg_end) | |
1025 | break; | |
1026 | ||
1027 | /* | |
1028 | * Take the intersection of this VMA with the memory region | |
1029 | */ | |
1030 | vm_start = max(hva, vma->vm_start); | |
1031 | vm_end = min(reg_end, vma->vm_end); | |
1032 | ||
1033 | if (!(vma->vm_flags & VM_PFNMAP)) { | |
1034 | gpa_t gpa = addr + (vm_start - memslot->userspace_addr); | |
1035 | unmap_stage2_range(kvm, gpa, vm_end - vm_start); | |
1036 | } | |
1037 | hva = vm_end; | |
1038 | } while (hva < reg_end); | |
1039 | } | |
1040 | ||
1041 | /** | |
1042 | * stage2_unmap_vm - Unmap Stage-2 RAM mappings | |
1043 | * @kvm: The struct kvm pointer | |
1044 | * | |
656012c7 | 1045 | * Go through the memregions and unmap any regular RAM |
957db105 CD |
1046 | * backing memory already mapped to the VM. |
1047 | */ | |
1048 | void stage2_unmap_vm(struct kvm *kvm) | |
1049 | { | |
1050 | struct kvm_memslots *slots; | |
1051 | struct kvm_memory_slot *memslot; | |
1052 | int idx; | |
1053 | ||
1054 | idx = srcu_read_lock(&kvm->srcu); | |
89154dd5 | 1055 | mmap_read_lock(current->mm); |
957db105 CD |
1056 | spin_lock(&kvm->mmu_lock); |
1057 | ||
1058 | slots = kvm_memslots(kvm); | |
1059 | kvm_for_each_memslot(memslot, slots) | |
1060 | stage2_unmap_memslot(kvm, memslot); | |
1061 | ||
1062 | spin_unlock(&kvm->mmu_lock); | |
89154dd5 | 1063 | mmap_read_unlock(current->mm); |
957db105 CD |
1064 | srcu_read_unlock(&kvm->srcu, idx); |
1065 | } | |
1066 | ||
d5d8184d CD |
1067 | /** |
1068 | * kvm_free_stage2_pgd - free all stage-2 tables | |
1069 | * @kvm: The KVM struct pointer for the VM. | |
1070 | * | |
1071 | * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all | |
1072 | * underlying level-2 and level-3 tables before freeing the actual level-1 table | |
1073 | * and setting the struct pointer to NULL. | |
d5d8184d CD |
1074 | */ |
1075 | void kvm_free_stage2_pgd(struct kvm *kvm) | |
1076 | { | |
6c0d706b | 1077 | void *pgd = NULL; |
d5d8184d | 1078 | |
8b3405e3 | 1079 | spin_lock(&kvm->mmu_lock); |
6c0d706b | 1080 | if (kvm->arch.pgd) { |
e55cac5b | 1081 | unmap_stage2_range(kvm, 0, kvm_phys_size(kvm)); |
2952a607 | 1082 | pgd = READ_ONCE(kvm->arch.pgd); |
6c0d706b | 1083 | kvm->arch.pgd = NULL; |
e329fb75 | 1084 | kvm->arch.pgd_phys = 0; |
6c0d706b | 1085 | } |
8b3405e3 SP |
1086 | spin_unlock(&kvm->mmu_lock); |
1087 | ||
9163ee23 | 1088 | /* Free the HW pgd, one page at a time */ |
6c0d706b | 1089 | if (pgd) |
e55cac5b | 1090 | free_pages_exact(pgd, stage2_pgd_size(kvm)); |
d5d8184d CD |
1091 | } |
1092 | ||
e9f63768 | 1093 | static p4d_t *stage2_get_p4d(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
ad361f09 | 1094 | phys_addr_t addr) |
d5d8184d CD |
1095 | { |
1096 | pgd_t *pgd; | |
e9f63768 | 1097 | p4d_t *p4d; |
d5d8184d | 1098 | |
e55cac5b SP |
1099 | pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr); |
1100 | if (stage2_pgd_none(kvm, *pgd)) { | |
38f791a4 CD |
1101 | if (!cache) |
1102 | return NULL; | |
c1a33aeb | 1103 | p4d = kvm_mmu_memory_cache_alloc(cache); |
e9f63768 | 1104 | stage2_pgd_populate(kvm, pgd, p4d); |
38f791a4 CD |
1105 | get_page(virt_to_page(pgd)); |
1106 | } | |
1107 | ||
e9f63768 MR |
1108 | return stage2_p4d_offset(kvm, pgd, addr); |
1109 | } | |
1110 | ||
1111 | static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |
1112 | phys_addr_t addr) | |
1113 | { | |
1114 | p4d_t *p4d; | |
1115 | pud_t *pud; | |
1116 | ||
1117 | p4d = stage2_get_p4d(kvm, cache, addr); | |
1118 | if (stage2_p4d_none(kvm, *p4d)) { | |
1119 | if (!cache) | |
1120 | return NULL; | |
c1a33aeb | 1121 | pud = kvm_mmu_memory_cache_alloc(cache); |
e9f63768 MR |
1122 | stage2_p4d_populate(kvm, p4d, pud); |
1123 | get_page(virt_to_page(p4d)); | |
1124 | } | |
1125 | ||
1126 | return stage2_pud_offset(kvm, p4d, addr); | |
38f791a4 CD |
1127 | } |
1128 | ||
1129 | static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |
1130 | phys_addr_t addr) | |
1131 | { | |
1132 | pud_t *pud; | |
1133 | pmd_t *pmd; | |
1134 | ||
1135 | pud = stage2_get_pud(kvm, cache, addr); | |
b8e0ba7c | 1136 | if (!pud || stage2_pud_huge(kvm, *pud)) |
d6dbdd3c MZ |
1137 | return NULL; |
1138 | ||
e55cac5b | 1139 | if (stage2_pud_none(kvm, *pud)) { |
d5d8184d | 1140 | if (!cache) |
ad361f09 | 1141 | return NULL; |
c1a33aeb | 1142 | pmd = kvm_mmu_memory_cache_alloc(cache); |
e55cac5b | 1143 | stage2_pud_populate(kvm, pud, pmd); |
d5d8184d | 1144 | get_page(virt_to_page(pud)); |
c62ee2b2 MZ |
1145 | } |
1146 | ||
e55cac5b | 1147 | return stage2_pmd_offset(kvm, pud, addr); |
ad361f09 CD |
1148 | } |
1149 | ||
1150 | static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache | |
1151 | *cache, phys_addr_t addr, const pmd_t *new_pmd) | |
1152 | { | |
1153 | pmd_t *pmd, old_pmd; | |
1154 | ||
3c3736cd | 1155 | retry: |
ad361f09 CD |
1156 | pmd = stage2_get_pmd(kvm, cache, addr); |
1157 | VM_BUG_ON(!pmd); | |
d5d8184d | 1158 | |
ad361f09 | 1159 | old_pmd = *pmd; |
3c3736cd SP |
1160 | /* |
1161 | * Multiple vcpus faulting on the same PMD entry, can | |
1162 | * lead to them sequentially updating the PMD with the | |
1163 | * same value. Following the break-before-make | |
1164 | * (pmd_clear() followed by tlb_flush()) process can | |
1165 | * hinder forward progress due to refaults generated | |
1166 | * on missing translations. | |
1167 | * | |
1168 | * Skip updating the page table if the entry is | |
1169 | * unchanged. | |
1170 | */ | |
1171 | if (pmd_val(old_pmd) == pmd_val(*new_pmd)) | |
1172 | return 0; | |
1173 | ||
d4b9e079 | 1174 | if (pmd_present(old_pmd)) { |
86658b81 | 1175 | /* |
3c3736cd SP |
1176 | * If we already have PTE level mapping for this block, |
1177 | * we must unmap it to avoid inconsistent TLB state and | |
1178 | * leaking the table page. We could end up in this situation | |
1179 | * if the memory slot was marked for dirty logging and was | |
1180 | * reverted, leaving PTE level mappings for the pages accessed | |
1181 | * during the period. So, unmap the PTE level mapping for this | |
1182 | * block and retry, as we could have released the upper level | |
1183 | * table in the process. | |
86658b81 | 1184 | * |
3c3736cd SP |
1185 | * Normal THP split/merge follows mmu_notifier callbacks and do |
1186 | * get handled accordingly. | |
86658b81 | 1187 | */ |
3c3736cd SP |
1188 | if (!pmd_thp_or_huge(old_pmd)) { |
1189 | unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE); | |
1190 | goto retry; | |
1191 | } | |
86658b81 PA |
1192 | /* |
1193 | * Mapping in huge pages should only happen through a | |
1194 | * fault. If a page is merged into a transparent huge | |
1195 | * page, the individual subpages of that huge page | |
1196 | * should be unmapped through MMU notifiers before we | |
1197 | * get here. | |
1198 | * | |
1199 | * Merging of CompoundPages is not supported; they | |
1200 | * should become splitting first, unmapped, merged, | |
1201 | * and mapped back in on-demand. | |
1202 | */ | |
3c3736cd | 1203 | WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); |
d4b9e079 | 1204 | pmd_clear(pmd); |
ad361f09 | 1205 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
d4b9e079 | 1206 | } else { |
ad361f09 | 1207 | get_page(virt_to_page(pmd)); |
d4b9e079 MZ |
1208 | } |
1209 | ||
1210 | kvm_set_pmd(pmd, *new_pmd); | |
ad361f09 CD |
1211 | return 0; |
1212 | } | |
1213 | ||
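When an existing block mapping changes, the update above follows the architectural break-before-make sequence; stripped of the retry and sanity checks it is:

        pmd_clear(pmd);                         /* break: remove the old translation */
        kvm_tlb_flush_vmid_ipa(kvm, addr);      /* invalidate cached copies for this IPA */
        kvm_set_pmd(pmd, *new_pmd);             /* make: install the new block mapping */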
b8e0ba7c PA |
1214 | static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
1215 | phys_addr_t addr, const pud_t *new_pudp) | |
1216 | { | |
1217 | pud_t *pudp, old_pud; | |
1218 | ||
3c3736cd | 1219 | retry: |
b8e0ba7c PA |
1220 | pudp = stage2_get_pud(kvm, cache, addr); |
1221 | VM_BUG_ON(!pudp); | |
1222 | ||
1223 | old_pud = *pudp; | |
1224 | ||
1225 | /* | |
1226 | * A large number of vcpus faulting on the same stage 2 entry, | |
3c3736cd SP |
1227 | * can lead to a refault due to the stage2_pud_clear()/tlb_flush(). |
1228 | * Skip updating the page tables if there is no change. | |
b8e0ba7c PA |
1229 | */ |
1230 | if (pud_val(old_pud) == pud_val(*new_pudp)) | |
1231 | return 0; | |
1232 | ||
1233 | if (stage2_pud_present(kvm, old_pud)) { | |
3c3736cd SP |
1234 | /* |
1235 | * If we already have table level mapping for this block, unmap | |
1236 | * the range for this block and retry. | |
1237 | */ | |
1238 | if (!stage2_pud_huge(kvm, old_pud)) { | |
1239 | unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE); | |
1240 | goto retry; | |
1241 | } | |
1242 | ||
1243 | WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp)); | |
b8e0ba7c PA |
1244 | stage2_pud_clear(kvm, pudp); |
1245 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
1246 | } else { | |
1247 | get_page(virt_to_page(pudp)); | |
1248 | } | |
1249 | ||
1250 | kvm_set_pud(pudp, *new_pudp); | |
1251 | return 0; | |
1252 | } | |
1253 | ||
86d1c55e PA |
1254 | /* |
1255 | * stage2_get_leaf_entry - walk the stage2 VM page tables and return | |
1256 | * true if a valid and present leaf-entry is found. A pointer to the | |
1257 | * leaf-entry is returned in the appropriate level variable - pudpp, | |
1258 | * pmdpp, ptepp. | |
1259 | */ | |
1260 | static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, | |
1261 | pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp) | |
7a3796d2 | 1262 | { |
86d1c55e | 1263 | pud_t *pudp; |
7a3796d2 MZ |
1264 | pmd_t *pmdp; |
1265 | pte_t *ptep; | |
1266 | ||
86d1c55e PA |
1267 | *pudpp = NULL; |
1268 | *pmdpp = NULL; | |
1269 | *ptepp = NULL; | |
1270 | ||
1271 | pudp = stage2_get_pud(kvm, NULL, addr); | |
1272 | if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp)) | |
1273 | return false; | |
1274 | ||
1275 | if (stage2_pud_huge(kvm, *pudp)) { | |
1276 | *pudpp = pudp; | |
1277 | return true; | |
1278 | } | |
1279 | ||
1280 | pmdp = stage2_pmd_offset(kvm, pudp, addr); | |
7a3796d2 MZ |
1281 | if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp)) |
1282 | return false; | |
1283 | ||
86d1c55e PA |
1284 | if (pmd_thp_or_huge(*pmdp)) { |
1285 | *pmdpp = pmdp; | |
1286 | return true; | |
1287 | } | |
7a3796d2 MZ |
1288 | |
1289 | ptep = pte_offset_kernel(pmdp, addr); | |
1290 | if (!ptep || pte_none(*ptep) || !pte_present(*ptep)) | |
1291 | return false; | |
1292 | ||
86d1c55e PA |
1293 | *ptepp = ptep; |
1294 | return true; | |
1295 | } | |
1296 | ||
b757b47a | 1297 | static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz) |
86d1c55e PA |
1298 | { |
1299 | pud_t *pudp; | |
1300 | pmd_t *pmdp; | |
1301 | pte_t *ptep; | |
1302 | bool found; | |
1303 | ||
1304 | found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep); | |
1305 | if (!found) | |
1306 | return false; | |
1307 | ||
1308 | if (pudp) | |
b757b47a | 1309 | return sz <= PUD_SIZE && kvm_s2pud_exec(pudp); |
86d1c55e | 1310 | else if (pmdp) |
b757b47a | 1311 | return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp); |
86d1c55e | 1312 | else |
b757b47a | 1313 | return sz == PAGE_SIZE && kvm_s2pte_exec(ptep); |
7a3796d2 MZ |
1314 | } |
1315 | ||
ad361f09 | 1316 | static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
15a49a44 MS |
1317 | phys_addr_t addr, const pte_t *new_pte, |
1318 | unsigned long flags) | |
ad361f09 | 1319 | { |
b8e0ba7c | 1320 | pud_t *pud; |
ad361f09 CD |
1321 | pmd_t *pmd; |
1322 | pte_t *pte, old_pte; | |
15a49a44 MS |
1323 | bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP; |
1324 | bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE; | |
1325 | ||
1326 | VM_BUG_ON(logging_active && !cache); | |
ad361f09 | 1327 | |
38f791a4 | 1328 | /* Create stage-2 page table mapping - Levels 0 and 1 */ |
b8e0ba7c PA |
1329 | pud = stage2_get_pud(kvm, cache, addr); |
1330 | if (!pud) { | |
1331 | /* | |
1332 | * Ignore calls from kvm_set_spte_hva for unallocated | |
1333 | * address ranges. | |
1334 | */ | |
1335 | return 0; | |
1336 | } | |
1337 | ||
1338 | /* | |
1339 | * While dirty page logging - dissolve huge PUD, then continue | |
1340 | * on to allocate page. | |
1341 | */ | |
1342 | if (logging_active) | |
1343 | stage2_dissolve_pud(kvm, addr, pud); | |
1344 | ||
1345 | if (stage2_pud_none(kvm, *pud)) { | |
1346 | if (!cache) | |
1347 | return 0; /* ignore calls from kvm_set_spte_hva */ | |
c1a33aeb | 1348 | pmd = kvm_mmu_memory_cache_alloc(cache); |
b8e0ba7c PA |
1349 | stage2_pud_populate(kvm, pud, pmd); |
1350 | get_page(virt_to_page(pud)); | |
1351 | } | |
1352 | ||
1353 | pmd = stage2_pmd_offset(kvm, pud, addr); | |
ad361f09 CD |
1354 | if (!pmd) { |
1355 | /* | |
1356 | * Ignore calls from kvm_set_spte_hva for unallocated | |
1357 | * address ranges. | |
1358 | */ | |
1359 | return 0; | |
1360 | } | |
1361 | ||
15a49a44 MS |
1362 | /* |
1363 | * While dirty page logging - dissolve huge PMD, then continue on to | |
1364 | * allocate page. | |
1365 | */ | |
1366 | if (logging_active) | |
1367 | stage2_dissolve_pmd(kvm, addr, pmd); | |
1368 | ||
ad361f09 | 1369 | /* Create stage-2 page mappings - Level 2 */ |
d5d8184d CD |
1370 | if (pmd_none(*pmd)) { |
1371 | if (!cache) | |
1372 | return 0; /* ignore calls from kvm_set_spte_hva */ | |
c1a33aeb | 1373 | pte = kvm_mmu_memory_cache_alloc(cache); |
0db9dd8a | 1374 | kvm_pmd_populate(pmd, pte); |
d5d8184d | 1375 | get_page(virt_to_page(pmd)); |
c62ee2b2 MZ |
1376 | } |
1377 | ||
1378 | pte = pte_offset_kernel(pmd, addr); | |
d5d8184d CD |
1379 | |
1380 | if (iomap && pte_present(*pte)) | |
1381 | return -EFAULT; | |
1382 | ||
1383 | /* Create 2nd stage page table mapping - Level 3 */ | |
1384 | old_pte = *pte; | |
d4b9e079 | 1385 | if (pte_present(old_pte)) { |
976d34e2 PA |
1386 | /* Skip page table update if there is no change */ |
1387 | if (pte_val(old_pte) == pte_val(*new_pte)) | |
1388 | return 0; | |
1389 | ||
d4b9e079 | 1390 | kvm_set_pte(pte, __pte(0)); |
48762767 | 1391 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
d4b9e079 | 1392 | } else { |
d5d8184d | 1393 | get_page(virt_to_page(pte)); |
d4b9e079 | 1394 | } |
d5d8184d | 1395 | |
d4b9e079 | 1396 | kvm_set_pte(pte, *new_pte); |
d5d8184d CD |
1397 | return 0; |
1398 | } | |
d5d8184d | 1399 | |
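stage2_set_pte() is the common page-granular mapper; a minimal sketch of the two flag combinations its callers use (the dirty-logging call is made from the fault-handling code elsewhere in this file, not shown in this section):

        /* Device mapping: must not silently overwrite an existing PTE. */
        ret = stage2_set_pte(kvm, &cache, addr, &pte, KVM_S2PTE_FLAG_IS_IOMAP);

        /* Dirty-logging fault: dissolve any huge mapping and map a single page. */
        ret = stage2_set_pte(kvm, &cache, addr, &pte, KVM_S2_FLAG_LOGGING_ACTIVE);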
06485053 CM |
1400 | #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
1401 | static int stage2_ptep_test_and_clear_young(pte_t *pte) | |
1402 | { | |
1403 | if (pte_young(*pte)) { | |
1404 | *pte = pte_mkold(*pte); | |
1405 | return 1; | |
1406 | } | |
d5d8184d CD |
1407 | return 0; |
1408 | } | |
06485053 CM |
1409 | #else |
1410 | static int stage2_ptep_test_and_clear_young(pte_t *pte) | |
1411 | { | |
1412 | return __ptep_test_and_clear_young(pte); | |
1413 | } | |
1414 | #endif | |
1415 | ||
1416 | static int stage2_pmdp_test_and_clear_young(pmd_t *pmd) | |
1417 | { | |
1418 | return stage2_ptep_test_and_clear_young((pte_t *)pmd); | |
1419 | } | |
d5d8184d | 1420 | |
35a63966 PA |
1421 | static int stage2_pudp_test_and_clear_young(pud_t *pud) |
1422 | { | |
1423 | return stage2_ptep_test_and_clear_young((pte_t *)pud); | |
1424 | } | |
1425 | ||
d5d8184d CD |
1426 | /** |
1427 | * kvm_phys_addr_ioremap - map a device range to guest IPA | |
1428 | * | |
1429 | * @kvm: The KVM pointer | |
1430 | * @guest_ipa: The IPA at which to insert the mapping | |
1431 | * @pa: The physical address of the device | |
1432 | * @size: The size of the mapping | |
1433 | */ | |
1434 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | |
c40f2f8f | 1435 | phys_addr_t pa, unsigned long size, bool writable) |
d5d8184d CD |
1436 | { |
1437 | phys_addr_t addr, end; | |
1438 | int ret = 0; | |
1439 | unsigned long pfn; | |
c1a33aeb | 1440 | struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, }; |
d5d8184d CD |
1441 | |
1442 | end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; | |
1443 | pfn = __phys_to_pfn(pa); | |
1444 | ||
1445 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { | |
f8df7338 | 1446 | pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE); |
d5d8184d | 1447 | |
c40f2f8f | 1448 | if (writable) |
06485053 | 1449 | pte = kvm_s2pte_mkwrite(pte); |
c40f2f8f | 1450 | |
c1a33aeb SC |
1451 | ret = kvm_mmu_topup_memory_cache(&cache, |
1452 | kvm_mmu_cache_min_pages(kvm)); | |
d5d8184d CD |
1453 | if (ret) |
1454 | goto out; | |
1455 | spin_lock(&kvm->mmu_lock); | |
15a49a44 MS |
1456 | ret = stage2_set_pte(kvm, &cache, addr, &pte, |
1457 | KVM_S2PTE_FLAG_IS_IOMAP); | |
d5d8184d CD |
1458 | spin_unlock(&kvm->mmu_lock); |
1459 | if (ret) | |
1460 | goto out; | |
1461 | ||
1462 | pfn++; | |
1463 | } | |
1464 | ||
1465 | out: | |
c1a33aeb | 1466 | kvm_mmu_free_memory_cache(&cache); |
d5d8184d CD |
1467 | return ret; |
1468 | } | |
1469 | ||
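A minimal usage sketch of kvm_phys_addr_ioremap(), mapping a hypothetical 8KiB device region into the guest at a chosen IPA (the addresses are illustrative; historically the main in-tree caller is the vgic code mapping the virtual CPU interface):

        ret = kvm_phys_addr_ioremap(kvm, guest_ipa, dev_phys, SZ_8K, true);
        if (ret)
                return ret;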
c6473555 MS |
1470 | /** |
1471 | * stage2_wp_ptes - write protect PMD range | |
1472 | * @pmd: pointer to pmd entry | |
1473 | * @addr: range start address | |
1474 | * @end: range end address | |
1475 | */ | |
1476 | static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) | |
1477 | { | |
1478 | pte_t *pte; | |
1479 | ||
1480 | pte = pte_offset_kernel(pmd, addr); | |
1481 | do { | |
1482 | if (!pte_none(*pte)) { | |
1483 | if (!kvm_s2pte_readonly(pte)) | |
1484 | kvm_set_s2pte_readonly(pte); | |
1485 | } | |
1486 | } while (pte++, addr += PAGE_SIZE, addr != end); | |
1487 | } | |
1488 | ||
1489 | /** | |
1490 | * stage2_wp_pmds - write protect PUD range | |
e55cac5b | 1491 |  * @kvm: kvm instance for the VM
c6473555 MS |
1492 | * @pud: pointer to pud entry |
1493 | * @addr: range start address | |
1494 | * @end: range end address | |
1495 | */ | |
e55cac5b SP |
1496 | static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud, |
1497 | phys_addr_t addr, phys_addr_t end) | |
c6473555 MS |
1498 | { |
1499 | pmd_t *pmd; | |
1500 | phys_addr_t next; | |
1501 | ||
e55cac5b | 1502 | pmd = stage2_pmd_offset(kvm, pud, addr); |
c6473555 MS |
1503 | |
1504 | do { | |
e55cac5b | 1505 | next = stage2_pmd_addr_end(kvm, addr, end); |
c6473555 | 1506 | if (!pmd_none(*pmd)) { |
bbb3b6b3 | 1507 | if (pmd_thp_or_huge(*pmd)) { |
c6473555 MS |
1508 | if (!kvm_s2pmd_readonly(pmd)) |
1509 | kvm_set_s2pmd_readonly(pmd); | |
1510 | } else { | |
1511 | stage2_wp_ptes(pmd, addr, next); | |
1512 | } | |
1513 | } | |
1514 | } while (pmd++, addr = next, addr != end); | |
1515 | } | |
1516 | ||
1517 | /** | |
e9f63768 | 1518 | * stage2_wp_puds - write protect P4D range |
8324c3d5 ZY |
1519 |  * @p4d: pointer to p4d entry
1520 | * @addr: range start address | |
1521 | * @end: range end address | |
1522 | */ | |
e9f63768 | 1523 | static void stage2_wp_puds(struct kvm *kvm, p4d_t *p4d, |
e55cac5b | 1524 | phys_addr_t addr, phys_addr_t end) |
c6473555 MS |
1525 | { |
1526 | pud_t *pud; | |
1527 | phys_addr_t next; | |
1528 | ||
e9f63768 | 1529 | pud = stage2_pud_offset(kvm, p4d, addr); |
c6473555 | 1530 | do { |
e55cac5b SP |
1531 | next = stage2_pud_addr_end(kvm, addr, end); |
1532 | if (!stage2_pud_none(kvm, *pud)) { | |
4ea5af53 PA |
1533 | if (stage2_pud_huge(kvm, *pud)) { |
1534 | if (!kvm_s2pud_readonly(pud)) | |
1535 | kvm_set_s2pud_readonly(pud); | |
1536 | } else { | |
1537 | stage2_wp_pmds(kvm, pud, addr, next); | |
1538 | } | |
c6473555 MS |
1539 | } |
1540 | } while (pud++, addr = next, addr != end); | |
1541 | } | |
1542 | ||
e9f63768 MR |
1543 | /** |
1544 | * stage2_wp_p4ds - write protect PGD range | |
1545 | * @pgd: pointer to pgd entry | |
1546 | * @addr: range start address | |
1547 | * @end: range end address | |
1548 | */ | |
1549 | static void stage2_wp_p4ds(struct kvm *kvm, pgd_t *pgd, | |
1550 | phys_addr_t addr, phys_addr_t end) | |
1551 | { | |
1552 | p4d_t *p4d; | |
1553 | phys_addr_t next; | |
1554 | ||
1555 | p4d = stage2_p4d_offset(kvm, pgd, addr); | |
1556 | do { | |
1557 | next = stage2_p4d_addr_end(kvm, addr, end); | |
1558 | if (!stage2_p4d_none(kvm, *p4d)) | |
1559 | stage2_wp_puds(kvm, p4d, addr, next); | |
1560 | } while (p4d++, addr = next, addr != end); | |
1561 | } | |
1562 | ||
c6473555 MS |
1563 | /** |
1564 | * stage2_wp_range() - write protect stage2 memory region range | |
1565 | * @kvm: The KVM pointer | |
1566 | * @addr: Start address of range | |
1567 | * @end: End address of range | |
1568 | */ | |
1569 | static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) | |
1570 | { | |
1571 | pgd_t *pgd; | |
1572 | phys_addr_t next; | |
1573 | ||
e55cac5b | 1574 | pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr); |
c6473555 MS |
1575 | do { |
1576 | /* | |
1577 | * Release kvm_mmu_lock periodically if the memory region is | |
1578 | * large. Otherwise, we may see kernel panics with | |
227ea818 CD |
1579 | * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, |
1580 | * CONFIG_LOCKDEP. Additionally, holding the lock too long | |
0c428a6a SP |
1581 | * will also starve other vCPUs. We also have to make sure
1582 | * that the page tables are not freed while the lock is
1583 | * released.
c6473555 | 1584 | */ |
0c428a6a SP |
1585 | cond_resched_lock(&kvm->mmu_lock); |
1586 | if (!READ_ONCE(kvm->arch.pgd)) | |
1587 | break; | |
e55cac5b SP |
1588 | next = stage2_pgd_addr_end(kvm, addr, end); |
1589 | if (stage2_pgd_present(kvm, *pgd)) | |
e9f63768 | 1590 | stage2_wp_p4ds(kvm, pgd, addr, next); |
c6473555 MS |
1591 | } while (pgd++, addr = next, addr != end); |
1592 | } | |
1593 | ||
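The walkers above all rely on the stage2_*_addr_end() helpers, which return either the next table-entry boundary or the end of the range, whichever comes first. A small, self-contained sketch of that clamping arithmetic (userspace C, with a made-up 1GB entry size standing in for a PGD entry):

#include <stdio.h>

/* Illustrative stand-in for the stage2_*_addr_end() clamping logic. */
static unsigned long long addr_end(unsigned long long addr,
				   unsigned long long end,
				   unsigned long long entry_size)
{
	unsigned long long boundary = (addr + entry_size) & ~(entry_size - 1);

	return boundary < end ? boundary : end;
}

int main(void)
{
	/* Walk ~3GB with a 1GB entry size, mirroring the do/while pattern above. */
	unsigned long long addr = 0x40200000ULL, end = 0x100000000ULL, next;

	do {
		next = addr_end(addr, end, 0x40000000ULL);
		printf("entry covers [%#llx, %#llx)\n", addr, next);
	} while (addr = next, addr != end);

	return 0;
}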
1594 | /** | |
1595 | * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot | |
1596 | * @kvm: The KVM pointer | |
1597 | * @slot: The memory slot to write protect | |
1598 | * | |
1599 | * Called to start logging dirty pages after the memory region has been
1600 | * set up with the KVM_MEM_LOG_DIRTY_PAGES flag. After this function returns,
4ea5af53 | 1601 | * all present PUDs, PMDs and PTEs are write protected in the memory region.
c6473555 MS |
1602 | * Afterwards, the dirty page log can be read.
1603 | * | |
1604 | * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired, | |
1605 | * serializing operations for VM memory regions. | |
1606 | */ | |
1607 | void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) | |
1608 | { | |
9f6b8029 PB |
1609 | struct kvm_memslots *slots = kvm_memslots(kvm); |
1610 | struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); | |
0577d1ab SC |
1611 | phys_addr_t start, end; |
1612 | ||
1613 | if (WARN_ON_ONCE(!memslot)) | |
1614 | return; | |
1615 | ||
1616 | start = memslot->base_gfn << PAGE_SHIFT; | |
1617 | end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; | |
c6473555 MS |
1618 | |
1619 | spin_lock(&kvm->mmu_lock); | |
1620 | stage2_wp_range(kvm, start, end); | |
1621 | spin_unlock(&kvm->mmu_lock); | |
1622 | kvm_flush_remote_tlbs(kvm); | |
1623 | } | |
53c810c3 MS |
1624 | |
1625 | /** | |
3b0f1d01 | 1626 | * kvm_mmu_write_protect_pt_masked() - write protect dirty pages |
53c810c3 MS |
1627 | * @kvm: The KVM pointer |
1628 | * @slot: The memory slot associated with mask | |
1629 | * @gfn_offset: The gfn offset in memory slot | |
1630 | * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory | |
1631 | * slot to be write protected | |
1632 | * | |
1633 | * Walks the bits set in mask and write protects the associated PTEs. Caller must
1634 | * acquire kvm_mmu_lock. | |
1635 | */ | |
3b0f1d01 | 1636 | static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, |
53c810c3 MS |
1637 | struct kvm_memory_slot *slot, |
1638 | gfn_t gfn_offset, unsigned long mask) | |
1639 | { | |
1640 | phys_addr_t base_gfn = slot->base_gfn + gfn_offset; | |
1641 | phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; | |
1642 | phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; | |
1643 | ||
1644 | stage2_wp_range(kvm, start, end); | |
1645 | } | |
c6473555 | 1646 | |
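To make the mask-to-range conversion above concrete, here is a self-contained userspace sketch, using __builtin_ctzl()/__builtin_clzl() as stand-ins for the kernel's __ffs()/__fls() and assuming 4K pages:

#include <stdio.h>

int main(void)
{
	const int page_shift = 12;		/* assume 4K pages */
	unsigned long base_gfn = 0x1000;	/* slot base_gfn + gfn_offset */
	unsigned long mask = 0x38;		/* dirty bits 3, 4 and 5 set */

	unsigned long first = __builtin_ctzl(mask);				/* __ffs(mask) */
	unsigned long last = 8 * sizeof(mask) - 1 - __builtin_clzl(mask);	/* __fls(mask) */

	unsigned long long start = (unsigned long long)(base_gfn + first) << page_shift;
	unsigned long long end = (unsigned long long)(base_gfn + last + 1) << page_shift;

	/* Prints: write protect IPAs [0x1003000, 0x1006000) */
	printf("write protect IPAs [%#llx, %#llx)\n", start, end);
	return 0;
}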
3b0f1d01 KH |
1647 | /* |
1648 | * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected | |
1649 | * dirty pages. | |
1650 | * | |
1651 | * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to | |
1652 | * enable dirty logging for them. | |
1653 | */ | |
1654 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, | |
1655 | struct kvm_memory_slot *slot, | |
1656 | gfn_t gfn_offset, unsigned long mask) | |
1657 | { | |
1658 | kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); | |
1659 | } | |
1660 | ||
17ab9d57 | 1661 | static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) |
0d3e4d4f | 1662 | { |
17ab9d57 | 1663 | __clean_dcache_guest_page(pfn, size); |
a15f6939 MZ |
1664 | } |
1665 | ||
17ab9d57 | 1666 | static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size) |
a15f6939 | 1667 | { |
17ab9d57 | 1668 | __invalidate_icache_guest_page(pfn, size); |
0d3e4d4f MZ |
1669 | } |
1670 | ||
1559b758 | 1671 | static void kvm_send_hwpoison_signal(unsigned long address, short lsb) |
196f878a | 1672 | { |
795a8371 | 1673 | send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current); |
196f878a JM |
1674 | } |
1675 | ||
a80868f3 SP |
1676 | static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, |
1677 | unsigned long hva, | |
1678 | unsigned long map_size) | |
6794ad54 | 1679 | { |
c2be79a0 | 1680 | gpa_t gpa_start; |
6794ad54 CD |
1681 | hva_t uaddr_start, uaddr_end; |
1682 | size_t size; | |
1683 | ||
9f283614 SP |
1684 | /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */ |
1685 | if (map_size == PAGE_SIZE) | |
1686 | return true; | |
1687 | ||
6794ad54 CD |
1688 | size = memslot->npages * PAGE_SIZE; |
1689 | ||
1690 | gpa_start = memslot->base_gfn << PAGE_SHIFT; | |
6794ad54 CD |
1691 | |
1692 | uaddr_start = memslot->userspace_addr; | |
1693 | uaddr_end = uaddr_start + size; | |
1694 | ||
1695 | /* | |
1696 | * Pages belonging to memslots that don't have the same alignment | |
a80868f3 SP |
1697 | * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2 |
1698 | * PMD/PUD entries, because we'll end up mapping the wrong pages. | |
6794ad54 CD |
1699 | * |
1700 | * Consider a layout like the following: | |
1701 | * | |
1702 | * memslot->userspace_addr: | |
1703 | * +-----+--------------------+--------------------+---+ | |
a80868f3 | 1704 | * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| |
6794ad54 CD |
1705 | * +-----+--------------------+--------------------+---+ |
1706 | * | |
9f283614 | 1707 | * memslot->base_gfn << PAGE_SHIFT: |
6794ad54 | 1708 | * +---+--------------------+--------------------+-----+ |
a80868f3 | 1709 | * |abc|def Stage-2 block | Stage-2 block |tvxyz| |
6794ad54 CD |
1710 | * +---+--------------------+--------------------+-----+ |
1711 | * | |
a80868f3 | 1712 | * If we create those stage-2 blocks, we'll end up with this incorrect |
6794ad54 CD |
1713 | * mapping: |
1714 | * d -> f | |
1715 | * e -> g | |
1716 | * f -> h | |
1717 | */ | |
a80868f3 | 1718 | if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) |
6794ad54 CD |
1719 | return false; |
1720 | ||
1721 | /* | |
1722 | * Next, let's make sure we're not trying to map anything not covered | |
a80868f3 SP |
1723 | * by the memslot. This means we have to prohibit block size mappings |
1724 | * for the beginning and end of a non-block aligned and non-block sized | |
6794ad54 CD |
1725 | * memory slot (illustrated by the head and tail parts of the |
1726 | * userspace view above containing pages 'abcde' and 'xyz', | |
1727 | * respectively). | |
1728 | * | |
1729 | * Note that it doesn't matter if we do the check using the | |
1730 | * userspace_addr or the base_gfn, as both are equally aligned (per | |
1731 | * the check above) and equally sized. | |
1732 | */ | |
a80868f3 SP |
1733 | return (hva & ~(map_size - 1)) >= uaddr_start && |
1734 | (hva & ~(map_size - 1)) + map_size <= uaddr_end; | |
6794ad54 CD |
1735 | } |
1736 | ||
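As a concrete illustration of the two checks above, here is a self-contained userspace restatement with made-up slot layout values, assuming 2MB stage-2 blocks:

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PMD_SIZE 0x200000UL	/* assume 2MB stage-2 blocks */

/* Userspace restatement of fault_supports_stage2_huge_mapping(), for
 * illustration only. */
static bool huge_mapping_ok(unsigned long gpa_start, unsigned long uaddr_start,
			    unsigned long uaddr_end, unsigned long hva,
			    unsigned long map_size)
{
	/* HVA and IPA must be congruent modulo the block size... */
	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	/* ...and the whole block must lie inside the memslot. */
	return (hva & ~(map_size - 1)) >= uaddr_start &&
	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}

int main(void)
{
	/* Slot: HVA [0x40100000, 0x40900000) mapped at IPA 0x80100000. */
	unsigned long uaddr_start = 0x40100000UL, uaddr_end = 0x40900000UL;
	unsigned long gpa_start = 0x80100000UL;

	/* A fault in the middle of the slot may use a PMD block (prints 1)... */
	printf("%d\n", huge_mapping_ok(gpa_start, uaddr_start, uaddr_end,
				       0x40480000UL, EXAMPLE_PMD_SIZE));
	/* ...but one in the unaligned head of the slot may not (prints 0). */
	printf("%d\n", huge_mapping_ok(gpa_start, uaddr_start, uaddr_end,
				       0x40180000UL, EXAMPLE_PMD_SIZE));
	return 0;
}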
0529c902 SP |
1737 | /* |
1738 | * Check if the given hva is backed by a transparent huge page (THP) and | |
1739 | * whether it can be mapped using block mapping in stage2. If so, adjust | |
1740 | * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently | |
1741 | * supported. This will need to be updated to support other THP sizes. | |
1742 | * | |
1743 | * Returns the size of the mapping. | |
1744 | */ | |
1745 | static unsigned long | |
1746 | transparent_hugepage_adjust(struct kvm_memory_slot *memslot, | |
1747 | unsigned long hva, kvm_pfn_t *pfnp, | |
1748 | phys_addr_t *ipap) | |
1749 | { | |
1750 | kvm_pfn_t pfn = *pfnp; | |
1751 | ||
1752 | /* | |
1753 | * Make sure the adjustment is done only for THP pages. Also make | |
1754 | * sure that the HVA and IPA are sufficiently aligned and that the | |
1755 | * block map is contained within the memslot. | |
1756 | */ | |
1757 | if (kvm_is_transparent_hugepage(pfn) && | |
1758 | fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) { | |
1759 | /* | |
1760 | * The address we faulted on is backed by a transparent huge | |
1761 | * page. However, because we map the compound huge page and | |
1762 | * not the individual tail page, we need to transfer the | |
1763 | * refcount to the head page. We have to be careful that the | |
1764 | * THP doesn't start to split while we are adjusting the | |
1765 | * refcounts. | |
1766 | * | |
1767 | * We are sure this doesn't happen, because mmu_notifier_retry | |
1768 | * was successful and we are holding the mmu_lock, so if this | |
1769 | * THP is trying to split, it will be blocked in the mmu | |
1770 | * notifier before touching any of the pages, specifically | |
1771 | * before being able to call __split_huge_page_refcount(). | |
1772 | * | |
1773 | * We can therefore safely transfer the refcount from PG_tail | |
1774 | * to PG_head and switch the pfn from a tail page to the head | |
1775 | * page accordingly. | |
1776 | */ | |
1777 | *ipap &= PMD_MASK; | |
1778 | kvm_release_pfn_clean(pfn); | |
1779 | pfn &= ~(PTRS_PER_PMD - 1); | |
1780 | kvm_get_pfn(pfn); | |
1781 | *pfnp = pfn; | |
1782 | ||
1783 | return PMD_SIZE; | |
1784 | } | |
1785 | ||
1786 | /* Use page mapping if we cannot use block mapping. */ | |
1787 | return PAGE_SIZE; | |
1788 | } | |
1789 | ||
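The adjustment above simply rounds both the IPA and the PFN down to the start of the 2MB block that contains them. A short worked example with made-up values, assuming 4K pages (512 PTEs per PMD):

#include <stdio.h>

int main(void)
{
	const unsigned long long ptrs_per_pmd = 512;			/* 4K pages */
	const unsigned long long pmd_mask = ~((1ULL << 21) - 1);	/* 2MB blocks */

	unsigned long long fault_ipa = 0x80234000ULL;	/* faulting IPA (made up) */
	unsigned long long pfn = 0x41234ULL;		/* tail-page PFN backing it */

	fault_ipa &= pmd_mask;			/* *ipap &= PMD_MASK;          */
	pfn &= ~(ptrs_per_pmd - 1);		/* pfn &= ~(PTRS_PER_PMD - 1); */

	/* Prints: map IPA 0x80200000 with head PFN 0x41200 as one 2MB block */
	printf("map IPA %#llx with head PFN %#llx as one 2MB block\n",
	       fault_ipa, pfn);
	return 0;
}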
94f8e641 | 1790 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
98047888 | 1791 | struct kvm_memory_slot *memslot, unsigned long hva, |
94f8e641 CD |
1792 | unsigned long fault_status) |
1793 | { | |
94f8e641 | 1794 | int ret; |
6396b852 PA |
1795 | bool write_fault, writable, force_pte = false; |
1796 | bool exec_fault, needs_exec; | |
94f8e641 | 1797 | unsigned long mmu_seq; |
ad361f09 | 1798 | gfn_t gfn = fault_ipa >> PAGE_SHIFT; |
ad361f09 | 1799 | struct kvm *kvm = vcpu->kvm; |
94f8e641 | 1800 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
ad361f09 | 1801 | struct vm_area_struct *vma; |
1559b758 | 1802 | short vma_shift; |
ba049e93 | 1803 | kvm_pfn_t pfn; |
b8865767 | 1804 | pgprot_t mem_type = PAGE_S2; |
15a49a44 | 1805 | bool logging_active = memslot_is_logging(memslot); |
3f58bf63 | 1806 | unsigned long vma_pagesize, flags = 0; |
94f8e641 | 1807 | |
a7d079ce | 1808 | write_fault = kvm_is_write_fault(vcpu); |
d0e22b4a MZ |
1809 | exec_fault = kvm_vcpu_trap_is_iabt(vcpu); |
1810 | VM_BUG_ON(write_fault && exec_fault); | |
1811 | ||
1812 | if (fault_status == FSC_PERM && !write_fault && !exec_fault) { | |
94f8e641 CD |
1813 | kvm_err("Unexpected L2 read permission error\n"); |
1814 | return -EFAULT; | |
1815 | } | |
1816 | ||
ad361f09 | 1817 | /* Let's check if we will get back a huge page backed by hugetlbfs */ |
89154dd5 | 1818 | mmap_read_lock(current->mm); |
ad361f09 | 1819 | vma = find_vma_intersection(current->mm, hva, hva + 1); |
37b54408 AB |
1820 | if (unlikely(!vma)) { |
1821 | kvm_err("Failed to find VMA for hva 0x%lx\n", hva); | |
89154dd5 | 1822 | mmap_read_unlock(current->mm); |
37b54408 AB |
1823 | return -EFAULT; |
1824 | } | |
1825 | ||
1559b758 JM |
1826 | if (is_vm_hugetlb_page(vma)) |
1827 | vma_shift = huge_page_shift(hstate_vma(vma)); | |
1828 | else | |
1829 | vma_shift = PAGE_SHIFT; | |
1830 | ||
1831 | vma_pagesize = 1ULL << vma_shift; | |
a80868f3 | 1832 | if (logging_active || |
6d674e28 | 1833 | (vma->vm_flags & VM_PFNMAP) || |
a80868f3 SP |
1834 | !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) { |
1835 | force_pte = true; | |
1836 | vma_pagesize = PAGE_SIZE; | |
1837 | } | |
1838 | ||
b8e0ba7c | 1839 | /* |
280cebfd SP |
1840 | * Stage 2 has a minimum of 2 levels of page tables (for arm64 see
1841 | * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can | |
1842 | * use PMD_SIZE huge mappings (even when the PMD is folded into PGD). | |
1843 | * As for PUD huge maps, we must make sure that we have at least | |
1844 | * 3 levels, i.e, PMD is not folded. | |
b8e0ba7c | 1845 | */ |
a80868f3 SP |
1846 | if (vma_pagesize == PMD_SIZE || |
1847 | (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) | |
b8e0ba7c | 1848 | gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; |
89154dd5 | 1849 | mmap_read_unlock(current->mm); |
ad361f09 | 1850 | |
94f8e641 | 1851 | /* We need minimum second+third level pages */ |
c1a33aeb | 1852 | ret = kvm_mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm)); |
94f8e641 CD |
1853 | if (ret) |
1854 | return ret; | |
1855 | ||
1856 | mmu_seq = vcpu->kvm->mmu_notifier_seq; | |
1857 | /* | |
1858 | * Ensure the read of mmu_notifier_seq happens before we call | |
1859 | * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk | |
1860 | * the page we just got a reference to getting unmapped before we have a
1861 | * chance to grab the mmu_lock, which ensures that if the page gets
1862 | * unmapped afterwards, the call to kvm_unmap_hva will take it away | |
1863 | * from us again properly. This smp_rmb() interacts with the smp_wmb() | |
1864 | * in kvm_mmu_notifier_invalidate_<page|range_end>. | |
1865 | */ | |
1866 | smp_rmb(); | |
1867 | ||
ad361f09 | 1868 | pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); |
196f878a | 1869 | if (pfn == KVM_PFN_ERR_HWPOISON) { |
1559b758 | 1870 | kvm_send_hwpoison_signal(hva, vma_shift); |
196f878a JM |
1871 | return 0; |
1872 | } | |
9ac71595 | 1873 | if (is_error_noslot_pfn(pfn)) |
94f8e641 CD |
1874 | return -EFAULT; |
1875 | ||
15a49a44 | 1876 | if (kvm_is_device_pfn(pfn)) { |
b8865767 | 1877 | mem_type = PAGE_S2_DEVICE; |
15a49a44 MS |
1878 | flags |= KVM_S2PTE_FLAG_IS_IOMAP; |
1879 | } else if (logging_active) { | |
1880 | /* | |
1881 | * Faults on pages in a memslot with logging enabled | |
1882 | * should not be mapped with huge pages (it introduces churn | |
1883 | * and performance degradation), so force a pte mapping. | |
1884 | */ | |
15a49a44 MS |
1885 | flags |= KVM_S2_FLAG_LOGGING_ACTIVE; |
1886 | ||
1887 | /* | |
1888 | * Only actually map the page as writable if this was a write | |
1889 | * fault. | |
1890 | */ | |
1891 | if (!write_fault) | |
1892 | writable = false; | |
1893 | } | |
b8865767 | 1894 | |
6d674e28 MZ |
1895 | if (exec_fault && is_iomap(flags)) |
1896 | return -ENOEXEC; | |
1897 | ||
ad361f09 CD |
1898 | spin_lock(&kvm->mmu_lock); |
1899 | if (mmu_notifier_retry(kvm, mmu_seq)) | |
94f8e641 | 1900 | goto out_unlock; |
15a49a44 | 1901 | |
0529c902 SP |
1902 | /* |
1903 | * If we are not forced to use page mapping, check if we are | |
1904 | * backed by a THP and thus use block mapping if possible. | |
1905 | */ | |
1906 | if (vma_pagesize == PAGE_SIZE && !force_pte) | |
1907 | vma_pagesize = transparent_hugepage_adjust(memslot, hva, | |
1908 | &pfn, &fault_ipa); | |
3f58bf63 PA |
1909 | if (writable) |
1910 | kvm_set_pfn_dirty(pfn); | |
ad361f09 | 1911 | |
6d674e28 | 1912 | if (fault_status != FSC_PERM && !is_iomap(flags)) |
3f58bf63 PA |
1913 | clean_dcache_guest_page(pfn, vma_pagesize); |
1914 | ||
1915 | if (exec_fault) | |
1916 | invalidate_icache_guest_page(pfn, vma_pagesize); | |
1917 | ||
6396b852 PA |
1918 | /* |
1919 | * If we took an execution fault we have made the | |
1920 | * icache/dcache coherent above and should now let the s2 | |
1921 | * mapping be executable. | |
1922 | * | |
1923 | * Write faults (!exec_fault && FSC_PERM) are orthogonal to | |
1924 | * execute permissions, and we preserve whatever we have. | |
1925 | */ | |
1926 | needs_exec = exec_fault || | |
b757b47a WD |
1927 | (fault_status == FSC_PERM && |
1928 | stage2_is_exec(kvm, fault_ipa, vma_pagesize)); | |
6396b852 | 1929 | |
b8e0ba7c PA |
1930 | if (vma_pagesize == PUD_SIZE) { |
1931 | pud_t new_pud = kvm_pfn_pud(pfn, mem_type); | |
1932 | ||
1933 | new_pud = kvm_pud_mkhuge(new_pud); | |
1934 | if (writable) | |
1935 | new_pud = kvm_s2pud_mkwrite(new_pud); | |
1936 | ||
1937 | if (needs_exec) | |
1938 | new_pud = kvm_s2pud_mkexec(new_pud); | |
1939 | ||
1940 | ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud); | |
1941 | } else if (vma_pagesize == PMD_SIZE) { | |
f8df7338 PA |
1942 | pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type); |
1943 | ||
1944 | new_pmd = kvm_pmd_mkhuge(new_pmd); | |
1945 | ||
3f58bf63 | 1946 | if (writable) |
06485053 | 1947 | new_pmd = kvm_s2pmd_mkwrite(new_pmd); |
d0e22b4a | 1948 | |
6396b852 | 1949 | if (needs_exec) |
d0e22b4a | 1950 | new_pmd = kvm_s2pmd_mkexec(new_pmd); |
a15f6939 | 1951 | |
ad361f09 CD |
1952 | ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); |
1953 | } else { | |
f8df7338 | 1954 | pte_t new_pte = kvm_pfn_pte(pfn, mem_type); |
15a49a44 | 1955 | |
ad361f09 | 1956 | if (writable) { |
06485053 | 1957 | new_pte = kvm_s2pte_mkwrite(new_pte); |
15a49a44 | 1958 | mark_page_dirty(kvm, gfn); |
ad361f09 | 1959 | } |
a9c0e12e | 1960 | |
6396b852 | 1961 | if (needs_exec) |
d0e22b4a | 1962 | new_pte = kvm_s2pte_mkexec(new_pte); |
a15f6939 | 1963 | |
15a49a44 | 1964 | ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags); |
94f8e641 | 1965 | } |
ad361f09 | 1966 | |
94f8e641 | 1967 | out_unlock: |
ad361f09 | 1968 | spin_unlock(&kvm->mmu_lock); |
35307b9a | 1969 | kvm_set_pfn_accessed(pfn); |
94f8e641 | 1970 | kvm_release_pfn_clean(pfn); |
ad361f09 | 1971 | return ret; |
94f8e641 CD |
1972 | } |
1973 | ||
aeda9130 MZ |
1974 | /* |
1975 | * Resolve the access fault by making the page young again. | |
1976 | * Note that because the faulting entry is guaranteed not to be | |
1977 | * cached in the TLB, we don't need to invalidate anything. | |
06485053 CM |
1978 | * Only the HW Access Flag updates are supported for Stage 2 (no DBM), |
1979 | * so there is no need for atomic (pte|pmd)_mkyoung operations. | |
aeda9130 MZ |
1980 | */ |
1981 | static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) | |
1982 | { | |
eb3f0624 | 1983 | pud_t *pud; |
aeda9130 MZ |
1984 | pmd_t *pmd; |
1985 | pte_t *pte; | |
ba049e93 | 1986 | kvm_pfn_t pfn; |
aeda9130 MZ |
1987 | bool pfn_valid = false; |
1988 | ||
1989 | trace_kvm_access_fault(fault_ipa); | |
1990 | ||
1991 | spin_lock(&vcpu->kvm->mmu_lock); | |
1992 | ||
eb3f0624 | 1993 | if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte)) |
aeda9130 MZ |
1994 | goto out; |
1995 | ||
eb3f0624 PA |
1996 | if (pud) { /* HugeTLB */ |
1997 | *pud = kvm_s2pud_mkyoung(*pud); | |
1998 | pfn = kvm_pud_pfn(*pud); | |
1999 | pfn_valid = true; | |
2000 | } else if (pmd) { /* THP, HugeTLB */ | |
aeda9130 MZ |
2001 | *pmd = pmd_mkyoung(*pmd); |
2002 | pfn = pmd_pfn(*pmd); | |
2003 | pfn_valid = true; | |
eb3f0624 PA |
2004 | } else { |
2005 | *pte = pte_mkyoung(*pte); /* Just a page... */ | |
2006 | pfn = pte_pfn(*pte); | |
2007 | pfn_valid = true; | |
aeda9130 MZ |
2008 | } |
2009 | ||
aeda9130 MZ |
2010 | out: |
2011 | spin_unlock(&vcpu->kvm->mmu_lock); | |
2012 | if (pfn_valid) | |
2013 | kvm_set_pfn_accessed(pfn); | |
2014 | } | |
2015 | ||
94f8e641 CD |
2016 | /** |
2017 | * kvm_handle_guest_abort - handles all 2nd stage aborts | |
2018 | * @vcpu: the VCPU pointer | |
94f8e641 CD |
2019 | * |
2020 | * Any abort that gets to the host is almost guaranteed to be caused by a | |
2021 | * missing second stage translation table entry, which means either that the
2022 | * guest simply needs more memory and we must allocate an appropriate page, or
2023 | * that the guest tried to access I/O memory, which is emulated by user
2024 | * space. The distinction is based on the IPA causing the fault and whether this | |
2025 | * memory region has been registered as standard RAM by user space. | |
2026 | */ | |
74cc7e0c | 2027 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) |
342cd0ab | 2028 | { |
94f8e641 CD |
2029 | unsigned long fault_status; |
2030 | phys_addr_t fault_ipa; | |
2031 | struct kvm_memory_slot *memslot; | |
98047888 CD |
2032 | unsigned long hva; |
2033 | bool is_iabt, write_fault, writable; | |
94f8e641 CD |
2034 | gfn_t gfn; |
2035 | int ret, idx; | |
2036 | ||
621f48e4 TB |
2037 | fault_status = kvm_vcpu_trap_get_fault_type(vcpu); |
2038 | ||
2039 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); | |
bb428921 | 2040 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
621f48e4 | 2041 | |
bb428921 JM |
2042 | /* Synchronous External Abort? */ |
2043 | if (kvm_vcpu_dabt_isextabt(vcpu)) { | |
2044 | /* | |
2045 | * For RAS the host kernel may handle this abort. | |
2046 | * There is no need to pass the error into the guest. | |
2047 | */ | |
0db5e022 | 2048 | if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu))) |
621f48e4 | 2049 | return 1; |
621f48e4 | 2050 | |
bb428921 JM |
2051 | if (unlikely(!is_iabt)) { |
2052 | kvm_inject_vabt(vcpu); | |
2053 | return 1; | |
2054 | } | |
4055710b MZ |
2055 | } |
2056 | ||
7393b599 MZ |
2057 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), |
2058 | kvm_vcpu_get_hfar(vcpu), fault_ipa); | |
94f8e641 CD |
2059 | |
2060 | /* Check that the stage-2 fault is a translation, permission or access fault */
35307b9a MZ |
2061 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM && |
2062 | fault_status != FSC_ACCESS) { | |
0496daa5 CD |
2063 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", |
2064 | kvm_vcpu_trap_get_class(vcpu), | |
2065 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), | |
2066 | (unsigned long)kvm_vcpu_get_hsr(vcpu)); | |
94f8e641 CD |
2067 | return -EFAULT; |
2068 | } | |
2069 | ||
2070 | idx = srcu_read_lock(&vcpu->kvm->srcu); | |
2071 | ||
2072 | gfn = fault_ipa >> PAGE_SHIFT; | |
98047888 CD |
2073 | memslot = gfn_to_memslot(vcpu->kvm, gfn); |
2074 | hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); | |
a7d079ce | 2075 | write_fault = kvm_is_write_fault(vcpu); |
98047888 | 2076 | if (kvm_is_error_hva(hva) || (write_fault && !writable)) { |
94f8e641 CD |
2077 | if (is_iabt) { |
2078 | /* Prefetch Abort on I/O address */ | |
6d674e28 MZ |
2079 | ret = -ENOEXEC; |
2080 | goto out; | |
94f8e641 CD |
2081 | } |
2082 | ||
57c841f1 MZ |
2083 | /* |
2084 | * Check for a cache maintenance operation. Since we | |
2085 | * ended-up here, we know it is outside of any memory | |
2086 | * slot. But we can't find out if that is for a device, | |
2087 | * or if the guest is just being stupid. The only thing | |
2088 | * we know for sure is that this range cannot be cached. | |
2089 | * | |
2090 | * So let's assume that the guest is just being | |
2091 | * cautious, and skip the instruction. | |
2092 | */ | |
2093 | if (kvm_vcpu_dabt_is_cm(vcpu)) { | |
2094 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | |
2095 | ret = 1; | |
2096 | goto out_unlock; | |
2097 | } | |
2098 | ||
cfe3950c MZ |
2099 | /* |
2100 | * The IPA is reported as [MAX:12], so we need to | |
2101 | * complement it with the bottom 12 bits from the | |
2102 | * faulting VA. This is always 12 bits, irrespective | |
2103 | * of the page size. | |
2104 | */ | |
2105 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); | |
74cc7e0c | 2106 | ret = io_mem_abort(vcpu, fault_ipa); |
94f8e641 CD |
2107 | goto out_unlock; |
2108 | } | |
2109 | ||
c3058d5d | 2110 | /* Userspace should not be able to register out-of-bounds IPAs */ |
e55cac5b | 2111 | VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm)); |
c3058d5d | 2112 | |
aeda9130 MZ |
2113 | if (fault_status == FSC_ACCESS) { |
2114 | handle_access_fault(vcpu, fault_ipa); | |
2115 | ret = 1; | |
2116 | goto out_unlock; | |
2117 | } | |
2118 | ||
98047888 | 2119 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); |
94f8e641 CD |
2120 | if (ret == 0) |
2121 | ret = 1; | |
6d674e28 MZ |
2122 | out: |
2123 | if (ret == -ENOEXEC) { | |
2124 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); | |
2125 | ret = 1; | |
2126 | } | |
94f8e641 CD |
2127 | out_unlock: |
2128 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | |
2129 | return ret; | |
342cd0ab CD |
2130 | } |
2131 | ||
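In the MMIO path above, the IPA reported by the hardware is page-granular, so the handler splices in the low 12 bits of the faulting VA before handling the access. A minimal sketch of that composition with made-up register values:

#include <stdio.h>

int main(void)
{
	/* IPA as reported (bits [MAX:12] only), and the faulting VA. */
	unsigned long long fault_ipa = 0x8a7c3000ULL;
	unsigned long long hfar = 0xffff00000123a5c4ULL;

	/* fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); */
	fault_ipa |= hfar & ((1 << 12) - 1);

	/* Prints: complete fault IPA: 0x8a7c35c4 */
	printf("complete fault IPA: %#llx\n", fault_ipa);
	return 0;
}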
1d2ebacc MZ |
2132 | static int handle_hva_to_gpa(struct kvm *kvm, |
2133 | unsigned long start, | |
2134 | unsigned long end, | |
2135 | int (*handler)(struct kvm *kvm, | |
056aad67 SP |
2136 | gpa_t gpa, u64 size, |
2137 | void *data), | |
1d2ebacc | 2138 | void *data) |
d5d8184d CD |
2139 | { |
2140 | struct kvm_memslots *slots; | |
2141 | struct kvm_memory_slot *memslot; | |
1d2ebacc | 2142 | int ret = 0; |
d5d8184d CD |
2143 | |
2144 | slots = kvm_memslots(kvm); | |
2145 | ||
2146 | /* we only care about the pages that the guest sees */ | |
2147 | kvm_for_each_memslot(memslot, slots) { | |
2148 | unsigned long hva_start, hva_end; | |
056aad67 | 2149 | gfn_t gpa; |
d5d8184d CD |
2150 | |
2151 | hva_start = max(start, memslot->userspace_addr); | |
2152 | hva_end = min(end, memslot->userspace_addr + | |
2153 | (memslot->npages << PAGE_SHIFT)); | |
2154 | if (hva_start >= hva_end) | |
2155 | continue; | |
2156 | ||
056aad67 SP |
2157 | gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT; |
2158 | ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data); | |
d5d8184d | 2159 | } |
1d2ebacc MZ |
2160 | |
2161 | return ret; | |
d5d8184d CD |
2162 | } |
2163 | ||
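The hva_to_gfn_memslot() step in the loop above is plain linear arithmetic within a single slot: take the offset of the HVA into the slot's userspace mapping, convert it to pages, and add the slot's base GFN. An illustrative userspace sketch with made-up slot values, assuming 4K pages:

#include <stdio.h>

struct example_memslot {
	unsigned long long base_gfn;		/* first guest frame of the slot */
	unsigned long long userspace_addr;	/* HVA where the slot is mapped */
};

/* Stand-in for hva_to_gfn_memslot(); assumes hva lies inside the slot. */
static unsigned long long hva_to_gfn(unsigned long long hva,
				     const struct example_memslot *slot)
{
	return slot->base_gfn + ((hva - slot->userspace_addr) >> 12);
}

int main(void)
{
	struct example_memslot slot = {
		.base_gfn = 0x40000,			/* guest RAM at IPA 0x40000000 */
		.userspace_addr = 0x7f2a00000000ULL,
	};
	unsigned long long hva = 0x7f2a00123000ULL;

	/* Prints: hva 0x7f2a00123000 -> gpa 0x40123000 */
	printf("hva %#llx -> gpa %#llx\n", hva, hva_to_gfn(hva, &slot) << 12);
	return 0;
}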
056aad67 | 2164 | static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
d5d8184d | 2165 | { |
056aad67 | 2166 | unmap_stage2_range(kvm, gpa, size); |
1d2ebacc | 2167 | return 0; |
d5d8184d CD |
2168 | } |
2169 | ||
d5d8184d CD |
2170 | int kvm_unmap_hva_range(struct kvm *kvm, |
2171 | unsigned long start, unsigned long end) | |
2172 | { | |
2173 | if (!kvm->arch.pgd) | |
2174 | return 0; | |
2175 | ||
2176 | trace_kvm_unmap_hva_range(start, end); | |
2177 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); | |
2178 | return 0; | |
2179 | } | |
2180 | ||
056aad67 | 2181 | static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
d5d8184d CD |
2182 | { |
2183 | pte_t *pte = (pte_t *)data; | |
2184 | ||
056aad67 | 2185 | WARN_ON(size != PAGE_SIZE); |
15a49a44 MS |
2186 | /* |
2187 | * We can always call stage2_set_pte with the KVM_S2_FLAG_LOGGING_ACTIVE
2188 | * flag clear because MMU notifiers will have unmapped a huge PMD before | |
2189 | * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and | |
2190 | * therefore stage2_set_pte() never needs to clear out a huge PMD | |
2191 | * through this calling path. | |
2192 | */ | |
2193 | stage2_set_pte(kvm, NULL, gpa, pte, 0); | |
1d2ebacc | 2194 | return 0; |
d5d8184d CD |
2195 | } |
2196 | ||
2197 | ||
748c0e31 | 2198 | int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) |
d5d8184d CD |
2199 | { |
2200 | unsigned long end = hva + PAGE_SIZE; | |
694556d5 | 2201 | kvm_pfn_t pfn = pte_pfn(pte); |
d5d8184d CD |
2202 | pte_t stage2_pte; |
2203 | ||
2204 | if (!kvm->arch.pgd) | |
748c0e31 | 2205 | return 0; |
d5d8184d CD |
2206 | |
2207 | trace_kvm_set_spte_hva(hva); | |
694556d5 MZ |
2208 | |
2209 | /* | |
2210 | * We've moved a page around, probably through CoW, so let's treat it | |
2211 | * just like a translation fault and clean the cache to the PoC. | |
2212 | */ | |
2213 | clean_dcache_guest_page(pfn, PAGE_SIZE); | |
f8df7338 | 2214 | stage2_pte = kvm_pfn_pte(pfn, PAGE_S2); |
d5d8184d | 2215 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); |
748c0e31 LT |
2216 | |
2217 | return 0; | |
d5d8184d CD |
2218 | } |
2219 | ||
056aad67 | 2220 | static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
35307b9a | 2221 | { |
35a63966 | 2222 | pud_t *pud; |
35307b9a MZ |
2223 | pmd_t *pmd; |
2224 | pte_t *pte; | |
2225 | ||
35a63966 PA |
2226 | WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); |
2227 | if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte)) | |
35307b9a MZ |
2228 | return 0; |
2229 | ||
35a63966 PA |
2230 | if (pud) |
2231 | return stage2_pudp_test_and_clear_young(pud); | |
2232 | else if (pmd) | |
06485053 | 2233 | return stage2_pmdp_test_and_clear_young(pmd); |
35a63966 PA |
2234 | else |
2235 | return stage2_ptep_test_and_clear_young(pte); | |
35307b9a MZ |
2236 | } |
2237 | ||
056aad67 | 2238 | static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
35307b9a | 2239 | { |
35a63966 | 2240 | pud_t *pud; |
35307b9a MZ |
2241 | pmd_t *pmd; |
2242 | pte_t *pte; | |
2243 | ||
35a63966 PA |
2244 | WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); |
2245 | if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte)) | |
35307b9a MZ |
2246 | return 0; |
2247 | ||
35a63966 PA |
2248 | if (pud) |
2249 | return kvm_s2pud_young(*pud); | |
2250 | else if (pmd) | |
35307b9a | 2251 | return pmd_young(*pmd); |
35a63966 | 2252 | else |
35307b9a | 2253 | return pte_young(*pte); |
35307b9a MZ |
2254 | } |
2255 | ||
2256 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) | |
2257 | { | |
7e5a6722 SP |
2258 | if (!kvm->arch.pgd) |
2259 | return 0; | |
35307b9a MZ |
2260 | trace_kvm_age_hva(start, end); |
2261 | return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); | |
2262 | } | |
2263 | ||
2264 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | |
2265 | { | |
7e5a6722 SP |
2266 | if (!kvm->arch.pgd) |
2267 | return 0; | |
35307b9a | 2268 | trace_kvm_test_age_hva(hva); |
cf2d23e0 GS |
2269 | return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, |
2270 | kvm_test_age_hva_handler, NULL); | |
35307b9a MZ |
2271 | } |
2272 | ||
d5d8184d CD |
2273 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) |
2274 | { | |
c1a33aeb | 2275 | kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); |
d5d8184d CD |
2276 | } |
2277 | ||
342cd0ab CD |
2278 | phys_addr_t kvm_mmu_get_httbr(void) |
2279 | { | |
e4c5a685 AB |
2280 | if (__kvm_cpu_uses_extended_idmap()) |
2281 | return virt_to_phys(merged_hyp_pgd); | |
2282 | else | |
2283 | return virt_to_phys(hyp_pgd); | |
342cd0ab CD |
2284 | } |
2285 | ||
5a677ce0 MZ |
2286 | phys_addr_t kvm_get_idmap_vector(void) |
2287 | { | |
2288 | return hyp_idmap_vector; | |
2289 | } | |
2290 | ||
0535a3e2 MZ |
2291 | static int kvm_map_idmap_text(pgd_t *pgd) |
2292 | { | |
2293 | int err; | |
2294 | ||
2295 | /* Create the idmap in the boot page tables */ | |
98732d1b | 2296 | err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(), |
0535a3e2 MZ |
2297 | hyp_idmap_start, hyp_idmap_end, |
2298 | __phys_to_pfn(hyp_idmap_start), | |
2299 | PAGE_HYP_EXEC); | |
2300 | if (err) | |
2301 | kvm_err("Failed to idmap %lx-%lx\n", | |
2302 | hyp_idmap_start, hyp_idmap_end); | |
2303 | ||
2304 | return err; | |
2305 | } | |
2306 | ||
342cd0ab CD |
2307 | int kvm_mmu_init(void) |
2308 | { | |
2fb41059 MZ |
2309 | int err; |
2310 | ||
0a78791c | 2311 | hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start); |
46fef158 | 2312 | hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE); |
0a78791c | 2313 | hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end); |
46fef158 | 2314 | hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE); |
0a78791c | 2315 | hyp_idmap_vector = __pa_symbol(__kvm_hyp_init); |
5a677ce0 | 2316 | |
06f75a1f AB |
2317 | /* |
2318 | * We rely on the linker script to ensure at build time that the HYP | |
2319 | * init code does not cross a page boundary. | |
2320 | */ | |
2321 | BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); | |
5a677ce0 | 2322 | |
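	/*
	 * Example of the check above with illustrative, made-up addresses and
	 * 4K pages. An idmap text section that fits in one page passes:
	 *   start = 0xffff000010a31000, end - 1 = 0xffff000010a31fff
	 *   (start ^ (end - 1)) & PAGE_MASK == 0
	 * while one spanning two pages would trigger the BUG_ON():
	 *   start = 0xffff000010a31000, end - 1 = 0xffff000010a32fff
	 *   (start ^ (end - 1)) & PAGE_MASK == 0x3000
	 */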
b4ef0499 MZ |
2323 | kvm_debug("IDMAP page: %lx\n", hyp_idmap_start); |
2324 | kvm_debug("HYP VA range: %lx:%lx\n", | |
2325 | kern_hyp_va(PAGE_OFFSET), | |
2326 | kern_hyp_va((unsigned long)high_memory - 1)); | |
eac378a9 | 2327 | |
6c41a413 | 2328 | if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && |
ed57cac8 | 2329 | hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) && |
d2896d4b | 2330 | hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) { |
eac378a9 MZ |
2331 | /* |
2332 | * The idmap page is intersecting with the VA space, | |
2333 | * it is not safe to continue further. | |
2334 | */ | |
2335 | kvm_err("IDMAP intersecting with HYP VA, unable to continue\n"); | |
2336 | err = -EINVAL; | |
2337 | goto out; | |
2338 | } | |
2339 | ||
38f791a4 | 2340 | hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); |
0535a3e2 | 2341 | if (!hyp_pgd) { |
d5d8184d | 2342 | kvm_err("Hyp mode PGD not allocated\n"); |
2fb41059 MZ |
2343 | err = -ENOMEM; |
2344 | goto out; | |
2345 | } | |
2346 | ||
0535a3e2 MZ |
2347 | if (__kvm_cpu_uses_extended_idmap()) { |
2348 | boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | |
2349 | hyp_pgd_order); | |
2350 | if (!boot_hyp_pgd) { | |
2351 | kvm_err("Hyp boot PGD not allocated\n"); | |
2352 | err = -ENOMEM; | |
2353 | goto out; | |
2354 | } | |
2fb41059 | 2355 | |
0535a3e2 MZ |
2356 | err = kvm_map_idmap_text(boot_hyp_pgd); |
2357 | if (err) | |
2358 | goto out; | |
d5d8184d | 2359 | |
e4c5a685 AB |
2360 | merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); |
2361 | if (!merged_hyp_pgd) { | |
2362 | kvm_err("Failed to allocate extra HYP pgd\n"); | |
2363 | goto out; | |
2364 | } | |
2365 | __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd, | |
2366 | hyp_idmap_start); | |
0535a3e2 MZ |
2367 | } else { |
2368 | err = kvm_map_idmap_text(hyp_pgd); | |
2369 | if (err) | |
2370 | goto out; | |
5a677ce0 MZ |
2371 | } |
2372 | ||
e3f019b3 | 2373 | io_map_base = hyp_idmap_start; |
d5d8184d | 2374 | return 0; |
2fb41059 | 2375 | out: |
4f728276 | 2376 | free_hyp_pgds(); |
2fb41059 | 2377 | return err; |
342cd0ab | 2378 | } |
df6ce24f EA |
2379 | |
2380 | void kvm_arch_commit_memory_region(struct kvm *kvm, | |
09170a49 | 2381 | const struct kvm_userspace_memory_region *mem, |
9d4c197c | 2382 | struct kvm_memory_slot *old, |
f36f3f28 | 2383 | const struct kvm_memory_slot *new, |
df6ce24f EA |
2384 | enum kvm_mr_change change) |
2385 | { | |
c6473555 MS |
2386 | /* |
2387 | * At this point the memslot has been committed and there is an
656012c7 | 2388 | * allocated dirty_bitmap[]; dirty pages will be tracked while the
c6473555 MS |
2389 | * memory slot is write protected. |
2390 | */ | |
c862626e KZ |
2391 | if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { |
2392 | /* | |
2393 | * If initial-all-set mode is in use, we don't need to write
2394 | * protect any pages because they're all reported as dirty. | |
2395 | * Huge pages and normal pages will be write protected gradually.
2396 | */ | |
2397 | if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) { | |
2398 | kvm_mmu_wp_memory_region(kvm, mem->slot); | |
2399 | } | |
2400 | } | |
df6ce24f EA |
2401 | } |
2402 | ||
2403 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | |
2404 | struct kvm_memory_slot *memslot, | |
09170a49 | 2405 | const struct kvm_userspace_memory_region *mem, |
df6ce24f EA |
2406 | enum kvm_mr_change change) |
2407 | { | |
8eef9123 AB |
2408 | hva_t hva = mem->userspace_addr; |
2409 | hva_t reg_end = hva + mem->memory_size; | |
2410 | bool writable = !(mem->flags & KVM_MEM_READONLY); | |
2411 | int ret = 0; | |
2412 | ||
15a49a44 MS |
2413 | if (change != KVM_MR_CREATE && change != KVM_MR_MOVE && |
2414 | change != KVM_MR_FLAGS_ONLY) | |
8eef9123 AB |
2415 | return 0; |
2416 | ||
c3058d5d CD |
2417 | /* |
2418 | * Prevent userspace from creating a memory region outside of the IPA | |
2419 | * space addressable by the KVM guest IPA space. | |
2420 | */ | |
2421 | if (memslot->base_gfn + memslot->npages >= | |
e55cac5b | 2422 | (kvm_phys_size(kvm) >> PAGE_SHIFT)) |
c3058d5d CD |
2423 | return -EFAULT; |
2424 | ||
89154dd5 | 2425 | mmap_read_lock(current->mm); |
8eef9123 AB |
2426 | /* |
2427 | * A memory region could potentially cover multiple VMAs, and any holes | |
2428 | * between them, so iterate over all of them to find out if we can map | |
2429 | * any of them right now. | |
2430 | * | |
2431 | * +--------------------------------------------+ | |
2432 | * +---------------+----------------+ +----------------+ | |
2433 | * | : VMA 1 | VMA 2 | | VMA 3 : | | |
2434 | * +---------------+----------------+ +----------------+ | |
2435 | * | memory region | | |
2436 | * +--------------------------------------------+ | |
2437 | */ | |
2438 | do { | |
2439 | struct vm_area_struct *vma = find_vma(current->mm, hva); | |
2440 | hva_t vm_start, vm_end; | |
2441 | ||
2442 | if (!vma || vma->vm_start >= reg_end) | |
2443 | break; | |
2444 | ||
8eef9123 AB |
2445 | /* |
2446 | * Take the intersection of this VMA with the memory region | |
2447 | */ | |
2448 | vm_start = max(hva, vma->vm_start); | |
2449 | vm_end = min(reg_end, vma->vm_end); | |
2450 | ||
2451 | if (vma->vm_flags & VM_PFNMAP) { | |
2452 | gpa_t gpa = mem->guest_phys_addr + | |
2453 | (vm_start - mem->userspace_addr); | |
ca09f02f MM |
2454 | phys_addr_t pa; |
2455 | ||
2456 | pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; | |
2457 | pa += vm_start - vma->vm_start; | |
8eef9123 | 2458 | |
15a49a44 | 2459 | /* IO region dirty page logging not allowed */ |
72f31048 MZ |
2460 | if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { |
2461 | ret = -EINVAL; | |
2462 | goto out; | |
2463 | } | |
15a49a44 | 2464 | |
8eef9123 AB |
2465 | ret = kvm_phys_addr_ioremap(kvm, gpa, pa, |
2466 | vm_end - vm_start, | |
2467 | writable); | |
2468 | if (ret) | |
2469 | break; | |
2470 | } | |
2471 | hva = vm_end; | |
2472 | } while (hva < reg_end); | |
2473 | ||
15a49a44 | 2474 | if (change == KVM_MR_FLAGS_ONLY) |
72f31048 | 2475 | goto out; |
15a49a44 | 2476 | |
849260c7 AB |
2477 | spin_lock(&kvm->mmu_lock); |
2478 | if (ret) | |
8eef9123 | 2479 | unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size); |
849260c7 AB |
2480 | else |
2481 | stage2_flush_memslot(kvm, memslot); | |
2482 | spin_unlock(&kvm->mmu_lock); | |
72f31048 | 2483 | out: |
89154dd5 | 2484 | mmap_read_unlock(current->mm); |
8eef9123 | 2485 | return ret; |
df6ce24f EA |
2486 | } |
2487 | ||
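In the VM_PFNMAP branch above, both the guest physical address and the host physical address are derived by offsetting from the start of the VMA/memslot intersection. A worked example with made-up values, assuming 4K pages:

#include <stdio.h>

int main(void)
{
	const unsigned long long page_size = 0x1000;	/* assume 4K pages */

	/* Hypothetical memslot registered by userspace... */
	unsigned long long guest_phys_addr = 0x09000000ULL;
	unsigned long long userspace_addr = 0x7f5000000000ULL;

	/* ...backed by a VM_PFNMAP VMA that starts one page earlier. */
	unsigned long long vma_start = 0x7f4ffffff000ULL;
	unsigned long long vma_pgoff = 0x2c020ULL;	/* first PFN of the mapping */

	/* The intersection starts at the memslot start in this example. */
	unsigned long long vm_start = userspace_addr;

	unsigned long long gpa = guest_phys_addr + (vm_start - userspace_addr);
	unsigned long long pa = vma_pgoff * page_size + (vm_start - vma_start);

	/* Prints: ioremap gpa 0x9000000 -> pa 0x2c021000 */
	printf("ioremap gpa %#llx -> pa %#llx\n", gpa, pa);
	return 0;
}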
e96c81ee | 2488 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) |
df6ce24f EA |
2489 | { |
2490 | } | |
2491 | ||
15248258 | 2492 | void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) |
df6ce24f EA |
2493 | { |
2494 | } | |
2495 | ||
2496 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | |
2497 | { | |
293f2936 | 2498 | kvm_free_stage2_pgd(kvm); |
df6ce24f EA |
2499 | } |
2500 | ||
2501 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | |
2502 | struct kvm_memory_slot *slot) | |
2503 | { | |
8eef9123 AB |
2504 | gpa_t gpa = slot->base_gfn << PAGE_SHIFT; |
2505 | phys_addr_t size = slot->npages << PAGE_SHIFT; | |
2506 | ||
2507 | spin_lock(&kvm->mmu_lock); | |
2508 | unmap_stage2_range(kvm, gpa, size); | |
2509 | spin_unlock(&kvm->mmu_lock); | |
df6ce24f | 2510 | } |
3c1e7165 MZ |
2511 | |
2512 | /* | |
2513 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). | |
2514 | * | |
2515 | * Main problems: | |
2516 | * - S/W ops are local to a CPU (not broadcast) | |
2517 | * - We have line migration behind our back (speculation) | |
2518 | * - System caches don't support S/W at all (damn!) | |
2519 | * | |
2520 | * In the face of the above, the best we can do is to try and convert | |
2521 | * S/W ops to VA ops. Because the guest is not allowed to infer the | |
2522 | * S/W to PA mapping, it can only use S/W to nuke the whole cache, | |
2523 | * which is a rather good thing for us. | |
2524 | * | |
2525 | * Also, it is only used when turning caches on/off ("The expected | |
2526 | * usage of the cache maintenance instructions that operate by set/way | |
2527 | * is associated with the cache maintenance instructions associated | |
2528 | * with the powerdown and powerup of caches, if this is required by | |
2529 | * the implementation."). | |
2530 | * | |
2531 | * We use the following policy: | |
2532 | * | |
2533 | * - If we trap a S/W operation, we enable VM trapping to detect | |
2534 | * caches being turned on/off, and do a full clean. | |
2535 | * | |
2536 | * - We flush the caches both when the caches are turned on and when they are turned off.
2537 | * | |
2538 | * - Once the caches are enabled, we stop trapping VM ops. | |
2539 | */ | |
2540 | void kvm_set_way_flush(struct kvm_vcpu *vcpu) | |
2541 | { | |
3df59d8d | 2542 | unsigned long hcr = *vcpu_hcr(vcpu); |
3c1e7165 MZ |
2543 | |
2544 | /* | |
2545 | * If this is the first time we do a S/W operation | |
2546 | * (i.e. HCR_TVM not set) flush the whole memory, and set the | |
2547 | * VM trapping. | |
2548 | * | |
2549 | * Otherwise, rely on the VM trapping to wait for the MMU + | |
2550 | * Caches to be turned off. At that point, we'll be able to | |
2551 | * clean the caches again. | |
2552 | */ | |
2553 | if (!(hcr & HCR_TVM)) { | |
2554 | trace_kvm_set_way_flush(*vcpu_pc(vcpu), | |
2555 | vcpu_has_cache_enabled(vcpu)); | |
2556 | stage2_flush_vm(vcpu->kvm); | |
3df59d8d | 2557 | *vcpu_hcr(vcpu) = hcr | HCR_TVM; |
3c1e7165 MZ |
2558 | } |
2559 | } | |
2560 | ||
2561 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) | |
2562 | { | |
2563 | bool now_enabled = vcpu_has_cache_enabled(vcpu); | |
2564 | ||
2565 | /* | |
2566 | * If switching the MMU+caches on, need to invalidate the caches. | |
2567 | * If switching it off, need to clean the caches. | |
2568 | * Clean + invalidate does the trick always. | |
2569 | */ | |
2570 | if (now_enabled != was_enabled) | |
2571 | stage2_flush_vm(vcpu->kvm); | |
2572 | ||
2573 | /* Caches are now on, stop trapping VM ops (until a S/W op) */ | |
2574 | if (now_enabled) | |
3df59d8d | 2575 | *vcpu_hcr(vcpu) &= ~HCR_TVM; |
3c1e7165 MZ |
2576 | |
2577 | trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled); | |
2578 | } |