/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/guest-state-buffer.h>

struct kvmppc_bat {
        u64 raw;
        u32 bepi;
        u32 bepi_mask;
        u32 brpn;
        u8 wimg;
        u8 pp;
        bool vs : 1;
        bool vp : 1;
};

struct kvmppc_sid_map {
        u64 guest_vsid;
        u64 guest_esid;
        u64 host_vsid;
        bool valid : 1;
};

#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)
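
/*
 * Note (added, illustrative): the SID_MAP_NUM-entry sid_map[] table in
 * struct kvmppc_vcpu_book3s below is indexed by a hash of the guest
 * segment ID masked with SID_MAP_MASK.
 */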

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS    1
#else
#define SID_CONTEXTS    128
#define VSID_POOL_SIZE  (SID_CONTEXTS * 16)
#endif

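/*
 * Note (added): one shadow HPTE entry; it sits on several hash lists at
 * once (keyed by PTE, long PTE, VPTE, long VPTE, and 64k VPTE on Book3S-64)
 * so the shadow MMU can find and invalidate it cheaply from any key.
 */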
struct hpte_cache {
        struct hlist_node list_pte;
        struct hlist_node list_pte_long;
        struct hlist_node list_vpte;
        struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
        struct hlist_node list_vpte_64k;
#endif
        struct rcu_head rcu_head;
        u64 host_vpn;
        u64 pfn;
        ulong slot;
        struct kvmppc_pte pte;
        int pagesize;
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits.  This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
        int n_runnable;
        int num_threads;
        int entry_exit_map;
        int napping_threads;
        int first_vcpuid;
        u16 pcpu;
        u16 last_cpu;
        u8 vcore_state;
        u8 in_guest;
        struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
        struct list_head preempt_list;
        spinlock_t lock;
        struct rcuwait wait;
        spinlock_t stoltb_lock;         /* protects stolen_tb and preempt_tb */
        u64 stolen_tb;
        u64 preempt_tb;
        struct kvm_vcpu *runner;
        struct kvm *kvm;
        u64 tb_offset;                  /* guest timebase - host timebase */
        u64 tb_offset_applied;          /* timebase offset currently in force */
        ulong lpcr;
        u32 arch_compat;
        ulong pcr;
        ulong dpdes;                    /* doorbell state (POWER8) */
        ulong vtb;                      /* virtual timebase */
        ulong conferring_threads;
        unsigned int halt_poll_ns;
        atomic_t online_count;
};
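
/*
 * Example (added, illustrative): entry_exit_map == 0x0103 means threads 0
 * and 1 are in the entry bitmap (low 8 bits, 0x03) and thread 0 is in the
 * exit bitmap (next 8 bits, 0x01).
 */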

struct kvmppc_vcpu_book3s {
        struct kvmppc_sid_map sid_map[SID_MAP_NUM];
        struct {
                u64 esid;
                u64 vsid;
        } slb_shadow[64];
        u8 slb_shadow_max;
        struct kvmppc_bat ibat[8];
        struct kvmppc_bat dbat[8];
        u64 hid[6];
        u64 gqr[8];
        u64 sdr1;
        u64 hior;
        u64 msr_mask;
        u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
        u32 vsid_pool[VSID_POOL_SIZE];
        u32 vsid_next;
#else
        u64 proto_vsid_first;
        u64 proto_vsid_max;
        u64 proto_vsid_next;
#endif
        int context_id[SID_CONTEXTS];

        bool hior_explicit;             /* HIOR is set by ioctl, not PVR */

        struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
        struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
        struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
        struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
        struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
        int hpte_cache_count;
        spinlock_t mmu_lock;
};

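/*
 * Note (added, interpretive): the values below are flag bits OR'd into
 * shadow VSIDs so the different translation contexts (real mode, BAT
 * mappings, 64k and 1T segments, split real mode with IR/DR off, and
 * problem state) never collide in the shadow MMU.
 */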
#define VSID_REAL       0x07ffffffffc00000ULL
#define VSID_BAT        0x07ffffffffb00000ULL
#define VSID_64K        0x0800000000000000ULL
#define VSID_1T         0x1000000000000000ULL
#define VSID_REAL_DR    0x2000000000000000ULL
#define VSID_REAL_IR    0x4000000000000000ULL
#define VSID_PR         0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
                               bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
                                       unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
                                     unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
                                  unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
                                          unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
                                                     gva_t eaddr, void *to, void *from,
                                                     unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
                                        void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
                                      void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
                                      struct kvmppc_pte *gpte, u64 root,
                                      u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
                                            struct kvmppc_pte *gpte, u64 table,
                                            int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                  struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
                                    unsigned int pshift, u64 lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
                             unsigned int shift,
                             const struct kvm_memory_slot *memslot,
                             u64 lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
                                    bool writing, unsigned long gpa,
                                    u64 lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
                                          unsigned long gpa,
                                          struct kvm_memory_slot *memslot,
                                          bool writing,
                                          pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
                                      u64 lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                            unsigned long gfn);
extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                          unsigned long gfn);
extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                               unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
                                          struct kvm_memory_slot *memslot,
                                          unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
                                       const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
                                          unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
                           bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
                                   bool writing, bool *writable, struct page **page);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                                    unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
                                    unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
                                   unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
                           unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
                                   unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
                                    unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                              long pte_index, unsigned long pteh, unsigned long ptel,
                              pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
                               unsigned long pte_index, unsigned long avpn,
                               unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot,
                                        unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
                                     struct kvm_memory_slot *memslot,
                                     unsigned long *map);
extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
                                           unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
                               unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

long kvmppc_read_intr(void);
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

extern unsigned long nested_capabilities;
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_flush_lpid(u64 lpid);
void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
                             unsigned long type, unsigned long pg_sizes,
                             unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
                          u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
                                   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

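/*
 * Note (added): __kvmhv_is_nestedv2 is a static key, so the two helpers
 * below patch down to a single nop or branch at runtime instead of
 * loading and testing a flag.
 */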
extern struct static_key_false __kvmhv_is_nestedv2;

static inline bool kvmhv_is_nestedv2(void)
{
        return static_branch_unlikely(&__kvmhv_is_nestedv2);
}

static inline bool kvmhv_is_nestedv1(void)
{
        return !static_branch_likely(&__kvmhv_is_nestedv2);
}

#else

static inline bool kvmhv_is_nestedv2(void)
{
        return false;
}

static inline bool kvmhv_is_nestedv1(void)
{
        return false;
}

#endif

int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);

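/*
 * Note (added): these wrappers are no-ops (return 0) unless the guest is
 * run via the nestedv2 guest-state-buffer API, so callers may use them
 * unconditionally.
 */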
static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
                                               struct pt_regs *regs)
{
        if (kvmhv_is_nestedv2())
                return __kvmhv_nestedv2_reload_ptregs(vcpu, regs);
        return 0;
}

static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
                                                   struct pt_regs *regs)
{
        if (kvmhv_is_nestedv2())
                return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs);
        return 0;
}

static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
        if (kvmhv_is_nestedv2())
                return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
        return 0;
}

static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
        if (kvmhv_is_nestedv2())
                return __kvmhv_nestedv2_cached_reload(vcpu, iden);
        return 0;
}

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.book3s;
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
        vcpu->arch.regs.gpr[num] = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num));
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
        return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
        vcpu->arch.regs.ccr = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0);
        return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.xer = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0);
        return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.ctr = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0);
        return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.link = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0);
        return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.nip = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0);
        return vcpu->arch.regs.nip;
}

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
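
/*
 * Note (added): the guest needs byteswapped accesses when its MSR_LE bit
 * differs from the endianness the host kernel was built with.
 */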
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
        return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault_dar;
}

static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
        return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
}

static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
{
        vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}

static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0);
        return vcpu->arch.fp.fpscr;
}

static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
{
        vcpu->arch.fp.fpscr = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR);
}

static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
        return vcpu->arch.fp.fpr[i][j];
}

static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
                                      u64 val)
{
        vcpu->arch.fp.fpr[i][j] = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}

#ifdef CONFIG_ALTIVEC
static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0);
        *v = vcpu->arch.vr.vr[i];
}

static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
                                     vector128 *val)
{
        vcpu->arch.vr.vr[i] = *val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i));
}

static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0);
        return vcpu->arch.vr.vscr.u[3];
}

static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
{
        vcpu->arch.vr.vscr.u[3] = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR);
}
#endif

#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)                \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{                                                                       \
        vcpu->arch.reg = val;                                           \
        kvmhv_nestedv2_mark_dirty(vcpu, iden);                          \
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)                \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)           \
{                                                                       \
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);          \
        return vcpu->arch.reg;                                          \
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden)                    \
        KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)                \
        KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)

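/*
 * Example (added): KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
 * below expands to kvmppc_set_tar() and kvmppc_get_tar(), which access
 * vcpu->arch.tar and keep the nestedv2 guest-state buffer in sync.
 */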
KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC)
KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE)

#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)               \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{                                                                       \
        vcpu->arch.vcore->reg = val;                                    \
        kvmhv_nestedv2_mark_dirty(vcpu, iden);                          \
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)               \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)           \
{                                                                       \
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);          \
        return vcpu->arch.vcore->reg;                                   \
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden)                   \
        KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)               \
        KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)

KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
KVMPPC_BOOK3S_VCORE_ACCESSOR(dpdes, 64, KVMPPC_GSID_DPDES)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)

static inline u64 kvmppc_get_tb_offset(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.vcore->tb_offset;
}

static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
        return vcpu->arch.dec_expires;
}

static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
{
        vcpu->arch.dec_expires = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
}

/* Expiry time of vcpu DEC relative to host TB */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
        return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu);
}

static inline bool is_kvmppc_resume_guest(int r)
{
        return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
        /* Only PR KVM supports the magic page */
        return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/*
 * Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls.
 */
#define OSI_SC_MAGIC_R3                 0x113724FA
#define OSI_SC_MAGIC_R4                 0x77810F9B

#define INS_DCBZ                        0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW                          0x7fe00008

#define SPLIT_HACK_MASK                 0xff000000
#define SPLIT_HACK_OFFS                 0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using a
 * stride of 8 and 1 thread per core, so the remaining offsets of 1, 5, 3 and 7
 * must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8.  For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
        const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
        int stride = kvm->arch.emul_smt_mode;
        int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
        u32 packed_id;

        if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
                return 0;
        packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
        if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
                return 0;
        return packed_id;
}
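
/*
 * Worked example (added): with an emulated SMT stride of 8, id = KVM_MAX_VCPUS + 3
 * falls in block 1, so the packed ID is (3 + block_offsets[1]) = 3 + 4 = 7.
 */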

#endif /* __ASM_KVM_BOOK3S_H__ */