/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif
#include <asm/inst.h>

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used
 * to support software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong. go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
				   unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type,
				 unsigned long *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);

extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu,
					    ulong srr1_flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu,
				      ulong srr1_flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu,
					ulong srr1_flags);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu,
					  ulong srr1_flags);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu,
					  ulong srr1_flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong srr1_flags,
					   ulong dar,
					   ulong dsisr);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong srr1_flags);

extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			    struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
					     struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
					      struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern int kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
					 struct kvm_create_spapr_tce_64 *args);
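/*
 * Check a TCE access against the table geometry: H_SUCCESS when the
 * (ioba, npages) range fits within the table described by stt,
 * H_PARAMETER otherwise.
 */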
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
	(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset,         \
			(stt)->size, (ioba), (npages)) ?                \
			H_PARAMETER : H_SUCCESS)
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				      unsigned long liobn, unsigned long ioba,
				      unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
			       unsigned long liobn, unsigned long ioba,
			       unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				const struct kvm_memory_slot *old,
				struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);
extern int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

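/*
 * Value buffer for the ONE_REG (KVM_GET_ONE_REG/KVM_SET_ONE_REG)
 * interface; the member that is live depends on the size encoded in the
 * register id (see one_reg_size() below).
 */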
union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};

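/*
 * Per-flavour backend operations, instantiated once for HV and once for
 * PR KVM (kvmppc_hv_ops/kvmppc_pr_ops below); a VM's active set is
 * reachable via kvm->arch.kvm_ops (see is_kvmppc_hv_enabled() below).
 */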
struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	int (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			     unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
	int (*create_vm_debugfs)(struct kvm *kvm);
	int (*create_vcpu_debugfs)(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

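/*
 * Retrieve vcpu->arch.last_inst, loading it via kvmppc_load_last_inst()
 * if the exit path failed to fetch it, and byteswapping it when the
 * guest endianness differs from the host's. On PPC64 a non-zero upper
 * word means a prefixed instruction.
 */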
static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
	enum instruction_fetch_type type, ppc_inst_t *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if the exit path failed
	 * to fetch it */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret != EMULATE_DONE) {
		*inst = ppc_inst(KVM_INST_FETCH_FAILED);
		return ret;
	}

#ifdef CONFIG_PPC64
	/* Is this a prefixed instruction? */
	if ((vcpu->arch.last_inst >> 32) != 0) {
		u32 prefix = vcpu->arch.last_inst >> 32;
		u32 suffix = vcpu->arch.last_inst;

		if (kvmppc_need_byteswap(vcpu)) {
			prefix = swab32(prefix);
			suffix = swab32(suffix);
		}
		*inst = ppc_inst_prefix(prefix, suffix);
		return EMULATE_DONE;
	}
#endif

	fetched_inst = kvmppc_need_byteswap(vcpu) ?
		swab32(vcpu->arch.last_inst) :
		vcpu->arch.last_inst;
	*inst = ppc_inst(fetched_inst);
	return EMULATE_DONE;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts inst bits, numbered according to the Power ISA convention:
 * the leftmost (most significant) bit is bit 0. Both end bits are
 * included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits, with the same bit-numbering convention.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}

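/*
 * Worked example: kvmppc_get_field(inst, 60, 63) computes
 * mask = (1 << 4) - 1 and returns (inst >> 0) & 0xf, i.e. the four
 * least-significant bits; kvmppc_set_field(inst, 60, 63, v) writes v
 * back into those same bits.
 */
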
#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})

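/*
 * one_reg_size() decodes the size in bytes encoded in a ONE_REG id, so
 * get_reg_val() picks the union member matching the register width.
 * Illustrative use in a get_one_reg implementation (the case label is
 * just an example):
 *
 *	case KVM_REG_PPC_DAR:
 *		*val = get_reg_val(id, kvmppc_get_dar(vcpu));
 *		break;
 */
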
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

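/*
 * Record the physical and virtual addresses of the XIVE thread
 * interrupt management area (TIMA) in the target CPU's PACA.
 */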
static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

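/* Consume (read and clear) the XIRR value stashed in this CPU's PACA. */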
static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
 * a CPU thread that's running/napping inside a guest is by default regarded
 * as a request to wake the CPU (if needed) and continue execution within the
 * guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 1);
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 0);
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void) { return false; }

#endif

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

#ifndef CONFIG_PPC_BOOK3S

static inline bool kvmhv_is_nestedv2(void)
{
	return false;
}

static inline bool kvmhv_is_nestedv1(void)
{
	return false;
}

static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
					       struct pt_regs *regs)
{
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
						   struct pt_regs *regs)
{
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
	return 0;
}

static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
	return 0;
}

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the P9 new interrupt controller, while the second "xive" is the
 * legacy "eXternal Interrupt Vector Entry" which is the configuration of an
 * interrupt on the "xics" interrupt controller on P8 and earlier. Those
 * two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
/* Stubs take 'unsigned long host_irq' to match the declarations above. */
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
						  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

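/*
 * True when the host's own interrupt controller is XIVE (powernv with
 * XIVE enabled and HV mode), i.e. guest XICS support runs on top of the
 * XIVE hardware rather than a native XICS.
 */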
#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations that we want to be able to invoke while running
 * in real mode in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core *rm_core;
	void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct folio *folio;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	folio = page_folio(pfn_to_page(pfn));
	if (!test_bit(PG_dcache_clean, &folio->flags)) {
		flush_dcache_icache_folio(folio);
		set_bit(PG_dcache_clean, &folio->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr)		\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (iden)							\
		WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);	\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu((__be##size __force)vcpu->arch.shared->reg); \
	else								\
		return le##size##_to_cpu((__le##size __force)vcpu->arch.shared->reg); \
}									\

#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = (u##size __force)cpu_to_be##size(val); \
	else								\
		vcpu->arch.shared->reg = (u##size __force)cpu_to_le##size(val); \
									\
	if (iden)							\
		kvmhv_nestedv2_mark_dirty(vcpu, iden);			\
}									\

#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden)		\
	KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden)		\
	KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden)		\

#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr)		\
	KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr)		\
	KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr)		\

#ifdef CONFIG_KVM_BOOKE_HV

#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
	KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr)		\

#else

#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
	KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden)		\

#endif

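/*
 * Each invocation below expands to kvmppc_get_<reg>() and (except for
 * msr) kvmppc_set_<reg>() accessors. With CONFIG_KVM_BOOKE_HV the
 * SPRNG variants read/write the guest SPR directly; otherwise the value
 * lives in the shared struct, with an optional nestedv2 guest state id
 * (iden) used to reload or mark it dirty.
 */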
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64, 0)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0, KVMPPC_GSID_SPRG0)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1, KVMPPC_GSID_SPRG1)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2, KVMPPC_GSID_SPRG2)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3, KVMPPC_GSID_SPRG3)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0, KVMPPC_GSID_SRR0)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1, KVMPPC_GSID_SRR1)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR, KVMPPC_GSID_DAR)
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR, 0)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64, KVMPPC_GSID_MSR)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
}
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32, KVMPPC_GSID_DSISR)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32, 0)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64, 0)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64, 0)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64, 0)
KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64, 0)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}

static inline void kvmppc_fix_ee_after_exit(void)
{
#ifdef CONFIG_PPC64
	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ALL_DISABLED);
#endif

	trace_hardirqs_off();
}

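/*
 * Compute the effective address of an indexed-form load/store:
 * (RA|0) + RB, truncated to 32 bits when the guest is not in 64-bit
 * mode (MSR_CM on Book3E, MSR_SF on Book3S).
 */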
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */