KVM: PPC: Book3S HV: Call kvmppc_handle_exit_hv() with vcore unlocked
arch/powerpc/include/asm/kvm_ppc.h
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to implement
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

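/*
 * Illustrative sketch (assumption, not taken from this header): the
 * instruction emulator can recognise this opcode and bounce the vcpu out to
 * userspace as a debug exit, roughly along these lines (the real handler may
 * differ in detail):
 *
 *	if (inst == KVMPPC_INST_SW_BREAKPOINT) {
 *		run->exit_reason = KVM_EXIT_DEBUG;
 *		run->debug.arch.address = kvmppc_get_pc(vcpu);
 *		emulated = EMULATE_EXIT_USER;
 *	}
 */
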
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_AGAIN,		/* something went wrong, go again */
	EMULATE_EXIT_USER,	/* emulation requires exit to user-space */
};

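/*
 * Illustrative sketch (assumption, not taken from this header): callers such
 * as the MMIO emulation path typically dispatch on the result, roughly:
 *
 *	switch (emulated) {
 *	case EMULATE_DONE:
 *	case EMULATE_AGAIN:
 *		break;
 *	case EMULATE_DO_MMIO:
 *		run->exit_reason = KVM_EXIT_MMIO;
 *		break;
 *	case EMULATE_FAIL:
 *	default:
 *		printk(KERN_CRIT "Failed to emulate instruction\n");
 *		break;
 *	}
 */
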
enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

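/*
 * Illustrative sketch (assumption, not taken from this header): translating a
 * guest effective address for a data read before emulating a load might look
 * roughly like this, with the result delivered in a struct kvmppc_pte (field
 * names per the MMU PTE structure defined elsewhere):
 *
 *	struct kvmppc_pte pte;
 *
 *	if (kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte) == 0)
 *		gpa = pte.raddr;
 */
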
extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
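
/*
 * Illustrative sketch (assumption, not taken from this header): H_PUT_TCE-style
 * hypercall handlers typically validate the I/O bus address range first:
 *
 *	long ret = kvmppc_ioba_validate(stt, ioba, npages);
 *
 *	if (ret != H_SUCCESS)
 *		return ret;
 */
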
extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
		unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			       unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
			swab32(vcpu->arch.last_inst) :
			vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

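/*
 * Illustrative sketch (assumption, not taken from this header): an emulation
 * path typically fetches the faulting instruction first and retries the guest
 * if the fetch itself failed:
 *
 *	u32 inst;
 *
 *	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
 *		return EMULATE_AGAIN;
 *	(then decode and emulate "inst")
 */
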
static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Cuts out inst bits with ordering according to spec (IBM bit numbering),
 * i.e. the leftmost bit is bit zero. Both the msb and lsb bounds are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}

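/*
 * Illustrative sketch (assumption, not taken from this header): with this IBM
 * bit numbering, a 32-bit instruction word occupies bits 32-63 of the u64, so
 * extracting the RT field (instruction bits 6-10) of an already-loaded
 * instruction looks like:
 *
 *	u32 rt = kvmppc_get_field((u64)inst, 6 + 32, 10 + 32);
 */
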
#define one_reg_size(id) \
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})


#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})

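/*
 * Illustrative sketch (assumption, not taken from this header): ONE_REG
 * handlers use these helpers to marshal a register value according to the
 * size encoded in the register id, e.g.:
 *
 *	val = get_reg_val(reg->id, vcpu->arch.vrsave);		(get path)
 *	vcpu->arch.vrsave = set_reg_val(reg->id, val);		(set path)
 */
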
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void) { return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {};
static inline void kvmppc_free_host_rm_ops(void) {};
static inline void kvmppc_free_pimap(struct kvm *kvm) {};
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the new P9 interrupt controller, while the second "xive" is the legacy
 * "eXternal Interrupt Vector Entry", which is the configuration of an
 * interrupt on the "xics" interrupt controller on P8 and earlier. These
 * two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
					u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
					u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
#endif /* CONFIG_KVM_XIVE */

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core *rm_core;
	void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
				struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

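/*
 * Illustrative note (not part of the original header): each of the
 * SHARED_SPRNG_WRAPPER() lines below generates a kvmppc_get_<reg>() and a
 * kvmppc_set_<reg>() accessor. On booke-hv these hit the guest SPR directly
 * (e.g. mfspr(SPRN_GSPRG0)); elsewhere they access the shared page with the
 * correct endianness, roughly expanding to:
 *
 *	static inline u64 kvmppc_get_sprg0(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvmppc_shared_big_endian(vcpu))
 *			return be64_to_cpu(vcpu->arch.shared->sprg0);
 *		else
 *			return le64_to_cpu(vcpu->arch.shared->sprg0);
 *	}
 */
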
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}

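/*
 * Illustrative sketch (assumption, not taken from this header): a typical
 * guest-entry sequence pairs this with kvmppc_prepare_to_enter(), roughly:
 *
 *	r = kvmppc_prepare_to_enter(vcpu);	(hard-disables interrupts)
 *	if (r <= 0)
 *		goto out;			(signal or request pending)
 *	kvmppc_fix_ee_before_entry();		(fix lazy-EE tracking state)
 *	ret = __kvmppc_vcpu_run(run, vcpu);
 */
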
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}

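/*
 * Illustrative sketch (assumption, not taken from this header): when emulating
 * an X-form (indexed) instruction, the effective address it operates on is
 * (ra ? GPR[ra] : 0) + GPR[rb], truncated to 32 bits if the guest MSR
 * indicates 32-bit mode; the helper above does exactly that:
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
 *
 * The resulting address can then be fed to the load/store or TLB emulation.
 */
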
extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */