/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to support
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT 0x00dddd00

enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
        EMULATE_FAIL,         /* can't emulate this instruction */
        EMULATE_AGAIN,        /* something went wrong. go again */
        EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_fetch_type {
        INST_GENERIC,
        INST_SC,              /* system call */
};

enum xlate_instdata {
        XLATE_INST,           /* translate instruction address */
        XLATE_DATA            /* translate data address */
};

enum xlate_readwrite {
        XLATE_READ,           /* check for read permissions */
        XLATE_WRITE           /* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int rt, unsigned int bytes,
                              int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
                               int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned int rt, unsigned int bytes,
                                  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned int rt, unsigned int bytes,
                                  int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned int rs, unsigned int bytes,
                                   int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               u64 val, unsigned int bytes,
                               int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   int rs, unsigned int bytes,
                                   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                                 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                           unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                              gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
                        enum xlate_instdata xlid, enum xlate_readwrite xlrw,
                        struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags,
                                           ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
                            struct kvm_memory_slot *memslot,
                            unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
                struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
        (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset,         \
                        (stt)->size, (ioba), (npages)) ?                \
                        H_PARAMETER : H_SUCCESS)
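
/*
 * Illustrative sketch only (not part of this header's API): a TCE hcall
 * handler would typically validate the I/O bus address range before
 * touching the table, along the lines of
 *
 *	long ret = kvmppc_ioba_validate(stt, ioba, npages);
 *
 *	if (ret != H_SUCCESS)
 *		return ret;
 *
 * so an (ioba, npages) window outside the table fails with H_PARAMETER.
 */
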
extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
                unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
                unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
                                     struct kvm_memory_slot *free,
                                     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *slot,
                                      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
                                      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
                                            struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
                                           struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
        u32     wval;
        u64     dval;
        vector128 vval;
        u64     vsxval[2];
        u32     vsx32val[4];
        u16     vsx16val[8];
        u8      vsx8val[16];
        struct {
                u64     addr;
                u64     length;
        }       vpaval;
};

struct kvmppc_ops {
        struct module *owner;
        int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        int (*check_requests)(struct kvm_vcpu *vcpu);
        int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
        void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
        int (*prepare_memory_region)(struct kvm *kvm,
                                     struct kvm_memory_slot *memslot,
                                     const struct kvm_userspace_memory_region *mem);
        void (*commit_memory_region)(struct kvm *kvm,
                                     const struct kvm_userspace_memory_region *mem,
                                     const struct kvm_memory_slot *old,
                                     const struct kvm_memory_slot *new,
                                     enum kvm_mr_change change);
        int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
                               unsigned long end);
        int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
        int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
        void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
        void (*mmu_destroy)(struct kvm_vcpu *vcpu);
        void (*free_memslot)(struct kvm_memory_slot *free,
                             struct kvm_memory_slot *dont);
        int (*create_memslot)(struct kvm_memory_slot *slot,
                              unsigned long npages);
        int (*init_vm)(struct kvm *kvm);
        void (*destroy_vm)(struct kvm *kvm);
        int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
        int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int inst, int *advance);
        int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
        int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
        void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
        long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
                              unsigned long arg);
        int (*hcall_implemented)(unsigned long hcall);
        int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
                                       struct irq_bypass_producer *);
        void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
                                        struct irq_bypass_producer *);
        int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
        int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
        int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
                            unsigned long flags);
        void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
        int (*enable_nested)(struct kvm *kvm);
        int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
                               int size);
        int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
                              int size);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
                                enum instruction_fetch_type type, u32 *inst)
{
        int ret = EMULATE_DONE;
        u32 fetched_inst;

        /* Load the instruction manually if the exit path failed to fetch it */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

        /* Write fetch_failed unswapped if the fetch failed */
        if (ret == EMULATE_DONE)
                fetched_inst = kvmppc_need_byteswap(vcpu) ?
                        swab32(vcpu->arch.last_inst) :
                        vcpu->arch.last_inst;
        else
                fetched_inst = vcpu->arch.last_inst;

        *inst = fetched_inst;
        return ret;
}
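
/*
 * Sketch of typical use (illustrative, not a definition): an emulation
 * path fetches the offending instruction and propagates any fetch
 * failure so the guest can be re-entered to retry, e.g.
 *
 *	u32 inst;
 *	int emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *
 *	if (emulated != EMULATE_DONE)
 *		return emulated;
 *	... decode and emulate "inst" ...
 */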

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
        return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Cuts inst bits out of a 64-bit word, numbered according to the spec's
 * (IBM) bit ordering, i.e. the leftmost, most significant bit is bit zero.
 * All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = (1 << (lsb - msb + 1)) - 1;
        r = (inst >> (63 - lsb)) & mask;

        return r;
}

/*
 * Replaces inst bits, using the same (IBM) bit ordering as above.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
        r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

        return r;
}
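
/*
 * Worked example (illustrative): with IBM numbering, bit 0 is the MSB of
 * the 64-bit word, so a 32-bit instruction held in the low word occupies
 * bits 32-63. For inst = 0x7c642214, the RT field (instruction bits 6-10)
 * sits at bits 38-42 here, and kvmppc_get_field(inst, 38, 42) computes
 * (inst >> 21) & 0x1f, which returns 3.
 */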

#define one_reg_size(id)        \
        (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)    ({              \
        union kvmppc_one_reg __u;               \
        switch (one_reg_size(id)) {             \
        case 4: __u.wval = (reg); break;        \
        case 8: __u.dval = (reg); break;        \
        default: BUG();                         \
        }                                       \
        __u;                                    \
})


#define set_reg_val(id, val)    ({              \
        u64 __v;                                \
        switch (one_reg_size(id)) {             \
        case 4: __v = (val).wval; break;        \
        case 8: __v = (val).dval; break;        \
        default: BUG();                         \
        }                                       \
        __v;                                    \
})
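
/*
 * Illustrative use (a sketch, not mandated by this header): a get/set
 * ONE_REG pair for some 64-bit vcpu field, where "some_reg" is made up
 * purely for illustration, would look like
 *
 *	*val = get_reg_val(id, vcpu->arch.some_reg);
 *	vcpu->arch.some_reg = set_reg_val(id, *val);
 *
 * with the 4- vs 8-byte access chosen from the KVM_REG_SIZE_MASK bits
 * of "id".
 */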

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
        paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
                                        unsigned long phys_addr,
                                        void __iomem *virt_addr)
{
        paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
        paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
        u32 xirr;

        xirr = get_paca()->kvm_hstate.saved_xirr;
        get_paca()->kvm_hstate.saved_xirr = 0;
        return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
        paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
                                        unsigned long phys_addr,
                                        void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
        return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void) { return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
                                struct kvm *kvm)
{
        if (kvm && kvm_irq_bypass)
                return kvm->arch.pimap;
        return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
                                    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                                   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                                   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
                                        struct kvmppc_irq_map *irq_map,
                                        struct kvmppc_passthru_irqmap *pimap,
                                        bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
                               int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
                                struct kvm *kvm)
        { return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {};
static inline void kvmppc_free_host_rm_ops(void) {};
static inline void kvmppc_free_pimap(struct kvm *kvm) {};
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
        { return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
        { return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
        { return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the new P9 interrupt controller, while the second "xive" is the
 * legacy "eXternal Interrupt Vector Entry", which is the configuration of
 * an interrupt on the "xics" interrupt controller on P8 and earlier. These
 * two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                                    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                                  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                                  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
                               int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                                           struct kvm_vcpu *vcpu, u32 cpu)
        { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm,
                                         unsigned long guest_irq,
                                         struct irq_desc *host_desc)
        { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm,
                                         unsigned long guest_irq,
                                         struct irq_desc *host_desc)
        { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
        { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id,
                                      u32 irq, int level, bool line_status)
        { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
        return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
        return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                         unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                                  unsigned long liobn, unsigned long ioba,
                                  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                           unsigned long liobn, unsigned long ioba,
                           unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
        unsigned long raw;
        struct {
                u32 in_host;
                u32 rm_action;
        };
};

struct kvmppc_host_rm_core {
        union kvmppc_rm_state rm_state;
        void *rm_data;
        char pad[112];
};

struct kvmppc_host_rm_ops {
        struct kvmppc_host_rm_core *rm_core;
        void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
        return vcpu->arch.epr;
#else
        return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
        vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
                             u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
                                           struct kvm_vcpu *vcpu, u32 cpu)
{
        return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
                                               struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
        struct page *page;
        /*
         * We can only access pages that the kernel maps
         * as memory. Bail out for unmapped ones.
         */
        if (!pfn_valid(pfn))
                return;

        /* Clear i-cache for new pages */
        page = pfn_to_page(pfn);
        if (!test_bit(PG_arch_1, &page->flags)) {
                flush_dcache_icache_page(page);
                set_bit(PG_arch_1, &page->flags);
        }
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
        /* Only Book3S_64 PR supports bi-endian for now */
        return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
        /* Book3s_64 HV on little endian is always little endian */
        return false;
#else
        return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)                             \
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)             \
{                                                                       \
        return mfspr(bookehv_spr);                                      \
}                                                                       \

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)                             \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)   \
{                                                                       \
        mtspr(bookehv_spr, val);                                        \
}                                                                       \

#define SHARED_WRAPPER_GET(reg, size)                                   \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)           \
{                                                                       \
        if (kvmppc_shared_big_endian(vcpu))                             \
                return be##size##_to_cpu(vcpu->arch.shared->reg);       \
        else                                                            \
                return le##size##_to_cpu(vcpu->arch.shared->reg);       \
}                                                                       \

#define SHARED_WRAPPER_SET(reg, size)                                   \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{                                                                       \
        if (kvmppc_shared_big_endian(vcpu))                             \
                vcpu->arch.shared->reg = cpu_to_be##size(val);          \
        else                                                            \
                vcpu->arch.shared->reg = cpu_to_le##size(val);          \
}                                                                       \

#define SHARED_WRAPPER(reg, size)                                       \
        SHARED_WRAPPER_GET(reg, size)                                   \
        SHARED_WRAPPER_SET(reg, size)                                   \

#define SPRNG_WRAPPER(reg, bookehv_spr)                                 \
        SPRNG_WRAPPER_GET(reg, bookehv_spr)                             \
        SPRNG_WRAPPER_SET(reg, bookehv_spr)                             \

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)                    \
        SPRNG_WRAPPER(reg, bookehv_spr)                                 \

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)                    \
        SHARED_WRAPPER(reg, size)                                       \

#endif
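
/*
 * For illustration, SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) below
 * generates kvmppc_get_srr0()/kvmppc_set_srr0(), which either touch the
 * real SPRN_GSRR0 register (booke HV) or the endian-aware shared-page
 * field (all other configurations). A sketch of the shared-page getter
 * it expands to:
 *
 *	static inline u64 kvmppc_get_srr0(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvmppc_shared_big_endian(vcpu))
 *			return be64_to_cpu(vcpu->arch.shared->srr0);
 *		else
 *			return le64_to_cpu(vcpu->arch.shared->srr0);
 *	}
 */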

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->msr = cpu_to_be64(val);
        else
                vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
        if (kvmppc_shared_big_endian(vcpu))
                return be32_to_cpu(vcpu->arch.shared->sr[nr]);
        else
                return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
        else
                vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy EE and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
        trace_hardirqs_on();

#ifdef CONFIG_PPC64
        /*
         * To avoid races, the caller must have gone directly from having
         * interrupts fully-enabled to hard-disabled.
         */
        WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

        /* Only need to enable IRQs by hard enabling them after this */
        local_paca->irq_happened = 0;
        irq_soft_mask_set(IRQS_ENABLED);
#endif
}
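
/*
 * Sketch of the expected calling sequence (illustrative only):
 *
 *	r = kvmppc_prepare_to_enter(vcpu);   (hard-disables interrupts)
 *	if (r <= 0)
 *		bail out to the host;
 *	kvmppc_fix_ee_before_entry();        (fix lazy-EE bookkeeping)
 *	... enter the guest ...
 */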

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
        ulong ea;
        ulong msr_64bit = 0;

        ea = kvmppc_get_gpr(vcpu, rb);
        if (ra)
                ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
        msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
        msr_64bit = MSR_SF;
#endif

        if (!(kvmppc_get_msr(vcpu) & msr_64bit))
                ea = (uint32_t)ea;

        return ea;
}
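
/*
 * Worked example (illustrative): when emulating an indexed load such as
 * "lwzx rt, ra, rb" with guest r3 = 0x1000 and r4 = 0x24,
 * kvmppc_get_ea_indexed(vcpu, 3, 4) returns 0x1024. With ra = 0 the base
 * is suppressed and only rb contributes, and if the guest MSR indicates
 * 32-bit mode the sum is truncated to 32 bits.
 */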

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */