/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

#define __hyp_text __section(.hyp.text) notrace
static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
	return v;
}

#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))

static inline unsigned long __hyp_kern_va(unsigned long v)
{
	u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;
	asm volatile(ALTERNATIVE("add %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v) : "r" (offset));
	return v;
}

#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))
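/*
 * Example (illustrative sketch, not part of the original header): code
 * running at EL2 must convert kernel pointers before dereferencing
 * them, e.g.
 *
 *	struct kvm_cpu_context *host_ctxt;
 *
 *	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 *
 * On non-VHE systems the "and" masks a kernel VA down into the HYP VA
 * range; with VHE the kernel already runs at EL2, so the instruction
 * is patched to a "nop" and the address is used unchanged.
 */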

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
					 "mrs_s %0, " __stringify(r##vh),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
					 "msr_s " __stringify(r##vh) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding.
 */
#define read_sysreg_el2(r)						\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
					 "mrs %0, " __stringify(r##_EL1),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_el2(v,r)						\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
					 "msr " __stringify(r##_EL1) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
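/*
 * Example (illustrative): a context save/restore path can use these
 * accessors without caring about VHE, e.g.
 *
 *	ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
 *	write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], sctlr);
 *
 * On non-VHE this accesses SCTLR_EL1 directly; on VHE it is patched to
 * access SCTLR_EL12, which redirects to the guest's EL1 register.
 */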

/* The VHE specific system registers and their encoding */
#define sctlr_EL12		sys_reg(3, 5, 1, 0, 0)
#define cpacr_EL12		sys_reg(3, 5, 1, 0, 2)
#define ttbr0_EL12		sys_reg(3, 5, 2, 0, 0)
#define ttbr1_EL12		sys_reg(3, 5, 2, 0, 1)
#define tcr_EL12		sys_reg(3, 5, 2, 0, 2)
#define afsr0_EL12		sys_reg(3, 5, 5, 1, 0)
#define afsr1_EL12		sys_reg(3, 5, 5, 1, 1)
#define esr_EL12		sys_reg(3, 5, 5, 2, 0)
#define far_EL12		sys_reg(3, 5, 6, 0, 0)
#define mair_EL12		sys_reg(3, 5, 10, 2, 0)
#define amair_EL12		sys_reg(3, 5, 10, 3, 0)
#define vbar_EL12		sys_reg(3, 5, 12, 0, 0)
#define contextidr_EL12		sys_reg(3, 5, 13, 0, 1)
#define cntkctl_EL12		sys_reg(3, 5, 14, 1, 0)
#define cntp_tval_EL02		sys_reg(3, 5, 14, 2, 0)
#define cntp_ctl_EL02		sys_reg(3, 5, 14, 2, 1)
#define cntp_cval_EL02		sys_reg(3, 5, 14, 2, 2)
#define cntv_tval_EL02		sys_reg(3, 5, 14, 3, 0)
#define cntv_ctl_EL02		sys_reg(3, 5, 14, 3, 1)
#define cntv_cval_EL02		sys_reg(3, 5, 14, 3, 2)
#define spsr_EL12		sys_reg(3, 5, 4, 0, 0)
#define elr_EL12		sys_reg(3, 5, 4, 0, 1)
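/*
 * Example (illustrative): sys_reg() packs the (op0, op1, CRn, CRm, op2)
 * encoding consumed by the mrs_s/msr_s assembler macros, so sctlr_EL12
 * above corresponds to S3_5_C1_C0_0 in raw sysreg notation.
 */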

/**
 * hyp_alternate_select - Generates patchable code sequences that are
 * used to switch between two implementations of a function, depending
 * on the availability of a feature.
 *
 * @fname: a symbol name that will be defined as a function returning a
 * function pointer whose type will match @orig and @alt
 * @orig: A pointer to the default function, as returned by @fname when
 * @cond doesn't hold
 * @alt: A pointer to the alternate function, as returned by @fname
 * when @cond holds
 * @cond: a CPU feature (as described in asm/cpufeature.h)
 */
#define hyp_alternate_select(fname, orig, alt, cond)			\
typeof(orig) * __hyp_text fname(void)					\
{									\
	typeof(alt) *val = orig;					\
	asm volatile(ALTERNATIVE("nop		\n",			\
				 "mov	%0, %1	\n",			\
				 cond)					\
		     : "+r" (val) : "r" (alt));				\
	return val;							\
}
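/*
 * Example (illustrative): hyp/switch.c uses this to pick the GICv2 or
 * GICv3 save routine once per call site, e.g.
 *
 *	static hyp_alternate_select(__vgic_call_save_state,
 *				    __vgic_v2_save_state, __vgic_v3_save_state,
 *				    ARM64_HAS_SYSREG_GIC_CPUIF);
 *
 *	__vgic_call_save_state()(vcpu);
 *
 * @fname returns a function pointer: the "mov" is only patched in when
 * @cond is detected, so @orig is returned otherwise without taking any
 * runtime branch.
 */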

void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);

void __timer_save_state(struct kvm_vcpu *vcpu);
void __timer_restore_state(struct kvm_vcpu *vcpu);

void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);

void __debug_save_state(struct kvm_vcpu *vcpu,
			struct kvm_guest_debug_arch *dbg,
			struct kvm_cpu_context *ctxt);
void __debug_restore_state(struct kvm_vcpu *vcpu,
			   struct kvm_guest_debug_arch *dbg,
			   struct kvm_cpu_context *ctxt);
void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
bool __fpsimd_enabled(void);

u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);
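/*
 * Example (illustrative sketch): the world switch pairs the save and
 * restore helpers above around __guest_enter(), roughly:
 *
 *	__sysreg_save_host_state(host_ctxt);
 *	__sysreg_restore_guest_state(guest_ctxt);
 *	exit_code = __guest_enter(vcpu, host_ctxt);
 *	__sysreg_save_guest_state(guest_ctxt);
 *	__sysreg_restore_host_state(host_ctxt);
 *
 * __guest_enter() returns the exit code used to decide how the exit is
 * handled back in the host.
 */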

#endif /* __ARM64_KVM_HYP_H__ */