arch/arm64/include/asm/mmu.h (from linux-2.6-block.git, at commit "KVM: arm/arm64: Add kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs")
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
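
/*
 * Note on the ASID definitions below: the hardware ASID lives in bits 63:48
 * of TTBR1_EL1 (TTBR_ASID_MASK).  When the kernel is unmapped at EL0 (kpti),
 * each address space is run under a pair of ASIDs that differ only in bit 48,
 * and the entry code switches between the kernel and user views by clearing
 * or setting that bit, roughly (illustrative sketch only, not kernel code):
 *
 *	ttbr |= USER_ASID_FLAG;		// switch TTBR to the user ASID
 */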
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

#ifndef __ASSEMBLY__

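/*
 * Per-mm context: id is the 64-bit ASID allocator value (generation in the
 * upper bits, hardware ASID in the low 16 bits), vdso is the base of the
 * process's vDSO mapping, and flags carries context flags such as
 * MMCF_AARCH32 above.
 */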
typedef struct {
	atomic64_t	id;
	void		*vdso;
	unsigned long	flags;
} mm_context_t;

/*
 * This macro is only used by the TLBI code, which cannot race with an
 * ASID change and therefore doesn't need to reload the counter using
 * atomic64_read.
 */
#define ASID(mm)	((mm)->context.id.counter & 0xffff)

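/*
 * True when kernel page-table isolation is built in and the
 * ARM64_UNMAP_KERNEL_AT_EL0 capability has been detected, i.e. the kernel
 * mappings are hidden while running userspace.
 */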
static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

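/*
 * Per-CPU state for branch-predictor hardening (Spectre-v2 mitigation):
 * hyp_vectors_slot selects the slot of the hardened hyp vectors to use and
 * fn is the CPU-specific callback that invalidates the branch predictor.
 */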
typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int			hyp_vectors_slot;
	bp_hardening_cb_t	fn;
};

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}

static inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
		return;

	d = arm64_get_bp_hardening_data();
	if (d->fn)
		d->fn();
}
#else
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return NULL;
}

static inline void arm64_apply_bp_hardening(void)	{ }
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

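/*
 * arm64_apply_bp_hardening() is a guard-then-dispatch helper: it is a no-op
 * unless the CPU is listed as affected, otherwise it runs the per-CPU
 * callback to invalidate the branch predictor.  A rough sketch of a caller
 * on an exception path taken from EL0 (names invented for illustration,
 * not the actual kernel call sites):
 *
 *	void example_el0_abort_path(void)
 *	{
 *		arm64_apply_bp_hardening();
 *		// ... handle the exception ...
 *	}
 */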
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
extern void mark_linear_text_alias_ro(void);

#endif	/* !__ASSEMBLY__ */
#endif