// SPDX-License-Identifier: GPL-2.0-or-later
/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>
#include <asm/tlb.h>
#include <asm/io_bitmap.h>
#include <asm/gsseg.h>

/*
 * nop stub, which must not clobber anything *including the stack* to
 * avoid confusing the entry prologues.
 */
DEFINE_PARAVIRT_ASM(_paravirt_nop, "", .entry.text);

/* stub always returning 0. */
DEFINE_PARAVIRT_ASM(paravirt_ret0, "xor %eax,%eax", .entry.text);
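/*
 * Note: on x86-64 a write to a 32-bit register zero-extends into the full
 * 64-bit register, so the single "xor %eax,%eax" above returns 0 for any
 * integer-sized return type without touching the stack.
 */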

void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
}

/* Undefined instruction for dealing with missing ops pointers. */
noinstr void paravirt_BUG(void)
{
	BUG();
}

static unsigned int paravirt_patch_call(void *insn_buff, const void *target,
					unsigned long addr, unsigned int len)
{
	__text_gen_insn(insn_buff, CALL_INSN_OPCODE,
			(void *)addr, target, CALL_INSN_SIZE);
	return CALL_INSN_SIZE;
}
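/*
 * paravirt_patch_call() above emits a direct near CALL (CALL_INSN_SIZE, i.e.
 * 5 bytes: opcode plus a rel32 displacement computed from @addr to @target)
 * into @insn_buff, replacing the default indirect call through pv_ops at a
 * paravirt call site.
 */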

#ifdef CONFIG_PARAVIRT_XXL
DEFINE_PARAVIRT_ASM(_paravirt_ident_64, "mov %rdi, %rax", .text);
DEFINE_PARAVIRT_ASM(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
DEFINE_PARAVIRT_ASM(pv_native_irq_disable, "cli", .noinstr.text);
DEFINE_PARAVIRT_ASM(pv_native_irq_enable, "sti", .noinstr.text);
DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
#endif
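/*
 * The stubs above follow the x86-64 SysV calling convention: the first
 * argument arrives in %rdi and the return value leaves in %rax, which is
 * why "mov %rdi, %rax" is a 64-bit identity function. The IRQ and CR2
 * helpers live in .noinstr.text because they can be reached from entry and
 * exception paths where instrumentation is not allowed.
 */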

DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

void __init native_pv_lock_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}
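/*
 * virt_spin_lock_key defaults to true and is disabled above when not
 * running under a hypervisor. While it stays enabled, qspinlocks can fall
 * back to a simple test-and-set lock, which tends to behave better than
 * the queued slowpath when vCPUs may be preempted by the host.
 */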

unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
			    unsigned int len)
{
	/*
	 * The patch type is the index of the target function pointer
	 * within pv_ops, so treating pv_ops as an array of pointers maps
	 * the type back to the op to be called.
	 */
	void *opfunc = *((void **)&pv_ops + type);
	unsigned int ret;

	if (opfunc == NULL)
		/* If there's no function, patch it with paravirt_BUG() */
		ret = paravirt_patch_call(insn_buff, paravirt_BUG, addr, len);
	else if (opfunc == _paravirt_nop)
		ret = 0;
	else
		/* Otherwise call the function. */
		ret = paravirt_patch_call(insn_buff, opfunc, addr, len);

	return ret;
}
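/*
 * Returning 0 for _paravirt_nop means "emit no replacement": the patching
 * core then fills the whole call site with NOPs, removing the call
 * entirely. For example (illustrative), a site that starts out as an
 * indirect "call *pv_ops.cpu.io_delay" ends up as a direct
 * "call native_io_delay" once patched.
 */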

struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
	return 0;
}

DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void))
{
	static_call_update(pv_sched_clock, func);
}
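/*
 * Hypervisor guest code overrides these clocks early in boot, roughly
 * (an illustrative sketch, not any specific hypervisor's exact sequence):
 *
 *	static_call_update(pv_steal_clock, my_hv_steal_clock);
 *	static_key_slow_inc(&paravirt_steal_enabled);
 *	paravirt_set_sched_clock(my_hv_sched_clock);
 *
 * where my_hv_steal_clock/my_hv_sched_clock are hypothetical callbacks
 * reading the hypervisor's per-vCPU time data.
 */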

static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport",
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware. This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
	return request_resource(&ioport_resource, &reserve_ioports);
}
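/*
 * request_resource() returns 0 on success or -EBUSY once any port in the
 * range has already been claimed, hence the "very early" requirement above.
 */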

static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

	this_cpu_write(paravirt_lazy_mode, mode);
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}
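/*
 * Lazy modes do not nest: the BUG_ON()s above enforce that a CPU is in
 * PARAVIRT_LAZY_NONE before entering a lazy section and in the expected
 * mode before leaving it.
 */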

void paravirt_enter_lazy_mmu(void)
{
	enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
	leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_flush_lazy_mmu(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}
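/*
 * In lazy MMU mode a hypervisor backend may queue page-table updates
 * instead of applying them immediately. Flushing is implemented above as
 * leave plus re-enter, which forces any queued updates out while keeping
 * the section lazy; preemption is disabled so the per-CPU mode cannot
 * change underneath.
 */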

#ifdef CONFIG_PARAVIRT_XXL
void paravirt_start_context_switch(struct task_struct *prev)
{
	BUG_ON(preemptible());

	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
	}
	enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_end_context_switch(struct task_struct *next)
{
	BUG_ON(preemptible());

	leave_lazy(PARAVIRT_LAZY_CPU);

	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}
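/*
 * If the outgoing task was inside a lazy MMU section, it is flushed and
 * the task is flagged with TIF_LAZY_MMU_UPDATES; when that task is later
 * switched back in, the flag is consumed and its lazy MMU section resumes.
 * The context switch itself runs under PARAVIRT_LAZY_CPU so descriptor and
 * segment updates can be batched by the hypervisor backend.
 */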

static noinstr void pv_native_write_cr2(unsigned long val)
{
	native_write_cr2(val);
}

static noinstr unsigned long pv_native_get_debugreg(int regno)
{
	return native_get_debugreg(regno);
}

static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
	native_set_debugreg(regno, val);
}

noinstr void pv_native_wbinvd(void)
{
	native_wbinvd();
}

static noinstr void pv_native_safe_halt(void)
{
	native_safe_halt();
}
#endif

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
	if (in_interrupt())
		return PARAVIRT_LAZY_NONE;

	return this_cpu_read(paravirt_lazy_mode);
}
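/*
 * Interrupt handlers must never batch updates, even when they interrupt a
 * lazy section, so in_interrupt() context always reports
 * PARAVIRT_LAZY_NONE.
 */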

struct pv_info pv_info = {
	.name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
	.extra_user_64bit_cs = __USER_CS,
#endif
};

/* 64-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
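/*
 * PTE_IDENT wraps the _paravirt_ident_64 stub (return the argument
 * unchanged) in the callee-save calling convention, so pte/pgd value
 * conversions on bare hardware cost little more than a direct call.
 */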

struct paravirt_patch_template pv_ops = {
	/* Cpu ops. */
	.cpu.io_delay = native_io_delay,

#ifdef CONFIG_PARAVIRT_XXL
	.cpu.cpuid = native_cpuid,
	.cpu.get_debugreg = pv_native_get_debugreg,
	.cpu.set_debugreg = pv_native_set_debugreg,
	.cpu.read_cr0 = native_read_cr0,
	.cpu.write_cr0 = native_write_cr0,
	.cpu.write_cr4 = native_write_cr4,
	.cpu.wbinvd = pv_native_wbinvd,
	.cpu.read_msr = native_read_msr,
	.cpu.write_msr = native_write_msr,
	.cpu.read_msr_safe = native_read_msr_safe,
	.cpu.write_msr_safe = native_write_msr_safe,
	.cpu.read_pmc = native_read_pmc,
	.cpu.load_tr_desc = native_load_tr_desc,
	.cpu.set_ldt = native_set_ldt,
	.cpu.load_gdt = native_load_gdt,
	.cpu.load_idt = native_load_idt,
	.cpu.store_tr = native_store_tr,
	.cpu.load_tls = native_load_tls,
	.cpu.load_gs_index = native_load_gs_index,
	.cpu.write_ldt_entry = native_write_ldt_entry,
	.cpu.write_gdt_entry = native_write_gdt_entry,
	.cpu.write_idt_entry = native_write_idt_entry,

	.cpu.alloc_ldt = paravirt_nop,
	.cpu.free_ldt = paravirt_nop,

	.cpu.load_sp0 = native_load_sp0,

#ifdef CONFIG_X86_IOPL_IOPERM
	.cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap,
	.cpu.update_io_bitmap = native_tss_update_io_bitmap,
#endif

	.cpu.start_context_switch = paravirt_nop,
	.cpu.end_context_switch = paravirt_nop,

	/* Irq ops. */
	.irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl),
	.irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
	.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
	.irq.safe_halt = pv_native_safe_halt,
	.irq.halt = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */

	/* Mmu ops. */
	.mmu.flush_tlb_user = native_flush_tlb_local,
	.mmu.flush_tlb_kernel = native_flush_tlb_global,
	.mmu.flush_tlb_one_user = native_flush_tlb_one_user,
	.mmu.flush_tlb_multi = native_flush_tlb_multi,
	.mmu.tlb_remove_table =
			(void (*)(struct mmu_gather *, void *))tlb_remove_page,
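	/*
	 * On bare hardware, freeing a page-table page does not need the
	 * deferred tlb_remove_table() machinery, so tlb_remove_page() is
	 * reused directly; the cast above merely adapts its second
	 * parameter from struct page * to void *.
	 */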

	.mmu.exit_mmap = paravirt_nop,
	.mmu.notify_page_enc_status_changed = paravirt_nop,

#ifdef CONFIG_PARAVIRT_XXL
	.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
	.mmu.write_cr2 = pv_native_write_cr2,
	.mmu.read_cr3 = __native_read_cr3,
	.mmu.write_cr3 = native_write_cr3,

	.mmu.pgd_alloc = __paravirt_pgd_alloc,
	.mmu.pgd_free = paravirt_nop,

	.mmu.alloc_pte = paravirt_nop,
	.mmu.alloc_pmd = paravirt_nop,
	.mmu.alloc_pud = paravirt_nop,
	.mmu.alloc_p4d = paravirt_nop,
	.mmu.release_pte = paravirt_nop,
	.mmu.release_pmd = paravirt_nop,
	.mmu.release_pud = paravirt_nop,
	.mmu.release_p4d = paravirt_nop,

	.mmu.set_pte = native_set_pte,
	.mmu.set_pmd = native_set_pmd,

	.mmu.ptep_modify_prot_start = __ptep_modify_prot_start,
	.mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.mmu.set_pud = native_set_pud,

	.mmu.pmd_val = PTE_IDENT,
	.mmu.make_pmd = PTE_IDENT,

	.mmu.pud_val = PTE_IDENT,
	.mmu.make_pud = PTE_IDENT,

	.mmu.set_p4d = native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
	.mmu.p4d_val = PTE_IDENT,
	.mmu.make_p4d = PTE_IDENT,

	.mmu.set_pgd = native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

	.mmu.pte_val = PTE_IDENT,
	.mmu.pgd_val = PTE_IDENT,

	.mmu.make_pte = PTE_IDENT,
	.mmu.make_pgd = PTE_IDENT,

	.mmu.enter_mmap = paravirt_nop,

	.mmu.lazy_mode = {
		.enter = paravirt_nop,
		.leave = paravirt_nop,
		.flush = paravirt_nop,
	},

	.mmu.set_fixmap = native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	/* Lock ops. */
#ifdef CONFIG_SMP
	.lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.lock.queued_spin_unlock =
			PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.lock.wait = paravirt_nop,
	.lock.kick = paravirt_nop,
	.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};

#ifdef CONFIG_PARAVIRT_XXL
NOKPROBE_SYMBOL(native_load_idt);
#endif

EXPORT_SYMBOL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);