// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kvm_types.h>
#include <linux/hashtable.h>
#include <linux/amd-iommu.h>
#include <linux/kvm_host.h>

#include <asm/irq_remapping.h>
/*
 * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e. the vCPU ID,
 * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry
 * if an interrupt can't be delivered, e.g. because the vCPU isn't running.
 *
 * For the vCPU ID, use however many bits are currently allowed for the max
 * guest physical APIC ID (limited by the size of the physical ID table), and
 * use whatever bits remain to assign arbitrary AVIC IDs to VMs.  Note, the
 * size of the GATag is defined by hardware (32 bits), but is an opaque value
 * as far as hardware is concerned.
 */
#define AVIC_VCPU_ID_MASK		AVIC_PHYSICAL_MAX_INDEX_MASK

#define AVIC_VM_ID_SHIFT		HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)
#define AVIC_VM_ID_MASK			(GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT)

#define AVIC_GATAG(vm_id, vcpu_id)	((((vm_id) & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
					 ((vcpu_id) & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(tag)		(((tag) >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(tag)	((tag) & AVIC_VCPU_ID_MASK)

static_assert(AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);
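
/*
 * Worked example of the GATag layout (a sketch, assuming
 * AVIC_PHYSICAL_MAX_INDEX_MASK is the 9-bit mask 0x1ff): AVIC_VM_ID_SHIFT is
 * then 9, so AVIC_GATAG(0x2a, 3) encodes to (0x2a << 9) | 3 == 0x5403, and the
 * decode macros recover both fields.  The asserts below hold for any mask
 * width large enough to hold the example IDs.
 */
static_assert(AVIC_GATAG_TO_VMID(AVIC_GATAG(0x2a, 3)) == 0x2a);
static_assert(AVIC_GATAG_TO_VCPUID(AVIC_GATAG(0x2a, 3)) == 3);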
static bool force_avic;
module_param_unsafe(force_avic, bool, 0444);
/*
 * This hash table maps a VM ID to its struct kvm_svm, so that an AMD IOMMU
 * GALog notification can be routed to (and wake) a particular vCPU of a
 * particular VM.
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = 0;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
/*
 * Wrapper to keep a pointer to the IOMMU's struct amd_ir_data on the
 * per-vCPU ir_list.
 */
struct amd_svm_iommu_ir {
	struct list_head node;	/* Used by SVM for per-vcpu ir_list */
	void *data;		/* Storing pointer to struct amd_ir_data */
};
static void avic_activate_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
	vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;

	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;

	/*
	 * Note: KVM supports hybrid-AVIC mode, where KVM emulates x2APIC MSR
	 * accesses, while interrupt injection to a running vCPU can be
	 * achieved using the AVIC doorbell.  KVM disables the APIC access page
	 * (deletes the memslot) if any vCPU has x2APIC enabled, thus enabling
	 * AVIC in hybrid mode activates only the doorbell mechanism.
	 */
	if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) {
		vmcb->control.int_ctl |= X2APIC_MODE_MASK;
		vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;

		/* Disable MSR interception for x2APIC registers */
		svm_set_x2apic_msr_interception(svm, false);
	} else {
		/*
		 * Flush the TLB, the guest may have inserted a non-APIC
		 * mapping into the TLB while AVIC was disabled.
		 */
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu);

		/* For xAVIC and hybrid-xAVIC modes */
		vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;

		/* Enable MSR interception for x2APIC registers */
		svm_set_x2apic_msr_interception(svm, true);
	}
}
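
/*
 * Summary of the resulting configurations (a reading aid based on the code
 * above, not an exhaustive statement of hardware behaviour):
 *  - xAVIC:       AVIC_ENABLE_MASK only; xAPIC MMIO is accelerated via the
 *                 APIC backing page, x2APIC MSRs remain intercepted.
 *  - x2AVIC:      AVIC_ENABLE_MASK | X2APIC_MODE_MASK; x2APIC MSR intercepts
 *                 are disabled so hardware accelerates the MSR accesses.
 *  - hybrid-AVIC: guest uses x2APIC without hardware x2AVIC; KVM emulates the
 *                 x2APIC MSRs and only the doorbell/IPI acceleration remains.
 */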
static void avic_deactivate_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
	vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;

	/*
	 * If running nested and the guest uses its own MSR bitmap, there
	 * is no need to update L0's MSR bitmap.
	 */
	if (is_guest_mode(&svm->vcpu) &&
	    vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))
		return;

	/* Re-enable MSR interception for x2APIC registers */
	svm_set_x2apic_msr_interception(svm, true);
}
/*
 * This function is called from the IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_svm *kvm_svm;
	struct kvm_vcpu *vcpu = NULL;
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
	trace_kvm_avic_ga_log(vm_id, vcpu_id);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
		if (kvm_svm->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	/*
	 * At this point, the IOMMU should have already set the pending
	 * bit in the vAPIC backing page.  So, we just need to schedule
	 * in (wake up) the vCPU.
	 */
	if (vcpu)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}
void avic_vm_destroy(struct kvm *kvm)
{
	unsigned long flags;
	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

	if (!enable_apicv)
		return;

	if (kvm_svm->avic_logical_id_table_page)
		__free_page(kvm_svm->avic_logical_id_table_page);
	if (kvm_svm->avic_physical_id_table_page)
		__free_page(kvm_svm->avic_physical_id_table_page);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_del(&kvm_svm->hnode);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
}
int avic_vm_init(struct kvm *kvm)
{
	unsigned long flags;
	int err = -ENOMEM;
	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
	struct kvm_svm *k2;
	struct page *p_page;
	struct page *l_page;
	u32 vm_id;

	if (!enable_apicv)
		return 0;

	/* Allocating physical APIC ID table (4KB) */
	p_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!p_page)
		goto free_avic;
	kvm_svm->avic_physical_id_table_page = p_page;

	/* Allocating logical APIC ID table (4KB) */
	l_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!l_page)
		goto free_avic;
	kvm_svm->avic_logical_id_table_page = l_page;

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
again:
	vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
	if (vm_id == 0) { /* id is 1-based, zero is not okay */
		next_vm_id_wrapped = 1;
		goto again;
	}
	/* Is it still in use? Only possible if wrapped at least once */
	if (next_vm_id_wrapped) {
		hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
			if (k2->avic_vm_id == vm_id)
				goto again;
		}
	}
	kvm_svm->avic_vm_id = vm_id;
	hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	return 0;

free_avic:
	avic_vm_destroy(kvm);
	return err;
}
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
{
	struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
	phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
	phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
	phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));

	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
	vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK;

	if (kvm_apicv_activated(svm->vcpu.kvm))
		avic_activate_vmcb(svm);
	else
		avic_deactivate_vmcb(svm);
}
static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
				       unsigned int index)
{
	u64 *avic_physical_id_table;
	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);

	if ((!x2avic_enabled && index > AVIC_MAX_PHYSICAL_ID) ||
	    (index > X2AVIC_MAX_PHYSICAL_ID))
		return NULL;

	avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);

	return &avic_physical_id_table[index];
}
static int avic_init_backing_page(struct kvm_vcpu *vcpu)
{
	u64 *entry, new_entry;
	int id = vcpu->vcpu_id;
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((!x2avic_enabled && id > AVIC_MAX_PHYSICAL_ID) ||
	    (id > X2AVIC_MAX_PHYSICAL_ID))
		return -EINVAL;

	if (!vcpu->arch.apic->regs)
		return -EINVAL;

	if (kvm_apicv_activated(vcpu->kvm)) {
		int ret;

		/*
		 * Note, AVIC hardware walks the nested page table to check
		 * permissions, but does not use the SPA address specified in
		 * the leaf SPTE since it uses the address in the
		 * AVIC_BACKING_PAGE pointer field of the VMCB.
		 */
		ret = kvm_alloc_apic_access_page(vcpu->kvm);
		if (ret)
			return ret;
	}

	svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);

	/* Set the AVIC backing page address in the physical APIC ID table */
	entry = avic_get_physical_id_entry(vcpu, id);
	if (!entry)
		return -EINVAL;

	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
			       AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
			       AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
	WRITE_ONCE(*entry, new_entry);

	svm->avic_physical_id_cache = entry;

	return 0;
}
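
/*
 * Rough sketch of a physical APIC ID table entry, as written above and
 * toggled by avic_vcpu_load()/avic_vcpu_put() (the masks are authoritative;
 * this is only a reading aid):
 *  - host physical APIC ID (AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK),
 *    consumed by hardware only while IsRunning is set;
 *  - backing page pointer (AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK);
 *  - IsRunning flag (AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
 *  - Valid flag (AVIC_PHYSICAL_ID_ENTRY_VALID_MASK).
 */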
void avic_ring_doorbell(struct kvm_vcpu *vcpu)
{
	/*
	 * Note, the vCPU could get migrated to a different pCPU at any point,
	 * which could result in signalling the wrong/previous pCPU.  But if
	 * that happens the vCPU is guaranteed to do a VMRUN (after being
	 * migrated) and thus will process pending interrupts, i.e. a doorbell
	 * is not needed (and the spurious one is harmless).
	 */
	int cpu = READ_ONCE(vcpu->cpu);

	if (cpu != get_cpu()) {
		wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
		trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu));
	}
	put_cpu();
}
static void avic_kick_vcpu(struct kvm_vcpu *vcpu, u32 icrl)
{
	vcpu->arch.apic->irr_pending = true;
	svm_complete_interrupt_delivery(vcpu,
					icrl & APIC_MODE_MASK,
					icrl & APIC_INT_LEVELTRIG,
					icrl & APIC_VECTOR_MASK);
}
static void avic_kick_vcpu_by_physical_id(struct kvm *kvm, u32 physical_id,
					  u32 icrl)
{
	/*
	 * KVM inhibits AVIC if any vCPU ID diverges from the vCPU's APIC ID,
	 * i.e. APIC ID == vCPU ID.
	 */
	struct kvm_vcpu *target_vcpu = kvm_get_vcpu_by_id(kvm, physical_id);

	/* Once again, nothing to do if the target vCPU doesn't exist. */
	if (unlikely(!target_vcpu))
		return;

	avic_kick_vcpu(target_vcpu, icrl);
}
static void avic_kick_vcpu_by_logical_id(struct kvm *kvm, u32 *avic_logical_id_table,
					 u32 logid_index, u32 icrl)
{
	u32 physical_id;

	if (avic_logical_id_table) {
		u32 logid_entry = avic_logical_id_table[logid_index];

		/* Nothing to do if the logical destination is invalid. */
		if (unlikely(!(logid_entry & AVIC_LOGICAL_ID_ENTRY_VALID_MASK)))
			return;

		physical_id = logid_entry &
			      AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
	} else {
		/*
		 * For x2APIC, the logical APIC ID is a read-only value that is
		 * derived from the x2APIC ID, thus the x2APIC ID can be found
		 * by reversing the calculation (stored in logid_index).  Note,
		 * bits 31:20 of the x2APIC ID aren't propagated to the logical
		 * ID, but KVM limits the x2APIC ID to KVM_MAX_VCPU_IDS.
		 */
		physical_id = logid_index;
	}

	avic_kick_vcpu_by_physical_id(kvm, physical_id, icrl);
}
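
/*
 * Worked example for the x2APIC case above (a reading aid; the architectural
 * x2APIC logical ID layout is cluster = id >> 4 in bits 31:16 and a one-hot
 * bit for (id & 0xf) in bits 15:0): x2APIC ID 0x23 lives in cluster 0x2 at
 * bit 3, so reversing the calculation gives
 * logid_index == (0x2 << 4) | 3 == 0x23, i.e. the x2APIC ID itself.
 */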
/*
 * A fast-path version of avic_kick_target_vcpus(), which attempts to match
 * destination APIC ID to vCPU without looping through all vCPUs.
 */
static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source,
				       u32 icrl, u32 icrh, u32 index)
{
	int dest_mode = icrl & APIC_DEST_MASK;
	int shorthand = icrl & APIC_SHORT_MASK;
	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
	u32 dest;

	if (shorthand != APIC_DEST_NOSHORT)
		return -EINVAL;

	if (apic_x2apic_mode(source))
		dest = icrh;
	else
		dest = GET_XAPIC_DEST_FIELD(icrh);

	if (dest_mode == APIC_DEST_PHYSICAL) {
		/* broadcast destination, use slow path */
		if (apic_x2apic_mode(source) && dest == X2APIC_BROADCAST)
			return -EINVAL;
		if (!apic_x2apic_mode(source) && dest == APIC_BROADCAST)
			return -EINVAL;

		if (WARN_ON_ONCE(dest != index))
			return -EINVAL;

		avic_kick_vcpu_by_physical_id(kvm, dest, icrl);
	} else {
		u32 *avic_logical_id_table;
		unsigned long bitmap, i;
		u32 cluster;

		if (apic_x2apic_mode(source)) {
			/* 16 bit dest mask, 16 bit cluster id */
			bitmap = dest & 0xFFFF;
			cluster = (dest >> 16) << 4;
		} else if (kvm_lapic_get_reg(source, APIC_DFR) == APIC_DFR_FLAT) {
			/* 8 bit dest mask */
			bitmap = dest;
			cluster = 0;
		} else {
			/* 4 bit dest mask, 4 bit cluster id */
			bitmap = dest & 0xF;
			cluster = (dest >> 4) << 2;
		}

		/* Nothing to do if there are no destinations in the cluster. */
		if (unlikely(!bitmap))
			return 0;

		if (apic_x2apic_mode(source))
			avic_logical_id_table = NULL;
		else
			avic_logical_id_table = page_address(kvm_svm->avic_logical_id_table_page);

		/*
		 * AVIC is inhibited if vCPUs aren't mapped 1:1 with logical
		 * IDs, thus each bit in the destination is guaranteed to map
		 * to at most one vCPU.
		 */
		for_each_set_bit(i, &bitmap, 16)
			avic_kick_vcpu_by_logical_id(kvm, avic_logical_id_table,
						     i + cluster, icrl);
	}

	return 0;
}
static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
				   u32 icrl, u32 icrh, u32 index)
{
	u32 dest = apic_x2apic_mode(source) ? icrh : GET_XAPIC_DEST_FIELD(icrh);
	unsigned long i;
	struct kvm_vcpu *vcpu;

	if (!avic_kick_target_vcpus_fast(kvm, source, icrl, icrh, index))
		return;

	trace_kvm_avic_kick_vcpu_slowpath(icrh, icrl, index);

	/*
	 * Wake any target vCPUs that are blocking, i.e. waiting for a wake
	 * event.  There's no need to signal doorbells, as hardware has handled
	 * vCPUs that were in guest at the time of the IPI, and vCPUs that have
	 * since entered the guest will have processed pending IRQs at VMRUN.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
					dest, icrl & APIC_DEST_MASK))
			avic_kick_vcpu(vcpu, icrl);
	}
}
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
	u32 icrl = svm->vmcb->control.exit_info_1;
	u32 id = svm->vmcb->control.exit_info_2 >> 32;
	u32 index = svm->vmcb->control.exit_info_2 & 0x1FF;
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);

	switch (id) {
	case AVIC_IPI_FAILURE_INVALID_TARGET:
	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
		/*
		 * Emulate IPIs that are not handled by AVIC hardware, which
		 * only virtualizes Fixed, Edge-Triggered INTRs, and falls over
		 * if _any_ targets are invalid, e.g. if the logical mode mask
		 * is a superset of running vCPUs.
		 *
		 * The exit is a trap, i.e. ICR holds the correct value and RIP
		 * has been advanced, KVM is responsible only for emulating the
		 * IPI.  Sadly, hardware may sometimes leave the BUSY flag set,
		 * in which case KVM needs to emulate the ICR write as well in
		 * order to clear the BUSY flag.
		 */
		if (icrl & APIC_ICR_BUSY)
			kvm_apic_write_nodecode(vcpu, APIC_ICR);
		else
			kvm_apic_send_ipi(apic, icrl, icrh);
		break;
	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING:
		/*
		 * At this point, we expect that the AVIC HW has already
		 * set the appropriate IRR bits on the valid target
		 * vCPUs.  So, we just need to kick the appropriate vCPU.
		 */
		avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh, index);
		break;
	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
		WARN_ONCE(1, "Invalid backing page\n");
		break;
	default:
		pr_err("Unknown IPI interception\n");
	}

	return 1;
}
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu))
		return APICV_INHIBIT_REASON_NESTED;
	return 0;
}
static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
{
	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
	u32 *logical_apic_id_table;
	u32 cluster, index;

	ldr = GET_APIC_LOGICAL_ID(ldr);

	if (flat) {
		cluster = 0;
	} else {
		cluster = (ldr >> 4);
		if (cluster >= 0xf)
			return NULL;
		ldr &= 0xf;
	}
	if (!ldr || !is_power_of_2(ldr))
		return NULL;

	index = __ffs(ldr);
	if (WARN_ON_ONCE(index > 7))
		return NULL;
	index += (cluster << 2);

	logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);

	return &logical_apic_id_table[index];
}
static void avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
{
	bool flat;
	u32 *entry, new_entry;

	flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
	entry = avic_get_logical_id_entry(vcpu, ldr, flat);
	if (!entry)
		return;

	new_entry = READ_ONCE(*entry);
	new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
	new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
	new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
	WRITE_ONCE(*entry, new_entry);
}
static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool flat = svm->dfr_reg == APIC_DFR_FLAT;
	u32 *entry;

	/* Note: x2AVIC does not use the logical APIC ID table. */
	if (apic_x2apic_mode(vcpu->arch.apic))
		return;

	entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
	if (entry)
		clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
}
static void avic_handle_ldr_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
	u32 id = kvm_xapic_id(vcpu->arch.apic);

	/* AVIC does not support LDR update for x2APIC. */
	if (apic_x2apic_mode(vcpu->arch.apic))
		return;

	if (ldr == svm->ldr_reg)
		return;

	avic_invalidate_logical_id_entry(vcpu);

	svm->ldr_reg = ldr;
	avic_ldr_write(vcpu, id, ldr);
}
static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);

	if (svm->dfr_reg == dfr)
		return;

	avic_invalidate_logical_id_entry(vcpu);
	svm->dfr_reg = dfr;
}
static int avic_unaccel_trap_write(struct kvm_vcpu *vcpu)
{
	u32 offset = to_svm(vcpu)->vmcb->control.exit_info_1 &
				AVIC_UNACCEL_ACCESS_OFFSET_MASK;

	switch (offset) {
	case APIC_LDR:
		avic_handle_ldr_update(vcpu);
		break;
	case APIC_DFR:
		avic_handle_dfr_update(vcpu);
		break;
	case APIC_RRR:
		/* Ignore writes to Read Remote Data, it's read-only. */
		return 1;
	default:
		break;
	}

	kvm_apic_write_nodecode(vcpu, offset);
	return 1;
}
static bool is_avic_unaccelerated_access_trap(u32 offset)
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret = 0;
	u32 offset = svm->vmcb->control.exit_info_1 &
		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
	u32 vector = svm->vmcb->control.exit_info_2 &
		     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
	bool write = (svm->vmcb->control.exit_info_1 >> 32) &
		     AVIC_UNACCEL_ACCESS_WRITE_MASK;
	bool trap = is_avic_unaccelerated_access_trap(offset);

	trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,
					    trap, write, vector);
	if (trap) {
		/* Handling trap write */
		WARN_ONCE(!write, "svm: Handling trap read.\n");
		ret = avic_unaccel_trap_write(vcpu);
	} else {
		/* Handling fault */
		ret = kvm_emulate_instruction(vcpu, 0);
	}

	return ret;
}
int avic_init_vcpu(struct vcpu_svm *svm)
{
	int ret;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
		return 0;

	ret = avic_init_backing_page(vcpu);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&svm->ir_list);
	spin_lock_init(&svm->ir_list_lock);
	svm->dfr_reg = APIC_DFR_FLAT;

	return ret;
}
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
{
	avic_handle_dfr_update(vcpu);
	avic_handle_ldr_update(vcpu);
}
static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm))
		return 0;

	/*
	 * Here, we go through the per-vCPU ir_list to update all existing
	 * interrupt remapping table entries targeting this vCPU.
	 */
	spin_lock_irqsave(&svm->ir_list_lock, flags);

	if (list_empty(&svm->ir_list))
		goto out;

	list_for_each_entry(ir, &svm->ir_list, node) {
		if (activate)
			ret = amd_iommu_activate_guest_mode(ir->data);
		else
			ret = amd_iommu_deactivate_guest_mode(ir->data);
		if (ret)
			break;
	}
out:
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
	return ret;
}
static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
	unsigned long flags;
	struct amd_svm_iommu_ir *cur;

	spin_lock_irqsave(&svm->ir_list_lock, flags);
	list_for_each_entry(cur, &svm->ir_list, node) {
		if (cur->data != pi->ir_data)
			continue;
		list_del(&cur->node);
		kfree(cur);
		break;
	}
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;

	/*
	 * In some cases, the existing IRTE is updated and re-set, so we need
	 * to check here if it's already been added to the ir_list.
	 */
	if (pi->ir_data && (pi->prev_ga_tag != 0)) {
		struct kvm *kvm = svm->vcpu.kvm;
		u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
		struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		struct vcpu_svm *prev_svm;

		if (!prev_vcpu) {
			ret = -EINVAL;
			goto out;
		}

		prev_svm = to_svm(prev_vcpu);
		svm_ir_list_del(prev_svm, pi);
	}

	/*
	 * Allocate a new amd_svm_iommu_ir wrapper, which will be added to the
	 * per-vCPU ir_list.
	 */
	ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
	if (!ir) {
		ret = -ENOMEM;
		goto out;
	}
	ir->data = pi->ir_data;

	spin_lock_irqsave(&svm->ir_list_lock, flags);
	list_add(&ir->node, &svm->ir_list);
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
out:
	return ret;
}
/*
 * Note:
 * The HW cannot support posting multicast/broadcast
 * interrupts to a vCPU.  So, we still use legacy interrupt
 * remapping for these kinds of interrupts.
 *
 * For lowest-priority interrupts, we only support
 * those with a single CPU as the destination, e.g. the user
 * configures the interrupts via /proc/irq or uses
 * irqbalance to make the interrupts single-CPU.
 */
static int
get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
{
	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu = NULL;

	kvm_set_msi_irq(kvm, e, &irq);

	if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
	    !kvm_irq_is_postable(&irq)) {
		pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
			 __func__, irq.vector);
		return -1;
	}

	pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
		 irq.vector);
	*svm = to_svm(vcpu);
	vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
	vcpu_info->vector = irq.vector;

	return 0;
}
/*
 * avic_pi_update_irte - set IRTE for Posted-Interrupts
 *
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 * returns 0 on success, < 0 on failure
 */
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_routing_table *irq_rt;
	int idx, ret = 0;

	if (!kvm_arch_has_assigned_device(kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP))
		return 0;

	pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
		 __func__, host_irq, guest_irq, set);

	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);

	if (guest_irq >= irq_rt->nr_rt_entries ||
	    hlist_empty(&irq_rt->map[guest_irq])) {
		pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
			     guest_irq, irq_rt->nr_rt_entries);
		goto out;
	}

	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
		struct vcpu_data vcpu_info;
		struct vcpu_svm *svm = NULL;

		if (e->type != KVM_IRQ_ROUTING_MSI)
			continue;

		/*
		 * Here, we set up the IRTE with legacy mode in the following cases:
		 * 1. The interrupt cannot be targeted at a specific vCPU.
		 * 2. The posted interrupt is being unset.
		 * 3. APIC virtualization is disabled for the vCPU.
		 * 4. The IRQ has an incompatible delivery mode (SMI, INIT, etc.).
		 */
		if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
		    kvm_vcpu_apicv_active(&svm->vcpu)) {
			struct amd_iommu_pi_data pi;

			/* Try to enable guest_mode in the IRTE */
			pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
					    AVIC_HPA_MASK);
			pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
					       svm->vcpu.vcpu_id);
			pi.is_guest_mode = true;
			pi.vcpu_data = &vcpu_info;
			ret = irq_set_vcpu_affinity(host_irq, &pi);

			/*
			 * Here, we have successfully set up vCPU affinity in
			 * IOMMU guest mode.  Now, we need to store the posted
			 * interrupt information in the per-vCPU ir_list so that
			 * we can reference it directly when we update the vCPU
			 * scheduling information in the IOMMU IRTE.
			 */
			if (!ret && pi.is_guest_mode)
				svm_ir_list_add(svm, &pi);
		} else {
			/* Use legacy mode in the IRTE */
			struct amd_iommu_pi_data pi;

			/*
			 * Here, pi is used to:
			 * - Tell the IOMMU to use legacy mode for this interrupt.
			 * - Retrieve the ga_tag of the prior interrupt remapping data.
			 */
			pi.prev_ga_tag = 0;
			pi.is_guest_mode = false;
			ret = irq_set_vcpu_affinity(host_irq, &pi);

			/*
			 * Check if the posted interrupt was previously
			 * set up with guest mode by checking if the ga_tag
			 * was cached.  If so, we need to clean up the per-vCPU
			 * ir_list.
			 */
			if (!ret && pi.prev_ga_tag) {
				int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
				struct kvm_vcpu *vcpu;

				vcpu = kvm_get_vcpu_by_id(kvm, id);
				if (vcpu)
					svm_ir_list_del(to_svm(vcpu), &pi);
			}
		}

		if (!ret && svm) {
			trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
						 e->gsi, vcpu_info.vector,
						 vcpu_info.pi_desc_addr, set);
		}

		if (ret < 0) {
			pr_err("%s: failed to update PI IRTE\n", __func__);
			goto out;
		}
	}

	ret = 0;
out:
	srcu_read_unlock(&kvm->irq_srcu, idx);
	return ret;
}
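
/*
 * Rough end-to-end flow for a posted interrupt from an assigned device (a
 * reading aid, not new behaviour): userspace routes a guest MSI, KVM calls
 * avic_pi_update_irte(), which asks the IOMMU (via irq_set_vcpu_affinity())
 * to post the interrupt directly into the vCPU's vAPIC backing page and
 * records the mapping on the per-vCPU ir_list.  While the vCPU is running,
 * the IOMMU posts and rings the doorbell; if the vCPU isn't running, the
 * IOMMU writes a GA-log entry tagged with AVIC_GATAG(), and
 * avic_ga_log_notifier() wakes the target vCPU.
 */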
static inline int
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm))
		return 0;

	/*
	 * Here, we go through the per-vCPU ir_list to update all existing
	 * interrupt remapping table entries targeting this vCPU.
	 */
	spin_lock_irqsave(&svm->ir_list_lock, flags);

	if (list_empty(&svm->ir_list))
		goto out;

	list_for_each_entry(ir, &svm->ir_list, node) {
		ret = amd_iommu_update_ga(cpu, r, ir->data);
		if (ret)
			break;
	}
out:
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
	return ret;
}
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	u64 entry;
	int h_physical_id = kvm_cpu_get_apicid(cpu);
	struct vcpu_svm *svm = to_svm(vcpu);

	lockdep_assert_preemption_disabled();

	if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
		return;

	/*
	 * No need to update anything if the vCPU is blocking, i.e. if the vCPU
	 * is being scheduled in after being preempted.  The CPU entries in the
	 * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
	 * If the vCPU was migrated, its new CPU value will be stuffed when the
	 * vCPU unblocks.
	 */
	if (kvm_vcpu_is_blocking(vcpu))
		return;

	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);

	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
	entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
}
void avic_vcpu_put(struct kvm_vcpu *vcpu)
{
	u64 entry;
	struct vcpu_svm *svm = to_svm(vcpu);

	lockdep_assert_preemption_disabled();

	entry = READ_ONCE(*(svm->avic_physical_id_cache));

	/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
		return;

	avic_update_iommu_vcpu_affinity(vcpu, -1, 0);

	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!lapic_in_kernel(vcpu) || !enable_apicv)
		return;

	if (kvm_vcpu_apicv_active(vcpu)) {
		/*
		 * During AVIC temporary deactivation, the guest could update
		 * the APIC ID, DFR and LDR registers, which would not be
		 * trapped by avic_unaccelerated_access_interception().  In
		 * this case, we need to check and update the AVIC logical
		 * APIC ID table accordingly before re-activating.
		 */
		avic_apicv_post_state_restore(vcpu);
		avic_activate_vmcb(svm);
	} else {
		avic_deactivate_vmcb(svm);
	}

	vmcb_mark_dirty(vmcb, VMCB_AVIC);
}
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
	bool activated = kvm_vcpu_apicv_active(vcpu);

	if (!enable_apicv)
		return;

	avic_refresh_virtual_apic_mode(vcpu);

	if (activated)
		avic_vcpu_load(vcpu, vcpu->cpu);
	else
		avic_vcpu_put(vcpu);

	avic_set_pi_irte_mode(vcpu, activated);
}
void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	/*
	 * Unload the AVIC when the vCPU is about to block, _before_
	 * the vCPU actually blocks.
	 *
	 * Any IRQs that arrive before IsRunning=0 will not cause an
	 * incomplete IPI vmexit on the source, therefore vIRR will also
	 * be checked by kvm_vcpu_check_block() before blocking.  The
	 * memory barrier implicit in set_current_state orders writing
	 * IsRunning=0 before reading the vIRR.  The processor needs a
	 * matching memory barrier on interrupt delivery between writing
	 * IRR and reading IsRunning; the lack of this barrier might be
	 * the cause of erratum #1235.
	 */
	avic_vcpu_put(vcpu);
}
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	avic_vcpu_load(vcpu, vcpu->cpu);
}
/*
 * Note:
 * - The module param "avic" enables both xAPIC and x2APIC mode.
 * - The hypervisor can support both xAVIC and x2AVIC in the same guest.
 * - The mode can be switched at run-time.
 */
bool avic_hardware_setup(void)
{
	if (!npt_enabled)
		return false;

	/* AVIC is a prerequisite for x2AVIC. */
	if (!boot_cpu_has(X86_FEATURE_AVIC) && !force_avic) {
		if (boot_cpu_has(X86_FEATURE_X2AVIC)) {
			pr_warn(FW_BUG "Cannot support x2AVIC because AVIC is disabled");
			pr_warn(FW_BUG "Try enabling AVIC using the force_avic option");
		}
		return false;
	}

	if (boot_cpu_has(X86_FEATURE_AVIC)) {
		pr_info("AVIC enabled\n");
	} else if (force_avic) {
		/*
		 * Some older systems do not advertise AVIC support.
		 * See the Revision Guide for the specific AMD processor
		 * for more detail.
		 */
		pr_warn("AVIC is not supported in CPUID but force enabled");
		pr_warn("Your system might crash and burn");
	}

	/* AVIC is a prerequisite for x2AVIC. */
	x2avic_enabled = boot_cpu_has(X86_FEATURE_X2AVIC);
	if (x2avic_enabled)
		pr_info("x2AVIC enabled\n");

	amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);

	return true;
}
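
/*
 * Usage sketch (assumes the standard kvm_amd module parameters referenced
 * above): AVIC is controlled at module load time, e.g. "modprobe kvm_amd
 * avic=1", and can be force-enabled on systems that hide the CPUID bit with
 * "modprobe kvm_amd avic=1 force_avic=1" (unsafe; taints the kernel).
 */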