Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author    Linus Torvalds <torvalds@linux-foundation.org>
Wed, 18 Sep 2019 16:49:13 +0000 (09:49 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 18 Sep 2019 16:49:13 +0000 (09:49 -0700)
Pull KVM updates from Paolo Bonzini:
 "s390:
   - ioctl hardening
   - selftests

  ARM:
   - ITS translation cache
   - support for 512 vCPUs
   - various cleanups and bugfixes

  PPC:
   - various minor fixes and preparation

  x86:
   - bugfixes all over the place (posted interrupts, SVM, emulation
     corner cases, blocked INIT)
   - some IPI optimizations"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (75 commits)
  KVM: X86: Use IPI shorthands in kvm guest when support
  KVM: x86: Fix INIT signal handling in various CPU states
  KVM: VMX: Introduce exit reason for receiving INIT signal on guest-mode
  KVM: VMX: Stop the preemption timer during vCPU reset
  KVM: LAPIC: Micro optimize IPI latency
  kvm: Nested KVM MMUs need PAE root too
  KVM: x86: set ctxt->have_exception in x86_decode_insn()
  KVM: x86: always stop emulation on page fault
  KVM: nVMX: trace nested VM-Enter failures detected by H/W
  KVM: nVMX: add tracepoint for failed nested VM-Enter
  x86: KVM: svm: Fix a check in nested_svm_vmrun()
  KVM: x86: Return to userspace with internal error on unexpected exit reason
  KVM: x86: Add kvm_emulate_{rd,wr}msr() to consolidate VXM/SVM code
  KVM: x86: Refactor up kvm_{g,s}et_msr() to simplify callers
  doc: kvm: Fix return description of KVM_SET_MSRS
  KVM: X86: Tune PLE Window tracepoint
  KVM: VMX: Change ple_window type to unsigned int
  KVM: X86: Remove tailing newline for tracepoints
  KVM: X86: Trace vcpu_id for vmexit
  KVM: x86: Manually calculate reserved bits when loading PDPTRS
  ...
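
The IPI work in the shortlog above ("KVM: X86: Use IPI shorthands in kvm
guest when support", "KVM: LAPIC: Micro optimize IPI latency") builds on
the xAPIC ICR destination-shorthand field. As a rough stand-alone
illustration only (the apic_write() below is a stub, not the kernel
accessor, and the vector is made up; the register offset and shorthand
bits mirror asm/apicdef.h), one ICR write with the "all excluding self"
shorthand replaces a loop of per-CPU ICR writes:

#include <stdint.h>
#include <stdio.h>

#define APIC_ICR         0x300       /* interrupt command register (low) */
#define APIC_DEST_ALLBUT 0xC0000u    /* shorthand: all excluding self    */
#define APIC_DM_FIXED    0x00000u    /* fixed delivery mode              */

static void apic_write(uint32_t reg, uint32_t val)
{
        /* Stub: the real accessor writes the local APIC MMIO page/MSR. */
        printf("apic_write(0x%03x, 0x%08x)\n", (unsigned)reg, (unsigned)val);
}

int main(void)
{
        uint8_t vector = 0xfd;       /* hypothetical IPI vector */

        /*
         * A single write reaches every other CPU; without the shorthand
         * the sender must program the destination field and write the
         * ICR once per target CPU.
         */
        apic_write(APIC_ICR, APIC_DEST_ALLBUT | APIC_DM_FIXED | vector);
        return 0;
}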

13 files changed:
arch/s390/kvm/kvm-s390.c
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/kvm.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic.c

Simple merge
Simple merge
Simple merge
Simple merge
index a63964e7cec7bdd67f2b1886adefe98f03972185,9086ee4b64cbf40d3d99eef91fb93e050ec9d159..a10af9c87f8ac1a2b42bf5a6f182840bd2fce276
@@@ -5665,87 -5650,21 +5669,99 @@@ int kvm_mmu_create(struct kvm_vcpu *vcp
                vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
  
        vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
-       return alloc_mmu_pages(vcpu);
+       ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu);
+       if (ret)
+               return ret;
+       ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu);
+       if (ret)
+               goto fail_allocate_root;
+       return ret;
+  fail_allocate_root:
+       free_mmu_pages(&vcpu->arch.guest_mmu);
+       return ret;
  }
  
 +
 +static void kvm_zap_obsolete_pages(struct kvm *kvm)
 +{
 +      struct kvm_mmu_page *sp, *node;
 +      LIST_HEAD(invalid_list);
 +      int ign;
 +
 +restart:
 +      list_for_each_entry_safe_reverse(sp, node,
 +            &kvm->arch.active_mmu_pages, link) {
 +              /*
 +               * No obsolete valid page exists before a newly created page
 +               * since active_mmu_pages is a FIFO list.
 +               */
 +              if (!is_obsolete_sp(kvm, sp))
 +                      break;
 +
 +              /*
 +               * Do not repeatedly zap a root page to avoid unnecessary
 +               * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
 +               * progress:
 +               *    vcpu 0                        vcpu 1
 +               *                         call vcpu_enter_guest():
 +               *                            1): handle KVM_REQ_MMU_RELOAD
 +               *                                and require mmu-lock to
 +               *                                load mmu
 +               * repeat:
 +               *    1): zap root page and
 +               *        send KVM_REQ_MMU_RELOAD
 +               *
 +               *    2): if (cond_resched_lock(mmu-lock))
 +               *
 +               *                            2): hold mmu-lock and load mmu
 +               *
 +               *                            3): see KVM_REQ_MMU_RELOAD bit
 +               *                                on vcpu->requests is set
 +               *                                then return 1 to call
 +               *                                vcpu_enter_guest() again.
 +               *            goto repeat;
 +               *
 +               * Since we walk the list in reverse and invalid pages
 +               * are moved to the head of the list, skipping invalid
 +               * pages avoids walking the list forever.
 +               */
 +              if (sp->role.invalid)
 +                      continue;
 +
 +              if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
 +                      kvm_mmu_commit_zap_page(kvm, &invalid_list);
 +                      cond_resched_lock(&kvm->mmu_lock);
 +                      goto restart;
 +              }
 +
 +              if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
 +                      goto restart;
 +      }
 +
 +      kvm_mmu_commit_zap_page(kvm, &invalid_list);
 +}
 +
 +/*
 + * Fast invalidate all shadow pages and use lock-break technique
 + * to zap obsolete pages.
 + *
 + * It is required when a memslot is being deleted or the VM is being
 + * destroyed; in those cases the KVM MMU must not use any resource of
 + * the slot being deleted (or, on VM destruction, of any slot) after
 + * this function returns.
 + */
 +static void kvm_mmu_zap_all_fast(struct kvm *kvm)
 +{
 +      spin_lock(&kvm->mmu_lock);
 +      kvm->arch.mmu_valid_gen++;
 +
 +      kvm_zap_obsolete_pages(kvm);
 +      spin_unlock(&kvm->mmu_lock);
 +}
 +
  static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
                        struct kvm_memory_slot *slot,
                        struct kvm_page_track_notifier_node *node)
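
For context on the mmu.c hunk above: kvm_mmu_zap_all_fast() makes every
existing shadow page obsolete in O(1) by bumping kvm->arch.mmu_valid_gen,
and kvm_zap_obsolete_pages() then reclaims the stale pages incrementally
under the MMU lock. A minimal stand-alone sketch of that generation-counter
pattern (hypothetical structures, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct shadow_page {
        unsigned long gen;           /* generation the page was created in */
        bool valid;
};

struct mmu_state {
        unsigned long valid_gen;     /* current valid generation */
        struct shadow_page pages[4];
        int nr_pages;
};

static bool is_obsolete(const struct mmu_state *m, const struct shadow_page *p)
{
        return p->gen != m->valid_gen;
}

/* O(1) "zap all": nothing is freed here; pages merely become obsolete. */
static void zap_all_fast(struct mmu_state *m)
{
        m->valid_gen++;
}

/* Lazy reclaim, done later (and incrementally) under the MMU lock. */
static void reclaim_obsolete(struct mmu_state *m)
{
        for (int i = 0; i < m->nr_pages; i++) {
                struct shadow_page *p = &m->pages[i];

                if (p->valid && is_obsolete(m, p)) {
                        p->valid = false;
                        printf("zapped page %d (gen %lu)\n", i, p->gen);
                }
        }
}

int main(void)
{
        struct mmu_state m = { .valid_gen = 1 };

        for (int i = 0; i < 3; i++)
                m.pages[m.nr_pages++] =
                        (struct shadow_page){ .gen = m.valid_gen, .valid = true };

        zap_all_fast(&m);            /* all three pages obsolete at once */
        reclaim_obsolete(&m);
        return 0;
}

New pages allocated after the bump carry the new generation, so they are
never touched by the reclaim walk.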
index e0368076a1ef90660a460bf64f56bfa7b549fd2c,d24050b647c79c648ef10122b4b60e2b8614e88a..04fe21849b6e46adcc7dc4f7b32bea3541bf6cea
@@@ -7326,9 -7318,11 +7312,11 @@@ static struct kvm_x86_ops svm_x86_ops _
        .mem_enc_unreg_region = svm_unregister_enc_region,
  
        .nested_enable_evmcs = nested_enable_evmcs,
 -      .nested_get_evmcs_version = nested_get_evmcs_version,
 +      .nested_get_evmcs_version = NULL,
  
        .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
+       .apic_init_signal_blocked = svm_apic_init_signal_blocked,
  };
  
  static int __init svm_init(void)
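
The svm.c hunk above (and the vmx.c hunk below) follow the kvm_x86_ops
convention that a table entry may be NULL when a vendor does not implement
a hook, as with nested_get_evmcs_version on SVM here. A generic sketch of
that optional-callback pattern (names are illustrative, not the real KVM
internals):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vendor_ops {
        int  (*get_evmcs_version)(void);    /* optional: may be NULL */
        bool (*init_signal_blocked)(void);  /* vendor-specific hook  */
};

static bool svm_init_blocked(void)
{
        return false;                       /* placeholder policy */
}

static const struct vendor_ops svm_ops = {
        .get_evmcs_version   = NULL,        /* unsupported on this vendor */
        .init_signal_blocked = svm_init_blocked,
};

/* Generic caller: check the pointer, fall back when the hook is absent. */
static int get_evmcs_version(const struct vendor_ops *ops)
{
        return ops->get_evmcs_version ? ops->get_evmcs_version() : 0;
}

int main(void)
{
        printf("evmcs version: %d\n", get_evmcs_version(&svm_ops));
        printf("INIT blocked:  %d\n", svm_ops.init_signal_blocked());
        return 0;
}
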
Simple merge
index c030c96fc81a817f6e11e3b1580aa907b8bc63f7,73bf9a2e6fb6aa044651a4528b8b129158537bd3..4a99be1fae4e58193116d5e08a98ef42a4debd2b
@@@ -7797,8 -7798,8 +7798,9 @@@ static struct kvm_x86_ops vmx_x86_ops _
        .set_nested_state = NULL,
        .get_vmcs12_pages = NULL,
        .nested_enable_evmcs = NULL,
 +      .nested_get_evmcs_version = NULL,
        .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
+       .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
  };
  
  static void vmx_cleanup_l1d_flush(void)
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge