Merge tag 'kvmarm-fixes-6.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmar...
author		Paolo Bonzini <pbonzini@redhat.com>
		Sat, 22 Oct 2022 07:32:23 +0000 (03:32 -0400)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Sat, 22 Oct 2022 07:32:23 +0000 (03:32 -0400)
KVM/arm64 fixes for 6.1, take #1

- Fix for stage-2 invalidation holding the VM MMU lock
  for too long by limiting the walk to the largest
  block mapping size (see the sketch after this list)

- Enable stack protection and branch profiling for VHE

- Two selftest fixes
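
The batching fix can be illustrated outside the kernel. The sketch below is
not the kernel code (that is in the diff to arch/arm64/kvm/mmu.c further
down); it mirrors the boundary computation of stage2_range_addr_end() in
plain userspace C, with a 1GiB block size assumed as a stand-in for
kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL), whose real value depends on
the stage-2 start level and page size.

/*
 * Illustrative sketch only: cap each batch of a stage-2 walk at the next
 * largest-block boundary so the MMU lock can be dropped between batches.
 * BLOCK_SIZE is an assumption; the kernel derives it from the page-table
 * configuration.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE	(1ULL << 30)	/* assumed largest block mapping */
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

static uint64_t range_addr_end(uint64_t addr, uint64_t end)
{
	uint64_t boundary = ALIGN_DOWN(addr + BLOCK_SIZE, BLOCK_SIZE);

	/*
	 * The "- 1" comparison lets a boundary (or end) of 0 stand for the
	 * top of the address space instead of comparing as smaller than
	 * every other address.
	 */
	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	uint64_t addr = 0x40200000ULL;	/* arbitrary example range */
	uint64_t end  = 0x140000000ULL;

	while (addr < end) {
		uint64_t next = range_addr_end(addr, end);

		/* the kernel applies fn() here and may release the MMU lock */
		printf("batch: 0x%" PRIx64 " - 0x%" PRIx64 "\n", addr, next);
		addr = next;
	}
	return 0;
}

With the assumed 1GiB block, this example range of just under 4GiB is split
into four batches instead of being processed in one long critical section.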

arch/arm64/kvm/mmu.c

diff --combined arch/arm64/kvm/mmu.c
index 34c5feed9dc17480f516a4743ef5e3be78065220,caf6cfeff35be43a3f51172646602aab13897e5c..60ee3d9f01f8c198b0dd90a35f32594e1cfff2d1
@@@ -31,6 -31,13 +31,13 @@@ static phys_addr_t hyp_idmap_vector
  
  static unsigned long io_map_base;
  
+ static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
+ {
+       phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
+       phys_addr_t boundary = ALIGN_DOWN(addr + size, size);
+
+       return (boundary - 1 < end - 1) ? boundary : end;
+ }
  
  /*
   * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
@@@ -52,7 -59,7 +59,7 @@@ static int stage2_apply_range(struct kv
                if (!pgt)
                        return -EINVAL;
  
-               next = stage2_pgd_addr_end(kvm, addr, end);
+               next = stage2_range_addr_end(addr, end);
                ret = fn(pgt, addr, next - addr);
                if (ret)
                        break;
@@@ -92,13 -99,9 +99,13 @@@ static bool kvm_is_device_pfn(unsigned 
  static void *stage2_memcache_zalloc_page(void *arg)
  {
        struct kvm_mmu_memory_cache *mc = arg;
 +      void *virt;
  
        /* Allocated with __GFP_ZERO, so no need to zero */
 -      return kvm_mmu_memory_cache_alloc(mc);
 +      virt = kvm_mmu_memory_cache_alloc(mc);
 +      if (virt)
 +              kvm_account_pgtable_pages(virt, 1);
 +      return virt;
  }
  
  static void *kvm_host_zalloc_pages_exact(size_t size)
@@@ -106,21 -109,6 +113,21 @@@ static void *kvm_host_zalloc_pages_exac
        return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
  }
  
 +static void *kvm_s2_zalloc_pages_exact(size_t size)
 +{
 +      void *virt = kvm_host_zalloc_pages_exact(size);
 +
 +      if (virt)
 +              kvm_account_pgtable_pages(virt, (size >> PAGE_SHIFT));
 +      return virt;
 +}
 +
 +static void kvm_s2_free_pages_exact(void *virt, size_t size)
 +{
 +      kvm_account_pgtable_pages(virt, -(size >> PAGE_SHIFT));
 +      free_pages_exact(virt, size);
 +}
 +
  static void kvm_host_get_page(void *addr)
  {
        get_page(virt_to_page(addr));
@@@ -131,15 -119,6 +138,15 @@@ static void kvm_host_put_page(void *add
        put_page(virt_to_page(addr));
  }
  
 +static void kvm_s2_put_page(void *addr)
 +{
 +      struct page *p = virt_to_page(addr);
 +      /* Dropping last refcount, the page will be freed */
 +      if (page_count(p) == 1)
 +              kvm_account_pgtable_pages(addr, -1);
 +      put_page(p);
 +}
 +
  static int kvm_host_page_count(void *addr)
  {
        return page_count(virt_to_page(addr));
@@@ -653,10 -632,10 +660,10 @@@ static int get_user_mapping_size(struc
  
  static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
        .zalloc_page            = stage2_memcache_zalloc_page,
 -      .zalloc_pages_exact     = kvm_host_zalloc_pages_exact,
 -      .free_pages_exact       = free_pages_exact,
 +      .zalloc_pages_exact     = kvm_s2_zalloc_pages_exact,
 +      .free_pages_exact       = kvm_s2_free_pages_exact,
        .get_page               = kvm_host_get_page,
 -      .put_page               = kvm_host_put_page,
 +      .put_page               = kvm_s2_put_page,
        .page_count             = kvm_host_page_count,
        .phys_to_virt           = kvm_host_va,
        .virt_to_phys           = kvm_host_pa,