Merge branch 'x86/hyperv' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
[linux-2.6-block.git] / virt / kvm / arm / mmu.c
index b4b69c2d10120237e12bc6524243071bf645f1d9..71dc178ac3bd0125f4347af7c8a2760301def756 100644 (file)
@@ -924,6 +924,32 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
        return 0;
 }
 
+/*
+ * stage2_is_exec() - check whether the stage 2 entry mapping @addr
+ * currently has execute permission (XN clear), at either the PMD
+ * (block/THP) or the PTE level.
+ *
+ * The walk is performed with a NULL memcache, so it never allocates
+ * intermediate tables; an unmapped @addr simply yields false.
+ */
+static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+{
+       pmd_t *pmdp;
+       pte_t *ptep;
+
+       pmdp = stage2_get_pmd(kvm, NULL, addr); /* walk only, never populate */
+       if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
+               return false;
+
+       if (pmd_thp_or_huge(*pmdp)) /* block mapping: the PMD is the leaf */
+               return kvm_s2pmd_exec(pmdp);
+
+       ptep = pte_offset_kernel(pmdp, addr);
+       if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
+               return false;
+
+       return kvm_s2pte_exec(ptep);
+}
+
 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte,
                          unsigned long flags)
@@ -1255,10 +1274,14 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
        kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
-                                     unsigned long size)
+static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) /* D-side only; vcpu arg no longer needed */
 {
-       __coherent_cache_guest_page(vcpu, pfn, size);
+       __clean_dcache_guest_page(pfn, size); /* arch wrapper: clean dcache for the guest page */
+}
+
+static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size) /* I-side half, split from the old combined helper */
+{
+       __invalidate_icache_guest_page(pfn, size);
 }
 static void kvm_send_hwpoison_signal(unsigned long address,
@@ -1284,7 +1307,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          unsigned long fault_status)
 {
        int ret;
-       bool write_fault, writable, hugetlb = false, force_pte = false;
+       bool write_fault, exec_fault, writable, hugetlb = false, force_pte = false;
        unsigned long mmu_seq;
        gfn_t gfn = fault_ipa >> PAGE_SHIFT;
        struct kvm *kvm = vcpu->kvm;
@@ -1296,7 +1319,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        unsigned long flags = 0;
 
        write_fault = kvm_is_write_fault(vcpu);
-       if (fault_status == FSC_PERM && !write_fault) {
+       exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+       VM_BUG_ON(write_fault && exec_fault);
+
+       if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }
@@ -1310,7 +1336,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return -EFAULT;
        }
 
-       if (is_vm_hugetlb_page(vma) && !logging_active) {
+       if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
                hugetlb = true;
                gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
        } else {
@@ -1389,7 +1415,19 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        new_pmd = kvm_s2pmd_mkwrite(new_pmd);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
+
+               if (fault_status != FSC_PERM)
+                       clean_dcache_guest_page(pfn, PMD_SIZE);
+
+               if (exec_fault) {
+                       new_pmd = kvm_s2pmd_mkexec(new_pmd);
+                       invalidate_icache_guest_page(pfn, PMD_SIZE);
+               } else if (fault_status == FSC_PERM) {
+                       /* Preserve execute if XN was already cleared */
+                       if (stage2_is_exec(kvm, fault_ipa))
+                               new_pmd = kvm_s2pmd_mkexec(new_pmd);
+               }
+
                ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
        } else {
                pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1399,7 +1437,19 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        kvm_set_pfn_dirty(pfn);
                        mark_page_dirty(kvm, gfn);
                }
-               coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
+
+               if (fault_status != FSC_PERM)
+                       clean_dcache_guest_page(pfn, PAGE_SIZE);
+
+               if (exec_fault) {
+                       new_pte = kvm_s2pte_mkexec(new_pte);
+                       invalidate_icache_guest_page(pfn, PAGE_SIZE);
+               } else if (fault_status == FSC_PERM) {
+                       /* Preserve execute if XN was already cleared */
+                       if (stage2_is_exec(kvm, fault_ipa))
+                               new_pte = kvm_s2pte_mkexec(new_pte);
+               }
+
                ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
        }