Linux 6.10-rc3
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 67b18adc75ddc12d89513ac71b411c8f33561fdd..e6c469b323ccb748de22adc7d9f0a16dd195edad 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -515,18 +515,19 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long ad
 
        if (error_code & X86_PF_INSTR) {
                unsigned int level;
+               bool nx, rw;
                pgd_t *pgd;
                pte_t *pte;
 
                pgd = __va(read_cr3_pa());
                pgd += pgd_index(address);
 
-               pte = lookup_address_in_pgd(pgd, address, &level);
+               pte = lookup_address_in_pgd_attr(pgd, address, &level, &nx, &rw);
 
-               if (pte && pte_present(*pte) && !pte_exec(*pte))
+               if (pte && pte_present(*pte) && (!pte_exec(*pte) || nx))
                        pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
                                from_kuid(&init_user_ns, current_uid()));
-               if (pte && pte_present(*pte) && pte_exec(*pte) &&
+               if (pte && pte_present(*pte) && pte_exec(*pte) && !nx &&
                                (pgd_flags(*pgd) & _PAGE_USER) &&
                                (__read_cr4() & X86_CR4_SMEP))
                        pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
@@ -724,39 +725,8 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
        WARN_ON_ONCE(user_mode(regs));
 
        /* Are we prepared to handle this kernel fault? */
-       if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
-               /*
-                * Any interrupt that takes a fault gets the fixup. This makes
-                * the below recursive fault logic only apply to a faults from
-                * task context.
-                */
-               if (in_interrupt())
-                       return;
-
-               /*
-                * Per the above we're !in_interrupt(), aka. task context.
-                *
-                * In this case we need to make sure we're not recursively
-                * faulting through the emulate_vsyscall() logic.
-                */
-               if (current->thread.sig_on_uaccess_err && signal) {
-                       sanitize_error_code(address, &error_code);
-
-                       set_signal_archinfo(address, error_code);
-
-                       if (si_code == SEGV_PKUERR) {
-                               force_sig_pkuerr((void __user *)address, pkey);
-                       } else {
-                               /* XXX: hwpoison faults will set the wrong code. */
-                               force_sig_fault(signal, si_code, (void __user *)address);
-                       }
-               }
-
-               /*
-                * Barring that, we can do the fixup and be happy.
-                */
+       if (fixup_exception(regs, X86_TRAP_PF, error_code, address))
                return;
-       }
 
        /*
         * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
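
The second hunk removes the vsyscall-emulation signal delivery from kernelmode_fixup_or_oops(), so the kernel-mode path now only asks whether the faulting instruction has an exception-table fixup and returns if it does. Conceptually, fixup_exception() looks regs->ip up in the kernel's exception table and, on a hit, adjusts the register state so execution resumes at the fixup stub. A simplified sketch of that idea follows; handle_one_fixup() is a stand-in invented for this note, whereas the real arch/x86/mm/extable.c dispatches on the entry type (uaccess, MSR, FPU restore, and so on):

/*
 * Sketch only, not the real fixup_exception(): look the faulting IP up
 * in the exception table and let a hypothetical per-entry handler
 * rewrite regs so execution continues at the fixup.
 */
static int fixup_exception_sketch(struct pt_regs *regs, int trapnr,
				  unsigned long error_code,
				  unsigned long fault_addr)
{
	const struct exception_table_entry *e;

	e = search_exception_tables(regs->ip);
	if (!e)
		return 0;	/* no fixup: the caller goes on to oops */

	/* handle_one_fixup() is hypothetical, for illustration only. */
	return handle_one_fixup(e, regs, trapnr, error_code, fault_addr);
}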