powerpc/align: Convert emulate_spe() to user_access_begin
author     Christophe Leroy <christophe.leroy@csgroup.eu>
           Fri, 12 Mar 2021 13:25:11 +0000 (13:25 +0000)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Sat, 3 Apr 2021 10:21:39 +0000 (21:21 +1100)
This patch converts emulate_spe() to the user_access_begin()/user_access_end()
logic.
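
For reference, the general shape of such a conversion looks roughly like
this (a minimal, generic sketch; p, v, ret and the Efault label are
illustrative, not taken from align.c):

  /* Before: explicit access_ok() check, then _inatomic accessors
   * accumulating an error flag.
   */
  if (unlikely(!access_ok(p, sizeof(v))))
          return -EFAULT;
  ret |= __get_user_inatomic(v, p);
  if (unlikely(ret))
          return -EFAULT;

  /* After: user_read_access_begin() performs the access_ok() check and
   * opens the user access window; unsafe_get_user() branches to a label
   * on fault; user_read_access_end() closes the window again.
   */
  if (!user_read_access_begin(p, sizeof(v)))
          return -EFAULT;
  unsafe_get_user(v, p, Efault);
  user_read_access_end();
  return 0;

Efault:
  user_read_access_end();
  return -EFAULT;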

Since commit 662bbcb2747c ("mm, sched: Allow uaccess in atomic with
pagefault_disable()"), might_fault() doesn't fire when called from
sections where pagefaults are disabled, which must already be the case
when the _inatomic variants of __get_user() and __put_user() are used.
So the might_fault() in user_access_begin() is not a problem.
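
For context, the debug check has roughly looked like this since that
commit (a simplified sketch of the mm/memory.c logic, not a verbatim
copy):

  void __might_fault(const char *file, int line)
  {
          /*
           * User accesses with pagefaults disabled are explicitly
           * allowed, so don't warn about potentially sleeping here.
           */
          if (pagefault_disabled())
                  return;
          __might_sleep(file, line, 0);
  }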

The removed check verified user_mode() together with access_ok(), but a
second user_mode() check just after it already leads to an immediate
return when not in user mode. The access_ok() check is now performed by
user_access_begin(), which is called after that user_mode() check, so
there is no need to check user_mode() again.
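
After the change, the relevant flow is (abridged from the hunks below):

          /* userland only */
          if (unlikely(!user_mode(regs)))
                  return 0;

          /* ... later, for the load case ... */
          if (!user_read_access_begin(addr, nb))  /* implies access_ok() */
                  return -EFAULT;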

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c95a648fdf75992c9d88f3c73cc23e7537fcf2ad.1615555354.git.christophe.leroy@csgroup.eu
arch/powerpc/kernel/align.c

index c7797eb958c73e94eb6e6bb0c1417de797632505..f362c99213be26a1a29eea3846188fc2004bdd3e 100644 (file)
@@ -107,7 +107,6 @@ static struct aligninfo spe_aligninfo[32] = {
 static int emulate_spe(struct pt_regs *regs, unsigned int reg,
                       struct ppc_inst ppc_instr)
 {
-       int ret;
        union {
                u64 ll;
                u32 w[2];
@@ -127,11 +126,6 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
        nb = spe_aligninfo[instr].len;
        flags = spe_aligninfo[instr].flags;
 
-       /* Verify the address of the operand */
-       if (unlikely(user_mode(regs) &&
-                    !access_ok(addr, nb)))
-               return -EFAULT;
-
        /* userland only */
        if (unlikely(!user_mode(regs)))
                return 0;
@@ -169,26 +163,27 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
                }
        } else {
                temp.ll = data.ll = 0;
-               ret = 0;
                p = addr;
 
+               if (!user_read_access_begin(addr, nb))
+                       return -EFAULT;
+
                switch (nb) {
                case 8:
-                       ret |= __get_user_inatomic(temp.v[0], p++);
-                       ret |= __get_user_inatomic(temp.v[1], p++);
-                       ret |= __get_user_inatomic(temp.v[2], p++);
-                       ret |= __get_user_inatomic(temp.v[3], p++);
+                       unsafe_get_user(temp.v[0], p++, Efault_read);
+                       unsafe_get_user(temp.v[1], p++, Efault_read);
+                       unsafe_get_user(temp.v[2], p++, Efault_read);
+                       unsafe_get_user(temp.v[3], p++, Efault_read);
                        fallthrough;
                case 4:
-                       ret |= __get_user_inatomic(temp.v[4], p++);
-                       ret |= __get_user_inatomic(temp.v[5], p++);
+                       unsafe_get_user(temp.v[4], p++, Efault_read);
+                       unsafe_get_user(temp.v[5], p++, Efault_read);
                        fallthrough;
                case 2:
-                       ret |= __get_user_inatomic(temp.v[6], p++);
-                       ret |= __get_user_inatomic(temp.v[7], p++);
-                       if (unlikely(ret))
-                               return -EFAULT;
+                       unsafe_get_user(temp.v[6], p++, Efault_read);
+                       unsafe_get_user(temp.v[7], p++, Efault_read);
                }
+               user_read_access_end();
 
                switch (instr) {
                case EVLDD:
@@ -255,31 +250,41 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 
        /* Store result to memory or update registers */
        if (flags & ST) {
-               ret = 0;
                p = addr;
+
+               if (!user_write_access_begin(addr, nb))
+                       return -EFAULT;
+
                switch (nb) {
                case 8:
-                       ret |= __put_user_inatomic(data.v[0], p++);
-                       ret |= __put_user_inatomic(data.v[1], p++);
-                       ret |= __put_user_inatomic(data.v[2], p++);
-                       ret |= __put_user_inatomic(data.v[3], p++);
+                       unsafe_put_user(data.v[0], p++, Efault_write);
+                       unsafe_put_user(data.v[1], p++, Efault_write);
+                       unsafe_put_user(data.v[2], p++, Efault_write);
+                       unsafe_put_user(data.v[3], p++, Efault_write);
                        fallthrough;
                case 4:
-                       ret |= __put_user_inatomic(data.v[4], p++);
-                       ret |= __put_user_inatomic(data.v[5], p++);
+                       unsafe_put_user(data.v[4], p++, Efault_write);
+                       unsafe_put_user(data.v[5], p++, Efault_write);
                        fallthrough;
                case 2:
-                       ret |= __put_user_inatomic(data.v[6], p++);
-                       ret |= __put_user_inatomic(data.v[7], p++);
+                       unsafe_put_user(data.v[6], p++, Efault_write);
+                       unsafe_put_user(data.v[7], p++, Efault_write);
                }
-               if (unlikely(ret))
-                       return -EFAULT;
+               user_write_access_end();
        } else {
                *evr = data.w[0];
                regs->gpr[reg] = data.w[1];
        }
 
        return 1;
+
+Efault_read:
+       user_read_access_end();
+       return -EFAULT;
+
+Efault_write:
+       user_write_access_end();
+       return -EFAULT;
 }
 #endif /* CONFIG_SPE */