powerpc/uaccess: Enable hardened usercopy
author    Kees Cook <keescook@chromium.org>    Thu, 23 Jun 2016 22:10:01 +0000 (15:10 -0700)
committer Kees Cook <keescook@chromium.org>    Tue, 26 Jul 2016 21:41:51 +0000 (14:41 -0700)
Enables CONFIG_HARDENED_USERCOPY checks on powerpc: selects HAVE_ARCH_HARDENED_USERCOPY and adds check_object_size() calls to the user copy routines for copies whose length is not a compile-time constant.

Based on code from PaX and grsecurity.

Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
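
For reference, the runtime check wired up here is check_object_size(), which lands in __check_object_size() in mm/usercopy.c and refuses copies that would overrun the kernel object they target. The following is a simplified, userspace-buildable sketch of that idea only; the helper names, the struct, and the fixed sizes are invented for illustration and are not the kernel implementation.

/*
 * Simplified illustration of the HARDENED_USERCOPY idea: before
 * copying 'n' bytes into a kernel object, verify that 'n' does not
 * exceed the object's known size.  object_size_of() and
 * checked_copy() are invented names for this sketch; the real check
 * is check_object_size() -> __check_object_size() in mm/usercopy.c.
 */
#include <stdio.h>
#include <string.h>

struct session {
	char name[16];		/* fixed-size destination object */
	unsigned long flags;
};

/* Stand-in for the slab/stack object-size lookup done by the kernel. */
static size_t object_size_of(const struct session *s)
{
	return sizeof(s->name);
}

/* Stand-in for copy_from_user() with the hardened check applied. */
static int checked_copy(struct session *s, const void *src, size_t n)
{
	size_t limit = object_size_of(s);

	if (n > limit) {
		fprintf(stderr, "usercopy: rejected %zu-byte copy into %zu-byte object\n",
			n, limit);
		return -1;	/* the kernel refuses the copy here */
	}
	memcpy(s->name, src, n);
	return 0;
}

int main(void)
{
	struct session s = { { 0 }, 0 };
	char user_buf[64] = "attacker-controlled";

	checked_copy(&s, user_buf, 8);	/* fits within 'name': allowed */
	checked_copy(&s, user_buf, 64);	/* would overflow 'name': rejected */
	return 0;
}

Compiled and run, the first copy succeeds and the second is refused, which is the behaviour the kernel check provides for slab objects and stack buffers.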
arch/powerpc/Kconfig
arch/powerpc/include/asm/uaccess.h

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 0a9d439bcda6c1ade0025d5e894e14d2db4b24be..0ac48a81632dbcadf8c75e3d71cfd335a770a90d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -164,6 +164,7 @@ config PPC
        select ARCH_HAS_UBSAN_SANITIZE_ALL
        select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
        select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
+       select HAVE_ARCH_HARDENED_USERCOPY
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index b7c20f0b8fbeebe03a1c57a4c62f86c0c42e962d..c1dc6c14deb84a261ab19e509f12078a271937d1 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -310,10 +310,15 @@ static inline unsigned long copy_from_user(void *to,
 {
        unsigned long over;
 
-       if (access_ok(VERIFY_READ, from, n))
+       if (access_ok(VERIFY_READ, from, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(to, n, false);
                return __copy_tofrom_user((__force void __user *)to, from, n);
+       }
        if ((unsigned long)from < TASK_SIZE) {
                over = (unsigned long)from + n - TASK_SIZE;
+               if (!__builtin_constant_p(n - over))
+                       check_object_size(to, n - over, false);
                return __copy_tofrom_user((__force void __user *)to, from,
                                n - over) + over;
        }
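
A worked example of the clipping in the second branch above, using an invented TASK_SIZE: when the requested range runs past the end of the user address space, only the first (n - over) bytes are copied, 'over' is added to the returned bytes-not-copied count, and the hardened check is therefore applied to the clipped length rather than the caller's n.

/*
 * Worked example of the 'over' arithmetic, with a made-up TASK_SIZE.
 * Buildable as ordinary userspace C.
 */
#include <stdio.h>

#define TASK_SIZE 0x100000000UL		/* illustrative value only */

int main(void)
{
	unsigned long from = TASK_SIZE - 24;	/* last 24 bytes of user space */
	unsigned long n = 100;			/* requested copy length */
	unsigned long over = from + n - TASK_SIZE;

	/* prints: requested 100 bytes, copying 24, reporting 76 uncopied */
	printf("requested %lu bytes, copying %lu, reporting %lu uncopied\n",
	       n, n - over, over);
	return 0;
}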
@@ -325,10 +330,15 @@ static inline unsigned long copy_to_user(void __user *to,
 {
        unsigned long over;
 
-       if (access_ok(VERIFY_WRITE, to, n))
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n, true);
                return __copy_tofrom_user(to, (__force void __user *)from, n);
+       }
        if ((unsigned long)to < TASK_SIZE) {
                over = (unsigned long)to + n - TASK_SIZE;
+               if (!__builtin_constant_p(n - over))
+                       check_object_size(from, n - over, true);
                return __copy_tofrom_user(to, (__force void __user *)from,
                                n - over) + over;
        }
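
The !__builtin_constant_p(n) guards mean the runtime check is only paid for variable-length copies; the pattern used throughout this series is that copies with a compile-time-constant size can already be validated at build time. __builtin_constant_p() is a GCC/Clang builtin; a small standalone demonstration:

#include <stdio.h>

int main(int argc, char **argv)
{
	unsigned long variable = (unsigned long)argc * 16;

	(void)argv;
	printf("%d %d\n",
	       __builtin_constant_p(16UL),	/* 1: a literal is a compile-time constant */
	       __builtin_constant_p(variable));	/* 0: value depends on runtime input */
	return 0;
}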
@@ -372,6 +382,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
                if (ret == 0)
                        return 0;
        }
+
+       if (!__builtin_constant_p(n))
+               check_object_size(to, n, false);
+
        return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
@@ -398,6 +412,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
                if (ret == 0)
                        return 0;
        }
+       if (!__builtin_constant_p(n))
+               check_object_size(from, n, true);
+
        return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
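
Finally, a hedged sketch (not part of this patch) of the bug class these checks are aimed at: a character device that trusts a user-supplied length when filling a fixed 32-byte slab allocation. The device name and sizes are invented. With CONFIG_HARDENED_USERCOPY enabled on powerpc after this change, a write() longer than the object is caught inside copy_from_user() at runtime instead of silently corrupting neighbouring heap objects.

/*
 * Hypothetical misc device with a classic usercopy bug: 'len' is not
 * clamped to the 32-byte kmalloc object before copy_from_user().
 */
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fs.h>

static char *scratch;	/* 32-byte kmalloc object */

static ssize_t demo_write(struct file *file, const char __user *buf,
			  size_t len, loff_t *ppos)
{
	/* BUG: 'len' should be clamped to 32 before the copy. */
	if (copy_from_user(scratch, buf, len))
		return -EFAULT;
	return len;
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.write = demo_write,
};

static struct miscdevice demo_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "usercopy-demo",
	.fops = &demo_fops,
};

static int __init demo_init(void)
{
	scratch = kmalloc(32, GFP_KERNEL);
	if (!scratch)
		return -ENOMEM;
	return misc_register(&demo_dev);
}

static void __exit demo_exit(void)
{
	misc_deregister(&demo_dev);
	kfree(scratch);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Writing more than 32 bytes to /dev/usercopy-demo would trip the hardened check; clamping len to the allocation size is the actual fix for such a driver.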