x86/uaccess: Introduce user_access_{save,restore}()
author		Peter Zijlstra <peterz@infradead.org>
Wed, 3 Apr 2019 07:39:48 +0000 (09:39 +0200)
committer	Ingo Molnar <mingo@kernel.org>
Wed, 3 Apr 2019 09:02:19 +0000 (11:02 +0200)
Introduce common helpers for when we need to safely suspend a
uaccess section; for instance to generate a {KA,UB}SAN report.
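
As a sketch of the intended use (the function names here are
illustrative, not part of this patch): a report function such as
KASAN's can be entered while a uaccess window is open, so it saves
and closes the window around work that must not run with it open:

    #include <linux/uaccess.h>

    /*
     * Illustrative stub: printk(), stack dumps etc. must not run
     * with the uaccess window (EFLAGS.AC on x86/SMAP) open.
     */
    static void example_report_inner(unsigned long addr) { }

    void example_report(unsigned long addr)
    {
            unsigned long ua_flags;

            ua_flags = user_access_save();  /* close window, remember state */
            example_report_inner(addr);
            user_access_restore(ua_flags);  /* reopen it only if it was open */
    }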

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/smap.h
arch/x86/include/asm/uaccess.h
include/linux/uaccess.h

arch/x86/include/asm/smap.h
index db333300bd4be17205daf3b2e820c0af101ccc2d..6cfe431710203f3e7cf329f9c2b82a8391cfa65c 100644
@@ -58,6 +58,23 @@ static __always_inline void stac(void)
        alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
 }
 
+static __always_inline unsigned long smap_save(void)
+{
+       unsigned long flags;
+
+       asm volatile (ALTERNATIVE("", "pushf; pop %0; " __stringify(__ASM_CLAC),
+                                 X86_FEATURE_SMAP)
+                     : "=rm" (flags) : : "memory", "cc");
+
+       return flags;
+}
+
+static __always_inline void smap_restore(unsigned long flags)
+{
+       asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
+                     : : "g" (flags) : "memory", "cc");
+}
+
 /* These macros can be used in asm() statements */
 #define ASM_CLAC \
        ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
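
For reference (a reading of the code above, not additional patch
content): with X86_FEATURE_SMAP patched in, the alternatives expand to

    /*
     * smap_save():
     *      pushf                   ; EFLAGS, which holds the AC bit, ...
     *      pop  %[flags]           ; ... is saved into 'flags'
     *      clac                    ; clear AC: uaccess window closed
     *
     * smap_restore(flags):
     *      push %[flags]
     *      popf                    ; restore EFLAGS; AC is set again
     *                              ; only if it was set at save time
     */

Without SMAP both sequences stay empty (the alternative's default),
which is why the !SMAP stubs in the hunk below can return 0 and do
nothing.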
@@ -69,6 +86,9 @@ static __always_inline void stac(void)
 static inline void clac(void) { }
 static inline void stac(void) { }
 
+static inline unsigned long smap_save(void) { return 0; }
+static inline void smap_restore(unsigned long flags) { }
+
 #define ASM_CLAC
 #define ASM_STAC
 
arch/x86/include/asm/uaccess.h
index ae5783b5fab017f3d75493f5a0fef9091ccd948f..5ca7b91faf67c552d347c3806d4e0e3b8f825975 100644
@@ -715,6 +715,9 @@ static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
 #define user_access_begin(a,b) user_access_begin(a,b)
 #define user_access_end()      __uaccess_end()
 
+#define user_access_save()     smap_save()
+#define user_access_restore(x) smap_restore(x)
+
 #define unsafe_put_user(x, ptr, label) \
        __put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
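
Illustratively (not from this patch; the function name and the Efault
label are made up), the new pair nests inside an open window like so.
In practice the save/restore is done by instrumentation entered
asynchronously, but the nesting semantics are the same:

    int example_put(int __user *uptr, int val)
    {
            unsigned long flags;

            if (!user_access_begin(uptr, sizeof(*uptr)))
                    return -EFAULT;

            flags = user_access_save();     /* window closed */
            /* ... anything that must not run with AC set ... */
            user_access_restore(flags);     /* window open again */

            unsafe_put_user(val, uptr, Efault);
            user_access_end();
            return 0;

    Efault:
            user_access_end();
            return -EFAULT;
    }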
 
include/linux/uaccess.h
index 37b226e8df13f3b6235277485519b5de37cf6fe2..2b70130af58578da68627927201efd1c5160900a 100644
@@ -268,6 +268,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #define user_access_end() do { } while (0)
 #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
 #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+static inline unsigned long user_access_save(void) { return 0UL; }
+static inline void user_access_restore(unsigned long flags) { }
 #endif
 
 #ifdef CONFIG_HARDENED_USERCOPY