From 6f9bd8ae0340326a7c711bc681321bf9a4e15fb2 Mon Sep 17 00:00:00 2001
From: Mateusz Guzik <mjguzik@gmail.com>
Date: Tue, 1 Apr 2025 22:30:29 +0200
Subject: [PATCH] x86/uaccess: Predict valid_user_address() returning true

This works around what seems to be an optimization bug in GCC (at least
13.3.0), where it predicts access_ok() to fail despite the hint to the
contrary.

_copy_to_user() contains:

	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}

Where access_ok() is likely(__access_ok(addr, size)), yet the compiler
emits conditional jumps forward for the case where it succeeds:

   <+0>:	endbr64
   <+4>:	mov    %rdx,%rcx
   <+7>:	mov    %rdx,%rax
   <+10>:	xor    %edx,%edx
   <+12>:	add    %rdi,%rcx
   <+15>:	setb   %dl
   <+18>:	movabs $0x123456789abcdef,%r8
   <+28>:	test   %rdx,%rdx
   <+31>:	jne    0xffffffff81b3b7c6 <_copy_to_user+38>
   <+33>:	cmp    %rcx,%r8
   <+36>:	jae    0xffffffff81b3b7cb <_copy_to_user+43>
   <+38>:	jmp    0xffffffff822673e0 <__x86_return_thunk>
   <+43>:	nop
   <+44>:	nop
   <+45>:	nop
   <+46>:	mov    %rax,%rcx
   <+49>:	rep movsb %ds:(%rsi),%es:(%rdi)
   <+51>:	nop
   <+52>:	nop
   <+53>:	nop
   <+54>:	mov    %rcx,%rax
   <+57>:	nop
   <+58>:	nop
   <+59>:	nop
   <+60>:	jmp    0xffffffff822673e0 <__x86_return_thunk>

Patching _copy_to_user() to likely() around the access_ok() use does
not change the asm.

However, spelling out the prediction *within* valid_user_address() does
the trick:

   <+0>:	endbr64
   <+4>:	xor    %eax,%eax
   <+6>:	mov    %rdx,%rcx
   <+9>:	add    %rdi,%rdx
   <+12>:	setb   %al
   <+15>:	movabs $0x123456789abcdef,%r8
   <+25>:	test   %rax,%rax
   <+28>:	jne    0xffffffff81b315e6 <_copy_to_user+54>
   <+30>:	cmp    %rdx,%r8
   <+33>:	jb     0xffffffff81b315e6 <_copy_to_user+54>
   <+35>:	nop
   <+36>:	nop
   <+37>:	nop
   <+38>:	rep movsb %ds:(%rsi),%es:(%rdi)
   <+40>:	nop
   <+41>:	nop
   <+42>:	nop
   <+43>:	nop
   <+44>:	nop
   <+45>:	nop
   <+46>:	mov    %rcx,%rax
   <+49>:	jmp    0xffffffff82255ba0 <__x86_return_thunk>
   <+54>:	mov    %rcx,%rax
   <+57>:	jmp    0xffffffff82255ba0 <__x86_return_thunk>

Since we kinda expect valid_user_address() to be likely anyway, add
the likely() annotation that also happens to work around this compiler
bug.

[ mingo: Moved the unlikely() branch into valid_user_address() &
  updated the changelog ]

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20250401203029.1132135-1-mjguzik@gmail.com
---
 arch/x86/include/asm/uaccess_64.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index c52f0133425b..4c13883371aa 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -54,7 +54,7 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
 #endif
 
 #define valid_user_address(x) \
-	((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))
+	likely((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))
 
 /*
  * Masking the user address is an alternative to a conditional
-- 
2.25.1
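
Background note: in the kernel, likely()/unlikely() are thin wrappers
around GCC's __builtin_expect(). Below is a minimal standalone C sketch
of the pattern the patch adopts, placing the hint inside the helper so
it stays attached to the comparison itself; the macro definitions match
the kernel's, but demo_valid_user_address() and USER_MAX are
hypothetical stand-ins (the real code uses runtime_const_ptr(USER_PTR_MAX)):

	#include <stdbool.h>
	#include <stdint.h>

	/* Same definitions as the kernel's (include/linux/compiler.h). */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/* Hypothetical stand-in for runtime_const_ptr(USER_PTR_MAX). */
	#define USER_MAX	0x0123456789abcdefUL

	/*
	 * The prediction lives inside the helper, mirroring the patched
	 * valid_user_address(), instead of relying on a likely() at the
	 * call site propagating through the compiler's branch predictor
	 * heuristics.
	 */
	static inline bool demo_valid_user_address(const void *p)
	{
		return likely((uintptr_t)p <= USER_MAX);
	}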