From 1bdb67e5aa2d5d43c48cb7d93393fcba276c9e71 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sun, 14 Apr 2019 17:59:56 +0200
Subject: [PATCH] x86/exceptions: Enable IST guard pages

All usage sites which expected that the exception stacks in the CPU
entry area are mapped linearly are fixed up. Enable guard pages between
the IST stacks.

Signed-off-by: Thomas Gleixner
Signed-off-by: Borislav Petkov
Cc: "H. Peter Anvin"
Cc: Andy Lutomirski
Cc: Ingo Molnar
Cc: Josh Poimboeuf
Cc: Sean Christopherson
Cc: Thomas Gleixner
Cc: x86-ml
Link: https://lkml.kernel.org/r/20190414160145.349862042@linutronix.de
---
 arch/x86/include/asm/cpu_entry_area.h | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 310eeb62d418..9c96406e6d2b 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -26,13 +26,9 @@ struct exception_stacks {
 	ESTACKS_MEMBERS(0)
 };
 
-/*
- * The effective cpu entry area mapping with guard pages. Guard size is
- * zero until the code which makes assumptions about linear mappings is
- * cleaned up.
- */
+/* The effective cpu entry area mapping with guard pages. */
 struct cea_exception_stacks {
-	ESTACKS_MEMBERS(0)
+	ESTACKS_MEMBERS(PAGE_SIZE)
 };
 
 /*
-- 
2.25.1
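
For context, here is a minimal sketch of how a guard-size parameter like the
one toggled above can interleave guard pages between per-exception stacks.
The member names, stack size, and the SKETCH_ prefix are illustrative
assumptions, not copied from this patch; the real ESTACKS_MEMBERS() macro
is defined earlier in arch/x86/include/asm/cpu_entry_area.h and covers the
full set of IST stacks.

/*
 * Illustrative sketch only. Names and sizes are assumptions for
 * demonstration; they do not match the kernel's macro exactly.
 */
#define SKETCH_PAGE_SIZE	4096
#define SKETCH_EXCEPTION_STKSZ	(4 * SKETCH_PAGE_SIZE)	/* assumed stack size */

/*
 * One macro defines the layout; the guardsize argument decides whether
 * guard regions exist at all.  With guardsize == 0 the guard members
 * are zero-length arrays and the stacks are laid out back to back.
 */
#define SKETCH_ESTACKS_MEMBERS(guardsize)		\
	char	DF_stack_guard[guardsize];		\
	char	DF_stack[SKETCH_EXCEPTION_STKSZ];	\
	char	NMI_stack_guard[guardsize];		\
	char	NMI_stack[SKETCH_EXCEPTION_STKSZ];	\
	char	IST_top_guard[guardsize];

/* Physical backing store: contiguous, no guards, no wasted memory. */
struct sketch_exception_stacks {
	SKETCH_ESTACKS_MEMBERS(0)
};

/*
 * Virtual layout in the CPU entry area: one page-sized hole between
 * stacks.  The hole is simply left unmapped, so an overflowing stack
 * faults instead of silently corrupting its neighbor.
 */
struct sketch_cea_exception_stacks {
	SKETCH_ESTACKS_MEMBERS(SKETCH_PAGE_SIZE)
};

The point of the two-struct arrangement, which the patch above exploits, is
that only the cea_* (virtual) layout grows the PAGE_SIZE guards; the backing
store keeps guardsize 0, so the guard pages cost no physical memory. Because
they are never mapped, an IST stack overflow now raises a page fault rather
than running into the adjacent stack, which is why the patch could only be
applied once all code assuming a linear mapping of the exception stacks had
been fixed up.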