x86/cpu_entry_area: Provide exception stack accessor
author Thomas Gleixner <tglx@linutronix.de>
Sun, 14 Apr 2019 15:59:49 +0000 (17:59 +0200)
committer Borislav Petkov <bp@suse.de>
Wed, 17 Apr 2019 11:00:22 +0000 (13:00 +0200)
Store a pointer to the per cpu entry area exception stack mappings to allow
fast retrieval.

Required for converting various places from using the shadow IST array to
directly doing address calculations on the actual mapping address.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190414160144.680960459@linutronix.de
arch/x86/include/asm/cpu_entry_area.h
arch/x86/mm/cpu_entry_area.c

index af8c312673ded227fc4bacbfc3e5b6fc3237f285..9b406f067ecf460e0a0665fc19cf1430e2fcf685 100644 (file)
@@ -99,6 +99,7 @@ struct cpu_entry_area {
 #define CPU_ENTRY_AREA_TOT_SIZE        (CPU_ENTRY_AREA_SIZE * NR_CPUS)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
 
 extern void setup_cpu_entry_areas(void);
 extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
@@ -118,4 +119,7 @@ static inline struct entry_stack *cpu_entry_stack(int cpu)
        return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
 }
 
+#define __this_cpu_ist_top_va(name)                                    \
+       CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
+
 #endif
index 2b1407662a6d5c748332af88c67f802d74e8b45e..a00d0d059c8addda579e0877ca09aaa446eb83e6 100644 (file)
@@ -14,6 +14,7 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
+DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
 #endif
 
 struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -92,6 +93,9 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
        unsigned int npages;
 
        BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+
+       per_cpu(cea_exception_stacks, cpu) = &cea->estacks;
+
        /*
         * The exceptions stack mappings in the per cpu area are protected
         * by guard pages so each stack must be mapped separately.