Merge tag 'x86_mm_for_6.2_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 42cd96e7d733efddf3c420d5ae01877dc4c0cc8f..8bb1aa6a7aa3af4f1fb876a825f04efbe1f89a78 100644
@@ -9,22 +9,60 @@
 #include <asm/cpu_entry_area.h>
 #include <asm/fixmap.h>
 #include <asm/desc.h>
+#include <asm/kasan.h>
 
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
 DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
-#endif
 
-#ifdef CONFIG_X86_32
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+       return per_cpu(_cea_offset, cpu);
+}
+
+static __init void init_cea_offsets(void)
+{
+       unsigned int max_cea;
+       unsigned int i, j;
+
+       max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
+
+       /* O(sodding terrible) */
+       for_each_possible_cpu(i) {
+               unsigned int cea;
+
+again:
+               cea = prandom_u32_max(max_cea);
+
+               for_each_possible_cpu(j) {
+                       if (cea_offset(j) == cea)
+                               goto again;
+
+                       if (i == j)
+                               break;
+               }
+
+               per_cpu(_cea_offset, i) = cea;
+       }
+}
+#else /* !X86_64 */
 DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+       return cpu;
+}
+static inline void init_cea_offsets(void) { }
 #endif
 
 /* Is called from entry code, so must be noinstr */
 noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
 {
-       unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+       unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
        BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
 
        return (struct cpu_entry_area *) va;
@@ -148,6 +186,9 @@ static void __init setup_cpu_entry_area(unsigned int cpu)
        pgprot_t tss_prot = PAGE_KERNEL;
 #endif
 
+       kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE,
+                                       early_cpu_to_node(cpu));
+
        cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);
 
        cea_map_percpu_pages(&cea->entry_stack_page,
@@ -201,7 +242,6 @@ static __init void setup_cpu_entry_area_ptes(void)
 
        /* The +1 is for the readonly IDT: */
        BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
-       BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
        BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
 
        start = CPU_ENTRY_AREA_BASE;
@@ -217,6 +257,8 @@ void __init setup_cpu_entry_areas(void)
 {
        unsigned int cpu;
 
+       init_cea_offsets();
+
        setup_cpu_entry_area_ptes();
 
        for_each_possible_cpu(cpu)