Merge branch 'linus' into x86/urgent, to pick up dependent changes
author		Ingo Molnar <mingo@kernel.org>
		Thu, 16 May 2019 07:04:48 +0000 (09:04 +0200)
committer	Ingo Molnar <mingo@kernel.org>
		Thu, 16 May 2019 07:04:48 +0000 (09:04 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/Kconfig
arch/x86/entry/vdso/vdso2c.c
arch/x86/include/asm/arch_hweight.h
arch/x86/include/asm/vdso.h
arch/x86/kernel/kprobes/core.c
arch/x86/mm/init_64.c
arch/x86/mm/mem_encrypt.c
arch/x86/mm/mm_internal.h
lib/hweight.c

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 326b2d5bab9d73d51f4f07b4597dc100c23fa22b..6bc9dd6e7534c16775266724c20a9ba551770233 100644
@@ -259,9 +259,6 @@ config GENERIC_BUG
 config GENERIC_BUG_RELATIVE_POINTERS
        bool
 
-config GENERIC_HWEIGHT
-       def_bool y
-
 config ARCH_MAY_HAVE_PC_FDC
        def_bool y
        depends on ISA_DMA_API
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
index 8e470b018512c29f41d0adc7dd8717b155a07229..3a4d8d4d39f87bb073c4c8b7504a293344ef3cfe 100644
@@ -73,14 +73,12 @@ const char *outfilename;
 enum {
        sym_vvar_start,
        sym_vvar_page,
-       sym_hpet_page,
        sym_pvclock_page,
        sym_hvclock_page,
 };
 
 const int special_pages[] = {
        sym_vvar_page,
-       sym_hpet_page,
        sym_pvclock_page,
        sym_hvclock_page,
 };
@@ -93,7 +91,6 @@ struct vdso_sym {
 struct vdso_sym required_syms[] = {
        [sym_vvar_start] = {"vvar_start", true},
        [sym_vvar_page] = {"vvar_page", true},
-       [sym_hpet_page] = {"hpet_page", true},
        [sym_pvclock_page] = {"pvclock_page", true},
        [sym_hvclock_page] = {"hvclock_page", true},
        {"VDSO32_NOTE_MASK", true},
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index fc0693569f7aae7baebcf8c6606ce66c09af701e..ba88edd0d58b1dde141899aee64ce0e33edc4449 100644
@@ -12,8 +12,6 @@
 #define REG_OUT "a"
 #endif
 
-#define __HAVE_ARCH_SW_HWEIGHT
-
 static __always_inline unsigned int __arch_hweight32(unsigned int w)
 {
        unsigned int res;
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 27566e57e87d999c386d3ade383045e921747d6f..230474e2ddb5b3eb687cdd647a15f750a65e5f32 100644
@@ -19,7 +19,6 @@ struct vdso_image {
        long sym_vvar_start;  /* Negative offset to the vvar area */
 
        long sym_vvar_page;
-       long sym_hpet_page;
        long sym_pvclock_page;
        long sym_hvclock_page;
        long sym_VDSO32_NOTE_MASK;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index cf52ee0d87111c13e0eef8a2429db889ed585962..9e4fa2484d10dd276d4804017466d7b191c07b12 100644
@@ -768,7 +768,7 @@ static struct kprobe kretprobe_kprobe = {
 /*
  * Called from kretprobe_trampoline
  */
-static __used void *trampoline_handler(struct pt_regs *regs)
+__used __visible void *trampoline_handler(struct pt_regs *regs)
 {
        struct kprobe_ctlblk *kcb;
        struct kretprobe_instance *ri = NULL;
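
The storage-class change above is worth a note: trampoline_handler() has no C callers at all, it is reached only from the kretprobe_trampoline assembly stub. Dropping `static` and adding `__visible` keeps the compiler (in particular LTO) from discarding the function or localizing its symbol out from under the assembly reference. A minimal standalone sketch of the pattern, with a hypothetical helper name and the attribute macros spelled out the way the kernel defines them:

/* Sketch only: keeping a C function alive for an asm-only caller. */
#define __used     __attribute__((__used__))
#define __visible  __attribute__((__externally_visible__))

/* Hypothetical helper: nothing in C calls it; an asm stub jumps here. */
__used __visible void *helper_called_from_asm(void *regs)
{
        /*
         * __used stops dead-code elimination; __visible stops LTO from
         * localizing or cloning the symbol the asm code refers to.
         */
        return regs;
}
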
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 20d14254b6869263a00beb160694ff4f36f41628..62fc457f3849af0bf867fa202149f4fa556cd6dc 100644
 
 #include "ident_map.c"
 
+#define DEFINE_POPULATE(fname, type1, type2, init)             \
+static inline void fname##_init(struct mm_struct *mm,          \
+               type1##_t *arg1, type2##_t *arg2, bool init)    \
+{                                                              \
+       if (init)                                               \
+               fname##_safe(mm, arg1, arg2);                   \
+       else                                                    \
+               fname(mm, arg1, arg2);                          \
+}
+
+DEFINE_POPULATE(p4d_populate, p4d, pud, init)
+DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
+DEFINE_POPULATE(pud_populate, pud, pmd, init)
+DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
+
+#define DEFINE_ENTRY(type1, type2, init)                       \
+static inline void set_##type1##_init(type1##_t *arg1,         \
+                       type2##_t arg2, bool init)              \
+{                                                              \
+       if (init)                                               \
+               set_##type1##_safe(arg1, arg2);                 \
+       else                                                    \
+               set_##type1(arg1, arg2);                        \
+}
+
+DEFINE_ENTRY(p4d, p4d, init)
+DEFINE_ENTRY(pud, pud, init)
+DEFINE_ENTRY(pmd, pmd, init)
+DEFINE_ENTRY(pte, pte, init)
+
+
 /*
  * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
  * physical space so we can cache the place of the first one and move
@@ -414,7 +445,7 @@ void __init cleanup_highmap(void)
  */
 static unsigned long __meminit
 phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
-             pgprot_t prot)
+             pgprot_t prot, bool init)
 {
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;
@@ -432,7 +463,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
                                             E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
                                             E820_TYPE_RESERVED_KERN))
-                               set_pte_safe(pte, __pte(0));
+                               set_pte_init(pte, __pte(0), init);
                        continue;
                }
 
@@ -452,7 +483,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
                        pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
                                pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
-               set_pte_safe(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+               set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
                paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
        }
 
@@ -468,7 +499,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
  */
 static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
-             unsigned long page_size_mask, pgprot_t prot)
+             unsigned long page_size_mask, pgprot_t prot, bool init)
 {
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;
@@ -487,7 +518,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
                                             E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PMD_MASK, paddr_next,
                                             E820_TYPE_RESERVED_KERN))
-                               set_pmd_safe(pmd, __pmd(0));
+                               set_pmd_init(pmd, __pmd(0), init);
                        continue;
                }
 
@@ -496,7 +527,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
                                spin_lock(&init_mm.page_table_lock);
                                pte = (pte_t *)pmd_page_vaddr(*pmd);
                                paddr_last = phys_pte_init(pte, paddr,
-                                                          paddr_end, prot);
+                                                          paddr_end, prot,
+                                                          init);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
@@ -524,19 +556,20 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
-                       set_pte_safe((pte_t *)pmd,
-                               pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
-                                       __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+                       set_pte_init((pte_t *)pmd,
+                                    pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
+                                            __pgprot(pgprot_val(prot) | _PAGE_PSE)),
+                                    init);
                        spin_unlock(&init_mm.page_table_lock);
                        paddr_last = paddr_next;
                        continue;
                }
 
                pte = alloc_low_page();
-               paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
+               paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);
 
                spin_lock(&init_mm.page_table_lock);
-               pmd_populate_kernel_safe(&init_mm, pmd, pte);
+               pmd_populate_kernel_init(&init_mm, pmd, pte, init);
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
@@ -551,7 +584,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
  */
 static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
-             unsigned long page_size_mask)
+             unsigned long page_size_mask, bool init)
 {
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;
@@ -573,7 +606,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
                                             E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PUD_MASK, paddr_next,
                                             E820_TYPE_RESERVED_KERN))
-                               set_pud_safe(pud, __pud(0));
+                               set_pud_init(pud, __pud(0), init);
                        continue;
                }
 
@@ -583,7 +616,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
                                paddr_last = phys_pmd_init(pmd, paddr,
                                                           paddr_end,
                                                           page_size_mask,
-                                                          prot);
+                                                          prot, init);
                                continue;
                        }
                        /*
@@ -610,9 +643,10 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
-                       set_pte_safe((pte_t *)pud,
-                               pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
-                                       PAGE_KERNEL_LARGE));
+                       set_pte_init((pte_t *)pud,
+                                    pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
+                                            PAGE_KERNEL_LARGE),
+                                    init);
                        spin_unlock(&init_mm.page_table_lock);
                        paddr_last = paddr_next;
                        continue;
@@ -620,10 +654,10 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 
                pmd = alloc_low_page();
                paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
-                                          page_size_mask, prot);
+                                          page_size_mask, prot, init);
 
                spin_lock(&init_mm.page_table_lock);
-               pud_populate_safe(&init_mm, pud, pmd);
+               pud_populate_init(&init_mm, pud, pmd, init);
                spin_unlock(&init_mm.page_table_lock);
        }
 
@@ -634,14 +668,15 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 
 static unsigned long __meminit
 phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
-             unsigned long page_size_mask)
+             unsigned long page_size_mask, bool init)
 {
        unsigned long paddr_next, paddr_last = paddr_end;
        unsigned long vaddr = (unsigned long)__va(paddr);
        int i = p4d_index(vaddr);
 
        if (!pgtable_l5_enabled())
-               return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);
+               return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
+                                    page_size_mask, init);
 
        for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
                p4d_t *p4d;
@@ -657,39 +692,34 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
                                             E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & P4D_MASK, paddr_next,
                                             E820_TYPE_RESERVED_KERN))
-                               set_p4d_safe(p4d, __p4d(0));
+                               set_p4d_init(p4d, __p4d(0), init);
                        continue;
                }
 
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, 0);
-                       paddr_last = phys_pud_init(pud, paddr,
-                                       paddr_end,
-                                       page_size_mask);
+                       paddr_last = phys_pud_init(pud, paddr, paddr_end,
+                                                  page_size_mask, init);
                        continue;
                }
 
                pud = alloc_low_page();
                paddr_last = phys_pud_init(pud, paddr, paddr_end,
-                                          page_size_mask);
+                                          page_size_mask, init);
 
                spin_lock(&init_mm.page_table_lock);
-               p4d_populate_safe(&init_mm, p4d, pud);
+               p4d_populate_init(&init_mm, p4d, pud, init);
                spin_unlock(&init_mm.page_table_lock);
        }
 
        return paddr_last;
 }
 
-/*
- * Create page table mapping for the physical memory for specific physical
- * addresses. The virtual and physical addresses have to be aligned on PMD level
- * down. It returns the last physical address mapped.
- */
-unsigned long __meminit
-kernel_physical_mapping_init(unsigned long paddr_start,
-                            unsigned long paddr_end,
-                            unsigned long page_size_mask)
+static unsigned long __meminit
+__kernel_physical_mapping_init(unsigned long paddr_start,
+                              unsigned long paddr_end,
+                              unsigned long page_size_mask,
+                              bool init)
 {
        bool pgd_changed = false;
        unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
@@ -709,19 +739,22 @@ kernel_physical_mapping_init(unsigned long paddr_start,
                        p4d = (p4d_t *)pgd_page_vaddr(*pgd);
                        paddr_last = phys_p4d_init(p4d, __pa(vaddr),
                                                   __pa(vaddr_end),
-                                                  page_size_mask);
+                                                  page_size_mask,
+                                                  init);
                        continue;
                }
 
                p4d = alloc_low_page();
                paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
-                                          page_size_mask);
+                                          page_size_mask, init);
 
                spin_lock(&init_mm.page_table_lock);
                if (pgtable_l5_enabled())
-                       pgd_populate_safe(&init_mm, pgd, p4d);
+                       pgd_populate_init(&init_mm, pgd, p4d, init);
                else
-                       p4d_populate_safe(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
+                       p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
+                                         (pud_t *) p4d, init);
+
                spin_unlock(&init_mm.page_table_lock);
                pgd_changed = true;
        }
@@ -732,6 +765,37 @@ kernel_physical_mapping_init(unsigned long paddr_start,
        return paddr_last;
 }
 
+
+/*
+ * Create page table mapping for the physical memory for specific physical
+ * addresses. Note that it can only be used to populate non-present entries.
+ * The virtual and physical addresses have to be aligned on PMD level
+ * down. It returns the last physical address mapped.
+ */
+unsigned long __meminit
+kernel_physical_mapping_init(unsigned long paddr_start,
+                            unsigned long paddr_end,
+                            unsigned long page_size_mask)
+{
+       return __kernel_physical_mapping_init(paddr_start, paddr_end,
+                                             page_size_mask, true);
+}
+
+/*
+ * This function is similar to kernel_physical_mapping_init() above with the
+ * exception that it uses the set_{pud,pmd}() helpers instead of the
+ * set_{pud,pmd}_safe() variants when updating the mapping. The caller is
+ * responsible for flushing the TLBs after the function returns.
+ */
+unsigned long __meminit
+kernel_physical_mapping_change(unsigned long paddr_start,
+                              unsigned long paddr_end,
+                              unsigned long page_size_mask)
+{
+       return __kernel_physical_mapping_init(paddr_start, paddr_end,
+                                             page_size_mask, false);
+}
+
 #ifndef CONFIG_NUMA
 void __init initmem_init(void)
 {
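
To make the DEFINE_POPULATE()/DEFINE_ENTRY() helpers introduced above concrete, here is what two of the invocations expand to by hand (illustrative expansion only; the *_safe() variants sanity-check that an already-present entry is not silently changed, while the plain variants allow overwriting, which is exactly what splitting a live large page needs):

/* DEFINE_ENTRY(pte, pte, init) expands to: */
static inline void set_pte_init(pte_t *arg1, pte_t arg2, bool init)
{
        if (init)
                set_pte_safe(arg1, arg2);  /* boot-time population */
        else
                set_pte(arg1, arg2);       /* later changes to live mappings */
}

/* DEFINE_POPULATE(pud_populate, pud, pmd, init) expands to: */
static inline void pud_populate_init(struct mm_struct *mm,
                pud_t *arg1, pmd_t *arg2, bool init)
{
        if (init)
                pud_populate_safe(mm, arg1, arg2);
        else
                pud_populate(mm, arg1, arg2);
}
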
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 385afa2b9e17a7ac15683b0d75252274499a8bc0..51f50a7a07ef7842c7e038f58066314dc04d01b5 100644
@@ -301,9 +301,13 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
                else
                        split_page_size_mask = 1 << PG_LEVEL_2M;
 
-               kernel_physical_mapping_init(__pa(vaddr & pmask),
-                                            __pa((vaddr_end & pmask) + psize),
-                                            split_page_size_mask);
+               /*
+                * kernel_physical_mapping_change() does not flush the TLBs, so
+                * a TLB flush is required after we exit from the for loop.
+                */
+               kernel_physical_mapping_change(__pa(vaddr & pmask),
+                                              __pa((vaddr_end & pmask) + psize),
+                                              split_page_size_mask);
        }
 
        ret = 0;
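
The contract in the comment above deserves emphasis: kernel_physical_mapping_change() deliberately leaves stale TLB entries behind, because one flush after all changes is cheaper than flushing per call. A condensed sketch of the caller-side shape, assuming the kernel's existing __flush_tlb_all() helper is used for the flush (not the verbatim kernel code):

/*
 * Sketch: honouring the "caller flushes" contract. Parameter names
 * are condensed from the loop above.
 */
static void change_range_and_flush(unsigned long pa_start,
                                   unsigned long pa_end,
                                   unsigned long split_page_size_mask)
{
        kernel_physical_mapping_change(pa_start, pa_end,
                                       split_page_size_mask);

        /* Required: the helper rewrites live entries but never flushes. */
        __flush_tlb_all();
}
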
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 319bde386d5f4a9402f695ffb6948b76b58f47bd..eeae142062ed4b06afbb8247a08d3a19b2f00766 100644
@@ -13,6 +13,9 @@ void early_ioremap_page_table_range_init(void);
 unsigned long kernel_physical_mapping_init(unsigned long start,
                                             unsigned long end,
                                             unsigned long page_size_mask);
+unsigned long kernel_physical_mapping_change(unsigned long start,
+                                            unsigned long end,
+                                            unsigned long page_size_mask);
 void zone_sizes_init(void);
 
 extern int after_bootmem;
diff --git a/lib/hweight.c b/lib/hweight.c
index 7660d88fd4960c8571d16f035d4d92b76fd17f2c..c94586b6255187906cfde38fda8b84f33c9fc21f 100644
@@ -10,7 +10,6 @@
  * The Hamming Weight of a number is the total number of bits set in it.
  */
 
-#ifndef __HAVE_ARCH_SW_HWEIGHT
 unsigned int __sw_hweight32(unsigned int w)
 {
 #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
@@ -27,7 +26,6 @@ unsigned int __sw_hweight32(unsigned int w)
 #endif
 }
 EXPORT_SYMBOL(__sw_hweight32);
-#endif
 
 unsigned int __sw_hweight16(unsigned int w)
 {
@@ -46,7 +44,6 @@ unsigned int __sw_hweight8(unsigned int w)
 }
 EXPORT_SYMBOL(__sw_hweight8);
 
-#ifndef __HAVE_ARCH_SW_HWEIGHT
 unsigned long __sw_hweight64(__u64 w)
 {
 #if BITS_PER_LONG == 32
@@ -69,4 +66,3 @@ unsigned long __sw_hweight64(__u64 w)
 #endif
 }
 EXPORT_SYMBOL(__sw_hweight64);
-#endif
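
With the __HAVE_ARCH_SW_HWEIGHT guards gone, lib/hweight.c unconditionally provides __sw_hweight32()/__sw_hweight64(), which x86's __arch_hweight*() routines use as the fallback when the POPCNT alternative is not patched in. For reference, the non-CONFIG_ARCH_HAS_FAST_MULTIPLIER branch of __sw_hweight32() shown above is the classic parallel bit count; a standalone userspace sketch of the same algorithm (local names, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Same parallel popcount as lib/hweight.c's non-fast-multiplier path. */
static unsigned int sw_hweight32(uint32_t w)
{
        uint32_t res = w - ((w >> 1) & 0x55555555);           /* 2-bit sums  */
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333); /* 4-bit sums  */
        res = (res + (res >> 4)) & 0x0f0f0f0f;                /* 8-bit sums  */
        res = res + (res >> 8);                               /* 16-bit sums */
        return (res + (res >> 16)) & 0x000000ff;              /* final count */
}

int main(void)
{
        printf("%u\n", sw_hweight32(0xf0f0f0f0));             /* prints 16 */
        return 0;
}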