Merge branch 'akpm' (patches from Andrew)
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 24 Sep 2019 23:10:23 +0000 (16:10 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 24 Sep 2019 23:10:23 +0000 (16:10 -0700)
Merge updates from Andrew Morton:

 - a few hot fixes

 - ocfs2 updates

 - almost all of -mm (slab-generic, slab, slub, kmemleak, kasan,
   cleanups, debug, pagecache, memcg, gup, pagemap, memory-hotplug,
   sparsemem, vmalloc, initialization, z3fold, compaction, mempolicy,
   oom-kill, hugetlb, migration, thp, mmap, madvise, shmem, zswap,
   zsmalloc)

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (132 commits)
  mm/zsmalloc.c: fix a -Wunused-function warning
  zswap: do not map same object twice
  zswap: use movable memory if zpool support allocate movable memory
  zpool: add malloc_support_movable to zpool_driver
  shmem: fix obsolete comment in shmem_getpage_gfp()
  mm/madvise: reduce code duplication in error handling paths
  mm: mmap: increase sockets maximum memory size pgoff for 32bits
  mm/mmap.c: refine find_vma_prev() with rb_last()
  riscv: make mmap allocation top-down by default
  mips: use generic mmap top-down layout and brk randomization
  mips: replace arch specific way to determine 32bit task with generic version
  mips: adjust brk randomization offset to fit generic version
  mips: use STACK_TOP when computing mmap base address
  mips: properly account for stack randomization and stack guard gap
  arm: use generic mmap top-down layout and brk randomization
  arm: use STACK_TOP when computing mmap base address
  arm: properly account for stack randomization and stack guard gap
  arm64, mm: make randomization selected by generic topdown mmap layout
  arm64, mm: move generic mmap layout functions to mm
  arm64: consider stack randomization for mmap base only when necessary
  ...

204 files changed:
Documentation/ABI/testing/sysfs-kernel-slab
Documentation/admin-guide/cgroup-v1/memory.rst
Documentation/admin-guide/kernel-parameters.txt
arch/Kconfig
arch/alpha/include/asm/pgalloc.h
arch/alpha/include/asm/pgtable.h
arch/arc/include/asm/pgalloc.h
arch/arc/include/asm/pgtable.h
arch/arm/Kconfig
arch/arm/include/asm/pgalloc.h
arch/arm/include/asm/pgtable-nommu.h
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/processor.h
arch/arm/kernel/process.c
arch/arm/mm/flush.c
arch/arm/mm/mmap.c
arch/arm64/Kconfig
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/processor.h
arch/arm64/kernel/process.c
arch/arm64/mm/flush.c
arch/arm64/mm/mmap.c
arch/arm64/mm/pgd.c
arch/c6x/include/asm/pgtable.h
arch/csky/include/asm/pgalloc.h
arch/csky/include/asm/pgtable.h
arch/h8300/include/asm/pgtable.h
arch/hexagon/include/asm/pgalloc.h
arch/hexagon/include/asm/pgtable.h
arch/hexagon/mm/Makefile
arch/hexagon/mm/pgalloc.c [deleted file]
arch/ia64/Kconfig
arch/ia64/include/asm/pgalloc.h
arch/ia64/include/asm/pgtable.h
arch/ia64/mm/init.c
arch/m68k/include/asm/pgtable_mm.h
arch/m68k/include/asm/pgtable_no.h
arch/microblaze/include/asm/pgalloc.h
arch/microblaze/include/asm/pgtable.h
arch/microblaze/mm/pgtable.c
arch/mips/Kconfig
arch/mips/include/asm/pgalloc.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/processor.h
arch/mips/mm/mmap.c
arch/nds32/include/asm/pgalloc.h
arch/nds32/include/asm/pgtable.h
arch/nios2/include/asm/pgalloc.h
arch/nios2/include/asm/pgtable.h
arch/openrisc/include/asm/pgalloc.h
arch/openrisc/include/asm/pgtable.h
arch/parisc/include/asm/pgalloc.h
arch/parisc/include/asm/pgtable.h
arch/powerpc/include/asm/pgalloc.h
arch/powerpc/include/asm/pgtable.h
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/mm/book3s64/iommu_api.c
arch/powerpc/mm/hugetlbpage.c
arch/riscv/Kconfig
arch/riscv/include/asm/pgalloc.h
arch/riscv/include/asm/pgtable.h
arch/s390/include/asm/pgtable.h
arch/sh/include/asm/pgalloc.h
arch/sh/include/asm/pgtable.h
arch/sh/mm/Kconfig
arch/sh/mm/nommu.c
arch/sparc/include/asm/pgalloc_32.h
arch/sparc/include/asm/pgalloc_64.h
arch/sparc/include/asm/pgtable_32.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/mm/init_32.c
arch/um/include/asm/pgalloc.h
arch/um/include/asm/pgtable.h
arch/unicore32/include/asm/pgalloc.h
arch/unicore32/include/asm/pgtable.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/pgtable_64.h
arch/x86/mm/pgtable.c
arch/xtensa/include/asm/pgtable.h
arch/xtensa/include/asm/tlbflush.h
drivers/base/memory.c
drivers/base/node.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/gpu/drm/via/via_dmablit.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/hfi1/user_pages.c
drivers/infiniband/hw/qib/qib_user_pages.c
drivers/infiniband/hw/usnic/usnic_uiom.c
drivers/infiniband/sw/siw/siw_mem.c
drivers/staging/android/ion/ion_system_heap.c
drivers/target/tcm_fc/tfc_io.c
drivers/vfio/vfio_iommu_spapr_tce.c
fs/binfmt_elf.c
fs/fat/dir.c
fs/fat/fatent.c
fs/inode.c
fs/io_uring.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/ocfs2/alloc.c
fs/ocfs2/aops.c
fs/ocfs2/blockcheck.c
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/dir.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdebug.c
fs/ocfs2/dlm/dlmdebug.h
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmunlock.c
fs/ocfs2/dlmglue.c
fs/ocfs2/extent_map.c
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/journal.h
fs/ocfs2/namei.c
fs/ocfs2/ocfs2.h
fs/ocfs2/super.c
fs/open.c
fs/proc/meminfo.c
fs/proc/task_mmu.c
include/asm-generic/pgalloc.h
include/asm-generic/pgtable.h
include/linux/compaction.h
include/linux/fs.h
include/linux/huge_mm.h
include/linux/hugetlb.h
include/linux/jbd2.h
include/linux/khugepaged.h
include/linux/memcontrol.h
include/linux/memory.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmzone.h
include/linux/page_ext.h
include/linux/pagemap.h
include/linux/quicklist.h [deleted file]
include/linux/shrinker.h
include/linux/slab.h
include/linux/vmalloc.h
include/linux/zpool.h
init/main.c
kernel/events/uprobes.c
kernel/resource.c
kernel/sched/idle.c
kernel/sysctl.c
lib/Kconfig.debug
lib/Kconfig.kasan
lib/iov_iter.c
lib/show_mem.c
lib/test_kasan.c
mm/Kconfig
mm/Kconfig.debug
mm/Makefile
mm/compaction.c
mm/filemap.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/hugetlb_cgroup.c
mm/init-mm.c
mm/kasan/common.c
mm/kasan/kasan.h
mm/kasan/report.c
mm/kasan/tags_report.c
mm/khugepaged.c
mm/kmemleak.c
mm/ksm.c
mm/madvise.c
mm/memcontrol.c
mm/memfd.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/mmap.c
mm/mmu_gather.c
mm/nommu.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_owner.c
mm/page_poison.c
mm/page_vma_mapped.c
mm/quicklist.c [deleted file]
mm/rmap.c
mm/shmem.c
mm/slab.h
mm/slab_common.c
mm/slob.c
mm/slub.c
mm/sparse.c
mm/swap.c
mm/swap_state.c
mm/util.c
mm/vmalloc.c
mm/vmscan.c
mm/vmstat.c
mm/z3fold.c
mm/zpool.c
mm/zsmalloc.c
mm/zswap.c
net/xdp/xdp_umem.c
net/xdp/xsk.c
usr/Makefile

index 29601d93a1c2ea112899007c37a2f7297c655338..ed35833ad7f05592c53fa615bcf72126de404821 100644 (file)
@@ -429,10 +429,15 @@ KernelVersion:    2.6.22
 Contact:       Pekka Enberg <penberg@cs.helsinki.fi>,
                Christoph Lameter <cl@linux-foundation.org>
 Description:
-               The shrink file is written when memory should be reclaimed from
-               a cache.  Empty partial slabs are freed and the partial list is
-               sorted so the slabs with the fewest available objects are used
-               first.
+               The shrink file is used to reclaim unused slab cache
+               memory from a cache.  Empty per-cpu or partial slabs
+               are freed and the partial list is sorted so the slabs
+               with the fewest available objects are used first.
+               It only accepts a value of "1" on write for shrinking
+               the cache. Other input values are considered invalid.
+               Shrinking slab caches might be expensive and can
+               adversely impact other running applications.  So it
+               should be used with care.
 
 What:          /sys/kernel/slab/cache/slab_size
 Date:          May 2007
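
A hedged illustration of the interface documented above (not part of the patch): a minimal userspace program can trigger shrinking by writing "1" to a cache's shrink file. The cache name "dentry" below is only an example path.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Only the value "1" is accepted on write; other input is rejected as invalid. */
	int fd = open("/sys/kernel/slab/dentry/shrink", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}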
index 41bdc038dad97f2963068c4ea229efbc5d144d0e..0ae4f564c2d68dc50ff07e3e590673ce373e3253 100644 (file)
@@ -85,8 +85,10 @@ Brief summary of control files.
  memory.oom_control                 set/show oom controls.
  memory.numa_stat                   show the number of memory usage per numa
                                     node
-
  memory.kmem.limit_in_bytes          set/show hard limit for kernel memory
+                                     This knob is deprecated and shouldn't be
+                                     used. It is planned that this be removed in
+                                     the foreseeable future.
  memory.kmem.usage_in_bytes          show current kernel memory allocation
  memory.kmem.failcnt                 show the number of kernel memory usage
                                     hits limits
index 254d8a369f328c82313d6cbcb8d235f543994ce8..944e03e29f65c9d31075d59bb06f8732d9fc4245 100644 (file)
                        enables the feature at boot time. By default, it is
                        disabled and the system will work mostly the same as a
                        kernel built without CONFIG_DEBUG_PAGEALLOC.
+                       Note: to get most of debug_pagealloc error reports, it's
+                       useful to also enable the page_owner functionality.
                        on: enable the feature
 
        debugpat        [X86] Enable PAT debugging
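
A hedged usage note, not part of the patch: since the added text recommends pairing the two features, a boot command line would combine them as, for example, "debug_pagealloc=on page_owner=on".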
index 0fcf8ec1e09883a2fbab69f02f02068dd913afc3..5f8a5d84dbbe9257f0a2a33c3ed81687a4eb0aad 100644 (file)
@@ -706,6 +706,17 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
          and vice-versa 32-bit applications to call 64-bit mmap().
          Required for applications doing different bitness syscalls.
 
+# This allows to use a set of generic functions to determine mmap base
+# address by giving priority to top-down scheme only if the process
+# is not in legacy mode (compat task, unlimited stack size or
+# sysctl_legacy_va_layout).
+# Architecture that selects this option can provide its own version of:
+# - STACK_RND_MASK
+config ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+       bool
+       depends on MMU
+       select ARCH_HAS_ELF_RANDOMIZE
+
 config HAVE_COPY_THREAD_TLS
        bool
        help
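
For orientation, a hedged sketch of the shared logic this new option factors out, mirroring the per-architecture copies removed from arm, arm64 and mips further down in this diff; mmap_base() and arch_mmap_rnd() are the usual surrounding kernel helpers, so the snippet assumes kernel context rather than standing alone:

/* Legacy layout if the compat personality bit is set, the stack is
 * unlimited, or the sysctl asks for it. */
static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		/* bottom-up: grow the mmap area upwards from TASK_UNMAPPED_BASE */
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		/* top-down: place the mmap base below the stack gap */
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}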
index 71ded3b7d82decb113a0dd25c7e89801fa2b2b57..eb91f1e8562906682d757e5ac8ba2f5d3d3add5f 100644 (file)
@@ -53,6 +53,4 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
        free_page((unsigned long)pmd);
 }
 
-#define check_pgt_cache()      do { } while (0)
-
 #endif /* _ALPHA_PGALLOC_H */
index 89c2032f99606b51968a41b4fd449af49fcd9d0f..065b57f408c353389a012a61999219ff2d34ac06 100644 (file)
@@ -359,11 +359,6 @@ extern void paging_init(void);
 
 #include <asm-generic/pgtable.h>
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
 /* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT.  */
 #define HAVE_ARCH_UNMAPPED_AREA
 
index 9bdb8ed5b0dbd514325a1ad7c149a9285189717a..4751f2251cd91cc2dd2fc5f9c047340f77727fc4 100644 (file)
@@ -129,7 +129,6 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
 
 #define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
 
-#define check_pgt_cache()   do { } while (0)
 #define pmd_pgtable(pmd)       ((pgtable_t) pmd_page_vaddr(pmd))
 
 #endif /* _ASM_ARC_PGALLOC_H */
index 1d87c18a2976ea21f9d17f068ccc331af7ac0736..7addd0301c51a73671197cfaa0cacc0b6cb71a64 100644 (file)
@@ -395,11 +395,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 /* to cope with aliasing VIPT cache */
 #define HAVE_ARCH_UNMAPPED_AREA
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
 #endif /* __ASSEMBLY__ */
 
 #endif
index 229f2cdd81ca68d90507b433392d7a4148232ed3..8a50efb559f35a2c75f6fa379f34e4580e6c5a8d 100644 (file)
@@ -34,6 +34,7 @@ config ARM
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
+       select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select ARCH_WANT_IPC_PARSE_VERSION
        select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
        select BUILDTIME_EXTABLE_SORT if MMU
index a2a68b75197186ec219ce6d74c724c6712d7704c..069da393110cce066141bff93b03286c9f2bdb09 100644 (file)
@@ -15,8 +15,6 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-#define check_pgt_cache()              do { } while (0)
-
 #ifdef CONFIG_MMU
 
 #define _PAGE_USER_TABLE       (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
index d0de24f0672443c06efce96477157333c9186060..010fa1a35a6836293f6ae0136251d9b41dcd38c9 100644 (file)
@@ -70,11 +70,6 @@ typedef pte_t *pte_addr_t;
  */
 extern unsigned int kobjsize(const void *objp);
 
-/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init()   do { } while (0)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
index f2e990dc27e75c42df401468275dd37226992b75..3ae120cd1715fb7ebd5e39773a141799418bbe17 100644 (file)
@@ -368,8 +368,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
index 20c2f42454b89d7117dedae9b3bf10a165a11daf..614bf829e454853b000b60596533e0244b7c90d7 100644 (file)
@@ -140,8 +140,6 @@ static inline void prefetchw(const void *ptr)
 #endif
 #endif
 
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
 #endif
 
 #endif /* __ASM_ARM_PROCESSOR_H */
index f934a6739fc053649c53e7537f6729f7ac4afd4f..9485acc520a413451f0b925276c81c8d23a00d3f 100644 (file)
@@ -319,11 +319,6 @@ unsigned long get_wchan(struct task_struct *p)
        return 0;
 }
 
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-       return randomize_page(mm->brk, 0x02000000);
-}
-
 #ifdef CONFIG_MMU
 #ifdef CONFIG_KUSER_HELPERS
 /*
index 6ecbda87ee4683f0f6252636fd5aa81af74cf8d4..6d89db7895d14c85743c09e2c60d92f5410f3fbf 100644 (file)
@@ -204,18 +204,17 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
         * coherent with the kernels mapping.
         */
        if (!PageHighMem(page)) {
-               size_t page_size = PAGE_SIZE << compound_order(page);
-               __cpuc_flush_dcache_area(page_address(page), page_size);
+               __cpuc_flush_dcache_area(page_address(page), page_size(page));
        } else {
                unsigned long i;
                if (cache_is_vipt_nonaliasing()) {
-                       for (i = 0; i < (1 << compound_order(page)); i++) {
+                       for (i = 0; i < compound_nr(page); i++) {
                                void *addr = kmap_atomic(page + i);
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                kunmap_atomic(addr);
                        }
                } else {
-                       for (i = 0; i < (1 << compound_order(page)); i++) {
+                       for (i = 0; i < compound_nr(page); i++) {
                                void *addr = kmap_high_get(page + i);
                                if (addr) {
                                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
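
Several hunks in this merge (here and in arm64, ia64 and powerpc below) replace open-coded compound_order() arithmetic with the new page_size(), page_shift() and compound_nr() helpers. A hedged sketch of the equivalences implied by those conversions (illustrative names, not the in-tree definitions):

static inline unsigned long example_page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);	/* what page_size(page) yields */
}

static inline unsigned int example_page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);	/* what page_shift(page) yields */
}

static inline unsigned long example_compound_nr(struct page *page)
{
	return 1UL << compound_order(page);		/* what compound_nr(page) yields */
}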
index f866870db749c4bf2b0e5ff03f687cda5569e651..b8d912ac9e61495aaeab9db3fd5d294c70f5aa41 100644 (file)
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
 
-/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
-       if (current->personality & ADDR_COMPAT_LAYOUT)
-               return 1;
-
-       if (rlim_stack->rlim_cur == RLIM_INFINITY)
-               return 1;
-
-       return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
-       unsigned long gap = rlim_stack->rlim_cur;
-
-       if (gap < MIN_GAP)
-               gap = MIN_GAP;
-       else if (gap > MAX_GAP)
-               gap = MAX_GAP;
-
-       return PAGE_ALIGN(TASK_SIZE - gap - rnd);
-}
-
 /*
  * We need to ensure that shared mappings are correctly aligned to
  * avoid aliasing issues with VIPT caches.  We need to ensure that
@@ -171,31 +144,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        return addr;
 }
 
-unsigned long arch_mmap_rnd(void)
-{
-       unsigned long rnd;
-
-       rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-
-       return rnd << PAGE_SHIFT;
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-       unsigned long random_factor = 0UL;
-
-       if (current->flags & PF_RANDOMIZE)
-               random_factor = arch_mmap_rnd();
-
-       if (mmap_is_legacy(rlim_stack)) {
-               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-               mm->get_unmapped_area = arch_get_unmapped_area;
-       } else {
-               mm->mmap_base = mmap_base(random_factor, rlim_stack);
-               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-       }
-}
-
 /*
  * You really shouldn't be using read() or write() on /dev/mem.  This
  * might go away in the future.
index 37c610963eeeda6f6560a0d5f7c61e869ed99881..866e05882799069f0a54ccac0cc45b1a2775163c 100644 (file)
@@ -15,7 +15,6 @@ config ARM64
        select ARCH_HAS_DMA_COHERENT_TO_PFN
        select ARCH_HAS_DMA_PREP_COHERENT
        select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
-       select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_FAST_MULTIPLIER
        select ARCH_HAS_FORTIFY_SOURCE
        select ARCH_HAS_GCOV_PROFILE_ALL
@@ -71,6 +70,7 @@ config ARM64
        select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
        select ARCH_SUPPORTS_NUMA_BALANCING
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
+       select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
        select ARCH_HAS_UBSAN_SANITIZE_ALL
index 14d0bc44d451b090b582e72415dcc00b313b4797..172d76fa02451daff973b101846221ba0e2192ae 100644 (file)
@@ -15,8 +15,6 @@
 
 #include <asm-generic/pgalloc.h>       /* for pte_{alloc,free}_one */
 
-#define check_pgt_cache()              do { } while (0)
-
 #define PGD_SIZE       (PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
index 57427d17580eb892db10404a1140fb81c21f10ac..7576df00eb50e8f7109d711846f758369a50eb85 100644 (file)
@@ -861,8 +861,6 @@ extern int kern_addr_valid(unsigned long addr);
 
 #include <asm-generic/pgtable.h>
 
-static inline void pgtable_cache_init(void) { }
-
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
index c67848c55009d9744ff68deea54eb58844d1c7f1..5623685c7d138556dd5268db6363ee8faeb99cd4 100644 (file)
@@ -280,8 +280,6 @@ static inline void spin_lock_prefetch(const void *ptr)
                     "nop") : : "p" (ptr));
 }
 
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
 extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
 extern void __init minsigstksz_setup(void);
 
index 03689c0beb34863ee501274a959efce24c067d88..a47462def04bf5d67acc21f43714198603d1f295 100644 (file)
@@ -557,14 +557,6 @@ unsigned long arch_align_stack(unsigned long sp)
        return sp & ~0xf;
 }
 
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-       if (is_compat_task())
-               return randomize_page(mm->brk, SZ_32M);
-       else
-               return randomize_page(mm->brk, SZ_1G);
-}
-
 /*
  * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
  */
index dc19300309d2880e4ad74c4baaaa40e3b5f619d8..ac485163a4a7669f0e6d1b1eca683c65ad8c59ec 100644 (file)
@@ -56,8 +56,7 @@ void __sync_icache_dcache(pte_t pte)
        struct page *page = pte_page(pte);
 
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-               sync_icache_aliases(page_address(page),
-                                   PAGE_SIZE << compound_order(page));
+               sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
index b050641b51392bbd5a0c8c0fb698b5dbc17f09ed..3028bacbc4e9c40135b1f84a28a67500218c3078 100644 (file)
 
 #include <asm/cputype.h>
 
-/*
- * Leave enough space between the mmap area and the stack to honour ulimit in
- * the face of randomisation.
- */
-#define MIN_GAP (SZ_128M)
-#define MAX_GAP        (STACK_TOP/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
-       if (current->personality & ADDR_COMPAT_LAYOUT)
-               return 1;
-
-       if (rlim_stack->rlim_cur == RLIM_INFINITY)
-               return 1;
-
-       return sysctl_legacy_va_layout;
-}
-
-unsigned long arch_mmap_rnd(void)
-{
-       unsigned long rnd;
-
-#ifdef CONFIG_COMPAT
-       if (test_thread_flag(TIF_32BIT))
-               rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
-       else
-#endif
-               rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-       return rnd << PAGE_SHIFT;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
-       unsigned long gap = rlim_stack->rlim_cur;
-       unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
-
-       /* Values close to RLIM_INFINITY can overflow. */
-       if (gap + pad > gap)
-               gap += pad;
-
-       if (gap < MIN_GAP)
-               gap = MIN_GAP;
-       else if (gap > MAX_GAP)
-               gap = MAX_GAP;
-
-       return PAGE_ALIGN(STACK_TOP - gap - rnd);
-}
-
-/*
- * This function, called very early during the creation of a new process VM
- * image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-       unsigned long random_factor = 0UL;
-
-       if (current->flags & PF_RANDOMIZE)
-               random_factor = arch_mmap_rnd();
-
-       /*
-        * Fall back to the standard layout if the personality bit is set, or
-        * if the expected stack growth is unlimited:
-        */
-       if (mmap_is_legacy(rlim_stack)) {
-               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-               mm->get_unmapped_area = arch_get_unmapped_area;
-       } else {
-               mm->mmap_base = mmap_base(random_factor, rlim_stack);
-               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-       }
-}
-
 /*
  * You really shouldn't be using read() or write() on /dev/mem.  This might go
  * away in the future.
index 7548f9ca1f11142c6879281521f9a751b710c87e..4a64089e5771c1e2fd06448fe6c0edfb7f5ab635 100644 (file)
@@ -35,7 +35,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
                kmem_cache_free(pgd_cache, pgd);
 }
 
-void __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
 {
        if (PGD_SIZE == PAGE_SIZE)
                return;
index 0bd805964ea662d69bb5b242570cde19f0d76eda..0b6919c004131d81314d0054b00955b0b10f0a06 100644 (file)
@@ -59,11 +59,6 @@ extern unsigned long empty_zero_page;
 
 #define swapper_pg_dir ((pgd_t *) 0)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
 /*
  * c6x is !MMU, so define the simpliest implementation
  */
index 98c5716708d6103a29316faef9ed9e5a3de81524..d089113fe41f71762f58d34e0935daa562e21d09 100644 (file)
@@ -75,8 +75,6 @@ do {                                                  \
        tlb_remove_page(tlb, pte);                      \
 } while (0)
 
-#define check_pgt_cache()      do {} while (0)
-
 extern void pagetable_init(void);
 extern void pre_mmu_init(void);
 extern void pre_trap_init(void);
index c429a6f347de9d7e9a7b9269fa92ad4b138389af..0040b3a05b61d5ef29abfef4397ac0026a5548dc 100644 (file)
@@ -296,11 +296,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr)  (1)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do {} while (0)
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)
 
index a99caa49d26563ff6c69c37dc45a128356b0a04c..4d00152fab58d337e7f65ba8c8caa9e5a9014a95 100644 (file)
@@ -4,7 +4,6 @@
 #define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #include <asm-generic/pgtable.h>
-#define pgtable_cache_init()   do { } while (0)
 extern void paging_init(void);
 #define PAGE_NONE              __pgprot(0)    /* these mean nothing to NO_MM */
 #define PAGE_SHARED            __pgprot(0)    /* these mean nothing to NO_MM */
@@ -34,11 +33,6 @@ static inline int pte_file(pte_t pte) { return 0; }
 extern unsigned int kobjsize(const void *objp);
 extern int is_in_rom(unsigned long);
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
index d6544dc712587eef6e857a5405ac6b237d81ac8e..5a6e79e7926d4708b6e44c729be19f71fdacc078 100644 (file)
@@ -13,8 +13,6 @@
 
 #include <asm-generic/pgalloc.h>       /* for pte_{alloc,free}_one */
 
-#define check_pgt_cache() do {} while (0)
-
 extern unsigned long long kmap_generation;
 
 /*
index a3ff6d24c09ed3384930d03a1bfca5405f9e1ec0..2fec20ad939eed62a9e99182f2c270c896534b4c 100644 (file)
@@ -431,9 +431,6 @@ static inline int pte_exec(pte_t pte)
 
 #define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
-/*  I think this is in case we have page table caches; needed by init/main.c  */
-#define pgtable_cache_init()    do { } while (0)
-
 /*
  * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the PTE is
  * interpreted as swap information.  The remaining free bits are interpreted as
index 1894263ae5bcea4929147bbd4d6cec01cac5f774..893838499591d797aceb3bbc81ff3107bcf8039c 100644 (file)
@@ -3,5 +3,5 @@
 # Makefile for Hexagon memory management subsystem
 #
 
-obj-y := init.o pgalloc.o ioremap.o uaccess.o vm_fault.o cache.o
+obj-y := init.o ioremap.o uaccess.o vm_fault.o cache.o
 obj-y += copy_to_user.o copy_from_user.o strnlen_user.o vm_tlb.o
diff --git a/arch/hexagon/mm/pgalloc.c b/arch/hexagon/mm/pgalloc.c
deleted file mode 100644 (file)
index 4d43161..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/init.h>
-
-void __init pgtable_cache_init(void)
-{
-}
index 685a3df126cab909cb92bc008cd14adf5023b436..16714477eef429847cf5a57da5180f83b0167d44 100644 (file)
@@ -72,10 +72,6 @@ config 64BIT
 config ZONE_DMA32
        def_bool y
 
-config QUICKLIST
-       bool
-       default y
-
 config MMU
        bool
        default y
index c9e481023c25bd48857ad3ec9eaaf9e925fb8795..f4c491044882886b5687af1d91f75ba548eaa61c 100644 (file)
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/threads.h>
-#include <linux/quicklist.h>
+
+#include <asm-generic/pgalloc.h>
 
 #include <asm/mmu_context.h>
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       return quicklist_alloc(0, GFP_KERNEL, NULL);
+       return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-       quicklist_free(0, NULL, pgd);
+       free_page((unsigned long)pgd);
 }
 
 #if CONFIG_PGTABLE_LEVELS == 4
@@ -42,12 +43,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return quicklist_alloc(0, GFP_KERNEL, NULL);
+       return (pud_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-       quicklist_free(0, NULL, pud);
+       free_page((unsigned long)pud);
 }
 #define __pud_free_tlb(tlb, pud, address)      pud_free((tlb)->mm, pud)
 #endif /* CONFIG_PGTABLE_LEVELS == 4 */
@@ -60,12 +61,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return quicklist_alloc(0, GFP_KERNEL, NULL);
+       return (pmd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-       quicklist_free(0, NULL, pmd);
+       free_page((unsigned long)pmd);
 }
 
 #define __pmd_free_tlb(tlb, pmd, address)      pmd_free((tlb)->mm, pmd)
@@ -83,43 +84,6 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
        pmd_val(*pmd_entry) = __pa(pte);
 }
 
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
-       struct page *page;
-       void *pg;
-
-       pg = quicklist_alloc(0, GFP_KERNEL, NULL);
-       if (!pg)
-               return NULL;
-       page = virt_to_page(pg);
-       if (!pgtable_page_ctor(page)) {
-               quicklist_free(0, NULL, pg);
-               return NULL;
-       }
-       return page;
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
-       return quicklist_alloc(0, GFP_KERNEL, NULL);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-       pgtable_page_dtor(pte);
-       quicklist_free_page(0, NULL, pte);
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-       quicklist_free(0, NULL, pte);
-}
-
-static inline void check_pgt_cache(void)
-{
-       quicklist_trim(0, NULL, 25, 16);
-}
-
 #define __pte_free_tlb(tlb, pte, address)      pte_free((tlb)->mm, pte)
 
 #endif                         /* _ASM_IA64_PGALLOC_H */
index b1e7468eb65a14008954b59afae083320e48ae9c..d602e7c622dbf685c3c51a8733dafbf72e0574e7 100644 (file)
@@ -566,11 +566,6 @@ extern struct page *zero_page_memmap_ptr;
 #define KERNEL_TR_PAGE_SHIFT   _PAGE_SIZE_64M
 #define KERNEL_TR_PAGE_SIZE    (1 << KERNEL_TR_PAGE_SHIFT)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
 /* These tell get_user_pages() that the first gate page is accessible from user-level.  */
 #define FIXADDR_USER_START     GATE_ADDR
 #ifdef HAVE_BUGGY_SEGREL
index 678b98a09c854c78cb2ac3e69a7a2f9261e2a890..bf9df2625bc8393c92e427311f29b8b7cc5ce728 100644 (file)
@@ -64,7 +64,7 @@ __ia64_sync_icache_dcache (pte_t pte)
        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */
 
-       flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+       flush_icache_range(addr, addr + page_size(page));
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
 }
 
index fde4534b974fb314371f0ae7ea712e57440841c0..646c174fff9919facd700f20943b640757c65631 100644 (file)
@@ -176,11 +176,4 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot);
 #include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
-#define check_pgt_cache()      do { } while (0)
-
 #endif /* _M68K_PGTABLE_H */
index fc3a96c77bd87626a249533001ca0366d4d39d61..c18165b0d90436eac5622dfffde0e69d911b96a8 100644 (file)
@@ -44,11 +44,6 @@ extern void paging_init(void);
  */
 #define ZERO_PAGE(vaddr)       (virt_to_page(0))
 
-/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init()   do { } while (0)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
@@ -60,6 +55,4 @@ extern void paging_init(void);
 
 #include <asm-generic/pgtable.h>
 
-#define check_pgt_cache()      do { } while (0)
-
 #endif /* _M68KNOMMU_PGTABLE_H */
index f4cc9ffc449e10219831a60a841775a032e0b15b..7ecb05baa601f875dba9cad9e5f56da0f1007ae8 100644 (file)
 #include <asm/cache.h>
 #include <asm/pgtable.h>
 
-#define PGDIR_ORDER    0
-
-/*
- * This is handled very differently on MicroBlaze since out page tables
- * are all 0's and I want to be able to use these zero'd pages elsewhere
- * as well - it gives us quite a speedup.
- * -- Cort
- */
-extern struct pgtable_cache_struct {
-       unsigned long *pgd_cache;
-       unsigned long *pte_cache;
-       unsigned long pgtable_cache_sz;
-} quicklists;
-
-#define pgd_quicklist          (quicklists.pgd_cache)
-#define pmd_quicklist          ((unsigned long *)0)
-#define pte_quicklist          (quicklists.pte_cache)
-#define pgtable_cache_size     (quicklists.pgtable_cache_sz)
-
-extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
-extern atomic_t zero_sz; /* # currently pre-zero'd pages */
-extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
-extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
-extern atomic_t zerototal; /* # pages zero'd over time */
-
-#define zero_quicklist         (zero_cache)
-#define zero_cache_sz          (zero_sz)
-#define zero_cache_calls       (zeropage_calls)
-#define zero_cache_hits                (zeropage_hits)
-#define zero_cache_total       (zerototal)
-
-/*
- * return a pre-zero'd page from the list,
- * return NULL if none available -- Cort
- */
-extern unsigned long get_zero_page_fast(void);
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#include <asm-generic/pgalloc.h>
 
 extern void __bad_pte(pmd_t *pmd);
 
-static inline pgd_t *get_pgd_slow(void)
+static inline pgd_t *get_pgd(void)
 {
-       pgd_t *ret;
-
-       ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
-       if (ret != NULL)
-               clear_page(ret);
-       return ret;
+       return (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0);
 }
 
-static inline pgd_t *get_pgd_fast(void)
-{
-       unsigned long *ret;
-
-       ret = pgd_quicklist;
-       if (ret != NULL) {
-               pgd_quicklist = (unsigned long *)(*ret);
-               ret[0] = 0;
-               pgtable_cache_size--;
-       } else
-               ret = (unsigned long *)get_pgd_slow();
-       return (pgd_t *)ret;
-}
-
-static inline void free_pgd_fast(pgd_t *pgd)
-{
-       *(unsigned long **)pgd = pgd_quicklist;
-       pgd_quicklist = (unsigned long *) pgd;
-       pgtable_cache_size++;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd(pgd_t *pgd)
 {
        free_page((unsigned long)pgd);
 }
 
-#define pgd_free(mm, pgd)        free_pgd_fast(pgd)
-#define pgd_alloc(mm)          get_pgd_fast()
+#define pgd_free(mm, pgd)      free_pgd(pgd)
+#define pgd_alloc(mm)          get_pgd()
 
 #define pmd_pgtable(pmd)       pmd_page(pmd)
 
@@ -110,50 +50,6 @@ static inline void free_pgd_slow(pgd_t *pgd)
 
 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
 
-static inline struct page *pte_alloc_one(struct mm_struct *mm)
-{
-       struct page *ptepage;
-
-#ifdef CONFIG_HIGHPTE
-       int flags = GFP_KERNEL | __GFP_HIGHMEM;
-#else
-       int flags = GFP_KERNEL;
-#endif
-
-       ptepage = alloc_pages(flags, 0);
-       if (!ptepage)
-               return NULL;
-       clear_highpage(ptepage);
-       if (!pgtable_page_ctor(ptepage)) {
-               __free_page(ptepage);
-               return NULL;
-       }
-       return ptepage;
-}
-
-static inline void pte_free_fast(pte_t *pte)
-{
-       *(unsigned long **)pte = pte_quicklist;
-       pte_quicklist = (unsigned long *) pte;
-       pgtable_cache_size++;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-       free_page((unsigned long)pte);
-}
-
-static inline void pte_free_slow(struct page *ptepage)
-{
-       __free_page(ptepage);
-}
-
-static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
-{
-       pgtable_page_dtor(ptepage);
-       __free_page(ptepage);
-}
-
 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
 
 #define pmd_populate(mm, pmd, pte) \
@@ -171,10 +67,6 @@ static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
 #define __pmd_free_tlb(tlb, x, addr)   pmd_free((tlb)->mm, x)
 #define pgd_populate(mm, pmd, pte)     BUG()
 
-extern int do_check_pgt_cache(int, int);
-
 #endif /* CONFIG_MMU */
 
-#define check_pgt_cache()              do { } while (0)
-
 #endif /* _ASM_MICROBLAZE_PGALLOC_H */
index 142d3f004848e95cb9ffb426686bd321d56f51e0..954b69af451fb595530731acc4208c08fafa94de 100644 (file)
@@ -46,8 +46,6 @@ extern int mem_init_done;
 
 #define swapper_pg_dir ((pgd_t *) NULL)
 
-#define pgtable_cache_init()   do {} while (0)
-
 #define arch_enter_lazy_cpu_mode()     do {} while (0)
 
 #define pgprot_noncached_wc(prot)      prot
@@ -526,11 +524,6 @@ extern unsigned long iopa(unsigned long addr);
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr)  (1)
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
 void do_page_fault(struct pt_regs *regs, unsigned long address,
                   unsigned long error_code);
 
index 8fe54fda31dc3e0327da6abea28ee9d274d7d165..010bb9cee2e417bc41b8e3f116bd57ef77bd3ec0 100644 (file)
@@ -44,10 +44,6 @@ unsigned long ioremap_base;
 unsigned long ioremap_bot;
 EXPORT_SYMBOL(ioremap_bot);
 
-#ifndef CONFIG_SMP
-struct pgtable_cache_struct quicklists;
-#endif
-
 static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
                unsigned long flags)
 {
index cc8e2b1032a540502ec6d4e937636e41e3c16f6e..a0bd9bdb5f8302ae19e52a0eb7ad1641eca2f5d1 100644 (file)
@@ -5,7 +5,6 @@ config MIPS
        select ARCH_32BIT_OFF_T if !64BIT
        select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
        select ARCH_CLOCKSOURCE_DATA
-       select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAS_UBSAN_SANITIZE_ALL
        select ARCH_SUPPORTS_UPROBES
@@ -13,6 +12,7 @@ config MIPS
        select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_USE_QUEUED_SPINLOCKS
+       select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
index aa16b85ddffcc8a4ce5dcac613387ba9f0d8c767..aa73cb187a077e07a30d365e23f84123e69e3744 100644 (file)
@@ -105,8 +105,6 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 #endif /* __PAGETABLE_PUD_FOLDED */
 
-#define check_pgt_cache()      do { } while (0)
-
 extern void pagetable_init(void);
 
 #endif /* _ASM_PGALLOC_H */
index 4dca733d5076107c3d55d93ad436f4969cb730e5..f85bd5b15f51fc5f6ae29819d0d059546169554a 100644 (file)
@@ -661,9 +661,4 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
 #endif /* _ASM_PGTABLE_H */
index aca909bd784104b9a303f99b2896aec5672e5f19..fba18d4a9190990cedf7ab4bb939a5db92bb5641 100644 (file)
 
 extern unsigned int vced_count, vcei_count;
 
-/*
- * MIPS does have an arch_pick_mmap_layout()
- */
-#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
-
 #ifdef CONFIG_32BIT
 #ifdef CONFIG_KVM_GUEST
 /* User space process size is limited to 1GB in KVM Guest Mode */
index d79f2b4323187afb23f487bc203fdd36c8407661..00fe90c6db3e8079323f06d4bf49626fa345014c 100644 (file)
 unsigned long shm_align_mask = PAGE_SIZE - 1;  /* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
 
-/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
-       if (current->personality & ADDR_COMPAT_LAYOUT)
-               return 1;
-
-       if (rlim_stack->rlim_cur == RLIM_INFINITY)
-               return 1;
-
-       return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
-       unsigned long gap = rlim_stack->rlim_cur;
-
-       if (gap < MIN_GAP)
-               gap = MIN_GAP;
-       else if (gap > MAX_GAP)
-               gap = MAX_GAP;
-
-       return PAGE_ALIGN(TASK_SIZE - gap - rnd);
-}
-
 #define COLOUR_ALIGN(addr, pgoff)                              \
        ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))
@@ -144,63 +117,6 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                        addr0, len, pgoff, flags, DOWN);
 }
 
-unsigned long arch_mmap_rnd(void)
-{
-       unsigned long rnd;
-
-#ifdef CONFIG_COMPAT
-       if (TASK_IS_32BIT_ADDR)
-               rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
-       else
-#endif /* CONFIG_COMPAT */
-               rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-
-       return rnd << PAGE_SHIFT;
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-       unsigned long random_factor = 0UL;
-
-       if (current->flags & PF_RANDOMIZE)
-               random_factor = arch_mmap_rnd();
-
-       if (mmap_is_legacy(rlim_stack)) {
-               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-               mm->get_unmapped_area = arch_get_unmapped_area;
-       } else {
-               mm->mmap_base = mmap_base(random_factor, rlim_stack);
-               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-       }
-}
-
-static inline unsigned long brk_rnd(void)
-{
-       unsigned long rnd = get_random_long();
-
-       rnd = rnd << PAGE_SHIFT;
-       /* 8MB for 32bit, 256MB for 64bit */
-       if (TASK_IS_32BIT_ADDR)
-               rnd = rnd & 0x7ffffful;
-       else
-               rnd = rnd & 0xffffffful;
-
-       return rnd;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-       unsigned long base = mm->brk;
-       unsigned long ret;
-
-       ret = PAGE_ALIGN(base + brk_rnd());
-
-       if (ret < mm->brk)
-               return mm->brk;
-
-       return ret;
-}
-
 bool __virt_addr_valid(const volatile void *kaddr)
 {
        unsigned long vaddr = (unsigned long)kaddr;
index e78b43d8389f0326c476328b7dcc8ca70ee32e8d..37125e6884d78ef479727de577933c7e8b911aee 100644 (file)
@@ -23,8 +23,6 @@
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
 
-#define check_pgt_cache()              do { } while (0)
-
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
        pgtable_t pte;
index c70cc56bec09b7501e687e4760676532e65b0a55..0588ec99725c96371848045f9c2c6b4270b9f274 100644 (file)
@@ -403,8 +403,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  * into virtual address `from'
  */
 
-#define pgtable_cache_init()       do { } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASMNDS32_PGTABLE_H */
index 4bc8cf72067e98980ce0e2189ce22eae50098c4c..750d18d5980bb0f4a427fecbef26e33d6d7ff961 100644 (file)
@@ -45,6 +45,4 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
                tlb_remove_page((tlb), (pte));                  \
        } while (0)
 
-#define check_pgt_cache()      do { } while (0)
-
 #endif /* _ASM_NIOS2_PGALLOC_H */
index 95237b7f6fc1728cdc8ee5ad4c8f88da631b463e..99985d8b71664a97718d3999af3ce27f124b6ebf 100644 (file)
@@ -291,8 +291,6 @@ static inline void pte_clear(struct mm_struct *mm,
 
 #include <asm-generic/pgtable.h>
 
-#define pgtable_cache_init()           do { } while (0)
-
 extern void __init paging_init(void);
 extern void __init mmu_init(void);
 
index 3d4b397c2d06bf5115d2162ed03f7bbad6d59cdd..787c1b9d2f6d6eb05ad4fcf4157d0753261765da 100644 (file)
@@ -101,6 +101,4 @@ do {                                        \
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
-#define check_pgt_cache()          do { } while (0)
-
 #endif
index 2fe9ff5b5d6f7d8b4ac00f35858036032820f207..248d22d8faa7b1d7629256f693a3767511c3b4e2 100644 (file)
@@ -443,11 +443,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #include <asm-generic/pgtable.h>
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()           do { } while (0)
-
 typedef pte_t *pte_addr_t;
 
 #endif /* __ASSEMBLY__ */
index 4f2059a50faee17e7da547a369d9e49ef4fd1db2..d98647c29b7424ab3c48c7f3aab7d4e866befefd 100644 (file)
@@ -124,6 +124,4 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
        pmd_populate_kernel(mm, pmd, page_address(pte_page))
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
-#define check_pgt_cache()      do { } while (0)
-
 #endif
index 6d58c1739b4239619095fe181431cdf7eaa2d4b5..4ac374b3a99fdf469fa09051c8b54e20ecf56d43 100644 (file)
@@ -132,8 +132,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define PTRS_PER_PTE    (1UL << BITS_PER_PTE)
 
 /* Definitions for 2nd level */
-#define pgtable_cache_init()   do { } while (0)
-
 #define PMD_SHIFT       (PLD_SHIFT + BITS_PER_PTE)
 #define PMD_SIZE       (1UL << PMD_SHIFT)
 #define PMD_MASK       (~(PMD_SIZE-1))
index 2b2c60a1a66df9a96dc36141434060518e59b1e9..6dd78a2dc03aff1b410b982a76d048a4f2d1d7ad 100644 (file)
@@ -64,8 +64,6 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 extern struct kmem_cache *pgtable_cache[];
 #define PGT_CACHE(shift) pgtable_cache[shift]
 
-static inline void check_pgt_cache(void) { }
-
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/book3s/pgalloc.h>
 #else
index 8b7865a2d576630613e7a4e1a5a3ae2e3386bade..4053b2ab427cc5579a43fcd16971d9739b5db2dc 100644 (file)
@@ -87,7 +87,6 @@ extern unsigned long ioremap_bot;
 unsigned long vmalloc_to_phys(void *vmalloc_addr);
 
 void pgtable_cache_add(unsigned int shift);
-void pgtable_cache_init(void);
 
 #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
 void mark_initmem_nx(void);
index 3410ea9f4de1c1a1627b83b22a8ff4b1fa78d012..6c123760164e8f66dcc427e34ba9e4aee44e8031 100644 (file)
@@ -1748,7 +1748,7 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
        /*
         * IF we try to do a HUGE PTE update after a withdraw is done.
         * we will find the below NULL. This happens when we do
-        * split_huge_page_pmd
+        * split_huge_pmd
         */
        if (!hpte_slot_array)
                return;
index b056cae3388b76e139a941ca1b4d48417ac89d5f..56cc845205779b17def2e37ca728f01e046efd05 100644 (file)
@@ -129,11 +129,8 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
                 * Allow to use larger than 64k IOMMU pages. Only do that
                 * if we are backed by hugetlb.
                 */
-               if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) {
-                       struct page *head = compound_head(page);
-
-                       pageshift = compound_order(head) + PAGE_SHIFT;
-               }
+               if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+                       pageshift = page_shift(compound_head(page));
                mem->pageshift = min(mem->pageshift, pageshift);
                /*
                 * We don't need struct page reference any more, switch
index a8953f10880897eb62352b6b6734db312d902739..73d4873fc7f85442eafc08399446bc176fd82b9a 100644 (file)
@@ -667,7 +667,7 @@ void flush_dcache_icache_hugepage(struct page *page)
 
        BUG_ON(!PageCompound(page));
 
-       for (i = 0; i < (1UL << compound_order(page)); i++) {
+       for (i = 0; i < compound_nr(page); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
index 71d29fb4008a7fb10694a1de1b9c056f12466688..8eebbc8860bbd10bd18af96d81dec8370b042c9e 100644 (file)
@@ -59,6 +59,18 @@ config RISCV
        select ARCH_HAS_GIGANTIC_PAGE
        select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
        select SPARSEMEM_STATIC if 32BIT
+       select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
+       select HAVE_ARCH_MMAP_RND_BITS
+
+config ARCH_MMAP_RND_BITS_MIN
+       default 18 if 64BIT
+       default 8
+
+# max bits determined by the following formula:
+#  VA_BITS - PAGE_SHIFT - 3
+config ARCH_MMAP_RND_BITS_MAX
+       default 24 if 64BIT # SV39 based
+       default 17
 
 config MMU
        def_bool y
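
As a quick check of the 64BIT default above: Sv39 provides VA_BITS = 39 and riscv uses 4 KiB pages (PAGE_SHIFT = 12), so VA_BITS - PAGE_SHIFT - 3 = 39 - 12 - 3 = 24 randomization bits at most, matching the "default 24 if 64BIT" line.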
index 56a67d66f72fbbead3aa7283ebb806c5ac7bc48f..f66a00d8cb19f1bf0d1f362694351e5385e1f2be 100644 (file)
@@ -82,8 +82,4 @@ do {                                    \
        tlb_remove_page((tlb), pte);    \
 } while (0)
 
-static inline void check_pgt_cache(void)
-{
-}
-
 #endif /* _ASM_RISCV_PGALLOC_H */
index 80905b27ee987a7fa37b61945e34366429e06fea..c60123f018f500578124d44f5e3fc2b7e7882898 100644 (file)
@@ -424,11 +424,6 @@ extern void *dtb_early_va;
 extern void setup_bootmem(void);
 extern void paging_init(void);
 
-static inline void pgtable_cache_init(void)
-{
-       /* No page table caches to initialize */
-}
-
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
index 0c4600725fc2e908b98578d9febaa93f82df4ed5..36c578c0ff969fe99209cfe7fa826dd0f495ce13 100644 (file)
@@ -1682,12 +1682,6 @@ extern void s390_reset_cmma(struct mm_struct *mm);
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-/*
- * No page table caches to initialise
- */
-static inline void pgtable_cache_init(void) { }
-static inline void check_pgt_cache(void) { }
-
 #include <asm-generic/pgtable.h>
 
 #endif /* _S390_PAGE_H */
index b56f908b13950e31335984ec09c2f88973915c90..8c6341a4d8072984645b7e73d3091aed5649417a 100644 (file)
@@ -2,10 +2,8 @@
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H
 
-#include <linux/quicklist.h>
 #include <asm/page.h>
-
-#define QUICK_PT 0     /* Other page table pages that are zero on free */
+#include <asm-generic/pgalloc.h>
 
 extern pgd_t *pgd_alloc(struct mm_struct *);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
@@ -29,41 +27,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
-/*
- * Allocate and free page tables.
- */
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
-       return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
-       struct page *page;
-       void *pg;
-
-       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
-       if (!pg)
-               return NULL;
-       page = virt_to_page(pg);
-       if (!pgtable_page_ctor(page)) {
-               quicklist_free(QUICK_PT, NULL, pg);
-               return NULL;
-       }
-       return page;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-       quicklist_free(QUICK_PT, NULL, pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-       pgtable_page_dtor(pte);
-       quicklist_free_page(QUICK_PT, NULL, pte);
-}
-
 #define __pte_free_tlb(tlb,pte,addr)                   \
 do {                                                   \
        pgtable_page_dtor(pte);                         \
@@ -79,9 +42,4 @@ do {                                                  \
 } while (0);
 #endif
 
-static inline void check_pgt_cache(void)
-{
-       quicklist_trim(QUICK_PT, NULL, 25, 16);
-}
-
 #endif /* __ASM_SH_PGALLOC_H */
index 9085d1142fa345d81467e166bf451131a5264ab3..cbd0f3c55a0c0de541d47af92d34cc057571d2ef 100644 (file)
@@ -123,11 +123,6 @@ typedef pte_t *pte_addr_t;
 
 #define pte_pfn(x)             ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 
-/*
- * Initialise the page table caches
- */
-extern void pgtable_cache_init(void);
-
 struct vm_area_struct;
 struct mm_struct;
 
index 02ed2df25a5403b07d8f8538cda71efc907a7c49..5c8a2ebfc720085a37d6cfe45335d78550fe949c 100644 (file)
@@ -1,9 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 menu "Memory management options"
 
-config QUICKLIST
-       def_bool y
-
 config MMU
         bool "Support for memory management hardware"
        depends on !CPU_SH2
index cc779a90d917f333994af4a566d73c5fc63d4bef..dca946f426c6ca1827694cee6a5d53126d3196c7 100644 (file)
@@ -97,7 +97,3 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 }
-
-void pgtable_cache_init(void)
-{
-}
index 282be50a4adfcf8fc7a6bdb231cef62dad9c7bb3..10538a4d1a1e9ab7ad79686b6a968d7093aa830f 100644 (file)
@@ -17,8 +17,6 @@ void srmmu_free_nocache(void *addr, int size);
 
 extern struct resource sparc_iomap;
 
-#define check_pgt_cache()      do { } while (0)
-
 pgd_t *get_pgd_fast(void);
 static inline void free_pgd_fast(pgd_t *pgd)
 {
index 48abccba49915f18dbdade44ccefb778a91404b9..9d3e5cc95bbb705c978c535b0cac97151a86c541 100644 (file)
@@ -69,8 +69,6 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage);
 #define pmd_populate(MM, PMD, PTE)             pmd_set(MM, PMD, PTE)
 #define pmd_pgtable(PMD)                       ((pte_t *)__pmd_page(PMD))
 
-#define check_pgt_cache()      do { } while (0)
-
 void pgtable_free(void *table, bool is_page);
 
 #ifdef CONFIG_SMP
index 4eebed6c6781aae875edc8aa3142978234f0ef2a..31da4482664576e2b4e4cf77972b26c595064810 100644 (file)
@@ -445,9 +445,4 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
 #define HAVE_ARCH_UNMAPPED_AREA
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
 #endif /* !(_SPARC_PGTABLE_H) */
index 1599de7305327cbe506504e229232c4dc1f50aeb..b57f9c631eca093fcd4922cbdeede046562c6080 100644 (file)
@@ -1135,7 +1135,6 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
                                   unsigned long);
 #define HAVE_ARCH_FB_UNMAPPED_AREA
 
-void pgtable_cache_init(void);
 void sun4v_register_fault_status(void);
 void sun4v_ktsb_register(void);
 void __init cheetah_ecache_flush_init(void);
index 046ab116cc8c6ecc2deeff6d26c0cd9624b0d626..906eda1158b4df1990e577042a75237d210d5859 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/vaddrs.h>
-#include <asm/pgalloc.h>       /* bug in asm-generic/tlb.h: check_pgt_cache */
 #include <asm/setup.h>
 #include <asm/tlb.h>
 #include <asm/prom.h>
index 023599c3fa5122c6fdf2db7e4d3852dfcead0c29..446e0c0f4018d5a22f76e8d652a458143a0e532c 100644 (file)
@@ -43,7 +43,5 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 #define __pmd_free_tlb(tlb,x, address)   tlb_remove_page((tlb),virt_to_page(x))
 #endif
 
-#define check_pgt_cache()      do { } while (0)
-
 #endif
 
index e4d3ed980d822c61ee1b7d7f0c2ed247f1b9e785..36a44d58f3739af63e17d43785120ca8aadb78c5 100644 (file)
@@ -32,8 +32,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 /* zero page used for uninitialized stuff */
 extern unsigned long *empty_zero_page;
 
-#define pgtable_cache_init() do ; while (0)
-
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts.  That means that
index 3f0903bd98e9d663be263e973a37d67dfb33975c..ba1c9a79993bc7843b687169b48a17da5598fe3a 100644 (file)
@@ -18,8 +18,6 @@
 #define __HAVE_ARCH_PTE_ALLOC_ONE
 #include <asm-generic/pgalloc.h>
 
-#define check_pgt_cache()              do { } while (0)
-
 #define _PAGE_USER_TABLE       (PMD_TYPE_TABLE | PMD_PRESENT)
 #define _PAGE_KERNEL_TABLE     (PMD_TYPE_TABLE | PMD_PRESENT)
 
index 126e961a8cb08f0f6820aa3125d58a3b07e27dc9..c8f7ba12f309b2ce7ffa7a5cd8b8acb3e3f273e1 100644 (file)
@@ -285,8 +285,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 #include <asm-generic/pgtable.h>
 
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __UNICORE_PGTABLE_H__ */
index c78da8eda8f2c8b565a2f87a6a6e6030eb2f530d..0dca7f7aeff2baa141cba616446227909b60d76f 100644 (file)
@@ -29,8 +29,6 @@ extern pgd_t swapper_pg_dir[1024];
 extern pgd_t initial_page_table[1024];
 extern pmd_t initial_pg_pmd[];
 
-static inline void pgtable_cache_init(void) { }
-static inline void check_pgt_cache(void) { }
 void paging_init(void);
 void sync_initial_page_table(void);
 
index 4990d26dfc733a5ad80c82fc3d4b32918c180288..0b6c4042942a22247c68e9174622157095f62495 100644 (file)
@@ -241,9 +241,6 @@ extern void cleanup_highmap(void);
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-#define pgtable_cache_init()   do { } while (0)
-#define check_pgt_cache()      do { } while (0)
-
 #define PAGE_AGP    PAGE_KERNEL_NOCACHE
 #define HAVE_PAGE_AGP 1
 
index 44816ff6411f950a72e1923c676f82f37890349c..463940faf52f4f7b1da9c4129f93b5414e373664 100644 (file)
@@ -357,7 +357,7 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
 
 static struct kmem_cache *pgd_cache;
 
-void __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
 {
        /*
         * When PAE kernel is running as a Xen domain, it does not use
@@ -402,10 +402,6 @@ static inline void _pgd_free(pgd_t *pgd)
 }
 #else
 
-void __init pgd_cache_init(void)
-{
-}
-
 static inline pgd_t *_pgd_alloc(void)
 {
        return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
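
The hunks above drop the per-architecture no-op pgtable_cache_init()/check_pgt_cache() stubs, and the x86 pgd.c hunk renames pgd_cache_init() to pgtable_cache_init(). A minimal sketch of the consolidation this implies, assuming the generic fallback is a weak no-op that architectures with real caches (such as x86 here) override; the exact form and location of the in-tree fallback are not shown in this excerpt.

void __init __weak pgtable_cache_init(void)
{
	/* Default: most architectures have no page-table caches to set up. */
}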
index ce3ff5e591b9b5e0b58b13636f0a3b6599a9a1ca..3f7fe5a8c286d3647ecbf95e934e240538660f94 100644 (file)
@@ -238,7 +238,6 @@ extern void paging_init(void);
 # define swapper_pg_dir NULL
 static inline void paging_init(void) { }
 #endif
-static inline void pgtable_cache_init(void) { }
 
 /*
  * The pmd contains the kernel virtual address of the pte page.
index 06875feb27c28ebb870820706dc286cd9740f1ce..856e2da2e397fe2156055887c22ed5806d45e204 100644 (file)
@@ -160,9 +160,6 @@ static inline void invalidate_dtlb_mapping (unsigned address)
                invalidate_dtlb_entry(tlb_entry);
 }
 
-#define check_pgt_cache()      do { } while (0)
-
-
 /*
  * DO NOT USE THESE FUNCTIONS.  These instructions aren't part of the Xtensa
  * ISA and exist only for test purposes..
index 20c39d1bcef8ae52ae187c98fef208247af35f6f..6bea4f3f8040dd77c87cf34647b26515061a76a1 100644 (file)
@@ -100,26 +100,9 @@ unsigned long __weak memory_block_size_bytes(void)
 }
 EXPORT_SYMBOL_GPL(memory_block_size_bytes);
 
-static unsigned long get_memory_block_size(void)
-{
-       unsigned long block_sz;
-
-       block_sz = memory_block_size_bytes();
-
-       /* Validate blk_sz is a power of 2 and not less than section size */
-       if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
-               WARN_ON(1);
-               block_sz = MIN_MEMORY_BLOCK_SIZE;
-       }
-
-       return block_sz;
-}
-
 /*
- * use this as the physical section index that this memsection
- * uses.
+ * Show the first physical section index (number) of this memory block.
  */
-
 static ssize_t phys_index_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
 {
@@ -131,7 +114,10 @@ static ssize_t phys_index_show(struct device *dev,
 }
 
 /*
- * Show whether the section of memory is likely to be hot-removable
+ * Show whether the memory block is likely to be offlineable (or is already
+ * offline). Once offline, the memory block could be removed. The return
+ * value does not, however, indicate that there is a way to remove the
+ * memory block.
  */
 static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
@@ -455,12 +441,12 @@ static DEVICE_ATTR_RO(phys_device);
 static DEVICE_ATTR_RO(removable);
 
 /*
- * Block size attribute stuff
+ * Show the memory block size (shared by all memory blocks).
  */
 static ssize_t block_size_bytes_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
-       return sprintf(buf, "%lx\n", get_memory_block_size());
+       return sprintf(buf, "%lx\n", memory_block_size_bytes());
 }
 
 static DEVICE_ATTR_RO(block_size_bytes);
@@ -670,10 +656,10 @@ static int init_memory_block(struct memory_block **memory,
                return -ENOMEM;
 
        mem->start_section_nr = block_id * sections_per_block;
-       mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
        mem->state = state;
        start_pfn = section_nr_to_pfn(mem->start_section_nr);
        mem->phys_device = arch_get_memory_phys_device(start_pfn);
+       mem->nid = NUMA_NO_NODE;
 
        ret = register_memory(mem);
 
@@ -810,19 +796,22 @@ static const struct attribute_group *memory_root_attr_groups[] = {
 /*
  * Initialize the sysfs support for memory devices...
  */
-int __init memory_dev_init(void)
+void __init memory_dev_init(void)
 {
        int ret;
        int err;
        unsigned long block_sz, nr;
 
+       /* Validate the configured memory block size */
+       block_sz = memory_block_size_bytes();
+       if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
+               panic("Memory block size not suitable: 0x%lx\n", block_sz);
+       sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+
        ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
        if (ret)
                goto out;
 
-       block_sz = get_memory_block_size();
-       sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
-
        /*
         * Create entries for memory sections that were found
         * during boot and have been initialized
@@ -838,8 +827,7 @@ int __init memory_dev_init(void)
 
 out:
        if (ret)
-               printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
-       return ret;
+               panic("%s() failed: %d\n", __func__, ret);
 }
 
 /**
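
The drivers/base/memory.c hunks above fold the old get_memory_block_size() sanity check into memory_dev_init() and make both the size check and the sysfs registration fatal via panic(), since the memory sysfs layer cannot operate without them. A small sketch of the validation step, using only helpers visible in the hunks; the wrapper name is hypothetical.

/* Hypothetical wrapper mirroring the check now done early in memory_dev_init(). */
static unsigned long example_sections_per_block(void)
{
	unsigned long block_sz = memory_block_size_bytes();

	/* Block size must be a power of two and span at least one section. */
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);

	return block_sz / MIN_MEMORY_BLOCK_SIZE;
}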
index 75b7e6f6535b5ac54351a1fd7a4c772ccada5e25..296546ffed6c124e949c47da292784958f4ca7b7 100644 (file)
@@ -427,6 +427,8 @@ static ssize_t node_read_meminfo(struct device *dev,
                       "Node %d AnonHugePages:  %8lu kB\n"
                       "Node %d ShmemHugePages: %8lu kB\n"
                       "Node %d ShmemPmdMapped: %8lu kB\n"
+                      "Node %d FileHugePages: %8lu kB\n"
+                      "Node %d FilePmdMapped: %8lu kB\n"
 #endif
                        ,
                       nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
@@ -452,6 +454,10 @@ static ssize_t node_read_meminfo(struct device *dev,
                       nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
                                       HPAGE_PMD_NR),
                       nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
+                                      HPAGE_PMD_NR),
+                      nid, K(node_page_state(pgdat, NR_FILE_THPS) *
+                                      HPAGE_PMD_NR),
+                      nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
                                       HPAGE_PMD_NR)
 #endif
                       );
@@ -756,15 +762,13 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
 static int register_mem_sect_under_node(struct memory_block *mem_blk,
                                         void *arg)
 {
+       unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
+       unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+       unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
        int ret, nid = *(int *)arg;
-       unsigned long pfn, sect_start_pfn, sect_end_pfn;
+       unsigned long pfn;
 
-       mem_blk->nid = nid;
-
-       sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
-       sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
-       sect_end_pfn += PAGES_PER_SECTION - 1;
-       for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
+       for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
                int page_nid;
 
                /*
@@ -789,6 +793,13 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
                        if (page_nid != nid)
                                continue;
                }
+
+               /*
+                * If this memory block spans multiple nodes, we only indicate
+                * the last processed node.
+                */
+               mem_blk->nid = nid;
+
                ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
                                        &mem_blk->dev.kobj,
                                        kobject_name(&mem_blk->dev.kobj));
@@ -804,32 +815,18 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
 }
 
 /*
- * Unregister memory block device under all nodes that it spans.
- * Has to be called with mem_sysfs_mutex held (due to unlinked_nodes).
+ * Unregister a memory block device under the node it spans. Memory blocks
+ * with multiple nodes cannot be offlined and can therefore never be removed.
  */
 void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
 {
-       unsigned long pfn, sect_start_pfn, sect_end_pfn;
-       static nodemask_t unlinked_nodes;
-
-       nodes_clear(unlinked_nodes);
-       sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
-       sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
-       for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
-               int nid;
+       if (mem_blk->nid == NUMA_NO_NODE)
+               return;
 
-               nid = get_nid_for_pfn(pfn);
-               if (nid < 0)
-                       continue;
-               if (!node_online(nid))
-                       continue;
-               if (node_test_and_set(nid, unlinked_nodes))
-                       continue;
-               sysfs_remove_link(&node_devices[nid]->dev.kobj,
-                        kobject_name(&mem_blk->dev.kobj));
-               sysfs_remove_link(&mem_blk->dev.kobj,
-                        kobject_name(&node_devices[nid]->dev.kobj));
-       }
+       sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
+                         kobject_name(&mem_blk->dev.kobj));
+       sysfs_remove_link(&mem_blk->dev.kobj,
+                         kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
 }
 
 int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
index c70cb5f272cf05a8c7ab962976e9bc324ea57c91..0891ab829b1b6b1318353e945e6394fab29f7c1e 100644 (file)
@@ -1078,7 +1078,7 @@ new_buf:
                        bool merge;
 
                        if (page)
-                               pg_size <<= compound_order(page);
+                               pg_size = page_size(page);
                        if (off < pg_size &&
                            skb_can_coalesce(skb, i, page, off)) {
                                merge = 1;
@@ -1105,8 +1105,7 @@ new_buf:
                                                           __GFP_NORETRY,
                                                           order);
                                        if (page)
-                                               pg_size <<=
-                                                       compound_order(page);
+                                               pg_size <<= order;
                                }
                                if (!page) {
                                        page = alloc_page(gfp);
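
This chtls hunk, like several later ones, replaces the open-coded PAGE_SIZE << compound_order(page) with the page_size() helper; where the allocation order is already in hand, the plain shift (pg_size <<= order) stays. A sketch of what such a helper boils down to, under the assumption that it is a direct fold of the shift (the in-tree definition may differ in detail):

/* Sketch only: the real helper is page_size() from the mm headers. */
static inline unsigned long example_page_size(struct page *page)
{
	/* Bytes covered by this (possibly compound) page. */
	return PAGE_SIZE << compound_order(page);
}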
index feaa538026a0a9e7317a147035b4a5c427445970..3db000aacd26c6ec1f556e23a0fa4a35b6705eaf 100644 (file)
@@ -174,7 +174,6 @@ via_map_blit_for_device(struct pci_dev *pdev,
 static void
 via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
 {
-       struct page *page;
        int i;
 
        switch (vsg->state) {
@@ -189,13 +188,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
                kfree(vsg->desc_pages);
                /* fall through */
        case dr_via_pages_locked:
-               for (i = 0; i < vsg->num_pages; ++i) {
-                       if (NULL != (page = vsg->pages[i])) {
-                               if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
-                                       SetPageDirty(page);
-                               put_page(page);
-                       }
-               }
+               put_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
+                                         (vsg->direction == DMA_FROM_DEVICE));
                /* fall through */
        case dr_via_pages_alloc:
                vfree(vsg->pages);
index 41f9e268e3fb9b81a994210bdda28d561f1342ff..24244a2f68cc57cab6d79fd49e0cac95dce1d93c 100644 (file)
@@ -54,10 +54,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 
        for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
                page = sg_page_iter_page(&sg_iter);
-               if (umem->writable && dirty)
-                       put_user_pages_dirty_lock(&page, 1);
-               else
-                       put_user_page(page);
+               put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
        }
 
        sg_free_table(&umem->sg_head);
index b89a9b9aef7ae71ca9cc4a076206f41aab91f418..469acb961fbd2e034c898cafd6d86d6150ac0f62 100644 (file)
@@ -118,10 +118,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
 void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
                             size_t npages, bool dirty)
 {
-       if (dirty)
-               put_user_pages_dirty_lock(p, npages);
-       else
-               put_user_pages(p, npages);
+       put_user_pages_dirty_lock(p, npages, dirty);
 
        if (mm) { /* during close after signal, mm can be NULL */
                atomic64_sub(npages, &mm->pinned_vm);
index bfbfbb7e0ff461299520d3d072d388bd13ab0420..6bf764e418919bc89360fe05a4d574e405ff7533 100644 (file)
 static void __qib_release_user_pages(struct page **p, size_t num_pages,
                                     int dirty)
 {
-       if (dirty)
-               put_user_pages_dirty_lock(p, num_pages);
-       else
-               put_user_pages(p, num_pages);
+       put_user_pages_dirty_lock(p, num_pages, dirty);
 }
 
 /**
index 0b0237d41613fc4cb61ba4f1cd02430aeda74490..62e6ffa9ad78efbd138c9d19a3db386cd4497ccd 100644 (file)
@@ -75,10 +75,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
                for_each_sg(chunk->page_list, sg, chunk->nents, i) {
                        page = sg_page(sg);
                        pa = sg_phys(sg);
-                       if (dirty)
-                               put_user_pages_dirty_lock(&page, 1);
-                       else
-                               put_user_page(page);
+                       put_user_pages_dirty_lock(&page, 1, dirty);
                        usnic_dbg("pa: %pa\n", &pa);
                }
                kfree(chunk);
index 87a56039f0ef11192fe3c73e6aaef19b660f417e..e99983f076631737cafec40b71d8a2eda9a8221e 100644 (file)
@@ -63,15 +63,7 @@ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
 static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
                           bool dirty)
 {
-       struct page **p = chunk->plist;
-
-       while (num_pages--) {
-               if (!PageDirty(*p) && dirty)
-                       put_user_pages_dirty_lock(p, 1);
-               else
-                       put_user_page(*p);
-               p++;
-       }
+       put_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
 }
 
 void siw_umem_release(struct siw_umem *umem, bool dirty)
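
The via, ib_umem, hfi1, qib, usnic and siw hunks above all collapse the same caller-side branch: instead of picking between put_user_pages_dirty_lock() and put_user_page()/put_user_pages(), callers now pass the dirty decision as the helper's new third argument. A minimal sketch of the converged call pattern; the wrapper name is hypothetical.

static void example_release_user_pages(struct page **pages,
				       unsigned long npages, bool dirty)
{
	/* One call: the helper dirties the pages first when 'dirty' is set. */
	put_user_pages_dirty_lock(pages, npages, dirty);
}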
index aa8d8425be25e29b53a31b1e5f95c51e036622f4..b83a1d16bd8983a3620ee526cbde3de2e84592cb 100644 (file)
@@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
                if (!page)
                        goto free_pages;
                list_add_tail(&page->lru, &pages);
-               size_remaining -= PAGE_SIZE << compound_order(page);
+               size_remaining -= page_size(page);
                max_order = compound_order(page);
                i++;
        }
@@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 
        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-               sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+               sg_set_page(sg, page, page_size(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }
index a254792d882cc1541578436f5588bfeda59ccf15..1354a157e9afcfc6d743649bbe170b3436e7253c 100644 (file)
@@ -136,8 +136,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
                                           page, off_in_page, tlen);
                        fr_len(fp) += tlen;
                        fp_skb(fp)->data_len += tlen;
-                       fp_skb(fp)->truesize +=
-                                       PAGE_SIZE << compound_order(page);
+                       fp_skb(fp)->truesize += page_size(page);
                } else {
                        BUG_ON(!page);
                        from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
index 3b18fa4d090a301e051f6057a31794a621f01585..26cef65b41e7a167cbc500d672e093e851c59504 100644 (file)
@@ -176,13 +176,13 @@ put_exit:
 }
 
 static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
-               unsigned int page_shift)
+               unsigned int it_page_shift)
 {
        struct page *page;
        unsigned long size = 0;
 
-       if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
-               return size == (1UL << page_shift);
+       if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
+               return size == (1UL << it_page_shift);
 
        page = pfn_to_page(hpa >> PAGE_SHIFT);
        /*
@@ -190,7 +190,7 @@ static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
         * a page we just found. Otherwise the hardware can get access to
         * a bigger memory chunk that it should.
         */
-       return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
+       return page_shift(compound_head(page)) >= it_page_shift;
 }
 
 static inline bool tce_groups_attached(struct tce_container *container)
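
The VFIO hunk above renames the parameter to it_page_shift (avoiding the clash with the new helper) and compares page_shift(compound_head(page)) against it instead of open-coding PAGE_SHIFT + compound_order(). A sketch of what that helper amounts to, assuming it is the straightforward sum (the in-tree definition may differ):

/* Sketch only: the real helper is page_shift() from the mm headers. */
static inline unsigned int example_page_shift(struct page *page)
{
	/* log2 of the bytes covered by this (possibly compound) page. */
	return PAGE_SHIFT + compound_order(page);
}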
index d4e11b2e04f68aa34869594a4c7d07d2a1c5b83c..cec3b4146440033cb65136f384db3b18197ebfc0 100644 (file)
@@ -670,26 +670,6 @@ out:
  * libraries.  There is no binary dependent code anywhere else.
  */
 
-#ifndef STACK_RND_MASK
-#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))    /* 8MB of VA */
-#endif
-
-static unsigned long randomize_stack_top(unsigned long stack_top)
-{
-       unsigned long random_variable = 0;
-
-       if (current->flags & PF_RANDOMIZE) {
-               random_variable = get_random_long();
-               random_variable &= STACK_RND_MASK;
-               random_variable <<= PAGE_SHIFT;
-       }
-#ifdef CONFIG_STACK_GROWSUP
-       return PAGE_ALIGN(stack_top) + random_variable;
-#else
-       return PAGE_ALIGN(stack_top) - random_variable;
-#endif
-}
-
 static int load_elf_binary(struct linux_binprm *bprm)
 {
        struct file *interpreter = NULL; /* to shut gcc up */
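
The binfmt_elf hunk above removes the file-local randomize_stack_top() and its STACK_RND_MASK fallback; per the series summary the helper now lives in generic mm code, so the ELF loader is assumed to keep calling it unchanged. A minimal usage sketch under that assumption:

static unsigned long example_pick_stack_top(void)
{
	/* Same behaviour as before: randomize below (or above) STACK_TOP. */
	return randomize_stack_top(STACK_TOP);
}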
index 1bda2ab6745ba13860d8ec89740d8d31c330010e..814ad2c2ba808030dc95011e5cf41db0a5be1834 100644 (file)
@@ -1100,8 +1100,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
                        err = -ENOMEM;
                        goto error;
                }
+               /* Avoid race with userspace read via bdev */
+               lock_buffer(bhs[n]);
                memset(bhs[n]->b_data, 0, sb->s_blocksize);
                set_buffer_uptodate(bhs[n]);
+               unlock_buffer(bhs[n]);
                mark_buffer_dirty_inode(bhs[n], dir);
 
                n++;
@@ -1158,6 +1161,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
        fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
 
        de = (struct msdos_dir_entry *)bhs[0]->b_data;
+       /* Avoid race with userspace read via bdev */
+       lock_buffer(bhs[0]);
        /* filling the new directory slots ("." and ".." entries) */
        memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
        memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
@@ -1180,6 +1185,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
        de[0].size = de[1].size = 0;
        memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
        set_buffer_uptodate(bhs[0]);
+       unlock_buffer(bhs[0]);
        mark_buffer_dirty_inode(bhs[0], dir);
 
        err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
@@ -1237,11 +1243,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
 
                        /* fill the directory entry */
                        copy = min(size, sb->s_blocksize);
+                       /* Avoid race with userspace read via bdev */
+                       lock_buffer(bhs[n]);
                        memcpy(bhs[n]->b_data, slots, copy);
-                       slots += copy;
-                       size -= copy;
                        set_buffer_uptodate(bhs[n]);
+                       unlock_buffer(bhs[n]);
                        mark_buffer_dirty_inode(bhs[n], dir);
+                       slots += copy;
+                       size -= copy;
                        if (!size)
                                break;
                        n++;
index 265983635f2be7b5d67078cc7127e43ea74cf288..3647c65a0f4826cae4ef4b410b73c71bba6874cb 100644 (file)
@@ -388,8 +388,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
                                err = -ENOMEM;
                                goto error;
                        }
+                       /* Avoid race with userspace read via bdev */
+                       lock_buffer(c_bh);
                        memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
                        set_buffer_uptodate(c_bh);
+                       unlock_buffer(c_bh);
                        mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
                        if (sb->s_flags & SB_SYNCHRONOUS)
                                err = sync_dirty_buffer(c_bh);
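
The FAT hunks above wrap buffer initialisation in lock_buffer()/unlock_buffer() so that a concurrent read through the underlying block device cannot observe a half-initialised, already-uptodate buffer. A small sketch of the pattern; the function name and parameters are hypothetical.

#include <linux/buffer_head.h>

static void example_fill_buffer(struct buffer_head *bh, struct inode *dir,
				const void *src, size_t len)
{
	/* Avoid race with userspace read via bdev */
	lock_buffer(bh);
	memcpy(bh->b_data, src, len);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty_inode(bh, dir);
}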
index 64bf28cf05cde859ab0b72abd4a30cbdc7e3912b..fef457a42882ba64092ae01577d8a3746f03f6e8 100644 (file)
@@ -181,6 +181,9 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        mapping->flags = 0;
        mapping->wb_err = 0;
        atomic_set(&mapping->i_mmap_writable, 0);
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+       atomic_set(&mapping->nr_thps, 0);
+#endif
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;
index 0dadbdbead0fbfef8b0f2373756b3254832ada53..f83de4c6a826e1d146dc389517069efcd2e02b4d 100644 (file)
@@ -3319,7 +3319,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
        }
 
        page = virt_to_head_page(ptr);
-       if (sz > (PAGE_SIZE << compound_order(page)))
+       if (sz > page_size(page))
                return -EINVAL;
 
        pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
index 953990eb70a979b3e9f518151435dff6c87f1c11..1c58859aa59245506b3cea0b5f955434f167a1f5 100644 (file)
@@ -89,8 +89,6 @@ EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
 EXPORT_SYMBOL(jbd2_journal_invalidatepage);
 EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
 EXPORT_SYMBOL(jbd2_journal_force_commit);
-EXPORT_SYMBOL(jbd2_journal_inode_add_write);
-EXPORT_SYMBOL(jbd2_journal_inode_add_wait);
 EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
 EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
 EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
index afc06daee5bb05dd0264188dfa12b156788f857c..bee8498d7792987b2b53e6bd4e76e66da5d4e81c 100644 (file)
@@ -2622,18 +2622,6 @@ done:
        return 0;
 }
 
-int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *jinode)
-{
-       return jbd2_journal_file_inode(handle, jinode,
-                       JI_WRITE_DATA | JI_WAIT_DATA, 0, LLONG_MAX);
-}
-
-int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *jinode)
-{
-       return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA, 0,
-                       LLONG_MAX);
-}
-
 int jbd2_journal_inode_ranged_write(handle_t *handle,
                struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
 {
index 0c335b51043dcf922e7c71d66a532f424131037d..f9baefc76cf9b682f49fba84fdb395c46399f17d 100644 (file)
@@ -5993,6 +5993,7 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
        struct buffer_head *data_alloc_bh = NULL;
        struct ocfs2_dinode *di;
        struct ocfs2_truncate_log *tl;
+       struct ocfs2_journal *journal = osb->journal;
 
        BUG_ON(inode_trylock(tl_inode));
 
@@ -6013,6 +6014,20 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
                goto out;
        }
 
+       /* Appending to the truncate log (TA) and flushing the truncate log
+        * (TF) are two separate transactions. Both can be committed but not
+        * yet checkpointed. If a crash occurs, both transactions will be
+        * replayed, with several clusters already released to the global
+        * bitmap. Replaying the truncate log then results in a cluster
+        * double free.
+        */
+       jbd2_journal_lock_updates(journal->j_journal);
+       status = jbd2_journal_flush(journal->j_journal);
+       jbd2_journal_unlock_updates(journal->j_journal);
+       if (status < 0) {
+               mlog_errno(status);
+               goto out;
+       }
+
        data_alloc_inode = ocfs2_get_system_file_inode(osb,
                                                       GLOBAL_BITMAP_SYSTEM_INODE,
                                                       OCFS2_INVALID_SLOT);
@@ -6792,6 +6807,8 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
                              struct page *page, int zero, u64 *phys)
 {
        int ret, partial = 0;
+       loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from;
+       loff_t length = to - from;
 
        ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
        if (ret)
@@ -6811,7 +6828,8 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
        if (ret < 0)
                mlog_errno(ret);
        else if (ocfs2_should_order_data(inode)) {
-               ret = ocfs2_jbd2_file_inode(handle, inode);
+               ret = ocfs2_jbd2_inode_add_write(handle, inode,
+                                                start_byte, length);
                if (ret < 0)
                        mlog_errno(ret);
        }
index a4c905d6b5755d81460ff413caf1a6fe6c955bb4..8de1c9d644f62ca203ad33d7b5cda3613309f866 100644 (file)
@@ -942,7 +942,8 @@ static void ocfs2_write_failure(struct inode *inode,
 
                if (tmppage && page_has_buffers(tmppage)) {
                        if (ocfs2_should_order_data(inode))
-                               ocfs2_jbd2_file_inode(wc->w_handle, inode);
+                               ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
+                                                          user_pos, user_len);
 
                        block_commit_write(tmppage, from, to);
                }
@@ -2023,8 +2024,14 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
                }
 
                if (page_has_buffers(tmppage)) {
-                       if (handle && ocfs2_should_order_data(inode))
-                               ocfs2_jbd2_file_inode(handle, inode);
+                       if (handle && ocfs2_should_order_data(inode)) {
+                               loff_t start_byte =
+                                       ((loff_t)tmppage->index << PAGE_SHIFT) +
+                                       from;
+                               loff_t length = to - from;
+                               ocfs2_jbd2_inode_add_write(handle, inode,
+                                                          start_byte, length);
+                       }
                        block_commit_write(tmppage, from, to);
                }
        }
index 429e6a8359a55a82671d8e950d9a53ee50f7598c..eaf042feaf5e0f4725a045d819e6d4c2e4917c8b 100644 (file)
@@ -231,14 +231,6 @@ static int blockcheck_u64_get(void *data, u64 *val)
 }
 DEFINE_SIMPLE_ATTRIBUTE(blockcheck_fops, blockcheck_u64_get, NULL, "%llu\n");
 
-static struct dentry *blockcheck_debugfs_create(const char *name,
-                                               struct dentry *parent,
-                                               u64 *value)
-{
-       return debugfs_create_file(name, S_IFREG | S_IRUSR, parent, value,
-                                  &blockcheck_fops);
-}
-
 static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
 {
        if (stats) {
@@ -250,16 +242,20 @@ static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
 static void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
                                           struct dentry *parent)
 {
-       stats->b_debug_dir = debugfs_create_dir("blockcheck", parent);
+       struct dentry *dir;
+
+       dir = debugfs_create_dir("blockcheck", parent);
+       stats->b_debug_dir = dir;
+
+       debugfs_create_file("blocks_checked", S_IFREG | S_IRUSR, dir,
+                           &stats->b_check_count, &blockcheck_fops);
 
-       blockcheck_debugfs_create("blocks_checked", stats->b_debug_dir,
-                                 &stats->b_check_count);
+       debugfs_create_file("checksums_failed", S_IFREG | S_IRUSR, dir,
+                           &stats->b_failure_count, &blockcheck_fops);
 
-       blockcheck_debugfs_create("checksums_failed", stats->b_debug_dir,
-                                 &stats->b_failure_count);
+       debugfs_create_file("ecc_recoveries", S_IFREG | S_IRUSR, dir,
+                           &stats->b_recover_count, &blockcheck_fops);
 
-       blockcheck_debugfs_create("ecc_recoveries", stats->b_debug_dir,
-                                 &stats->b_recover_count);
 }
 #else
 static inline void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
index f1b613327ac8faab050e19921aac9f1ef585569d..a368350d4c27931a21963379df64d991e748dfcf 100644 (file)
@@ -225,10 +225,6 @@ struct o2hb_region {
        unsigned int            hr_region_num;
 
        struct dentry           *hr_debug_dir;
-       struct dentry           *hr_debug_livenodes;
-       struct dentry           *hr_debug_regnum;
-       struct dentry           *hr_debug_elapsed_time;
-       struct dentry           *hr_debug_pinned;
        struct o2hb_debug_buf   *hr_db_livenodes;
        struct o2hb_debug_buf   *hr_db_regnum;
        struct o2hb_debug_buf   *hr_db_elapsed_time;
@@ -1394,21 +1390,20 @@ void o2hb_exit(void)
        kfree(o2hb_db_failedregions);
 }
 
-static struct dentry *o2hb_debug_create(const char *name, struct dentry *dir,
-                                       struct o2hb_debug_buf **db, int db_len,
-                                       int type, int size, int len, void *data)
+static void o2hb_debug_create(const char *name, struct dentry *dir,
+                             struct o2hb_debug_buf **db, int db_len, int type,
+                             int size, int len, void *data)
 {
        *db = kmalloc(db_len, GFP_KERNEL);
        if (!*db)
-               return NULL;
+               return;
 
        (*db)->db_type = type;
        (*db)->db_size = size;
        (*db)->db_len = len;
        (*db)->db_data = data;
 
-       return debugfs_create_file(name, S_IFREG|S_IRUSR, dir, *db,
-                                  &o2hb_debug_fops);
+       debugfs_create_file(name, S_IFREG|S_IRUSR, dir, *db, &o2hb_debug_fops);
 }
 
 static void o2hb_debug_init(void)
@@ -1525,11 +1520,7 @@ static void o2hb_region_release(struct config_item *item)
 
        kfree(reg->hr_slots);
 
-       debugfs_remove(reg->hr_debug_livenodes);
-       debugfs_remove(reg->hr_debug_regnum);
-       debugfs_remove(reg->hr_debug_elapsed_time);
-       debugfs_remove(reg->hr_debug_pinned);
-       debugfs_remove(reg->hr_debug_dir);
+       debugfs_remove_recursive(reg->hr_debug_dir);
        kfree(reg->hr_db_livenodes);
        kfree(reg->hr_db_regnum);
        kfree(reg->hr_db_elapsed_time);
@@ -1988,69 +1979,33 @@ static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group
                : NULL;
 }
 
-static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir)
+static void o2hb_debug_region_init(struct o2hb_region *reg,
+                                  struct dentry *parent)
 {
-       int ret = -ENOMEM;
+       struct dentry *dir;
 
-       reg->hr_debug_dir =
-               debugfs_create_dir(config_item_name(&reg->hr_item), dir);
-       if (!reg->hr_debug_dir) {
-               mlog_errno(ret);
-               goto bail;
-       }
+       dir = debugfs_create_dir(config_item_name(&reg->hr_item), parent);
+       reg->hr_debug_dir = dir;
 
-       reg->hr_debug_livenodes =
-                       o2hb_debug_create(O2HB_DEBUG_LIVENODES,
-                                         reg->hr_debug_dir,
-                                         &(reg->hr_db_livenodes),
-                                         sizeof(*(reg->hr_db_livenodes)),
-                                         O2HB_DB_TYPE_REGION_LIVENODES,
-                                         sizeof(reg->hr_live_node_bitmap),
-                                         O2NM_MAX_NODES, reg);
-       if (!reg->hr_debug_livenodes) {
-               mlog_errno(ret);
-               goto bail;
-       }
+       o2hb_debug_create(O2HB_DEBUG_LIVENODES, dir, &(reg->hr_db_livenodes),
+                         sizeof(*(reg->hr_db_livenodes)),
+                         O2HB_DB_TYPE_REGION_LIVENODES,
+                         sizeof(reg->hr_live_node_bitmap), O2NM_MAX_NODES,
+                         reg);
 
-       reg->hr_debug_regnum =
-                       o2hb_debug_create(O2HB_DEBUG_REGION_NUMBER,
-                                         reg->hr_debug_dir,
-                                         &(reg->hr_db_regnum),
-                                         sizeof(*(reg->hr_db_regnum)),
-                                         O2HB_DB_TYPE_REGION_NUMBER,
-                                         0, O2NM_MAX_NODES, reg);
-       if (!reg->hr_debug_regnum) {
-               mlog_errno(ret);
-               goto bail;
-       }
+       o2hb_debug_create(O2HB_DEBUG_REGION_NUMBER, dir, &(reg->hr_db_regnum),
+                         sizeof(*(reg->hr_db_regnum)),
+                         O2HB_DB_TYPE_REGION_NUMBER, 0, O2NM_MAX_NODES, reg);
 
-       reg->hr_debug_elapsed_time =
-                       o2hb_debug_create(O2HB_DEBUG_REGION_ELAPSED_TIME,
-                                         reg->hr_debug_dir,
-                                         &(reg->hr_db_elapsed_time),
-                                         sizeof(*(reg->hr_db_elapsed_time)),
-                                         O2HB_DB_TYPE_REGION_ELAPSED_TIME,
-                                         0, 0, reg);
-       if (!reg->hr_debug_elapsed_time) {
-               mlog_errno(ret);
-               goto bail;
-       }
+       o2hb_debug_create(O2HB_DEBUG_REGION_ELAPSED_TIME, dir,
+                         &(reg->hr_db_elapsed_time),
+                         sizeof(*(reg->hr_db_elapsed_time)),
+                         O2HB_DB_TYPE_REGION_ELAPSED_TIME, 0, 0, reg);
 
-       reg->hr_debug_pinned =
-                       o2hb_debug_create(O2HB_DEBUG_REGION_PINNED,
-                                         reg->hr_debug_dir,
-                                         &(reg->hr_db_pinned),
-                                         sizeof(*(reg->hr_db_pinned)),
-                                         O2HB_DB_TYPE_REGION_PINNED,
-                                         0, 0, reg);
-       if (!reg->hr_debug_pinned) {
-               mlog_errno(ret);
-               goto bail;
-       }
+       o2hb_debug_create(O2HB_DEBUG_REGION_PINNED, dir, &(reg->hr_db_pinned),
+                         sizeof(*(reg->hr_db_pinned)),
+                         O2HB_DB_TYPE_REGION_PINNED, 0, 0, reg);
 
-       ret = 0;
-bail:
-       return ret;
 }
 
 static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group,
@@ -2106,11 +2061,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
        if (ret)
                goto unregister_handler;
 
-       ret = o2hb_debug_region_init(reg, o2hb_debug_dir);
-       if (ret) {
-               config_item_put(&reg->hr_item);
-               goto unregister_handler;
-       }
+       o2hb_debug_region_init(reg, o2hb_debug_dir);
 
        return &reg->hr_item;
 
index 784426dee56c8adf0f2489f8a00cebcbe06979d5..bdef72c0f0991c7c74b2322be331b7fd88c54a3c 100644 (file)
@@ -3636,7 +3636,7 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
        int i, j, num_used;
        u32 major_hash;
        struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf;
-       struct ocfs2_dx_entry_list *orig_list, *new_list, *tmp_list;
+       struct ocfs2_dx_entry_list *orig_list, *tmp_list;
        struct ocfs2_dx_entry *dx_entry;
 
        tmp_list = &tmp_dx_leaf->dl_list;
@@ -3645,7 +3645,6 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
                orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data;
                orig_list = &orig_dx_leaf->dl_list;
                new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data;
-               new_list = &new_dx_leaf->dl_list;
 
                num_used = le16_to_cpu(orig_list->de_num_used);
 
index 69a429b625cce6feb6868ae0a61cd37d7d08fdaf..aaf24548b02a10f96d1725686cd37d08363d6eb5 100644 (file)
@@ -142,7 +142,6 @@ struct dlm_ctxt
        atomic_t res_tot_count;
        atomic_t res_cur_count;
 
-       struct dlm_debug_ctxt *dlm_debug_ctxt;
        struct dentry *dlm_debugfs_subroot;
 
        /* NOTE: Next three are protected by dlm_domain_lock */
index a4b58ba999278ae16373edd5544bf84897a41432..4d0b452012b252f4bc3be3eb222b997fb6f50556 100644 (file)
@@ -853,67 +853,34 @@ static const struct file_operations debug_state_fops = {
 /* files in subroot */
 void dlm_debug_init(struct dlm_ctxt *dlm)
 {
-       struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
-
        /* for dumping dlm_ctxt */
-       dc->debug_state_dentry = debugfs_create_file(DLM_DEBUGFS_DLM_STATE,
-                                                    S_IFREG|S_IRUSR,
-                                                    dlm->dlm_debugfs_subroot,
-                                                    dlm, &debug_state_fops);
+       debugfs_create_file(DLM_DEBUGFS_DLM_STATE, S_IFREG|S_IRUSR,
+                           dlm->dlm_debugfs_subroot, dlm, &debug_state_fops);
 
        /* for dumping lockres */
-       dc->debug_lockres_dentry =
-                       debugfs_create_file(DLM_DEBUGFS_LOCKING_STATE,
-                                           S_IFREG|S_IRUSR,
-                                           dlm->dlm_debugfs_subroot,
-                                           dlm, &debug_lockres_fops);
+       debugfs_create_file(DLM_DEBUGFS_LOCKING_STATE, S_IFREG|S_IRUSR,
+                           dlm->dlm_debugfs_subroot, dlm, &debug_lockres_fops);
 
        /* for dumping mles */
-       dc->debug_mle_dentry = debugfs_create_file(DLM_DEBUGFS_MLE_STATE,
-                                                  S_IFREG|S_IRUSR,
-                                                  dlm->dlm_debugfs_subroot,
-                                                  dlm, &debug_mle_fops);
+       debugfs_create_file(DLM_DEBUGFS_MLE_STATE, S_IFREG|S_IRUSR,
+                           dlm->dlm_debugfs_subroot, dlm, &debug_mle_fops);
 
        /* for dumping lockres on the purge list */
-       dc->debug_purgelist_dentry =
-                       debugfs_create_file(DLM_DEBUGFS_PURGE_LIST,
-                                           S_IFREG|S_IRUSR,
-                                           dlm->dlm_debugfs_subroot,
-                                           dlm, &debug_purgelist_fops);
-}
-
-void dlm_debug_shutdown(struct dlm_ctxt *dlm)
-{
-       struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
-
-       if (dc) {
-               debugfs_remove(dc->debug_purgelist_dentry);
-               debugfs_remove(dc->debug_mle_dentry);
-               debugfs_remove(dc->debug_lockres_dentry);
-               debugfs_remove(dc->debug_state_dentry);
-               kfree(dc);
-               dc = NULL;
-       }
+       debugfs_create_file(DLM_DEBUGFS_PURGE_LIST, S_IFREG|S_IRUSR,
+                           dlm->dlm_debugfs_subroot, dlm,
+                           &debug_purgelist_fops);
 }
 
 /* subroot - domain dir */
-int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
+void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
 {
-       dlm->dlm_debug_ctxt = kzalloc(sizeof(struct dlm_debug_ctxt),
-                                     GFP_KERNEL);
-       if (!dlm->dlm_debug_ctxt) {
-               mlog_errno(-ENOMEM);
-               return -ENOMEM;
-       }
-
        dlm->dlm_debugfs_subroot = debugfs_create_dir(dlm->name,
                                                      dlm_debugfs_root);
-       return 0;
 }
 
 void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
 {
-       debugfs_remove(dlm->dlm_debugfs_subroot);
+       debugfs_remove_recursive(dlm->dlm_debugfs_subroot);
 }
 
 /* debugfs root */
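
The o2hb and dlm hunks above stop storing and error-checking the dentries returned by debugfs_create_*(), and tear whole directories down with debugfs_remove_recursive() instead of removing each file by hand. A minimal sketch of the simplified lifecycle; the names here are illustrative, not taken from the tree.

#include <linux/debugfs.h>

static struct dentry *example_debug_dir;

static void example_debug_init(void *data, const struct file_operations *fops)
{
	example_debug_dir = debugfs_create_dir("example", NULL);
	/* Fire and forget: debugfs failures are not fatal and not checked. */
	debugfs_create_file("state", S_IFREG | S_IRUSR, example_debug_dir,
			    data, fops);
}

static void example_debug_exit(void)
{
	debugfs_remove_recursive(example_debug_dir);
}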
index 7d0c7c9013ce15068758b58e21d222a076b37fd1..f8fd8680a4b6c83259da6901a605095f5f5ef67a 100644 (file)
@@ -14,13 +14,6 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle);
 
 #ifdef CONFIG_DEBUG_FS
 
-struct dlm_debug_ctxt {
-       struct dentry *debug_state_dentry;
-       struct dentry *debug_lockres_dentry;
-       struct dentry *debug_mle_dentry;
-       struct dentry *debug_purgelist_dentry;
-};
-
 struct debug_lockres {
        int dl_len;
        char *dl_buf;
@@ -29,9 +22,8 @@ struct debug_lockres {
 };
 
 void dlm_debug_init(struct dlm_ctxt *dlm);
-void dlm_debug_shutdown(struct dlm_ctxt *dlm);
 
-int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm);
+void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm);
 void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm);
 
 void dlm_create_debugfs_root(void);
@@ -42,12 +34,8 @@ void dlm_destroy_debugfs_root(void);
 static inline void dlm_debug_init(struct dlm_ctxt *dlm)
 {
 }
-static inline void dlm_debug_shutdown(struct dlm_ctxt *dlm)
-{
-}
-static inline int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
+static inline void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
 {
-       return 0;
 }
 static inline void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
 {
index 7338b5d4647c9d75b5429e232859a6b12a32c1f0..ee6f459f97706a88a3cba2b988f0059690a487d9 100644 (file)
@@ -387,7 +387,6 @@ static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
 static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
 {
        dlm_unregister_domain_handlers(dlm);
-       dlm_debug_shutdown(dlm);
        dlm_complete_thread(dlm);
        dlm_complete_recovery_thread(dlm);
        dlm_destroy_dlm_worker(dlm);
@@ -1938,7 +1937,6 @@ bail:
 
        if (status) {
                dlm_unregister_domain_handlers(dlm);
-               dlm_debug_shutdown(dlm);
                dlm_complete_thread(dlm);
                dlm_complete_recovery_thread(dlm);
                dlm_destroy_dlm_worker(dlm);
@@ -1992,9 +1990,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
        dlm->key = key;
        dlm->node_num = o2nm_this_node();
 
-       ret = dlm_create_debugfs_subroot(dlm);
-       if (ret < 0)
-               goto leave;
+       dlm_create_debugfs_subroot(dlm);
 
        spin_lock_init(&dlm->spinlock);
        spin_lock_init(&dlm->master_lock);
@@ -2056,6 +2052,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
        mlog(0, "context init: refcount %u\n",
                  kref_read(&dlm->dlm_refs));
 
+       ret = 0;
 leave:
        if (ret < 0 && dlm) {
                if (dlm->master_hash)
index e78657742bd893e09c149372589d06ca2329e3c8..3883633e82eb9a200e14d48c090d5e5e36814124 100644 (file)
@@ -90,7 +90,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
        enum dlm_status status;
        int actions = 0;
        int in_use;
-        u8 owner;
+       u8 owner;
+       int recovery_wait = 0;
 
        mlog(0, "master_node = %d, valblk = %d\n", master_node,
             flags & LKM_VALBLK);
@@ -193,9 +194,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                }
                if (flags & LKM_CANCEL)
                        lock->cancel_pending = 0;
-               else
-                       lock->unlock_pending = 0;
-
+               else {
+                       if (!lock->unlock_pending)
+                               recovery_wait = 1;
+                       else
+                               lock->unlock_pending = 0;
+               }
        }
 
        /* get an extra ref on lock.  if we are just switching
@@ -229,6 +233,17 @@ leave:
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);
 
+       if (recovery_wait) {
+               spin_lock(&res->spinlock);
+               /* The unlock request will succeed immediately after the owner
+                * dies, and the lock has already been removed from the grant
+                * list. We have to wait for RECOVERING to finish, or we miss
+                * the chance to purge the lockres, since the removal is much
+                * faster than the RECOVERING process.
+                */
+               __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
+               spin_unlock(&res->spinlock);
+       }
+
        /* let the caller's final dlm_lock_put handle the actual kfree */
        if (actions & DLM_UNLOCK_FREE_LOCK) {
                /* this should always be coupled with list removal */
index 14207234fa3d72934b5c98472500f7752ca0fc69..6e774c5ea13b37c0210785fd4fcf14aaa00dd0fa 100644 (file)
@@ -2508,9 +2508,7 @@ bail:
                        ocfs2_inode_unlock(inode, ex);
        }
 
-       if (local_bh)
-               brelse(local_bh);
-
+       brelse(local_bh);
        return status;
 }
 
@@ -2593,8 +2591,7 @@ int ocfs2_inode_lock_atime(struct inode *inode,
                *level = 1;
                if (ocfs2_should_update_atime(inode, vfsmnt))
                        ocfs2_update_inode_atime(inode, bh);
-               if (bh)
-                       brelse(bh);
+               brelse(bh);
        } else
                *level = 0;
 
@@ -3012,8 +3009,6 @@ struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
 
        kref_init(&dlm_debug->d_refcnt);
        INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
-       dlm_debug->d_locking_state = NULL;
-       dlm_debug->d_locking_filter = NULL;
        dlm_debug->d_filter_secs = 0;
 out:
        return dlm_debug;
@@ -3282,27 +3277,19 @@ static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
 {
        struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
 
-       dlm_debug->d_locking_state = debugfs_create_file("locking_state",
-                                                        S_IFREG|S_IRUSR,
-                                                        osb->osb_debug_root,
-                                                        osb,
-                                                        &ocfs2_dlm_debug_fops);
+       debugfs_create_file("locking_state", S_IFREG|S_IRUSR,
+                           osb->osb_debug_root, osb, &ocfs2_dlm_debug_fops);
 
-       dlm_debug->d_locking_filter = debugfs_create_u32("locking_filter",
-                                               0600,
-                                               osb->osb_debug_root,
-                                               &dlm_debug->d_filter_secs);
+       debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
+                          &dlm_debug->d_filter_secs);
 }
 
 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
 {
        struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
 
-       if (dlm_debug) {
-               debugfs_remove(dlm_debug->d_locking_state);
-               debugfs_remove(dlm_debug->d_locking_filter);
+       if (dlm_debug)
                ocfs2_put_dlm_debug(dlm_debug);
-       }
 }
 
 int ocfs2_dlm_init(struct ocfs2_super *osb)
index e66a249fe07c125e5d6c3eab5a8a74a6cc7b85d1..e3e2d1b2af51a267cb056efad36042ee3ee88bf1 100644 (file)
@@ -590,8 +590,7 @@ int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
                        *extent_flags = rec->e_flags;
        }
 out:
-       if (eb_bh)
-               brelse(eb_bh);
+       brelse(eb_bh);
        return ret;
 }
 
index 4435df3e5adb975fb15042d0c5892e5ff73b4281..2e982db3e1ae4b55eb8bc741b9ca9400c791afad 100644 (file)
@@ -706,7 +706,9 @@ leave:
  * Thus, we need to explicitly order the zeroed pages.
  */
 static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
-                                               struct buffer_head *di_bh)
+                                                     struct buffer_head *di_bh,
+                                                     loff_t start_byte,
+                                                     loff_t length)
 {
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;
@@ -722,7 +724,7 @@ static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
                goto out;
        }
 
-       ret = ocfs2_jbd2_file_inode(handle, inode);
+       ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
@@ -761,7 +763,9 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
        BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
        BUG_ON(abs_from & (inode->i_blkbits - 1));
 
-       handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
+       handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
+                                                     abs_from,
+                                                     abs_to - abs_from);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
@@ -2126,7 +2130,6 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = d_inode(dentry);
        struct buffer_head *di_bh = NULL;
-       loff_t end;
 
        /*
         * We start with a read level meta lock and only jump to an ex
@@ -2190,8 +2193,6 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
                        }
                }
 
-               end = pos + count;
-
                ret = ocfs2_check_range_for_refcount(inode, pos, count);
                if (ret == 1) {
                        ocfs2_inode_unlock(inode, meta_level);
index 7ad9d65908183841f5272fac3ed79fc3a6a46d82..7c9dfd50c1c176f01a2f61b2f2b391466bb9bddd 100644 (file)
@@ -534,7 +534,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
         */
        mlog_bug_on_msg(!!(fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) !=
                        !!(args->fi_flags & OCFS2_FI_FLAG_SYSFILE),
-                       "Inode %llu: system file state is ambigous\n",
+                       "Inode %llu: system file state is ambiguous\n",
                        (unsigned long long)args->fi_blkno);
 
        if (S_ISCHR(le16_to_cpu(fe->i_mode)) ||
index c0fe6ed08ab12dd5715eb2b6f766152943602830..3103ba7f97a28e4723bffcd40dc016af129b84f9 100644 (file)
@@ -144,7 +144,6 @@ static inline void ocfs2_ci_set_new(struct ocfs2_super *osb,
 void ocfs2_orphan_scan_init(struct ocfs2_super *osb);
 void ocfs2_orphan_scan_start(struct ocfs2_super *osb);
 void ocfs2_orphan_scan_stop(struct ocfs2_super *osb);
-void ocfs2_orphan_scan_exit(struct ocfs2_super *osb);
 
 void ocfs2_complete_recovery(struct work_struct *work);
 void ocfs2_wait_for_recovery(struct ocfs2_super *osb);
@@ -232,8 +231,8 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode)
  *                          ocfs2_journal_access_*() unless you intend to
  *                          manage the checksum by hand.
  *  ocfs2_journal_dirty    - Mark a journalled buffer as having dirty data.
- *  ocfs2_jbd2_file_inode  - Mark an inode so that its data goes out before
- *                           the current handle commits.
+ *  ocfs2_jbd2_inode_add_write  - Mark a range of an inode so that its data goes
+ *                                out before the current handle commits.
  */
 
 /* You must always start_trans with a number of buffs > 0, but it's
@@ -441,7 +440,7 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir,
  * previous dirblock update in the free list */
 static inline int ocfs2_link_credits(struct super_block *sb)
 {
-       return 2*OCFS2_INODE_UPDATE_CREDITS + 4 +
+       return 2 * OCFS2_INODE_UPDATE_CREDITS + 4 +
               ocfs2_quota_trans_credits(sb);
 }
 
@@ -575,37 +574,12 @@ static inline int ocfs2_calc_bg_discontig_credits(struct super_block *sb)
        return ocfs2_extent_recs_per_gd(sb);
 }
 
-static inline int ocfs2_calc_tree_trunc_credits(struct super_block *sb,
-                                               unsigned int clusters_to_del,
-                                               struct ocfs2_dinode *fe,
-                                               struct ocfs2_extent_list *last_el)
+static inline int ocfs2_jbd2_inode_add_write(handle_t *handle, struct inode *inode,
+                                            loff_t start_byte, loff_t length)
 {
-       /* for dinode + all headers in this pass + update to next leaf */
-       u16 next_free = le16_to_cpu(last_el->l_next_free_rec);
-       u16 tree_depth = le16_to_cpu(fe->id2.i_list.l_tree_depth);
-       int credits = 1 + tree_depth + 1;
-       int i;
-
-       i = next_free - 1;
-       BUG_ON(i < 0);
-
-       /* We may be deleting metadata blocks, so metadata alloc dinode +
-          one desc. block for each possible delete. */
-       if (tree_depth && next_free == 1 &&
-           ocfs2_rec_clusters(last_el, &last_el->l_recs[i]) == clusters_to_del)
-               credits += 1 + tree_depth;
-
-       /* update to the truncate log. */
-       credits += OCFS2_TRUNCATE_LOG_UPDATE;
-
-       credits += ocfs2_quota_trans_credits(sb);
-
-       return credits;
-}
-
-static inline int ocfs2_jbd2_file_inode(handle_t *handle, struct inode *inode)
-{
-       return jbd2_journal_inode_add_write(handle, &OCFS2_I(inode)->ip_jinode);
+       return jbd2_journal_inode_ranged_write(handle,
+                                              &OCFS2_I(inode)->ip_jinode,
+                                              start_byte, length);
 }
 
 static inline int ocfs2_begin_ordered_truncate(struct inode *inode,
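
The jbd2 and ocfs2 hunks above retire the whole-file jbd2_journal_inode_add_write()/..._add_wait() exports: ocfs2_jbd2_file_inode() becomes ocfs2_jbd2_inode_add_write(), which forwards an explicit byte range to jbd2_journal_inode_ranged_write(). A hedged sketch of a caller computing that range from a page, mirroring the ocfs2_map_and_dirty_page() hunk earlier; the function name is hypothetical.

static int example_order_page_range(handle_t *handle, struct inode *inode,
				    struct page *page, unsigned int from,
				    unsigned int to)
{
	loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from;
	loff_t length = to - from;

	return ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
}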
index 6f8e1c4fdb9c3684e73d10a77d19a00903c768a5..8ea51cf27b9707a25468fdf7a96029f666d52197 100644 (file)
@@ -2486,7 +2486,6 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
        struct inode *inode = NULL;
        struct inode *orphan_dir = NULL;
        struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
-       struct ocfs2_dinode *di = NULL;
        handle_t *handle = NULL;
        char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
        struct buffer_head *parent_di_bh = NULL;
@@ -2552,7 +2551,6 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
                goto leave;
        }
 
-       di = (struct ocfs2_dinode *)new_di_bh->b_data;
        status = ocfs2_orphan_add(osb, handle, inode, new_di_bh, orphan_name,
                                  &orphan_insert, orphan_dir, false);
        if (status < 0) {
index fddbbd60f4343f845a1cd0bf516eb84e1f09ada8..9150cfa4df7dc6bf4a0744a37eb7cfe9ef47801b 100644 (file)
@@ -223,8 +223,6 @@ struct ocfs2_orphan_scan {
 
 struct ocfs2_dlm_debug {
        struct kref d_refcnt;
-       struct dentry *d_locking_state;
-       struct dentry *d_locking_filter;
        u32 d_filter_secs;
        struct list_head d_lockres_tracking;
 };
@@ -401,7 +399,6 @@ struct ocfs2_super
        struct ocfs2_dlm_debug *osb_dlm_debug;
 
        struct dentry *osb_debug_root;
-       struct dentry *osb_ctxt;
 
        wait_queue_head_t recovery_event;
 
index 8b2f39506648c0ca5ffbf8ea8bdf23d474f2ecaf..c81e86c62380788f888f297bd74741d70fd1a63c 100644 (file)
@@ -1080,10 +1080,8 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
        osb->osb_debug_root = debugfs_create_dir(osb->uuid_str,
                                                 ocfs2_debugfs_root);
 
-       osb->osb_ctxt = debugfs_create_file("fs_state", S_IFREG|S_IRUSR,
-                                           osb->osb_debug_root,
-                                           osb,
-                                           &ocfs2_osb_debug_fops);
+       debugfs_create_file("fs_state", S_IFREG|S_IRUSR, osb->osb_debug_root,
+                           osb, &ocfs2_osb_debug_fops);
 
        if (ocfs2_meta_ecc(osb))
                ocfs2_blockcheck_stats_debugfs_install( &osb->osb_ecc_stats,
@@ -1861,8 +1859,6 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
 
        kset_unregister(osb->osb_dev_kset);
 
-       debugfs_remove(osb->osb_ctxt);
-
        /* Orphan scan should be stopped as early as possible */
        ocfs2_orphan_scan_stop(osb);
 
@@ -1918,7 +1914,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
                ocfs2_dlm_shutdown(osb, hangup_needed);
 
        ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
-       debugfs_remove(osb->osb_debug_root);
+       debugfs_remove_recursive(osb->osb_debug_root);
 
        if (hangup_needed)
                ocfs2_cluster_hangup(osb->uuid_str, strlen(osb->uuid_str));
index a59abe3c669ae13ac7f3fa68bc0fb5f54a781ee4..c60cd22cc052a41d2e0b2b1f954051268ae10bf8 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -818,6 +818,14 @@ static int do_dentry_open(struct file *f,
                if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
                        return -EINVAL;
        }
+
+       /*
+        * XXX: Huge page cache doesn't support writing yet. Drop all page
+        * cache for this file before processing writes.
+        */
+       if ((f->f_mode & FMODE_WRITE) && filemap_nr_thps(inode->i_mapping))
+               truncate_pagecache(inode, 0);
+
        return 0;
 
 cleanup_all:
index 465ea0153b2a34df85207fbb4971cda0ea72af57..ac9247371871d9069687519b0cb06a21f30330e5 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/mmzone.h>
 #include <linux/proc_fs.h>
 #include <linux/percpu.h>
-#include <linux/quicklist.h>
 #include <linux/seq_file.h>
 #include <linux/swap.h>
 #include <linux/vmstat.h>
@@ -106,9 +105,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                   global_zone_page_state(NR_KERNEL_STACK_KB));
        show_val_kb(m, "PageTables:     ",
                    global_zone_page_state(NR_PAGETABLE));
-#ifdef CONFIG_QUICKLIST
-       show_val_kb(m, "Quicklists:     ", quicklist_total_size());
-#endif
 
        show_val_kb(m, "NFS_Unstable:   ",
                    global_node_page_state(NR_UNSTABLE_NFS));
@@ -136,6 +132,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                    global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
        show_val_kb(m, "ShmemPmdMapped: ",
                    global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
+       show_val_kb(m, "FileHugePages:  ",
+                   global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR);
+       show_val_kb(m, "FilePmdMapped:  ",
+                   global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR);
 #endif
 
 #ifdef CONFIG_CMA
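
These two rows only appear in /proc/meminfo on CONFIG_TRANSPARENT_HUGEPAGE kernels. A minimal userspace sketch that reads them back; it assumes a kernel with this series applied and simply prints nothing on older kernels:

/* Read the new FileHugePages/FilePmdMapped rows back from /proc/meminfo. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/meminfo", "r");
        char line[256];

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                if (!strncmp(line, "FileHugePages:", 14) ||
                    !strncmp(line, "FilePmdMapped:", 14))
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}
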
index bf43d1d600592680a8ce82635dab2792d9990bd4..9442631fd4afcb16e3a09d6a661ebdc630fcbe87 100644 (file)
@@ -417,6 +417,7 @@ struct mem_size_stats {
        unsigned long lazyfree;
        unsigned long anonymous_thp;
        unsigned long shmem_thp;
+       unsigned long file_thp;
        unsigned long swap;
        unsigned long shared_hugetlb;
        unsigned long private_hugetlb;
@@ -461,7 +462,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
                bool compound, bool young, bool dirty, bool locked)
 {
-       int i, nr = compound ? 1 << compound_order(page) : 1;
+       int i, nr = compound ? compound_nr(page) : 1;
        unsigned long size = nr * PAGE_SIZE;
 
        /*
@@ -588,7 +589,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
        else if (is_zone_device_page(page))
                /* pass */;
        else
-               VM_BUG_ON_PAGE(1, page);
+               mss->file_thp += HPAGE_PMD_SIZE;
        smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
 }
 #else
@@ -809,6 +810,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
        SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
        SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
        SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
+       SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
        SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
        seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
                                  mss->private_hugetlb >> 10, 7);
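
The per-VMA accounting can be read back from /proc/<pid>/smaps. A small sketch that totals the new FilePmdMapped rows for the current process; values are reported in kB and the field only exists on kernels carrying this change:

/* Sum FilePmdMapped across all VMAs of the current process. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/self/smaps", "r");
        char line[512];
        unsigned long kb, total = 0;

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "FilePmdMapped: %lu kB", &kb) == 1)
                        total += kb;
        }
        fclose(f);
        printf("FilePmdMapped total: %lu kB\n", total);
        return 0;
}
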
index 8476175c07e7ec54aee9943e86371997ece0a348..6f8cc06ee44e3ec4b04a739821f4ad53e16241bd 100644 (file)
@@ -102,11 +102,6 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
        __free_page(pte_page);
 }
 
-#else /* CONFIG_MMU */
-
-/* This is enough for a nommu architecture */
-#define check_pgt_cache()          do { } while (0)
-
 #endif /* CONFIG_MMU */
 
 #endif /* __ASM_GENERIC_PGALLOC_H */
index 75d9d68a6de7a7f240acf48503533a09e156ca8d..818691846c909354375b7220f5b0c943fd9e7031 100644 (file)
@@ -1002,9 +1002,8 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
  * need this). If THP is not enabled, the pmd can't go away under the
  * code even if MADV_DONTNEED runs, but if THP is enabled we need to
  * run a pmd_trans_unstable before walking the ptes after
- * split_huge_page_pmd returns (because it may have run when the pmd
- * become null, but then a page fault can map in a THP and not a
- * regular page).
+ * split_huge_pmd returns (because it may have run when the pmd become
+ * null, but then a page fault can map in a THP and not a regular page).
  */
 static inline int pmd_trans_unstable(pmd_t *pmd)
 {
@@ -1126,7 +1125,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 static inline void init_espfix_bsp(void) { }
 #endif
 
-extern void __init pgd_cache_init(void);
+extern void __init pgtable_cache_init(void);
 
 #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
 static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
index 9569e7c786d3e1c997494874d0224701444bbb75..4b898cdbdf056a6af0cbdf57967cde66e7f1de66 100644 (file)
@@ -129,11 +129,8 @@ static inline bool compaction_failed(enum compact_result result)
        return false;
 }
 
-/*
- * Compaction  has backed off for some reason. It might be throttling or
- * lock contention. Retrying is still worthwhile.
- */
-static inline bool compaction_withdrawn(enum compact_result result)
+/* Compaction needs reclaim to be performed first, so it can continue. */
+static inline bool compaction_needs_reclaim(enum compact_result result)
 {
        /*
         * Compaction backed off due to watermark checks for order-0
@@ -142,6 +139,16 @@ static inline bool compaction_withdrawn(enum compact_result result)
        if (result == COMPACT_SKIPPED)
                return true;
 
+       return false;
+}
+
+/*
+ * Compaction has backed off for some reason after doing some work or none
+ * at all. It might be throttling or lock contention. Retrying might still
+ * be worthwhile, but at a higher priority if allowed.
+ */
+static inline bool compaction_withdrawn(enum compact_result result)
+{
        /*
         * If compaction is deferred for high-order allocations, it is
         * because sync compaction recently failed. If this is the case
@@ -207,6 +214,11 @@ static inline bool compaction_failed(enum compact_result result)
        return false;
 }
 
+static inline bool compaction_needs_reclaim(enum compact_result result)
+{
+       return false;
+}
+
 static inline bool compaction_withdrawn(enum compact_result result)
 {
        return true;
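
A userspace mock of the control flow this split enables: COMPACT_SKIPPED now asks the caller to reclaim before retrying, while the remaining backoff cases retry compaction at a higher priority. The enum values and decision strings below are illustrative only, not the kernel's should_compact_retry():

/* Illustrative mock of the retry decision; names mirror the kernel enum
 * but this is not kernel code. */
#include <stdio.h>
#include <stdbool.h>

enum compact_result { COMPACT_SKIPPED, COMPACT_DEFERRED, COMPACT_CONTENDED };

static bool compaction_needs_reclaim(enum compact_result result)
{
        return result == COMPACT_SKIPPED;
}

static bool compaction_withdrawn(enum compact_result result)
{
        return result == COMPACT_DEFERRED || result == COMPACT_CONTENDED;
}

static const char *next_step(enum compact_result result)
{
        if (compaction_needs_reclaim(result))
                return "reclaim, then retry compaction";
        if (compaction_withdrawn(result))
                return "retry compaction at a higher priority";
        return "no retry";
}

int main(void)
{
        printf("SKIPPED   -> %s\n", next_step(COMPACT_SKIPPED));
        printf("DEFERRED  -> %s\n", next_step(COMPACT_DEFERRED));
        printf("CONTENDED -> %s\n", next_step(COMPACT_CONTENDED));
        return 0;
}
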
index 866268c2c6e3a0e127cf87b1276a4ebadf52c37f..b0c6b0d34d0213569c88c9bd83280c46ecaf2be2 100644 (file)
@@ -429,6 +429,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
  * @i_pages: Cached pages.
  * @gfp_mask: Memory allocation flags to use for allocating pages.
  * @i_mmap_writable: Number of VM_SHARED mappings.
+ * @nr_thps: Number of THPs in the pagecache (non-shmem only).
  * @i_mmap: Tree of private and shared mappings.
  * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
  * @nrpages: Number of page entries, protected by the i_pages lock.
@@ -446,6 +447,10 @@ struct address_space {
        struct xarray           i_pages;
        gfp_t                   gfp_mask;
        atomic_t                i_mmap_writable;
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+       /* number of thp, only for non-shmem files */
+       atomic_t                nr_thps;
+#endif
        struct rb_root_cached   i_mmap;
        struct rw_semaphore     i_mmap_rwsem;
        unsigned long           nrpages;
@@ -2798,6 +2803,33 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
        return errseq_sample(&mapping->wb_err);
 }
 
+static inline int filemap_nr_thps(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+       return atomic_read(&mapping->nr_thps);
+#else
+       return 0;
+#endif
+}
+
+static inline void filemap_nr_thps_inc(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+       atomic_inc(&mapping->nr_thps);
+#else
+       WARN_ON_ONCE(1);
+#endif
+}
+
+static inline void filemap_nr_thps_dec(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+       atomic_dec(&mapping->nr_thps);
+#else
+       WARN_ON_ONCE(1);
+#endif
+}
+
 extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
                           int datasync);
 extern int vfs_fsync(struct file *file, int datasync);
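
When CONFIG_READ_ONLY_THP_FOR_FS is off, the helpers collapse to a constant 0 (with a one-time warning on inc/dec), so callers such as the do_dentry_open() check above cost nothing on unconfigured kernels. A standalone analog of that pattern built on C11 atomics; the HAVE_FILE_THPS switch and the names are illustrative, not the kernel API:

/* Counter that is only live when a compile-time option is set and
 * collapses to a cheap constant otherwise.  Build with -DHAVE_FILE_THPS
 * to enable it. */
#include <stdio.h>

#ifdef HAVE_FILE_THPS
#include <stdatomic.h>
static atomic_int nr_thps;
static int nr_thps_read(void) { return atomic_load(&nr_thps); }
static void nr_thps_inc(void) { atomic_fetch_add(&nr_thps, 1); }
static void nr_thps_dec(void) { atomic_fetch_sub(&nr_thps, 1); }
#else
static int nr_thps_read(void) { return 0; }
static void nr_thps_inc(void) { /* the kernel version would WARN here */ }
static void nr_thps_dec(void) { /* the kernel version would WARN here */ }
#endif

int main(void)
{
        nr_thps_inc();
        nr_thps_inc();
        nr_thps_dec();
        printf("nr_thps = %d\n", nr_thps_read());
        return 0;
}
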
index 45ede62aa85be47370719dae4004c88ec1796e06..61c9ffd89b0521d10d64eecb801a243d16e744f6 100644 (file)
@@ -267,6 +267,15 @@ static inline bool thp_migration_supported(void)
        return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
 }
 
+static inline struct list_head *page_deferred_list(struct page *page)
+{
+       /*
+        * Global or memcg deferred list in the second tail pages is
+        * The global or memcg deferred list lives in the second tail
+        * page; its first word is occupied by compound_head.
+       return &page[2].deferred_list;
+}
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
index edfca427831928abc6a072c4e0d94daee6d7811a..53fc34f930d08cd8edcfb95834e882d713de49fe 100644 (file)
@@ -454,7 +454,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 static inline struct hstate *page_hstate(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageHuge(page), page);
-       return size_to_hstate(PAGE_SIZE << compound_order(page));
+       return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
index df03825ad1a1a9e35d18c896e2738ac44f915717..603fbc4e2f70d6ecc2f3183cf5f904724a1c8def 100644 (file)
@@ -1410,8 +1410,6 @@ extern int           jbd2_journal_clear_err  (journal_t *);
 extern int        jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
 extern int        jbd2_journal_force_commit(journal_t *);
 extern int        jbd2_journal_force_commit_nested(journal_t *);
-extern int        jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
-extern int        jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
 extern int        jbd2_journal_inode_ranged_write(handle_t *handle,
                        struct jbd2_inode *inode, loff_t start_byte,
                        loff_t length);
index 082d1d2a5216977262d3a1817b30a9131efe873a..bc45ea1efbf7973362c6cb99b433ecba4f95612a 100644 (file)
@@ -15,6 +15,14 @@ extern int __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
 extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                                      unsigned long vm_flags);
+#ifdef CONFIG_SHMEM
+extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
+#else
+static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
+                                          unsigned long addr)
+{
+}
+#endif
 
 #define khugepaged_enabled()                                          \
        (transparent_hugepage_flags &                                  \
@@ -73,6 +81,10 @@ static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 {
        return 0;
 }
+static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
+                                          unsigned long addr)
+{
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_KHUGEPAGED_H */
index ad8f1a397ae4f8260fc1149c8d9a0de7c59b5e5e..9b60863429ccff3c5d70e17fe5621ec4bf0a93bc 100644 (file)
@@ -128,9 +128,8 @@ struct mem_cgroup_per_node {
 
        struct mem_cgroup_reclaim_iter  iter[DEF_PRIORITY + 1];
 
-#ifdef CONFIG_MEMCG_KMEM
        struct memcg_shrinker_map __rcu *shrinker_map;
-#endif
+
        struct rb_node          tree_node;      /* RB tree node */
        unsigned long           usage_in_excess;/* Set to the value by which */
                                                /* the soft limit is exceeded*/
@@ -331,6 +330,10 @@ struct mem_cgroup {
        struct list_head event_list;
        spinlock_t event_list_lock;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       struct deferred_split deferred_split_queue;
+#endif
+
        struct mem_cgroup_per_node *nodeinfo[0];
        /* WARNING: nodeinfo must be the last member here */
 };
@@ -1311,6 +1314,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
        } while ((memcg = parent_mem_cgroup(memcg)));
        return false;
 }
+
+extern int memcg_expand_shrinker_maps(int new_id);
+
+extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+                                  int nid, int shrinker_id);
 #else
 #define mem_cgroup_sockets_enabled 0
 static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1319,6 +1327,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
        return false;
 }
+
+static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+                                         int nid, int shrinker_id)
+{
+}
 #endif
 
 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
@@ -1390,10 +1403,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
        return memcg ? memcg->kmemcg_id : -1;
 }
 
-extern int memcg_expand_shrinker_maps(int new_id);
-
-extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
-                                  int nid, int shrinker_id);
 #else
 
 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
@@ -1435,8 +1444,6 @@ static inline void memcg_put_cache_ids(void)
 {
 }
 
-static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
-                                         int nid, int shrinker_id) { }
 #endif /* CONFIG_MEMCG_KMEM */
 
 #endif /* _LINUX_MEMCONTROL_H */
index 02e633f3ede05e83bde2ed499462661f08b9c7b5..0ebb105eb261554cceac2d006f38cbeced91b278 100644 (file)
@@ -25,7 +25,6 @@
 
 struct memory_block {
        unsigned long start_section_nr;
-       unsigned long end_section_nr;
        unsigned long state;            /* serialized by the dev->lock */
        int section_count;              /* serialized by mem_sysfs_mutex */
        int online_type;                /* for passing data to online routine */
@@ -80,9 +79,9 @@ struct mem_section;
 #define IPC_CALLBACK_PRI        10
 
 #ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
-static inline int memory_dev_init(void)
+static inline void memory_dev_init(void)
 {
-       return 0;
+       return;
 }
 static inline int register_memory_notifier(struct notifier_block *nb)
 {
@@ -113,7 +112,7 @@ extern int register_memory_isolate_notifier(struct notifier_block *nb);
 extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
 int create_memory_block_devices(unsigned long start, unsigned long size);
 void remove_memory_block_devices(unsigned long start, unsigned long size);
-extern int memory_dev_init(void);
+extern void memory_dev_init(void);
 extern int memory_notify(unsigned long val, void *v);
 extern int memory_isolate_notify(unsigned long val, void *v);
 extern struct memory_block *find_memory_block(struct mem_section *);
index 7cf955feb8235d2dec0e52e4ceece12f36dfd955..294a67b94147083867208be03000c53f839c846f 100644 (file)
@@ -805,6 +805,24 @@ static inline void set_compound_order(struct page *page, unsigned int order)
        page[1].compound_order = order;
 }
 
+/* Returns the number of pages in this potentially compound page. */
+static inline unsigned long compound_nr(struct page *page)
+{
+       return 1UL << compound_order(page);
+}
+
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+       return PAGE_SIZE << compound_order(page);
+}
+
+/* Returns the number of bits needed for the number of bytes in a page */
+static inline unsigned int page_shift(struct page *page)
+{
+       return PAGE_SHIFT + compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
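
The three new helpers are thin arithmetic wrappers around compound_order(). A standalone sketch of the math that takes the order directly instead of a struct page, assuming PAGE_SHIFT of 12 and using order 9 (a PMD-sized THP on x86-64) as the example:

/* Pure-arithmetic version of the compound page helpers, for illustration. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

static unsigned long compound_nr(unsigned int order) { return 1UL << order; }
static unsigned long page_size(unsigned int order)   { return PAGE_SIZE << order; }
static unsigned int  page_shift(unsigned int order)  { return PAGE_SHIFT + order; }

int main(void)
{
        unsigned int orders[] = { 0, 9 };

        for (int i = 0; i < 2; i++) {
                unsigned int o = orders[i];

                printf("order %u: %lu pages, %lu bytes, shift %u\n",
                       o, compound_nr(o), page_size(o), page_shift(o));
        }
        return 0;
}
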
@@ -1057,8 +1075,9 @@ static inline void put_user_page(struct page *page)
        put_page(page);
 }
 
-void put_user_pages_dirty(struct page **pages, unsigned long npages);
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
+void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+                              bool make_dirty);
+
 void put_user_pages(struct page **pages, unsigned long npages);
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -1405,7 +1424,11 @@ extern void pagefault_out_of_memory(void);
 
 extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
 
+#ifdef CONFIG_MMU
 extern bool can_do_mlock(void);
+#else
+static inline bool can_do_mlock(void) { return false; }
+#endif
 extern int user_shm_lock(size_t, struct user_struct *);
 extern void user_shm_unlock(size_t, struct user_struct *);
 
@@ -2305,6 +2328,8 @@ extern int install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
                                   unsigned long flags, struct page **pages);
 
+unsigned long randomize_stack_top(unsigned long stack_top);
+
 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2568,6 +2593,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_COW       0x4000  /* internal GUP flag */
 #define FOLL_ANON      0x8000  /* don't do file mappings */
 #define FOLL_LONGTERM  0x10000 /* mapping lifetime is indefinite: see below */
+#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
 
 /*
  * NOTE on FOLL_LONGTERM:
@@ -2845,5 +2871,12 @@ void __init setup_nr_node_ids(void);
 static inline void setup_nr_node_ids(void) {}
 #endif
 
+extern int memcmp_pages(struct page *page1, struct page *page2);
+
+static inline int pages_identical(struct page *page1, struct page *page2)
+{
+       return !memcmp_pages(page1, page2);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
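
memcmp_pages()/pages_identical() do a full byte comparison of two pages; the uprobes patch later in this series uses them to notice when a patched page has become identical to the original file page again. A userspace analog (the generic kernel version maps both pages and memcmp()s them):

/* Byte-compare two page-sized buffers, like pages_identical() does. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        unsigned char *a = calloc(1, page_size);
        unsigned char *b = calloc(1, page_size);

        if (!a || !b)
                return 1;
        printf("identical: %d\n", !memcmp(a, b, page_size));
        b[123] = 0xff;
        printf("identical after edit: %d\n", !memcmp(a, b, page_size));
        free(a);
        free(b);
        return 0;
}
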
index 0b739f360cec4c53f0cb4f2b6ade4650e89aec92..5183e0d77dfa6cf6fdcfb1c928bf315a61aec900 100644 (file)
@@ -138,6 +138,7 @@ struct page {
                struct {        /* Second tail page of compound page */
                        unsigned long _compound_pad_1;  /* compound_head */
                        unsigned long _compound_pad_2;
+                       /* For both global and memcg */
                        struct list_head deferred_list;
                };
                struct {        /* Page table pages */
index 3f38c30d2f13d1e3b86408a6edf1e0eb8ef52778..bda20282746b92d055a573c7855b70e070fcd41f 100644 (file)
@@ -235,6 +235,8 @@ enum node_stat_item {
        NR_SHMEM,               /* shmem pages (included tmpfs/GEM pages) */
        NR_SHMEM_THPS,
        NR_SHMEM_PMDMAPPED,
+       NR_FILE_THPS,
+       NR_FILE_PMDMAPPED,
        NR_ANON_THPS,
        NR_UNSTABLE_NFS,        /* NFS unstable pages */
        NR_VMSCAN_WRITE,
@@ -677,6 +679,14 @@ struct zonelist {
 extern struct page *mem_map;
 #endif
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct deferred_split {
+       spinlock_t split_queue_lock;
+       struct list_head split_queue;
+       unsigned long split_queue_len;
+};
+#endif
+
 /*
  * On NUMA machines, each NUMA node would have a pg_data_t to describe
  * it's memory layout. On UMA machines there is a single pglist_data which
@@ -756,9 +766,7 @@ typedef struct pglist_data {
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       spinlock_t split_queue_lock;
-       struct list_head split_queue;
-       unsigned long split_queue_len;
+       struct deferred_split deferred_split_queue;
 #endif
 
        /* Fields commonly accessed by the page reclaim scanner */
index 09592951725cc0b578f356c951348fcc48bfc0ba..682fd465df060c0ac2cde1f67b7b6c11090dd17f 100644 (file)
@@ -18,6 +18,7 @@ struct page_ext_operations {
 
 enum page_ext_flags {
        PAGE_EXT_OWNER,
+       PAGE_EXT_OWNER_ACTIVE,
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
        PAGE_EXT_YOUNG,
        PAGE_EXT_IDLE,
index c7552459a15f5763f6c47b58dcda6980f6881a53..37a4d9e32cd3fc45fd848fe4fa4a37e693e47d23 100644 (file)
@@ -333,6 +333,16 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                        mapping_gfp_mask(mapping));
 }
 
+static inline struct page *find_subpage(struct page *page, pgoff_t offset)
+{
+       if (PageHuge(page))
+               return page;
+
+       VM_BUG_ON_PAGE(PageTail(page), page);
+
+       return page + (offset & (compound_nr(page) - 1));
+}
+
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
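
find_subpage() maps a page-cache offset to the right sub-page of a compound page by masking with the number of base pages (hugetlb pages are returned as-is). The index math, sketched standalone for an order-9 (512-page) compound page assumed for illustration:

/* Offset-to-subpage math: sub-page = head + (offset & (nr_pages - 1)). */
#include <stdio.h>

int main(void)
{
        unsigned long nr_pages = 1UL << 9;      /* 512 base pages */
        unsigned long offsets[] = { 512, 515, 1023 };

        for (int i = 0; i < 3; i++) {
                unsigned long off = offsets[i];

                printf("offset %lu -> head page + %lu\n",
                       off, off & (nr_pages - 1));
        }
        return 0;
}
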
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
deleted file mode 100644 (file)
index 034982c..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_QUICKLIST_H
-#define LINUX_QUICKLIST_H
-/*
- * Fast allocations and disposal of pages. Pages must be in the condition
- * as needed after allocation when they are freed. Per cpu lists of pages
- * are kept that only contain node local pages.
- *
- * (C) 2007, SGI. Christoph Lameter <cl@linux.com>
- */
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/percpu.h>
-
-#ifdef CONFIG_QUICKLIST
-
-struct quicklist {
-       void *page;
-       int nr_pages;
-};
-
-DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
-
-/*
- * The two key functions quicklist_alloc and quicklist_free are inline so
- * that they may be custom compiled for the platform.
- * Specifying a NULL ctor can remove constructor support. Specifying
- * a constant quicklist allows the determination of the exact address
- * in the per cpu area.
- *
- * The fast patch in quicklist_alloc touched only a per cpu cacheline and
- * the first cacheline of the page itself. There is minmal overhead involved.
- */
-static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
-{
-       struct quicklist *q;
-       void **p = NULL;
-
-       q =&get_cpu_var(quicklist)[nr];
-       p = q->page;
-       if (likely(p)) {
-               q->page = p[0];
-               p[0] = NULL;
-               q->nr_pages--;
-       }
-       put_cpu_var(quicklist);
-       if (likely(p))
-               return p;
-
-       p = (void *)__get_free_page(flags | __GFP_ZERO);
-       if (ctor && p)
-               ctor(p);
-       return p;
-}
-
-static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
-       struct page *page)
-{
-       struct quicklist *q;
-
-       q = &get_cpu_var(quicklist)[nr];
-       *(void **)p = q->page;
-       q->page = p;
-       q->nr_pages++;
-       put_cpu_var(quicklist);
-}
-
-static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
-{
-       __quicklist_free(nr, dtor, pp, virt_to_page(pp));
-}
-
-static inline void quicklist_free_page(int nr, void (*dtor)(void *),
-                                                       struct page *page)
-{
-       __quicklist_free(nr, dtor, page_address(page), page);
-}
-
-void quicklist_trim(int nr, void (*dtor)(void *),
-       unsigned long min_pages, unsigned long max_free);
-
-unsigned long quicklist_total_size(void);
-
-#else
-
-static inline unsigned long quicklist_total_size(void)
-{
-       return 0;
-}
-
-#endif
-
-#endif /* LINUX_QUICKLIST_H */
-
index 9443cafd19696a65919514abb140a522ea6312b6..0f80123650e23fb95ad5d6d6219c9864912feeb7 100644 (file)
@@ -69,7 +69,7 @@ struct shrinker {
 
        /* These are for internal use */
        struct list_head list;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
        /* ID in shrinker_idr */
        int id;
 #endif
@@ -81,6 +81,11 @@ struct shrinker {
 /* Flags */
 #define SHRINKER_NUMA_AWARE    (1 << 0)
 #define SHRINKER_MEMCG_AWARE   (1 << 1)
+/*
+ * For now this flag only makes sense when the shrinker is also
+ * MEMCG_AWARE; a non-MEMCG_AWARE shrinker should not have it set.
+ */
+#define SHRINKER_NONSLAB       (1 << 2)
 
 extern int prealloc_shrinker(struct shrinker *shrinker);
 extern void register_shrinker_prepared(struct shrinker *shrinker);
index 56c9c7eed34edf8869830fab1f2d24b71b5177c6..ab2b98ad76e102f14eeaeb412c9769dd714b02d5 100644 (file)
@@ -595,68 +595,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
        return __kmalloc_node(size, flags, node);
 }
 
-struct memcg_cache_array {
-       struct rcu_head rcu;
-       struct kmem_cache *entries[0];
-};
-
-/*
- * This is the main placeholder for memcg-related information in kmem caches.
- * Both the root cache and the child caches will have it. For the root cache,
- * this will hold a dynamically allocated array large enough to hold
- * information about the currently limited memcgs in the system. To allow the
- * array to be accessed without taking any locks, on relocation we free the old
- * version only after a grace period.
- *
- * Root and child caches hold different metadata.
- *
- * @root_cache:        Common to root and child caches.  NULL for root, pointer to
- *             the root cache for children.
- *
- * The following fields are specific to root caches.
- *
- * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
- *             used to index child cachces during allocation and cleared
- *             early during shutdown.
- *
- * @root_caches_node: List node for slab_root_caches list.
- *
- * @children:  List of all child caches.  While the child caches are also
- *             reachable through @memcg_caches, a child cache remains on
- *             this list until it is actually destroyed.
- *
- * The following fields are specific to child caches.
- *
- * @memcg:     Pointer to the memcg this cache belongs to.
- *
- * @children_node: List node for @root_cache->children list.
- *
- * @kmem_caches_node: List node for @memcg->kmem_caches list.
- */
-struct memcg_cache_params {
-       struct kmem_cache *root_cache;
-       union {
-               struct {
-                       struct memcg_cache_array __rcu *memcg_caches;
-                       struct list_head __root_caches_node;
-                       struct list_head children;
-                       bool dying;
-               };
-               struct {
-                       struct mem_cgroup *memcg;
-                       struct list_head children_node;
-                       struct list_head kmem_caches_node;
-                       struct percpu_ref refcnt;
-
-                       void (*work_fn)(struct kmem_cache *);
-                       union {
-                               struct rcu_head rcu_head;
-                               struct work_struct work;
-                       };
-               };
-       };
-};
-
 int memcg_update_all_caches(int num_memcgs);
 
 /**
index dfa718ffdd4f7a3f15280dbcd9f4c24e08fb2596..4e7809408073d5a066d8632fcafc0615a0b0cfd3 100644 (file)
@@ -53,15 +53,21 @@ struct vmap_area {
        unsigned long va_start;
        unsigned long va_end;
 
-       /*
-        * Largest available free size in subtree.
-        */
-       unsigned long subtree_max_size;
-       unsigned long flags;
        struct rb_node rb_node;         /* address sorted rbtree */
        struct list_head list;          /* address sorted list */
-       struct llist_node purge_list;    /* "lazy purge" list */
-       struct vm_struct *vm;
+
+       /*
+        * The following three variables can be packed, because
+        * a vmap_area object is always in one of the three states:
+        *    1) in "free" tree (root is free_vmap_area_root)
+        *    2) in "busy" tree (root is vmap_area_root)
+        *    3) in purge list  (head is vmap_purge_list)
+        */
+       union {
+               unsigned long subtree_max_size; /* in "free" tree */
+               struct vm_struct *vm;           /* in "busy" tree */
+               struct llist_node purge_list;   /* in purge list */
+       };
 };
 
 /*
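
Because a vmap_area is only ever in one of those three states at a time, the fields can share storage, which shrinks the object on 64-bit builds (the separate flags field is gone as well). A userspace sizeof() comparison using stand-in field types; the real struct embeds rb_node and list_head, so the absolute numbers are illustrative only:

/* Rough before/after layout of vmap_area with stand-in field types. */
#include <stdio.h>

struct before {
        unsigned long va_start, va_end;
        unsigned long subtree_max_size;
        unsigned long flags;
        void *rb_node[3];               /* stand-in for struct rb_node */
        void *list[2];                  /* stand-in for struct list_head */
        void *purge_list;               /* stand-in for struct llist_node */
        void *vm;
};

struct after {
        unsigned long va_start, va_end;
        void *rb_node[3];
        void *list[2];
        union {
                unsigned long subtree_max_size;
                void *vm;
                void *purge_list;
        };
};

int main(void)
{
        printf("before: %zu bytes, after: %zu bytes\n",
               sizeof(struct before), sizeof(struct after));
        return 0;
}
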
index 7238865e75b00115f710d7c0d17e6e452edb51b3..51bf43076165d76aa0f313a65d526e912a15963e 100644 (file)
@@ -46,6 +46,8 @@ const char *zpool_get_type(struct zpool *pool);
 
 void zpool_destroy_pool(struct zpool *pool);
 
+bool zpool_malloc_support_movable(struct zpool *pool);
+
 int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
                        unsigned long *handle);
 
@@ -90,6 +92,7 @@ struct zpool_driver {
                        struct zpool *zpool);
        void (*destroy)(void *pool);
 
+       bool malloc_support_movable;
        int (*malloc)(void *pool, size_t size, gfp_t gfp,
                                unsigned long *handle);
        void (*free)(void *pool, unsigned long handle);
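
The new bool is meant to let zswap pass movable allocation flags only to drivers whose backing pages can actually be migrated. A generic userspace sketch of the capability-flag-in-an-ops-table pattern; the driver names and flag values are stand-ins, not the kernel API:

/* Caller inspects a capability bool in the driver ops before choosing
 * allocation flags.  Everything here is illustrative. */
#include <stdio.h>
#include <stdbool.h>

struct pool_driver {
        const char *name;
        bool malloc_support_movable;
};

static unsigned int pick_alloc_flags(const struct pool_driver *drv)
{
        unsigned int flags = 0x1;               /* stand-in base flags */

        if (drv->malloc_support_movable)
                flags |= 0x8;                   /* stand-in for "movable" */
        return flags;
}

int main(void)
{
        struct pool_driver fixed_pool   = { "fixed-pool",   false };
        struct pool_driver movable_pool = { "movable-pool", true  };

        printf("%s -> flags %#x\n", fixed_pool.name, pick_alloc_flags(&fixed_pool));
        printf("%s -> flags %#x\n", movable_pool.name, pick_alloc_flags(&movable_pool));
        return 0;
}
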
index 653693da8da61529678ffdb6376dc2b5cc3df2ca..208b8fa1808e029db9cf74270ba06007f340e111 100644 (file)
@@ -507,7 +507,7 @@ void __init __weak mem_encrypt_init(void) { }
 
 void __init __weak poking_init(void) { }
 
-void __init __weak pgd_cache_init(void) { }
+void __init __weak pgtable_cache_init(void) { }
 
 bool initcall_debug;
 core_param(initcall_debug, initcall_debug, bool, 0644);
@@ -556,6 +556,7 @@ static void __init mm_init(void)
        report_meminit();
        mem_init();
        kmem_cache_init();
+       kmemleak_init();
        pgtable_init();
        debug_objects_mem_init();
        vmalloc_init();
@@ -564,7 +565,6 @@ static void __init mm_init(void)
        init_espfix_bsp();
        /* Should be run after espfix64 is set up. */
        pti_init();
-       pgd_cache_init();
 }
 
 void __init __weak arch_call_rest_init(void)
@@ -594,7 +594,6 @@ asmlinkage __visible void __init start_kernel(void)
        page_address_init();
        pr_notice("%s", linux_banner);
        setup_arch(&command_line);
-       mm_init_cpumask(&init_mm);
        setup_command_line(command_line);
        setup_nr_cpu_ids();
        setup_per_cpu_areas();
@@ -740,7 +739,6 @@ asmlinkage __visible void __init start_kernel(void)
                initrd_start = 0;
        }
 #endif
-       kmemleak_init();
        setup_per_cpu_pageset();
        numa_policy_init();
        acpi_early_init();
index 84fa00497c49f9f8d30538a75b9bb3d6727968a6..94d38a39d72ecaf3ad0cbee26f5aa9fa1e26fb0e 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/percpu-rwsem.h>
 #include <linux/task_work.h>
 #include <linux/shmem_fs.h>
+#include <linux/khugepaged.h>
 
 #include <linux/uprobes.h>
 
@@ -143,17 +144,19 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
  *
  * @vma:      vma that holds the pte pointing to page
  * @addr:     address the old @page is mapped at
- * @page:     the cowed page we are replacing by kpage
- * @kpage:    the modified page we replace page by
+ * @old_page: the page we are replacing by new_page
+ * @new_page: the modified page we replace page by
  *
- * Returns 0 on success, -EFAULT on failure.
+ * If @new_page is NULL, only unmap @old_page.
+ *
+ * Returns 0 on success, negative error code otherwise.
  */
 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                                struct page *old_page, struct page *new_page)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct page_vma_mapped_walk pvmw = {
-               .page = old_page,
+               .page = compound_head(old_page),
                .vma = vma,
                .address = addr,
        };
@@ -164,12 +167,12 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
                                addr + PAGE_SIZE);
 
-       VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
-
-       err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
-                       false);
-       if (err)
-               return err;
+       if (new_page) {
+               err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
+                                           &memcg, false);
+               if (err)
+                       return err;
+       }
 
        /* For try_to_free_swap() and munlock_vma_page() below */
        lock_page(old_page);
@@ -177,15 +180,20 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
        mmu_notifier_invalidate_range_start(&range);
        err = -EAGAIN;
        if (!page_vma_mapped_walk(&pvmw)) {
-               mem_cgroup_cancel_charge(new_page, memcg, false);
+               if (new_page)
+                       mem_cgroup_cancel_charge(new_page, memcg, false);
                goto unlock;
        }
        VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 
-       get_page(new_page);
-       page_add_new_anon_rmap(new_page, vma, addr, false);
-       mem_cgroup_commit_charge(new_page, memcg, false, false);
-       lru_cache_add_active_or_unevictable(new_page, vma);
+       if (new_page) {
+               get_page(new_page);
+               page_add_new_anon_rmap(new_page, vma, addr, false);
+               mem_cgroup_commit_charge(new_page, memcg, false, false);
+               lru_cache_add_active_or_unevictable(new_page, vma);
+       } else
+               /* no new page, just dec_mm_counter for old_page */
+               dec_mm_counter(mm, MM_ANONPAGES);
 
        if (!PageAnon(old_page)) {
                dec_mm_counter(mm, mm_counter_file(old_page));
@@ -194,8 +202,9 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
        flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
        ptep_clear_flush_notify(vma, addr, pvmw.pte);
-       set_pte_at_notify(mm, addr, pvmw.pte,
-                       mk_pte(new_page, vma->vm_page_prot));
+       if (new_page)
+               set_pte_at_notify(mm, addr, pvmw.pte,
+                                 mk_pte(new_page, vma->vm_page_prot));
 
        page_remove_rmap(old_page, false);
        if (!page_mapped(old_page))
@@ -464,6 +473,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
        struct page *old_page, *new_page;
        struct vm_area_struct *vma;
        int ret, is_register, ref_ctr_updated = 0;
+       bool orig_page_huge = false;
 
        is_register = is_swbp_insn(&opcode);
        uprobe = container_of(auprobe, struct uprobe, arch);
@@ -471,7 +481,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 retry:
        /* Read the page with vaddr into memory */
        ret = get_user_pages_remote(NULL, mm, vaddr, 1,
-                       FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
+                       FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
        if (ret <= 0)
                return ret;
 
@@ -488,6 +498,10 @@ retry:
                ref_ctr_updated = 1;
        }
 
+       ret = 0;
+       if (!is_register && !PageAnon(old_page))
+               goto put_old;
+
        ret = anon_vma_prepare(vma);
        if (ret)
                goto put_old;
@@ -501,8 +515,33 @@ retry:
        copy_highpage(new_page, old_page);
        copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
+       if (!is_register) {
+               struct page *orig_page;
+               pgoff_t index;
+
+               VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);
+
+               index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
+               orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
+                                         index);
+
+               if (orig_page) {
+                       if (PageUptodate(orig_page) &&
+                           pages_identical(new_page, orig_page)) {
+                               /* let go new_page */
+                               put_page(new_page);
+                               new_page = NULL;
+
+                               if (PageCompound(orig_page))
+                                       orig_page_huge = true;
+                       }
+                       put_page(orig_page);
+               }
+       }
+
        ret = __replace_page(vma, vaddr, old_page, new_page);
-       put_page(new_page);
+       if (new_page)
+               put_page(new_page);
 put_old:
        put_page(old_page);
 
@@ -513,6 +552,10 @@ put_old:
        if (ret && is_register && ref_ctr_updated)
                update_ref_ctr(uprobe, mm, -1);
 
+       /* try collapse pmd for compound page */
+       if (!ret && orig_page_huge)
+               collapse_pte_mapped_thp(mm, vaddr);
+
        return ret;
 }
 
index 74877e9d90ca6402e937515b291661f5ea0c9377..76036a41143b9b136a51e0c56332e191669ac8bb 100644 (file)
@@ -487,8 +487,8 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
        while (start < end &&
               !find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
                                    false, &res)) {
-               pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
-               end_pfn = (res.end + 1) >> PAGE_SHIFT;
+               pfn = PFN_UP(res.start);
+               end_pfn = PFN_DOWN(res.end + 1);
                if (end_pfn > pfn)
                        ret = (*func)(pfn, end_pfn - pfn, arg);
                if (ret)
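
PFN_UP() and PFN_DOWN() come from linux/pfn.h and round a physical address up or down to a page frame number. A standalone check that they agree with the open-coded expressions being replaced, assuming a PAGE_SHIFT of 12 for illustration:

/* Compare the helpers against the open-coded rounding they replace. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)

int main(void)
{
        unsigned long start = 0x100200;         /* unaligned resource start */
        unsigned long end   = 0x1fffff;         /* inclusive resource end */

        unsigned long old_pfn     = (start + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long old_end_pfn = (end + 1) >> PAGE_SHIFT;

        printf("pfn:     old %lu, new %lu\n", old_pfn, PFN_UP(start));
        printf("end_pfn: old %lu, new %lu\n", old_end_pfn, PFN_DOWN(end + 1));
        return 0;
}
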
index c892c6280c9f8e9d8ed45b57b2882789275d608d..8dad5aa600eacbff82480ef84eae684308e6da54 100644 (file)
@@ -238,7 +238,6 @@ static void do_idle(void)
        tick_nohz_idle_enter();
 
        while (!need_resched()) {
-               check_pgt_cache();
                rmb();
 
                local_irq_disable();
index 078950d9605ba2f6c109c5837f52b1a6cc07d186..00fcea236ebacffa71236af5ddfcaf2df8d0855d 100644 (file)
@@ -264,7 +264,8 @@ extern struct ctl_table epoll_table[];
 extern struct ctl_table firmware_config_table[];
 #endif
 
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
+#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
+    defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
 int sysctl_legacy_va_layout;
 #endif
 
@@ -1573,7 +1574,8 @@ static struct ctl_table vm_table[] = {
                .proc_handler   = proc_dointvec,
                .extra1         = SYSCTL_ZERO,
        },
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
+#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
+    defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
        {
                .procname       = "legacy_va_layout",
                .data           = &sysctl_legacy_va_layout,
index e0e14780a13de49b1bbfa9024296a328b890f217..6b1b1703a646ed6a6598ec04a166e76545b8a9f1 100644 (file)
@@ -576,17 +576,18 @@ config DEBUG_KMEMLEAK
          In order to access the kmemleak file, debugfs needs to be
          mounted (usually at /sys/kernel/debug).
 
-config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
-       int "Maximum kmemleak early log entries"
+config DEBUG_KMEMLEAK_MEM_POOL_SIZE
+       int "Kmemleak memory pool size"
        depends on DEBUG_KMEMLEAK
-       range 200 40000
-       default 400
+       range 200 1000000
+       default 16000
        help
          Kmemleak must track all the memory allocations to avoid
          reporting false positives. Since memory may be allocated or
-         freed before kmemleak is initialised, an early log buffer is
-         used to store these actions. If kmemleak reports "early log
-         buffer exceeded", please increase this value.
+         freed before kmemleak is fully initialised, use a static pool
+         of metadata objects to track such callbacks. After kmemleak is
+         fully initialised, this memory pool acts as an emergency one
+         if slab allocations fail.
 
 config DEBUG_KMEMLEAK_TEST
        tristate "Simple test for the kernel memory leak detector"
index 7fa97a8b571778a1619940ab10a9a4bc9e96f823..6c9682ce02544e5dca4e73b5104c1474bf00738b 100644 (file)
@@ -134,6 +134,14 @@ config KASAN_S390_4_LEVEL_PAGING
          to 3TB of RAM with KASan enabled). This options allows to force
          4-level paging instead.
 
+config KASAN_SW_TAGS_IDENTIFY
+       bool "Enable memory corruption identification"
+       depends on KASAN_SW_TAGS
+       help
+         This option enables best-effort identification of bug type
+         (use-after-free or out-of-bounds) at the cost of increased
+         memory consumption.
+
 config TEST_KASAN
        tristate "Module for testing KASAN for bug detection"
        depends on m && KASAN
index f1e0569b4539b8b8e976f6aff26b79f016f2edfd..639d5e7014c1e5a3f29a1f530233c23f455df993 100644 (file)
@@ -878,7 +878,7 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;
 
-       if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+       if (likely(n <= v && v <= (page_size(head))))
                return true;
        WARN_ON(1);
        return false;
index 5c86ef4c899f25ecbd6449784947bb35b7f4f7c4..1c26c14ffbb9bdfe8d442cb381e7c7d1fd242305 100644 (file)
@@ -6,7 +6,6 @@
  */
 
 #include <linux/mm.h>
-#include <linux/quicklist.h>
 #include <linux/cma.h>
 
 void show_mem(unsigned int filter, nodemask_t *nodemask)
@@ -39,10 +38,6 @@ void show_mem(unsigned int filter, nodemask_t *nodemask)
 #ifdef CONFIG_CMA
        printk("%lu pages cma reserved\n", totalcma_pages);
 #endif
-#ifdef CONFIG_QUICKLIST
-       printk("%lu pages in pagetable cache\n",
-               quicklist_total_size());
-#endif
 #ifdef CONFIG_MEMORY_FAILURE
        printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
 #endif
index b63b367a94e803e032337654e6470e7c97be28d5..49cc4d570a40b7336ec98ec6e9d4ea0ce032f7e5 100644 (file)
@@ -18,6 +18,9 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
 
 /*
  * Note: test functions are marked noinline so that their names appear in
@@ -337,6 +340,42 @@ static noinline void __init kmalloc_uaf2(void)
        kfree(ptr2);
 }
 
+static noinline void __init kfree_via_page(void)
+{
+       char *ptr;
+       size_t size = 8;
+       struct page *page;
+       unsigned long offset;
+
+       pr_info("invalid-free false positive (via page)\n");
+       ptr = kmalloc(size, GFP_KERNEL);
+       if (!ptr) {
+               pr_err("Allocation failed\n");
+               return;
+       }
+
+       page = virt_to_page(ptr);
+       offset = offset_in_page(ptr);
+       kfree(page_address(page) + offset);
+}
+
+static noinline void __init kfree_via_phys(void)
+{
+       char *ptr;
+       size_t size = 8;
+       phys_addr_t phys;
+
+       pr_info("invalid-free false positive (via phys)\n");
+       ptr = kmalloc(size, GFP_KERNEL);
+       if (!ptr) {
+               pr_err("Allocation failed\n");
+               return;
+       }
+
+       phys = virt_to_phys(ptr);
+       kfree(phys_to_virt(phys));
+}
+
 static noinline void __init kmem_cache_oob(void)
 {
        char *p;
@@ -737,6 +776,8 @@ static int __init kmalloc_tests_init(void)
        kmalloc_uaf();
        kmalloc_uaf_memset();
        kmalloc_uaf2();
+       kfree_via_page();
+       kfree_via_phys();
        kmem_cache_oob();
        memcg_accounted_kmem_cache();
        kasan_stack_oob();
index 2fe4902ad755c3c95f9d28d4715b575bff083f56..a5dae9a7eb510af8f5ae45ae5fc91d2c53a57a91 100644 (file)
@@ -273,11 +273,6 @@ config BOUNCE
          by default when ZONE_DMA or HIGHMEM is selected, but you
          may say n to override this.
 
-config NR_QUICK
-       int
-       depends on QUICKLIST
-       default "1"
-
 config VIRT_TO_BUS
        bool
        help
@@ -717,6 +712,17 @@ config GUP_BENCHMARK
 config GUP_GET_PTE_LOW_HIGH
        bool
 
+config READ_ONLY_THP_FOR_FS
+       bool "Read-only THP for filesystems (EXPERIMENTAL)"
+       depends on TRANSPARENT_HUGE_PAGECACHE && SHMEM
+
+       help
+         Allow khugepaged to put read-only file-backed pages in THP.
+
+         This is marked experimental because it is a new feature. Write
+         support of file THPs will be developed in the next few release
+         cycles.
+
 config ARCH_HAS_PTE_SPECIAL
        bool
 
index 82b6a20898bd1ca40a715bb98362193d802610dd..327b3ebf23bf977563fd347c618b0a6d1a036dd9 100644 (file)
@@ -21,7 +21,9 @@ config DEBUG_PAGEALLOC
          Also, the state of page tracking structures is checked more often as
          pages are being allocated and freed, as unexpected state changes
          often happen for same reasons as memory corruption (e.g. double free,
-         use-after-free).
+         use-after-free). The error reports for these checks can be augmented
+         with stack traces of last allocation and freeing of the page, when
+         PAGE_OWNER is also selected and enabled on boot.
 
          For architectures which don't enable ARCH_SUPPORTS_DEBUG_PAGEALLOC,
          fill the pages with poison patterns after free_pages() and verify
index d0b295c3b764bb753e4081b5847d6cef577f114d..d996846697ef5aeae1ba908e4b7b37087f5f7668 100644 (file)
@@ -21,6 +21,9 @@ KCOV_INSTRUMENT_memcontrol.o := n
 KCOV_INSTRUMENT_mmzone.o := n
 KCOV_INSTRUMENT_vmstat.o := n
 
+CFLAGS_init-mm.o += $(call cc-disable-warning, override-init)
+CFLAGS_init-mm.o += $(call cc-disable-warning, initializer-overrides)
+
 mmu-y                  := nommu.o
 mmu-$(CONFIG_MMU)      := highmem.o memory.o mincore.o \
                           mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
@@ -72,7 +75,6 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_MEMTEST)          += memtest.o
 obj-$(CONFIG_MIGRATION) += migrate.o
-obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o
 obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
 obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o
index 952dc2fb24e50a26bee9621965ec6070b5d13346..ce08b39d85d40f50011111b5ec0621f253ac30a8 100644 (file)
@@ -969,7 +969,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                         * is safe to read and it's 0 for tail pages.
                         */
                        if (unlikely(PageCompound(page))) {
-                               low_pfn += (1UL << compound_order(page)) - 1;
+                               low_pfn += compound_nr(page) - 1;
                                goto isolate_fail;
                        }
                }
@@ -1737,8 +1737,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
  * starting at the block pointed to by the migrate scanner pfn within
  * compact_control.
  */
-static isolate_migrate_t isolate_migratepages(struct zone *zone,
-                                       struct compact_control *cc)
+static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
 {
        unsigned long block_start_pfn;
        unsigned long block_end_pfn;
@@ -1756,8 +1755,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
         */
        low_pfn = fast_find_migrateblock(cc);
        block_start_pfn = pageblock_start_pfn(low_pfn);
-       if (block_start_pfn < zone->zone_start_pfn)
-               block_start_pfn = zone->zone_start_pfn;
+       if (block_start_pfn < cc->zone->zone_start_pfn)
+               block_start_pfn = cc->zone->zone_start_pfn;
 
        /*
         * fast_find_migrateblock marks a pageblock skipped so to avoid
@@ -1787,8 +1786,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
                        cond_resched();
 
-               page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
-                                                                       zone);
+               page = pageblock_pfn_to_page(block_start_pfn,
+                                               block_end_pfn, cc->zone);
                if (!page)
                        continue;
 
@@ -2078,6 +2077,17 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
        const bool sync = cc->mode != MIGRATE_ASYNC;
        bool update_cached;
 
+       /*
+        * These counters track activities during zone compaction.  Initialize
+        * them before compacting a new zone.
+        */
+       cc->total_migrate_scanned = 0;
+       cc->total_free_scanned = 0;
+       cc->nr_migratepages = 0;
+       cc->nr_freepages = 0;
+       INIT_LIST_HEAD(&cc->freepages);
+       INIT_LIST_HEAD(&cc->migratepages);
+
        cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
        ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
                                                        cc->classzone_idx);
@@ -2158,7 +2168,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
                        cc->rescan = true;
                }
 
-               switch (isolate_migratepages(cc->zone, cc)) {
+               switch (isolate_migratepages(cc)) {
                case ISOLATE_ABORT:
                        ret = COMPACT_CONTENDED;
                        putback_movable_pages(&cc->migratepages);
@@ -2281,10 +2291,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 {
        enum compact_result ret;
        struct compact_control cc = {
-               .nr_freepages = 0,
-               .nr_migratepages = 0,
-               .total_migrate_scanned = 0,
-               .total_free_scanned = 0,
                .order = order,
                .search_order = order,
                .gfp_mask = gfp_mask,
@@ -2305,8 +2311,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 
        if (capture)
                current->capture_control = &capc;
-       INIT_LIST_HEAD(&cc.freepages);
-       INIT_LIST_HEAD(&cc.migratepages);
 
        ret = compact_zone(&cc, &capc);
 
@@ -2408,8 +2412,6 @@ static void compact_node(int nid)
        struct zone *zone;
        struct compact_control cc = {
                .order = -1,
-               .total_migrate_scanned = 0,
-               .total_free_scanned = 0,
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
                .whole_zone = true,
@@ -2423,11 +2425,7 @@ static void compact_node(int nid)
                if (!populated_zone(zone))
                        continue;
 
-               cc.nr_freepages = 0;
-               cc.nr_migratepages = 0;
                cc.zone = zone;
-               INIT_LIST_HEAD(&cc.freepages);
-               INIT_LIST_HEAD(&cc.migratepages);
 
                compact_zone(&cc, NULL);
 
@@ -2529,8 +2527,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
        struct compact_control cc = {
                .order = pgdat->kcompactd_max_order,
                .search_order = pgdat->kcompactd_max_order,
-               .total_migrate_scanned = 0,
-               .total_free_scanned = 0,
                .classzone_idx = pgdat->kcompactd_classzone_idx,
                .mode = MIGRATE_SYNC_LIGHT,
                .ignore_skip_hint = false,
@@ -2554,16 +2550,10 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                                                        COMPACT_CONTINUE)
                        continue;
 
-               cc.nr_freepages = 0;
-               cc.nr_migratepages = 0;
-               cc.total_migrate_scanned = 0;
-               cc.total_free_scanned = 0;
-               cc.zone = zone;
-               INIT_LIST_HEAD(&cc.freepages);
-               INIT_LIST_HEAD(&cc.migratepages);
-
                if (kthread_should_stop())
                        return;
+
+               cc.zone = zone;
                status = compact_zone(&cc, NULL);
 
                if (status == COMPACT_SUCCESS) {
index 40667c2f338372e229ec7f51f6eb165d02c858a7..1146fcfa321511b61e5dd258c9825b6bf759426e 100644 (file)
@@ -126,7 +126,7 @@ static void page_cache_delete(struct address_space *mapping,
        /* hugetlb pages are represented by a single entry in the xarray */
        if (!PageHuge(page)) {
                xas_set_order(&xas, page->index, compound_order(page));
-               nr = 1U << compound_order(page);
+               nr = compound_nr(page);
        }
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -203,8 +203,9 @@ static void unaccount_page_cache_page(struct address_space *mapping,
                __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
                if (PageTransHuge(page))
                        __dec_node_page_state(page, NR_SHMEM_THPS);
-       } else {
-               VM_BUG_ON_PAGE(PageTransHuge(page), page);
+       } else if (PageTransHuge(page)) {
+               __dec_node_page_state(page, NR_FILE_THPS);
+               filemap_nr_thps_dec(mapping);
        }
 
        /*
@@ -281,11 +282,11 @@ EXPORT_SYMBOL(delete_from_page_cache);
  * @pvec: pagevec with pages to delete
  *
  * The function walks over mapping->i_pages and removes pages passed in @pvec
- * from the mapping. The function expects @pvec to be sorted by page index.
+ * from the mapping. The function expects @pvec to be sorted by page index
+ * and is optimised for it to be dense.
  * It tolerates holes in @pvec (mapping entries at those indices are not
  * modified). The function expects only THP head pages to be present in the
- * @pvec and takes care to delete all corresponding tail pages from the
- * mapping as well.
+ * @pvec.
  *
  * The function expects the i_pages lock to be held.
  */
@@ -294,40 +295,43 @@ static void page_cache_delete_batch(struct address_space *mapping,
 {
        XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
        int total_pages = 0;
-       int i = 0, tail_pages = 0;
+       int i = 0;
        struct page *page;
 
        mapping_set_update(&xas, mapping);
        xas_for_each(&xas, page, ULONG_MAX) {
-               if (i >= pagevec_count(pvec) && !tail_pages)
+               if (i >= pagevec_count(pvec))
                        break;
+
+               /* A swap/dax/shadow entry got inserted? Skip it. */
                if (xa_is_value(page))
                        continue;
-               if (!tail_pages) {
-                       /*
-                        * Some page got inserted in our range? Skip it. We
-                        * have our pages locked so they are protected from
-                        * being removed.
-                        */
-                       if (page != pvec->pages[i]) {
-                               VM_BUG_ON_PAGE(page->index >
-                                               pvec->pages[i]->index, page);
-                               continue;
-                       }
-                       WARN_ON_ONCE(!PageLocked(page));
-                       if (PageTransHuge(page) && !PageHuge(page))
-                               tail_pages = HPAGE_PMD_NR - 1;
+               /*
+                * A page got inserted in our range? Skip it. We have our
+                * pages locked so they are protected from being removed.
+                * If we see a page whose index is higher than ours, it
+                * means our page has been removed, which shouldn't be
+                * possible because we're holding the PageLock.
+                */
+               if (page != pvec->pages[i]) {
+                       VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
+                                       page);
+                       continue;
+               }
+
+               WARN_ON_ONCE(!PageLocked(page));
+
+               if (page->index == xas.xa_index)
                        page->mapping = NULL;
-                       /*
-                        * Leave page->index set: truncation lookup relies
-                        * upon it
-                        */
+               /* Leave page->index set: truncation lookup relies on it */
+
+               /*
+                * Move to the next page in the vector if this is a regular
+                * page or the index is of the last sub-page of this compound
+                * page.
+                */
+               if (page->index + compound_nr(page) - 1 == xas.xa_index)
                        i++;
-               } else {
-                       VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
-                                       != pvec->pages[i]->index, page);
-                       tail_pages--;
-               }
                xas_store(&xas, NULL);
                total_pages++;
        }
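
The rewritten loop drops the tail_pages counter: the pagevec slot advances only once xas.xa_index has walked past the last sub-page of the current (possibly compound) entry, with compound_nr(page) giving the number of base pages it covers. A minimal user-space sketch of that advance rule, not kernel code; the pagevec contents (an order-2 compound page at index 4 between two regular pages) are invented, and page->nr stands in for compound_nr():

#include <stdio.h>

struct fake_page { unsigned long index; unsigned long nr; };

int main(void)
{
	struct fake_page pvec[] = { { 0, 1 }, { 4, 4 }, { 8, 1 } };
	unsigned long xa_index;
	int i = 0, total = 0;

	for (xa_index = 0; xa_index <= 8 && i < 3; xa_index++) {
		struct fake_page *page = NULL;

		/* entry covering xa_index, if any (a hole otherwise) */
		if (xa_index >= pvec[i].index &&
		    xa_index < pvec[i].index + pvec[i].nr)
			page = &pvec[i];
		if (!page)
			continue;			/* hole in @pvec: skip */
		if (page->index + page->nr - 1 == xa_index)
			i++;				/* last sub-page reached: advance */
		total++;				/* xas_store(&xas, NULL) in the real code */
	}
	printf("slots cleared: %d (expected 6)\n", total);
	return 0;
}
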
@@ -408,7 +412,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                .range_end = end,
        };
 
-       if (!mapping_cap_writeback_dirty(mapping))
+       if (!mapping_cap_writeback_dirty(mapping) ||
+           !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                return 0;
 
        wbc_attach_fdatawrite_inode(&wbc, mapping->host);
@@ -617,10 +622,13 @@ int filemap_fdatawait_keep_errors(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
 
+/* Returns true if writeback might be needed or already in progress. */
 static bool mapping_needs_writeback(struct address_space *mapping)
 {
-       return (!dax_mapping(mapping) && mapping->nrpages) ||
-           (dax_mapping(mapping) && mapping->nrexceptional);
+       if (dax_mapping(mapping))
+               return mapping->nrexceptional;
+
+       return mapping->nrpages;
 }
 
 int filemap_write_and_wait(struct address_space *mapping)
@@ -1516,7 +1524,7 @@ EXPORT_SYMBOL(page_cache_prev_miss);
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
 {
        XA_STATE(xas, &mapping->i_pages, offset);
-       struct page *head, *page;
+       struct page *page;
 
        rcu_read_lock();
 repeat:
@@ -1531,25 +1539,19 @@ repeat:
        if (!page || xa_is_value(page))
                goto out;
 
-       head = compound_head(page);
-       if (!page_cache_get_speculative(head))
+       if (!page_cache_get_speculative(page))
                goto repeat;
 
-       /* The page was split under us? */
-       if (compound_head(page) != head) {
-               put_page(head);
-               goto repeat;
-       }
-
        /*
-        * Has the page moved?
+        * Has the page moved or been split?
         * This is part of the lockless pagecache protocol. See
         * include/linux/pagemap.h for details.
         */
        if (unlikely(page != xas_reload(&xas))) {
-               put_page(head);
+               put_page(page);
                goto repeat;
        }
+       page = find_subpage(page, offset);
 out:
        rcu_read_unlock();
 
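find_get_entry() now takes a speculative reference on whatever the XArray holds and only afterwards derives the exact sub-page via find_subpage(); since THPs are naturally aligned, that helper reduces to masking the looked-up offset. A small self-contained illustration of the arithmetic (the helper itself lives in include/linux/pagemap.h; the order-9 THP and the offsets below are made up):

#include <stdio.h>

int main(void)
{
	unsigned long compound_nr = 512;	/* 1UL << compound_order(head) */
	unsigned long head_index = 1024;	/* head->index, naturally aligned */
	unsigned long offset = 1300;		/* offset handed to find_get_entry() */
	unsigned long tail = offset & (compound_nr - 1);

	printf("offset %lu -> head + %lu (file index %lu)\n",
	       offset, tail, head_index + tail);
	return 0;
}
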
@@ -1646,7 +1648,7 @@ repeat:
                }
 
                /* Has the page been truncated? */
-               if (unlikely(page->mapping != mapping)) {
+               if (unlikely(compound_head(page)->mapping != mapping)) {
                        unlock_page(page);
                        put_page(page);
                        goto repeat;
@@ -1731,7 +1733,6 @@ unsigned find_get_entries(struct address_space *mapping,
 
        rcu_read_lock();
        xas_for_each(&xas, page, ULONG_MAX) {
-               struct page *head;
                if (xas_retry(&xas, page))
                        continue;
                /*
@@ -1742,17 +1743,13 @@ unsigned find_get_entries(struct address_space *mapping,
                if (xa_is_value(page))
                        goto export;
 
-               head = compound_head(page);
-               if (!page_cache_get_speculative(head))
+               if (!page_cache_get_speculative(page))
                        goto retry;
 
-               /* The page was split under us? */
-               if (compound_head(page) != head)
-                       goto put_page;
-
-               /* Has the page moved? */
+               /* Has the page moved or been split? */
                if (unlikely(page != xas_reload(&xas)))
                        goto put_page;
+               page = find_subpage(page, xas.xa_index);
 
 export:
                indices[ret] = xas.xa_index;
@@ -1761,7 +1758,7 @@ export:
                        break;
                continue;
 put_page:
-               put_page(head);
+               put_page(page);
 retry:
                xas_reset(&xas);
        }
@@ -1803,33 +1800,27 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 
        rcu_read_lock();
        xas_for_each(&xas, page, end) {
-               struct page *head;
                if (xas_retry(&xas, page))
                        continue;
                /* Skip over shadow, swap and DAX entries */
                if (xa_is_value(page))
                        continue;
 
-               head = compound_head(page);
-               if (!page_cache_get_speculative(head))
+               if (!page_cache_get_speculative(page))
                        goto retry;
 
-               /* The page was split under us? */
-               if (compound_head(page) != head)
-                       goto put_page;
-
-               /* Has the page moved? */
+               /* Has the page moved or been split? */
                if (unlikely(page != xas_reload(&xas)))
                        goto put_page;
 
-               pages[ret] = page;
+               pages[ret] = find_subpage(page, xas.xa_index);
                if (++ret == nr_pages) {
                        *start = xas.xa_index + 1;
                        goto out;
                }
                continue;
 put_page:
-               put_page(head);
+               put_page(page);
 retry:
                xas_reset(&xas);
        }
@@ -1874,7 +1865,6 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 
        rcu_read_lock();
        for (page = xas_load(&xas); page; page = xas_next(&xas)) {
-               struct page *head;
                if (xas_retry(&xas, page))
                        continue;
                /*
@@ -1884,24 +1874,19 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
                if (xa_is_value(page))
                        break;
 
-               head = compound_head(page);
-               if (!page_cache_get_speculative(head))
+               if (!page_cache_get_speculative(page))
                        goto retry;
 
-               /* The page was split under us? */
-               if (compound_head(page) != head)
-                       goto put_page;
-
-               /* Has the page moved? */
+               /* Has the page moved or been split? */
                if (unlikely(page != xas_reload(&xas)))
                        goto put_page;
 
-               pages[ret] = page;
+               pages[ret] = find_subpage(page, xas.xa_index);
                if (++ret == nr_pages)
                        break;
                continue;
 put_page:
-               put_page(head);
+               put_page(page);
 retry:
                xas_reset(&xas);
        }
@@ -1937,7 +1922,6 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
 
        rcu_read_lock();
        xas_for_each_marked(&xas, page, end, tag) {
-               struct page *head;
                if (xas_retry(&xas, page))
                        continue;
                /*
@@ -1948,26 +1932,21 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                if (xa_is_value(page))
                        continue;
 
-               head = compound_head(page);
-               if (!page_cache_get_speculative(head))
+               if (!page_cache_get_speculative(page))
                        goto retry;
 
-               /* The page was split under us? */
-               if (compound_head(page) != head)
-                       goto put_page;
-
-               /* Has the page moved? */
+               /* Has the page moved or been split? */
                if (unlikely(page != xas_reload(&xas)))
                        goto put_page;
 
-               pages[ret] = page;
+               pages[ret] = find_subpage(page, xas.xa_index);
                if (++ret == nr_pages) {
                        *index = xas.xa_index + 1;
                        goto out;
                }
                continue;
 put_page:
-               put_page(head);
+               put_page(page);
 retry:
                xas_reset(&xas);
        }
@@ -2562,12 +2541,12 @@ retry_find:
                goto out_retry;
 
        /* Did it get truncated? */
-       if (unlikely(page->mapping != mapping)) {
+       if (unlikely(compound_head(page)->mapping != mapping)) {
                unlock_page(page);
                put_page(page);
                goto retry_find;
        }
-       VM_BUG_ON_PAGE(page->index != offset, page);
+       VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
 
        /*
         * We have a locked page in the page cache, now we need to check
@@ -2648,7 +2627,7 @@ void filemap_map_pages(struct vm_fault *vmf,
        pgoff_t last_pgoff = start_pgoff;
        unsigned long max_idx;
        XA_STATE(xas, &mapping->i_pages, start_pgoff);
-       struct page *head, *page;
+       struct page *page;
 
        rcu_read_lock();
        xas_for_each(&xas, page, end_pgoff) {
@@ -2657,24 +2636,19 @@ void filemap_map_pages(struct vm_fault *vmf,
                if (xa_is_value(page))
                        goto next;
 
-               head = compound_head(page);
-
                /*
                 * Check for a locked page first, as a speculative
                 * reference may adversely influence page migration.
                 */
-               if (PageLocked(head))
+               if (PageLocked(page))
                        goto next;
-               if (!page_cache_get_speculative(head))
+               if (!page_cache_get_speculative(page))
                        goto next;
 
-               /* The page was split under us? */
-               if (compound_head(page) != head)
-                       goto skip;
-
-               /* Has the page moved? */
+               /* Has the page moved or been split? */
                if (unlikely(page != xas_reload(&xas)))
                        goto skip;
+               page = find_subpage(page, xas.xa_index);
 
                if (!PageUptodate(page) ||
                                PageReadahead(page) ||
index 98f13ab37bacc1d206760e6e6ab554a2536ff7a7..60c3915c8ee6c67e81fd2e675bd4cf9d2e895632 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -29,85 +29,70 @@ struct follow_page_context {
        unsigned int page_mask;
 };
 
-typedef int (*set_dirty_func_t)(struct page *page);
-
-static void __put_user_pages_dirty(struct page **pages,
-                                  unsigned long npages,
-                                  set_dirty_func_t sdf)
-{
-       unsigned long index;
-
-       for (index = 0; index < npages; index++) {
-               struct page *page = compound_head(pages[index]);
-
-               /*
-                * Checking PageDirty at this point may race with
-                * clear_page_dirty_for_io(), but that's OK. Two key cases:
-                *
-                * 1) This code sees the page as already dirty, so it skips
-                * the call to sdf(). That could happen because
-                * clear_page_dirty_for_io() called page_mkclean(),
-                * followed by set_page_dirty(). However, now the page is
-                * going to get written back, which meets the original
-                * intention of setting it dirty, so all is well:
-                * clear_page_dirty_for_io() goes on to call
-                * TestClearPageDirty(), and write the page back.
-                *
-                * 2) This code sees the page as clean, so it calls sdf().
-                * The page stays dirty, despite being written back, so it
-                * gets written back again in the next writeback cycle.
-                * This is harmless.
-                */
-               if (!PageDirty(page))
-                       sdf(page);
-
-               put_user_page(page);
-       }
-}
-
 /**
- * put_user_pages_dirty() - release and dirty an array of gup-pinned pages
- * @pages:  array of pages to be marked dirty and released.
+ * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
+ * @pages:  array of pages to be maybe marked dirty, and definitely released.
  * @npages: number of pages in the @pages array.
+ * @make_dirty: whether to mark the pages dirty
  *
  * "gup-pinned page" refers to a page that has had one of the get_user_pages()
  * variants called on that page.
  *
  * For each page in the @pages array, make that page (or its head page, if a
- * compound page) dirty, if it was previously listed as clean. Then, release
- * the page using put_user_page().
+ * compound page) dirty, if @make_dirty is true, and if the page was previously
+ * listed as clean. In any case, releases all pages using put_user_page(),
+ * possibly via put_user_pages(), for the non-dirty case.
  *
  * Please see the put_user_page() documentation for details.
  *
- * set_page_dirty(), which does not lock the page, is used here.
- * Therefore, it is the caller's responsibility to ensure that this is
- * safe. If not, then put_user_pages_dirty_lock() should be called instead.
+ * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
+ * required, then the caller should a) verify that this is really correct,
+ * because _lock() is usually required, and b) hand code it:
+ * set_page_dirty(), put_user_page().
  *
  */
-void put_user_pages_dirty(struct page **pages, unsigned long npages)
+void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+                              bool make_dirty)
 {
-       __put_user_pages_dirty(pages, npages, set_page_dirty);
-}
-EXPORT_SYMBOL(put_user_pages_dirty);
+       unsigned long index;
 
-/**
- * put_user_pages_dirty_lock() - release and dirty an array of gup-pinned pages
- * @pages:  array of pages to be marked dirty and released.
- * @npages: number of pages in the @pages array.
- *
- * For each page in the @pages array, make that page (or its head page, if a
- * compound page) dirty, if it was previously listed as clean. Then, release
- * the page using put_user_page().
- *
- * Please see the put_user_page() documentation for details.
- *
- * This is just like put_user_pages_dirty(), except that it invokes
- * set_page_dirty_lock(), instead of set_page_dirty().
- *
- */
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages)
-{
-       __put_user_pages_dirty(pages, npages, set_page_dirty_lock);
+       /*
+        * TODO: this can be optimized for huge pages: if a series of pages is
+        * physically contiguous and part of the same compound page, then a
+        * single operation to the head page should suffice.
+        */
+
+       if (!make_dirty) {
+               put_user_pages(pages, npages);
+               return;
+       }
+
+       for (index = 0; index < npages; index++) {
+               struct page *page = compound_head(pages[index]);
+               /*
+                * Checking PageDirty at this point may race with
+                * clear_page_dirty_for_io(), but that's OK. Two key
+                * cases:
+                *
+                * 1) This code sees the page as already dirty, so it
+                * skips the call to set_page_dirty(). That could happen
+                * because clear_page_dirty_for_io() called
+                * page_mkclean(), followed by set_page_dirty().
+                * However, now the page is going to get written back,
+                * which meets the original intention of setting it
+                * dirty, so all is well: clear_page_dirty_for_io() goes
+                * on to call TestClearPageDirty(), and write the page
+                * back.
+                *
+                * 2) This code sees the page as clean, so it calls
+                * set_page_dirty(). The page stays dirty, despite being
+                * written back, so it gets written back again in the
+                * next writeback cycle. This is harmless.
+                */
+               if (!PageDirty(page))
+                       set_page_dirty_lock(page);
+               put_user_page(page);
+       }
 }
 EXPORT_SYMBOL(put_user_pages_dirty_lock);
 
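The old put_user_pages_dirty()/put_user_pages_dirty_lock() pair folds into one helper taking a make_dirty flag. A self-contained user-space reduction of the resulting control flow, with page state and the dirty/put primitives faked; only the branch structure mirrors the new function:

#include <stdbool.h>
#include <stdio.h>

struct fake_page { bool dirty; int refcount; };

static void put_user_page(struct fake_page *page) { page->refcount--; }

static void put_user_pages_dirty_lock(struct fake_page **pages,
				      unsigned long npages, bool make_dirty)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		if (make_dirty && !pages[i]->dirty)
			pages[i]->dirty = true;	/* set_page_dirty_lock() in the kernel */
		put_user_page(pages[i]);
	}
}

int main(void)
{
	struct fake_page a = { false, 1 }, b = { true, 1 };
	struct fake_page *pages[] = { &a, &b };

	put_user_pages_dirty_lock(pages, 2, true);
	printf("a: dirty=%d ref=%d, b: dirty=%d ref=%d\n",
	       a.dirty, a.refcount, b.dirty, b.refcount);
	return 0;
}
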
@@ -399,7 +384,7 @@ retry_locked:
                spin_unlock(ptl);
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
-       if (flags & FOLL_SPLIT) {
+       if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
                int ret;
                page = pmd_page(*pmd);
                if (is_huge_zero_page(page)) {
@@ -408,7 +393,7 @@ retry_locked:
                        split_huge_pmd(vma, pmd, address);
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
-               } else {
+               } else if (flags & FOLL_SPLIT) {
                        if (unlikely(!try_get_page(page))) {
                                spin_unlock(ptl);
                                return ERR_PTR(-ENOMEM);
@@ -420,6 +405,10 @@ retry_locked:
                        put_page(page);
                        if (pmd_none(*pmd))
                                return no_page_table(vma, flags);
+               } else {  /* flags & FOLL_SPLIT_PMD */
+                       spin_unlock(ptl);
+                       split_huge_pmd(vma, pmd, address);
+                       ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
                }
 
                return ret ? ERR_PTR(ret) :
@@ -1460,7 +1449,7 @@ check_again:
                 * gup may start from a tail page. Advance step by the left
                 * part.
                 */
-               step = (1 << compound_order(head)) - (pages[i] - head);
+               step = compound_nr(head) - (pages[i] - head);
                /*
                 * If we get a page from the CMA zone, since we are going to
                 * be pinning these entries, we might as well move them out
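
The step computation above now uses compound_nr(head) instead of the open-coded 1 << compound_order(head) to work out how many array slots remain in a compound page that gup entered at a tail page. A toy user-space version of that arithmetic (the order-3 compound page and the starting tail are invented):

#include <stdio.h>

struct fake_page { int dummy; };

int main(void)
{
	struct fake_page compound[8];		/* order-3 compound page */
	struct fake_page *head = &compound[0];
	struct fake_page *tail = &compound[5];	/* gup started at a tail page */
	unsigned long compound_nr = 8;		/* 1UL << compound_order(head) */
	unsigned long step = compound_nr - (unsigned long)(tail - head);

	printf("advance by %lu pages (expected 3)\n", step);
	return 0;
}
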
index de1f15969e2782edd648a43fc5e7c6d7edacb38b..73fc517c08d222723b3e2a987775bd83dc6a4697 100644 (file)
@@ -496,11 +496,25 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
        return pmd;
 }
 
-static inline struct list_head *page_deferred_list(struct page *page)
+#ifdef CONFIG_MEMCG
+static inline struct deferred_split *get_deferred_split_queue(struct page *page)
 {
-       /* ->lru in the tail pages is occupied by compound_head. */
-       return &page[2].deferred_list;
+       struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
+       struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+
+       if (memcg)
+               return &memcg->deferred_split_queue;
+       else
+               return &pgdat->deferred_split_queue;
+}
+#else
+static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+{
+       struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+
+       return &pgdat->deferred_split_queue;
 }
+#endif
 
 void prep_transhuge_page(struct page *page)
 {
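
get_deferred_split_queue() makes the deferred-split list memcg-aware: a charged page uses its memcg's queue, everything else falls back to the per-node one. A user-space reduction of just that selection; the structs are stand-ins and the queue lengths arbitrary:

#include <stdio.h>

struct deferred_split { int len; };
struct mem_cgroup { struct deferred_split deferred_split_queue; };
struct pglist_data { struct deferred_split deferred_split_queue; };

/* same shape as get_deferred_split_queue(): memcg queue if charged */
static struct deferred_split *pick_queue(struct mem_cgroup *memcg,
					 struct pglist_data *pgdat)
{
	if (memcg)
		return &memcg->deferred_split_queue;
	return &pgdat->deferred_split_queue;
}

int main(void)
{
	struct mem_cgroup memcg = { { 3 } };
	struct pglist_data node = { { 7 } };

	printf("charged THP   -> queue with len %d\n",
	       pick_queue(&memcg, &node)->len);
	printf("uncharged THP -> queue with len %d\n",
	       pick_queue(NULL, &node)->len);
	return 0;
}
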
@@ -2497,6 +2511,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
        struct page *head = compound_head(page);
        pg_data_t *pgdat = page_pgdat(head);
        struct lruvec *lruvec;
+       struct address_space *swap_cache = NULL;
+       unsigned long offset = 0;
        int i;
 
        lruvec = mem_cgroup_page_lruvec(head, pgdat);
@@ -2504,6 +2520,14 @@ static void __split_huge_page(struct page *page, struct list_head *list,
        /* complete memcg works before add pages to LRU */
        mem_cgroup_split_huge_fixup(head);
 
+       if (PageAnon(head) && PageSwapCache(head)) {
+               swp_entry_t entry = { .val = page_private(head) };
+
+               offset = swp_offset(entry);
+               swap_cache = swap_address_space(entry);
+               xa_lock(&swap_cache->i_pages);
+       }
+
        for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
                __split_huge_page_tail(head, i, lruvec, list);
                /* Some pages can be beyond i_size: drop them from page cache */
@@ -2513,6 +2537,12 @@ static void __split_huge_page(struct page *page, struct list_head *list,
                        if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
                                shmem_uncharge(head->mapping->host, 1);
                        put_page(head + i);
+               } else if (!PageAnon(page)) {
+                       __xa_store(&head->mapping->i_pages, head[i].index,
+                                       head + i, 0);
+               } else if (swap_cache) {
+                       __xa_store(&swap_cache->i_pages, offset + i,
+                                       head + i, 0);
                }
        }
 
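For file-backed and swap-cache THPs the split loop now rewrites each XArray slot so it points at its own tail page rather than at the head. A user-space model of that re-pointing, with a plain array standing in for the i_pages/swap-cache slots and NR for HPAGE_PMD_NR:

#include <stdio.h>

#define NR 8				/* stand-in for HPAGE_PMD_NR */
struct fake_page { int id; };

int main(void)
{
	struct fake_page compound[NR];
	struct fake_page *cache[NR];
	int i;

	for (i = 0; i < NR; i++) {
		compound[i].id = i;
		cache[i] = &compound[0];	/* before split: every slot -> head */
	}
	for (i = NR - 1; i >= 1; i--)		/* split loop, tail pages first */
		cache[i] = &compound[i];	/* __xa_store(..., offset + i, head + i, 0) */
	for (i = 0; i < NR; i++)
		printf("index %d -> subpage %d\n", i, cache[i]->id);
	return 0;
}
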
@@ -2523,10 +2553,12 @@ static void __split_huge_page(struct page *page, struct list_head *list,
        /* See comment in __split_huge_page_tail() */
        if (PageAnon(head)) {
                /* Additional pin to swap cache */
-               if (PageSwapCache(head))
+               if (PageSwapCache(head)) {
                        page_ref_add(head, 2);
-               else
+                       xa_unlock(&swap_cache->i_pages);
+               } else {
                        page_ref_inc(head);
+               }
        } else {
                /* Additional pin to page cache */
                page_ref_add(head, 2);
@@ -2673,6 +2705,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
        struct page *head = compound_head(page);
        struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
+       struct deferred_split *ds_queue = get_deferred_split_queue(page);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
        int count, mapcount, extra_pins, ret;
@@ -2759,17 +2792,17 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        }
 
        /* Prevent deferred_split_scan() touching ->_refcount */
-       spin_lock(&pgdata->split_queue_lock);
+       spin_lock(&ds_queue->split_queue_lock);
        count = page_count(head);
        mapcount = total_mapcount(head);
        if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
                if (!list_empty(page_deferred_list(head))) {
-                       pgdata->split_queue_len--;
+                       ds_queue->split_queue_len--;
                        list_del(page_deferred_list(head));
                }
                if (mapping)
                        __dec_node_page_state(page, NR_SHMEM_THPS);
-               spin_unlock(&pgdata->split_queue_lock);
+               spin_unlock(&ds_queue->split_queue_lock);
                __split_huge_page(page, list, end, flags);
                if (PageSwapCache(head)) {
                        swp_entry_t entry = { .val = page_private(head) };
@@ -2786,7 +2819,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                        dump_page(page, "total_mapcount(head) > 0");
                        BUG();
                }
-               spin_unlock(&pgdata->split_queue_lock);
+               spin_unlock(&ds_queue->split_queue_lock);
 fail:          if (mapping)
                        xa_unlock(&mapping->i_pages);
                spin_unlock_irqrestore(&pgdata->lru_lock, flags);
@@ -2808,53 +2841,86 @@ out:
 
 void free_transhuge_page(struct page *page)
 {
-       struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
+       struct deferred_split *ds_queue = get_deferred_split_queue(page);
        unsigned long flags;
 
-       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
+       spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
        if (!list_empty(page_deferred_list(page))) {
-               pgdata->split_queue_len--;
+               ds_queue->split_queue_len--;
                list_del(page_deferred_list(page));
        }
-       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
+       spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
        free_compound_page(page);
 }
 
 void deferred_split_huge_page(struct page *page)
 {
-       struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
+       struct deferred_split *ds_queue = get_deferred_split_queue(page);
+#ifdef CONFIG_MEMCG
+       struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
+#endif
        unsigned long flags;
 
        VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 
-       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
+       /*
+        * The try_to_unmap() in the page reclaim path might reach here too;
+        * this may cause a race condition that corrupts the deferred split
+        * queue. And, if page reclaim is already handling the same page, it
+        * is unnecessary to handle it again in the shrinker.
+        *
+        * Check PageSwapCache to determine if the page is being
+        * handled by page reclaim since THP swap would add the page into
+        * swap cache before calling try_to_unmap().
+        */
+       if (PageSwapCache(page))
+               return;
+
+       spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
        if (list_empty(page_deferred_list(page))) {
                count_vm_event(THP_DEFERRED_SPLIT_PAGE);
-               list_add_tail(page_deferred_list(page), &pgdata->split_queue);
-               pgdata->split_queue_len++;
+               list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
+               ds_queue->split_queue_len++;
+#ifdef CONFIG_MEMCG
+               if (memcg)
+                       memcg_set_shrinker_bit(memcg, page_to_nid(page),
+                                              deferred_split_shrinker.id);
+#endif
        }
-       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
+       spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
                struct shrink_control *sc)
 {
        struct pglist_data *pgdata = NODE_DATA(sc->nid);
-       return READ_ONCE(pgdata->split_queue_len);
+       struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
+
+#ifdef CONFIG_MEMCG
+       if (sc->memcg)
+               ds_queue = &sc->memcg->deferred_split_queue;
+#endif
+       return READ_ONCE(ds_queue->split_queue_len);
 }
 
 static unsigned long deferred_split_scan(struct shrinker *shrink,
                struct shrink_control *sc)
 {
        struct pglist_data *pgdata = NODE_DATA(sc->nid);
+       struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
        unsigned long flags;
        LIST_HEAD(list), *pos, *next;
        struct page *page;
        int split = 0;
 
-       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
+#ifdef CONFIG_MEMCG
+       if (sc->memcg)
+               ds_queue = &sc->memcg->deferred_split_queue;
+#endif
+
+       spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
        /* Take pin on all head pages to avoid freeing them under us */
-       list_for_each_safe(pos, next, &pgdata->split_queue) {
+       list_for_each_safe(pos, next, &ds_queue->split_queue) {
                page = list_entry((void *)pos, struct page, mapping);
                page = compound_head(page);
                if (get_page_unless_zero(page)) {
@@ -2862,12 +2928,12 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
                } else {
                        /* We lost race with put_compound_page() */
                        list_del_init(page_deferred_list(page));
-                       pgdata->split_queue_len--;
+                       ds_queue->split_queue_len--;
                }
                if (!--sc->nr_to_scan)
                        break;
        }
-       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
+       spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 
        list_for_each_safe(pos, next, &list) {
                page = list_entry((void *)pos, struct page, mapping);
@@ -2881,15 +2947,15 @@ next:
                put_page(page);
        }
 
-       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
-       list_splice_tail(&list, &pgdata->split_queue);
-       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
+       spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+       list_splice_tail(&list, &ds_queue->split_queue);
+       spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 
        /*
         * Stop shrinker if we didn't split any page, but the queue is empty.
         * This can happen if pages were freed under us.
         */
-       if (!split && list_empty(&pgdata->split_queue))
+       if (!split && list_empty(&ds_queue->split_queue))
                return SHRINK_STOP;
        return split;
 }
@@ -2898,7 +2964,8 @@ static struct shrinker deferred_split_shrinker = {
        .count_objects = deferred_split_count,
        .scan_objects = deferred_split_scan,
        .seeks = DEFAULT_SEEKS,
-       .flags = SHRINKER_NUMA_AWARE,
+       .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
+                SHRINKER_NONSLAB,
 };
 
 #ifdef CONFIG_DEBUG_FS
index 6d7296dd11b83503a511986814ba5d1f2a84b26b..ef37c85423a5209c528dbbb3e08f48a8bfcb5009 100644 (file)
@@ -1405,12 +1405,25 @@ pgoff_t __basepage_index(struct page *page)
 }
 
 static struct page *alloc_buddy_huge_page(struct hstate *h,
-               gfp_t gfp_mask, int nid, nodemask_t *nmask)
+               gfp_t gfp_mask, int nid, nodemask_t *nmask,
+               nodemask_t *node_alloc_noretry)
 {
        int order = huge_page_order(h);
        struct page *page;
+       bool alloc_try_hard = true;
 
-       gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
+       /*
+        * By default we always try hard to allocate the page with
+        * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
+        * a loop (to adjust global huge page counts) and previous allocation
+        * failed, do not continue to try hard on the same node.  Use the
+        * node_alloc_noretry bitmap to manage this state information.
+        */
+       if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
+               alloc_try_hard = false;
+       gfp_mask |= __GFP_COMP|__GFP_NOWARN;
+       if (alloc_try_hard)
+               gfp_mask |= __GFP_RETRY_MAYFAIL;
        if (nid == NUMA_NO_NODE)
                nid = numa_mem_id();
        page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
@@ -1419,6 +1432,22 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
        else
                __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
 
+       /*
+        * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page,
+        * this indicates an overall state change.  Clear the bit so that we
+        * resume normal 'try hard' allocations.
+        */
+       if (node_alloc_noretry && page && !alloc_try_hard)
+               node_clear(nid, *node_alloc_noretry);
+
+       /*
+        * If we tried hard to get a page but failed, set the bit so that
+        * subsequent attempts will not try as hard until there is an
+        * overall state change.
+        */
+       if (node_alloc_noretry && !page && alloc_try_hard)
+               node_set(nid, *node_alloc_noretry);
+
        return page;
 }
 
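Taken together, the two new comments describe a per-node hysteresis: a node whose last hard attempt failed loses __GFP_RETRY_MAYFAIL until some allocation on it succeeds again. A user-space toy of that bookkeeping; the allocator is faked so node 1 always fails, purely to exercise both transitions:

#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 2
static bool node_noretry[NR_NODES];

/* pretend node 1 is permanently out of huge pages */
static bool fake_alloc(int nid, bool try_hard)
{
	(void)try_hard;		/* a real allocator would retry harder here */
	return nid != 1;
}

static bool alloc_one(int nid)
{
	bool try_hard = !node_noretry[nid];	/* add __GFP_RETRY_MAYFAIL? */
	bool got_page = fake_alloc(nid, try_hard);

	if (got_page && !try_hard)
		node_noretry[nid] = false;	/* state changed: try hard again */
	if (!got_page && try_hard)
		node_noretry[nid] = true;	/* stop hammering this node */
	return got_page;
}

int main(void)
{
	int round, nid;

	for (round = 0; round < 2; round++)
		for (nid = 0; nid < NR_NODES; nid++) {
			bool ok = alloc_one(nid);

			printf("round %d node %d: %s (noretry=%d)\n",
			       round, nid, ok ? "ok" : "fail",
			       node_noretry[nid]);
		}
	return 0;
}
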
@@ -1427,7 +1456,8 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
  * should use this function to get new hugetlb pages
  */
 static struct page *alloc_fresh_huge_page(struct hstate *h,
-               gfp_t gfp_mask, int nid, nodemask_t *nmask)
+               gfp_t gfp_mask, int nid, nodemask_t *nmask,
+               nodemask_t *node_alloc_noretry)
 {
        struct page *page;
 
@@ -1435,7 +1465,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
                page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
        else
                page = alloc_buddy_huge_page(h, gfp_mask,
-                               nid, nmask);
+                               nid, nmask, node_alloc_noretry);
        if (!page)
                return NULL;
 
@@ -1450,14 +1480,16 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
  * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
  * manner.
  */
-static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
+                               nodemask_t *node_alloc_noretry)
 {
        struct page *page;
        int nr_nodes, node;
        gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 
        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
-               page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
+               page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
+                                               node_alloc_noretry);
                if (page)
                        break;
        }
@@ -1601,7 +1633,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
                goto out_unlock;
        spin_unlock(&hugetlb_lock);
 
-       page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
+       page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
        if (!page)
                return NULL;
 
@@ -1637,7 +1669,7 @@ struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
        if (hstate_is_gigantic(h))
                return NULL;
 
-       page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
+       page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
        if (!page)
                return NULL;
 
@@ -2207,13 +2239,33 @@ static void __init gather_bootmem_prealloc(void)
 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 {
        unsigned long i;
+       nodemask_t *node_alloc_noretry;
+
+       if (!hstate_is_gigantic(h)) {
+               /*
+                * Bit mask controlling how hard we retry per-node allocations.
+                * Ignore errors as lower level routines can deal with
+                * node_alloc_noretry == NULL.  If this kmalloc fails at boot
+                * time, we are likely in bigger trouble.
+                */
+               node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
+                                               GFP_KERNEL);
+       } else {
+               /* allocations done at boot time */
+               node_alloc_noretry = NULL;
+       }
+
+       /* bit mask controlling how hard we retry per-node allocations */
+       if (node_alloc_noretry)
+               nodes_clear(*node_alloc_noretry);
 
        for (i = 0; i < h->max_huge_pages; ++i) {
                if (hstate_is_gigantic(h)) {
                        if (!alloc_bootmem_huge_page(h))
                                break;
                } else if (!alloc_pool_huge_page(h,
-                                        &node_states[N_MEMORY]))
+                                        &node_states[N_MEMORY],
+                                        node_alloc_noretry))
                        break;
                cond_resched();
        }
@@ -2225,6 +2277,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
                        h->max_huge_pages, buf, i);
                h->max_huge_pages = i;
        }
+
+       kfree(node_alloc_noretry);
 }
 
 static void __init hugetlb_init_hstates(void)
@@ -2323,6 +2377,17 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
                              nodemask_t *nodes_allowed)
 {
        unsigned long min_count, ret;
+       NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
+
+       /*
+        * Bit mask controlling how hard we retry per-node allocations.
+        * If we cannot allocate the bit mask, do not attempt to allocate
+        * the requested huge pages.
+        */
+       if (node_alloc_noretry)
+               nodes_clear(*node_alloc_noretry);
+       else
+               return -ENOMEM;
 
        spin_lock(&hugetlb_lock);
 
@@ -2356,6 +2421,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
        if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
                if (count > persistent_huge_pages(h)) {
                        spin_unlock(&hugetlb_lock);
+                       NODEMASK_FREE(node_alloc_noretry);
                        return -EINVAL;
                }
                /* Fall through to decrease pool */
@@ -2388,7 +2454,8 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
                /* yield cpu to avoid soft lockup */
                cond_resched();
 
-               ret = alloc_pool_huge_page(h, nodes_allowed);
+               ret = alloc_pool_huge_page(h, nodes_allowed,
+                                               node_alloc_noretry);
                spin_lock(&hugetlb_lock);
                if (!ret)
                        goto out;
@@ -2429,6 +2496,8 @@ out:
        h->max_huge_pages = persistent_huge_pages(h);
        spin_unlock(&hugetlb_lock);
 
+       NODEMASK_FREE(node_alloc_noretry);
+
        return 0;
 }
 
index 68c2f2f3c05b76203fae22ad8cf727482c95216e..f1930fa0b445dae721d9be0139e2b2ba7119ec97 100644 (file)
@@ -139,7 +139,7 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
        if (!page_hcg || page_hcg != h_cg)
                goto out;
 
-       nr_pages = 1 << compound_order(page);
+       nr_pages = compound_nr(page);
        if (!parent) {
                parent = root_h_cgroup;
                /* root has no limit */
index a787a319211ef9aea4d0bebb84055b377b52dfae..fb1e15028ef06b6ff5b68a125df7e434d52787b8 100644 (file)
@@ -35,6 +35,6 @@ struct mm_struct init_mm = {
        .arg_lock       =  __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
        .mmlist         = LIST_HEAD_INIT(init_mm.mmlist),
        .user_ns        = &init_user_ns,
-       .cpu_bitmap     = { [BITS_TO_LONGS(NR_CPUS)] = 0},
+       .cpu_bitmap     = CPU_BITS_NONE,
        INIT_MM_CONTEXT(init_mm)
 };
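
In plain C, a designated index N names element N (the (N+1)-th slot), so the old initializer sizes the bitmap to one long more than the BITS_TO_LONGS(NR_CPUS) longs it needs, while CPU_BITS_NONE zeroes exactly that many using GCC's designated-range extension. A stand-alone demo of the difference (N is arbitrary):

#include <stdio.h>

#define N 4

int main(void)
{
	/* size is deduced from the largest designated index + 1 */
	unsigned long old_style[] = { [N] = 0 };		/* 5 elements */
	unsigned long range_style[] = { [0 ... N - 1] = 0 };	/* 4 elements */

	printf("old_style has %zu elements, range_style has %zu\n",
	       sizeof(old_style) / sizeof(old_style[0]),
	       sizeof(range_style) / sizeof(range_style[0]));
	return 0;
}
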
index 95d16a42db6bc731e11c9e86bb639bb277cc345d..6814d6d6a023d8e00af10954bf5252ceea9fad10 100644 (file)
@@ -304,7 +304,6 @@ size_t kasan_metadata_size(struct kmem_cache *cache)
 struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
 {
-       BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
 }
 
@@ -315,14 +314,31 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
        return (void *)object + cache->kasan_info.free_meta_offset;
 }
 
+
+static void kasan_set_free_info(struct kmem_cache *cache,
+               void *object, u8 tag)
+{
+       struct kasan_alloc_meta *alloc_meta;
+       u8 idx = 0;
+
+       alloc_meta = get_alloc_info(cache, object);
+
+#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+       idx = alloc_meta->free_track_idx;
+       alloc_meta->free_pointer_tag[idx] = tag;
+       alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
+#endif
+
+       set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
+}
+
 void kasan_poison_slab(struct page *page)
 {
        unsigned long i;
 
-       for (i = 0; i < (1 << compound_order(page)); i++)
+       for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
-       kasan_poison_shadow(page_address(page),
-                       PAGE_SIZE << compound_order(page),
+       kasan_poison_shadow(page_address(page), page_size(page),
                        KASAN_KMALLOC_REDZONE);
 }
 
@@ -452,7 +468,8 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                        unlikely(!(cache->flags & SLAB_KASAN)))
                return false;
 
-       set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
+       kasan_set_free_info(cache, object, tag);
+
        quarantine_put(get_free_info(cache, object), cache);
 
        return IS_ENABLED(CONFIG_KASAN_GENERIC);
@@ -524,7 +541,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
-       redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+       redzone_end = (unsigned long)ptr + page_size(page);
 
        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -560,8 +577,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
-               kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-                               KASAN_FREE_PAGE);
+               kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
        } else {
                __kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
index 014f19e76247c7488bc2f3a943b2cb7caa40f9c4..35cff6bbb7162f885b76bcd7f5714368fff69739 100644 (file)
@@ -95,9 +95,19 @@ struct kasan_track {
        depot_stack_handle_t stack;
 };
 
+#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+#define KASAN_NR_FREE_STACKS 5
+#else
+#define KASAN_NR_FREE_STACKS 1
+#endif
+
 struct kasan_alloc_meta {
        struct kasan_track alloc_track;
-       struct kasan_track free_track;
+       struct kasan_track free_track[KASAN_NR_FREE_STACKS];
+#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+       u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
+       u8 free_track_idx;
+#endif
 };
 
 struct qlist_node {
@@ -146,6 +156,8 @@ void kasan_report(unsigned long addr, size_t size,
                bool is_write, unsigned long ip);
 void kasan_report_invalid_free(void *object, unsigned long ip);
 
+struct page *kasan_addr_to_page(const void *addr);
+
 #if defined(CONFIG_KASAN_GENERIC) && \
        (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
 void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
index 0e5f965f1882147307f0338376dae12e391e7418..621782100eaa0f81d930799c6899d598db390550 100644 (file)
@@ -111,7 +111,7 @@ static void print_track(struct kasan_track *track, const char *prefix)
        }
 }
 
-static struct page *addr_to_page(const void *addr)
+struct page *kasan_addr_to_page(const void *addr)
 {
        if ((addr >= (void *)PAGE_OFFSET) &&
                        (addr < high_memory))
@@ -151,15 +151,38 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
                (void *)(object_addr + cache->object_size));
 }
 
+static struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
+               void *object, u8 tag)
+{
+       struct kasan_alloc_meta *alloc_meta;
+       int i = 0;
+
+       alloc_meta = get_alloc_info(cache, object);
+
+#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+       for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
+               if (alloc_meta->free_pointer_tag[i] == tag)
+                       break;
+       }
+       if (i == KASAN_NR_FREE_STACKS)
+               i = alloc_meta->free_track_idx;
+#endif
+
+       return &alloc_meta->free_track[i];
+}
+
 static void describe_object(struct kmem_cache *cache, void *object,
-                               const void *addr)
+                               const void *addr, u8 tag)
 {
        struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
 
        if (cache->flags & SLAB_KASAN) {
+               struct kasan_track *free_track;
+
                print_track(&alloc_info->alloc_track, "Allocated");
                pr_err("\n");
-               print_track(&alloc_info->free_track, "Freed");
+               free_track = kasan_get_free_track(cache, object, tag);
+               print_track(free_track, "Freed");
                pr_err("\n");
        }
 
@@ -344,9 +367,9 @@ static void print_address_stack_frame(const void *addr)
        print_decoded_frame_descr(frame_descr);
 }
 
-static void print_address_description(void *addr)
+static void print_address_description(void *addr, u8 tag)
 {
-       struct page *page = addr_to_page(addr);
+       struct page *page = kasan_addr_to_page(addr);
 
        dump_stack();
        pr_err("\n");
@@ -355,7 +378,7 @@ static void print_address_description(void *addr)
                struct kmem_cache *cache = page->slab_cache;
                void *object = nearest_obj(cache, page, addr);
 
-               describe_object(cache, object, addr);
+               describe_object(cache, object, addr, tag);
        }
 
        if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
@@ -435,13 +458,14 @@ static bool report_enabled(void)
 void kasan_report_invalid_free(void *object, unsigned long ip)
 {
        unsigned long flags;
+       u8 tag = get_tag(object);
 
+       object = reset_tag(object);
        start_report(&flags);
        pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
-       print_tags(get_tag(object), reset_tag(object));
-       object = reset_tag(object);
+       print_tags(tag, object);
        pr_err("\n");
-       print_address_description(object);
+       print_address_description(object, tag);
        pr_err("\n");
        print_shadow_for_address(object);
        end_report(&flags);
@@ -479,7 +503,7 @@ void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned lon
        pr_err("\n");
 
        if (addr_has_shadow(untagged_addr)) {
-               print_address_description(untagged_addr);
+               print_address_description(untagged_addr, get_tag(tagged_addr));
                pr_err("\n");
                print_shadow_for_address(info.first_bad_addr);
        } else {
index 8eaf5f72227180c2af1b9e6c5e258334ae269c88..969ae08f59d7fbc34405a24890e32b7411a56256 100644 (file)
 
 const char *get_bug_type(struct kasan_access_info *info)
 {
+#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+       struct kasan_alloc_meta *alloc_meta;
+       struct kmem_cache *cache;
+       struct page *page;
+       const void *addr;
+       void *object;
+       u8 tag;
+       int i;
+
+       tag = get_tag(info->access_addr);
+       addr = reset_tag(info->access_addr);
+       page = kasan_addr_to_page(addr);
+       if (page && PageSlab(page)) {
+               cache = page->slab_cache;
+               object = nearest_obj(cache, page, (void *)addr);
+               alloc_meta = get_alloc_info(cache, object);
+
+               for (i = 0; i < KASAN_NR_FREE_STACKS; i++)
+                       if (alloc_meta->free_pointer_tag[i] == tag)
+                               return "use-after-free";
+               return "out-of-bounds";
+       }
+
+#endif
        return "invalid-access";
 }
 
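With CONFIG_KASAN_SW_TAGS_IDENTIFY the free path records the pointer tag of the last few frees per object, and get_bug_type() reports use-after-free when the faulting tag matches one of them, out-of-bounds otherwise. A self-contained user-space reduction of the ring and the lookup; the struct is a simplified stand-in for the kasan_alloc_meta fields:

#include <stdio.h>

#define KASAN_NR_FREE_STACKS 5

struct alloc_meta {
	unsigned char free_pointer_tag[KASAN_NR_FREE_STACKS];
	unsigned char free_track_idx;
};

static void record_free(struct alloc_meta *m, unsigned char tag)
{
	m->free_pointer_tag[m->free_track_idx] = tag;
	m->free_track_idx = (m->free_track_idx + 1) % KASAN_NR_FREE_STACKS;
}

static const char *bug_type(const struct alloc_meta *m, unsigned char tag)
{
	int i;

	for (i = 0; i < KASAN_NR_FREE_STACKS; i++)
		if (m->free_pointer_tag[i] == tag)
			return "use-after-free";
	return "out-of-bounds";
}

int main(void)
{
	struct alloc_meta meta = { { 0 }, 0 };

	record_free(&meta, 0xAB);		/* object freed with tag 0xAB */
	printf("access with tag 0xAB: %s\n", bug_type(&meta, 0xAB));
	printf("access with tag 0xCD: %s\n", bug_type(&meta, 0xCD));
	return 0;
}
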
index ccede2425c3f88da0529a1040025771539748e8b..0a1b4b484ac5b4a0eed5e5148f04849d9e09607b 100644 (file)
@@ -48,6 +48,7 @@ enum scan_result {
        SCAN_CGROUP_CHARGE_FAIL,
        SCAN_EXCEED_SWAP_PTE,
        SCAN_TRUNCATED,
+       SCAN_PAGE_HAS_PRIVATE,
 };
 
 #define CREATE_TRACE_POINTS
@@ -76,6 +77,8 @@ static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
 
 static struct kmem_cache *mm_slot_cache __read_mostly;
 
+#define MAX_PTE_MAPPED_THP 8
+
 /**
  * struct mm_slot - hash lookup from mm to mm_slot
  * @hash: hash collision list
@@ -86,6 +89,10 @@ struct mm_slot {
        struct hlist_node hash;
        struct list_head mm_node;
        struct mm_struct *mm;
+
+       /* pte-mapped THP in this mm */
+       int nr_pte_mapped_thp;
+       unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
 };
 
 /**
@@ -404,7 +411,11 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
            (vm_flags & VM_NOHUGEPAGE) ||
            test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                return false;
-       if (shmem_file(vma->vm_file)) {
+
+       if (shmem_file(vma->vm_file) ||
+           (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+            vma->vm_file &&
+            (vm_flags & VM_DENYWRITE))) {
                if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
                        return false;
                return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
@@ -456,8 +467,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
        unsigned long hstart, hend;
 
        /*
-        * khugepaged does not yet work on non-shmem files or special
-        * mappings. And file-private shmem THP is not supported.
+        * For non-shmem files, khugepaged only supports read-only files.
+        * khugepaged does not yet work on special mappings. And
+        * file-private shmem THP is not supported.
         */
        if (!hugepage_vma_check(vma, vm_flags))
                return 0;
@@ -1248,6 +1260,159 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 }
 
 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
+/*
+ * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
+ * khugepaged should try to collapse the page table.
+ */
+static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
+                                        unsigned long addr)
+{
+       struct mm_slot *mm_slot;
+
+       VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
+
+       spin_lock(&khugepaged_mm_lock);
+       mm_slot = get_mm_slot(mm);
+       if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
+               mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
+       spin_unlock(&khugepaged_mm_lock);
+       return 0;
+}
+
+/**
+ * Try to collapse a pte-mapped THP for mm at address haddr.
+ *
+ * This function checks whether all the PTEs in the PMD are pointing to the
+ * right THP. If so, retract the page table so the THP can refault in as
+ * pmd-mapped.
+ */
+void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
+{
+       unsigned long haddr = addr & HPAGE_PMD_MASK;
+       struct vm_area_struct *vma = find_vma(mm, haddr);
+       struct page *hpage = NULL;
+       pte_t *start_pte, *pte;
+       pmd_t *pmd, _pmd;
+       spinlock_t *ptl;
+       int count = 0;
+       int i;
+
+       if (!vma || !vma->vm_file ||
+           vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
+               return;
+
+       /*
+        * This vm_flags may not have VM_HUGEPAGE if the page was not
+        * collapsed by this mm. But we can still collapse if the page is
+        * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
+        * will not fail the vma for missing VM_HUGEPAGE.
+        */
+       if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
+               return;
+
+       pmd = mm_find_pmd(mm, haddr);
+       if (!pmd)
+               return;
+
+       start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
+
+       /* step 1: check all mapped PTEs are to the right huge page */
+       for (i = 0, addr = haddr, pte = start_pte;
+            i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
+               struct page *page;
+
+               /* empty pte, skip */
+               if (pte_none(*pte))
+                       continue;
+
+               /* page swapped out, abort */
+               if (!pte_present(*pte))
+                       goto abort;
+
+               page = vm_normal_page(vma, addr, *pte);
+
+               if (!page || !PageCompound(page))
+                       goto abort;
+
+               if (!hpage) {
+                       hpage = compound_head(page);
+                       /*
+                        * The mapping of the THP should not change.
+                        *
+                        * Note that uprobe, debugger, or MAP_PRIVATE may
+                        * change the page table, but the new page will
+                        * not pass PageCompound() check.
+                        */
+                       if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
+                               goto abort;
+               }
+
+               /*
+                * Confirm the page maps to the correct subpage.
+                *
+                * Note that uprobe, debugger, or MAP_PRIVATE may change
+                * the page table, but the new page will not pass
+                * PageCompound() check.
+                */
+               if (WARN_ON(hpage + i != page))
+                       goto abort;
+               count++;
+       }
+
+       /* step 2: adjust rmap */
+       for (i = 0, addr = haddr, pte = start_pte;
+            i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
+               struct page *page;
+
+               if (pte_none(*pte))
+                       continue;
+               page = vm_normal_page(vma, addr, *pte);
+               page_remove_rmap(page, false);
+       }
+
+       pte_unmap_unlock(start_pte, ptl);
+
+       /* step 3: set proper refcount and mm_counters. */
+       if (hpage) {
+               page_ref_sub(hpage, count);
+               add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
+       }
+
+       /* step 4: collapse pmd */
+       ptl = pmd_lock(vma->vm_mm, pmd);
+       _pmd = pmdp_collapse_flush(vma, addr, pmd);
+       spin_unlock(ptl);
+       mm_dec_nr_ptes(mm);
+       pte_free(mm, pmd_pgtable(_pmd));
+       return;
+
+abort:
+       pte_unmap_unlock(start_pte, ptl);
+}
+
+static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
+{
+       struct mm_struct *mm = mm_slot->mm;
+       int i;
+
+       if (likely(mm_slot->nr_pte_mapped_thp == 0))
+               return 0;
+
+       if (!down_write_trylock(&mm->mmap_sem))
+               return -EBUSY;
+
+       if (unlikely(khugepaged_test_exit(mm)))
+               goto out;
+
+       for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
+               collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
+
+out:
+       mm_slot->nr_pte_mapped_thp = 0;
+       up_write(&mm->mmap_sem);
+       return 0;
+}
+
 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 {
        struct vm_area_struct *vma;
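
Step 1 of collapse_pte_mapped_thp() above proves that every present PTE under the PMD maps the matching sub-page of a single THP before anything is modified. A user-space rendering of that walk, where NULL stands in for pte_none() and the hole position is invented:

#include <stdio.h>

#define NR 8				/* stand-in for HPAGE_PMD_NR */
struct fake_page { int id; };

int main(void)
{
	struct fake_page hpage[NR];	/* the compound page, head first */
	struct fake_page *pte[NR];	/* fake PTEs under one PMD */
	int i, count = 0;

	for (i = 0; i < NR; i++) {
		hpage[i].id = i;
		pte[i] = &hpage[i];	/* pte-mapped to the right sub-page */
	}
	pte[3] = NULL;			/* a hole: pte_none(), simply skipped */

	for (i = 0; i < NR; i++) {
		if (!pte[i])
			continue;	/* empty pte, skip */
		if (pte[i] != &hpage[i]) {
			printf("abort: pte %d maps the wrong page\n", i);
			return 1;
		}
		count++;		/* later subtracted from the THP refcount */
	}
	printf("all mapped PTEs point into the THP, count=%d\n", count);
	return 0;
}
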
@@ -1256,7 +1421,22 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 
        i_mmap_lock_write(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
-               /* probably overkill */
+               /*
+                * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
+                * got written to. These VMAs are likely not worth the cost
+                * of taking down_write(mmap_sem), as their PMD-mapping is
+                * likely to be split later.
+                *
+                * Note that the vma->anon_vma check is racy: it can be set up after
+                * the check but before we took mmap_sem by the fault path.
+                * But page lock would prevent establishing any new ptes of the
+                * page, so we are safe.
+                *
+                * An alternative would be to drop the check, but to check that
+                * the page table is clear before calling pmdp_collapse_flush()
+                * under ptl. That has a higher chance of recovering the THP
+                * for the VMA, but also a higher cost.
+                */
                if (vma->anon_vma)
                        continue;
                addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
@@ -1269,9 +1449,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                        continue;
                /*
                 * We need exclusive mmap_sem to retract page table.
-                * If trylock fails we would end up with pte-mapped THP after
-                * re-fault. Not ideal, but it's more important to not disturb
-                * the system too much.
+                *
+                * We use trylock due to lock inversion: we need to acquire
+                * mmap_sem while holding the page lock. The fault path takes
+                * them in the reverse order, so trylock avoids the deadlock.
                 */
                if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
                        spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
@@ -1281,18 +1462,21 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                        up_write(&vma->vm_mm->mmap_sem);
                        mm_dec_nr_ptes(vma->vm_mm);
                        pte_free(vma->vm_mm, pmd_pgtable(_pmd));
+               } else {
+                       /* Try again later */
+                       khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
                }
        }
        i_mmap_unlock_write(mapping);
 }
 
 /**
- * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
+ * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
  *
  * Basic scheme is simple, details are more complex:
  *  - allocate and lock a new huge page;
  *  - scan page cache replacing old pages with the new one
- *    + swap in pages if necessary;
+ *    + swap/gup in pages if necessary;
  *    + fill in gaps;
  *    + keep old pages around in case rollback is required;
  *  - if replacing succeeds:
@@ -1304,10 +1488,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *    + restore gaps in the page cache;
  *    + unlock and free huge page;
  */
-static void collapse_shmem(struct mm_struct *mm,
-               struct address_space *mapping, pgoff_t start,
+static void collapse_file(struct mm_struct *mm,
+               struct file *file, pgoff_t start,
                struct page **hpage, int node)
 {
+       struct address_space *mapping = file->f_mapping;
        gfp_t gfp;
        struct page *new_page;
        struct mem_cgroup *memcg;
@@ -1315,7 +1500,9 @@ static void collapse_shmem(struct mm_struct *mm,
        LIST_HEAD(pagelist);
        XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
        int nr_none = 0, result = SCAN_SUCCEED;
+       bool is_shmem = shmem_file(file);
 
+       VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
        VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
        /* Only allocate from the target node */
@@ -1347,7 +1534,8 @@ static void collapse_shmem(struct mm_struct *mm,
        } while (1);
 
        __SetPageLocked(new_page);
-       __SetPageSwapBacked(new_page);
+       if (is_shmem)
+               __SetPageSwapBacked(new_page);
        new_page->index = start;
        new_page->mapping = mapping;
 
@@ -1362,41 +1550,75 @@ static void collapse_shmem(struct mm_struct *mm,
                struct page *page = xas_next(&xas);
 
                VM_BUG_ON(index != xas.xa_index);
-               if (!page) {
-                       /*
-                        * Stop if extent has been truncated or hole-punched,
-                        * and is now completely empty.
-                        */
-                       if (index == start) {
-                               if (!xas_next_entry(&xas, end - 1)) {
-                                       result = SCAN_TRUNCATED;
+               if (is_shmem) {
+                       if (!page) {
+                               /*
+                                * Stop if extent has been truncated or
+                                * hole-punched, and is now completely
+                                * empty.
+                                */
+                               if (index == start) {
+                                       if (!xas_next_entry(&xas, end - 1)) {
+                                               result = SCAN_TRUNCATED;
+                                               goto xa_locked;
+                                       }
+                                       xas_set(&xas, index);
+                               }
+                               if (!shmem_charge(mapping->host, 1)) {
+                                       result = SCAN_FAIL;
                                        goto xa_locked;
                                }
-                               xas_set(&xas, index);
+                               xas_store(&xas, new_page);
+                               nr_none++;
+                               continue;
                        }
-                       if (!shmem_charge(mapping->host, 1)) {
-                               result = SCAN_FAIL;
+
+                       if (xa_is_value(page) || !PageUptodate(page)) {
+                               xas_unlock_irq(&xas);
+                               /* swap in or instantiate fallocated page */
+                               if (shmem_getpage(mapping->host, index, &page,
+                                                 SGP_NOHUGE)) {
+                                       result = SCAN_FAIL;
+                                       goto xa_unlocked;
+                               }
+                       } else if (trylock_page(page)) {
+                               get_page(page);
+                               xas_unlock_irq(&xas);
+                       } else {
+                               result = SCAN_PAGE_LOCK;
                                goto xa_locked;
                        }
-                       xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
-                       nr_none++;
-                       continue;
-               }
-
-               if (xa_is_value(page) || !PageUptodate(page)) {
-                       xas_unlock_irq(&xas);
-                       /* swap in or instantiate fallocated page */
-                       if (shmem_getpage(mapping->host, index, &page,
-                                               SGP_NOHUGE)) {
+               } else {        /* !is_shmem */
+                       if (!page || xa_is_value(page)) {
+                               xas_unlock_irq(&xas);
+                               page_cache_sync_readahead(mapping, &file->f_ra,
+                                                         file, index,
+                                                         PAGE_SIZE);
+                               /* drain pagevecs to help isolate_lru_page() */
+                               lru_add_drain();
+                               page = find_lock_page(mapping, index);
+                               if (unlikely(page == NULL)) {
+                                       result = SCAN_FAIL;
+                                       goto xa_unlocked;
+                               }
+                       } else if (!PageUptodate(page)) {
+                               xas_unlock_irq(&xas);
+                               wait_on_page_locked(page);
+                               if (!trylock_page(page)) {
+                                       result = SCAN_PAGE_LOCK;
+                                       goto xa_unlocked;
+                               }
+                               get_page(page);
+                       } else if (PageDirty(page)) {
                                result = SCAN_FAIL;
-                               goto xa_unlocked;
+                               goto xa_locked;
+                       } else if (trylock_page(page)) {
+                               get_page(page);
+                               xas_unlock_irq(&xas);
+                       } else {
+                               result = SCAN_PAGE_LOCK;
+                               goto xa_locked;
                        }
-               } else if (trylock_page(page)) {
-                       get_page(page);
-                       xas_unlock_irq(&xas);
-               } else {
-                       result = SCAN_PAGE_LOCK;
-                       goto xa_locked;
                }
 
                /*
@@ -1425,6 +1647,12 @@ static void collapse_shmem(struct mm_struct *mm,
                        goto out_unlock;
                }
 
+               if (page_has_private(page) &&
+                   !try_to_release_page(page, GFP_KERNEL)) {
+                       result = SCAN_PAGE_HAS_PRIVATE;
+                       goto out_unlock;
+               }
+
                if (page_mapped(page))
                        unmap_mapping_pages(mapping, index, 1, false);
 
@@ -1454,7 +1682,7 @@ static void collapse_shmem(struct mm_struct *mm,
                list_add_tail(&page->lru, &pagelist);
 
                /* Finally, replace with the new page. */
-               xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
+               xas_store(&xas, new_page);
                continue;
 out_unlock:
                unlock_page(page);
@@ -1462,12 +1690,20 @@ out_unlock:
                goto xa_unlocked;
        }
 
-       __inc_node_page_state(new_page, NR_SHMEM_THPS);
+       if (is_shmem)
+               __inc_node_page_state(new_page, NR_SHMEM_THPS);
+       else {
+               __inc_node_page_state(new_page, NR_FILE_THPS);
+               filemap_nr_thps_inc(mapping);
+       }
+
        if (nr_none) {
                struct zone *zone = page_zone(new_page);
 
                __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
-               __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+               if (is_shmem)
+                       __mod_node_page_state(zone->zone_pgdat,
+                                             NR_SHMEM, nr_none);
        }
 
 xa_locked:
@@ -1505,10 +1741,15 @@ xa_unlocked:
 
                SetPageUptodate(new_page);
                page_ref_add(new_page, HPAGE_PMD_NR - 1);
-               set_page_dirty(new_page);
                mem_cgroup_commit_charge(new_page, memcg, false, true);
+
+               if (is_shmem) {
+                       set_page_dirty(new_page);
+                       lru_cache_add_anon(new_page);
+               } else {
+                       lru_cache_add_file(new_page);
+               }
                count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
-               lru_cache_add_anon(new_page);
 
                /*
                 * Remove pte page tables, so we can re-fault the page as huge.
@@ -1523,7 +1764,9 @@ xa_unlocked:
                /* Something went wrong: roll back page cache changes */
                xas_lock_irq(&xas);
                mapping->nrpages -= nr_none;
-               shmem_uncharge(mapping->host, nr_none);
+
+               if (is_shmem)
+                       shmem_uncharge(mapping->host, nr_none);
 
                xas_set(&xas, start);
                xas_for_each(&xas, page, end - 1) {
@@ -1563,11 +1806,11 @@ out:
        /* TODO: tracepoints */
 }
 
-static void khugepaged_scan_shmem(struct mm_struct *mm,
-               struct address_space *mapping,
-               pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm,
+               struct file *file, pgoff_t start, struct page **hpage)
 {
        struct page *page = NULL;
+       struct address_space *mapping = file->f_mapping;
        XA_STATE(xas, &mapping->i_pages, start);
        int present, swap;
        int node = NUMA_NO_NODE;
@@ -1606,7 +1849,8 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
                        break;
                }
 
-               if (page_count(page) != 1 + page_mapcount(page)) {
+               if (page_count(page) !=
+                   1 + page_mapcount(page) + page_has_private(page)) {
                        result = SCAN_PAGE_COUNT;
                        break;
                }
@@ -1631,19 +1875,23 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
                        result = SCAN_EXCEED_NONE_PTE;
                } else {
                        node = khugepaged_find_target_node();
-                       collapse_shmem(mm, mapping, start, hpage, node);
+                       collapse_file(mm, file, start, hpage, node);
                }
        }
 
        /* TODO: tracepoints */
 }
 #else
-static void khugepaged_scan_shmem(struct mm_struct *mm,
-               struct address_space *mapping,
-               pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm,
+               struct file *file, pgoff_t start, struct page **hpage)
 {
        BUILD_BUG();
 }
+
+static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
+{
+       return 0;
+}
 #endif
 
 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
@@ -1668,6 +1916,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                khugepaged_scan.mm_slot = mm_slot;
        }
        spin_unlock(&khugepaged_mm_lock);
+       khugepaged_collapse_pte_mapped_thps(mm_slot);
 
        mm = mm_slot->mm;
        /*
@@ -1713,17 +1962,18 @@ skip:
                        VM_BUG_ON(khugepaged_scan.address < hstart ||
                                  khugepaged_scan.address + HPAGE_PMD_SIZE >
                                  hend);
-                       if (shmem_file(vma->vm_file)) {
+                       if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
                                struct file *file;
                                pgoff_t pgoff = linear_page_index(vma,
                                                khugepaged_scan.address);
-                               if (!shmem_huge_enabled(vma))
+
+                               if (shmem_file(vma->vm_file)
+                                   && !shmem_huge_enabled(vma))
                                        goto skip;
                                file = get_file(vma->vm_file);
                                up_read(&mm->mmap_sem);
                                ret = 1;
-                               khugepaged_scan_shmem(mm, file->f_mapping,
-                                               pgoff, hpage);
+                               khugepaged_scan_file(mm, file, pgoff, hpage);
                                fput(file);
                        } else {
                                ret = khugepaged_scan_pmd(mm, vma,
index f6e602918dac84a96fe16a8e2c202505256bcb8d..03a8d84badada040c3b8e16200e244ef9ba10094 100644 (file)
@@ -168,6 +168,8 @@ struct kmemleak_object {
 #define OBJECT_REPORTED                (1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN         (1 << 2)
+/* flag set to fully scan the object when scan_area allocation failed */
+#define OBJECT_FULL_SCAN       (1 << 3)
 
 #define HEX_PREFIX             "    "
 /* number of bytes to print per line; must be 16 or 32 */
@@ -183,6 +185,10 @@ struct kmemleak_object {
 static LIST_HEAD(object_list);
 /* the list of gray-colored objects (see color_gray comment below) */
 static LIST_HEAD(gray_list);
+/* memory pool allocation */
+static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
+static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
+static LIST_HEAD(mem_pool_free_list);
 /* search tree for object boundaries */
 static struct rb_root object_tree_root = RB_ROOT;
 /* rw_lock protecting the access to object_list and object_tree_root */
@@ -193,13 +199,11 @@ static struct kmem_cache *object_cache;
 static struct kmem_cache *scan_area_cache;
 
 /* set if tracing memory operations is enabled */
-static int kmemleak_enabled;
+static int kmemleak_enabled = 1;
 /* same as above but only for the kmemleak_free() callback */
-static int kmemleak_free_enabled;
+static int kmemleak_free_enabled = 1;
 /* set in the late_initcall if there were no errors */
 static int kmemleak_initialized;
-/* enables or disables early logging of the memory operations */
-static int kmemleak_early_log = 1;
 /* set if a kmemleak warning was issued */
 static int kmemleak_warning;
 /* set if a fatal kmemleak error has occurred */
@@ -227,49 +231,6 @@ static bool kmemleak_found_leaks;
 static bool kmemleak_verbose;
 module_param_named(verbose, kmemleak_verbose, bool, 0600);
 
-/*
- * Early object allocation/freeing logging. Kmemleak is initialized after the
- * kernel allocator. However, both the kernel allocator and kmemleak may
- * allocate memory blocks which need to be tracked. Kmemleak defines an
- * arbitrary buffer to hold the allocation/freeing information before it is
- * fully initialized.
- */
-
-/* kmemleak operation type for early logging */
-enum {
-       KMEMLEAK_ALLOC,
-       KMEMLEAK_ALLOC_PERCPU,
-       KMEMLEAK_FREE,
-       KMEMLEAK_FREE_PART,
-       KMEMLEAK_FREE_PERCPU,
-       KMEMLEAK_NOT_LEAK,
-       KMEMLEAK_IGNORE,
-       KMEMLEAK_SCAN_AREA,
-       KMEMLEAK_NO_SCAN,
-       KMEMLEAK_SET_EXCESS_REF
-};
-
-/*
- * Structure holding the information passed to kmemleak callbacks during the
- * early logging.
- */
-struct early_log {
-       int op_type;                    /* kmemleak operation type */
-       int min_count;                  /* minimum reference count */
-       const void *ptr;                /* allocated/freed memory block */
-       union {
-               size_t size;            /* memory block size */
-               unsigned long excess_ref; /* surplus reference passing */
-       };
-       unsigned long trace[MAX_TRACE]; /* stack trace */
-       unsigned int trace_len;         /* stack trace length */
-};
-
-/* early logging buffer and current position */
-static struct early_log
-       early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
-static int crt_early_log __initdata;
-
 static void kmemleak_disable(void);
 
 /*
@@ -449,6 +410,54 @@ static int get_object(struct kmemleak_object *object)
        return atomic_inc_not_zero(&object->use_count);
 }
 
+/*
+ * Memory pool allocation and freeing. kmemleak_lock must not be held.
+ */
+static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
+{
+       unsigned long flags;
+       struct kmemleak_object *object;
+
+       /* try the slab allocator first */
+       if (object_cache) {
+               object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+               if (object)
+                       return object;
+       }
+
+       /* slab allocation failed, try the memory pool */
+       write_lock_irqsave(&kmemleak_lock, flags);
+       object = list_first_entry_or_null(&mem_pool_free_list,
+                                         typeof(*object), object_list);
+       if (object)
+               list_del(&object->object_list);
+       else if (mem_pool_free_count)
+               object = &mem_pool[--mem_pool_free_count];
+       else
+               pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
+       write_unlock_irqrestore(&kmemleak_lock, flags);
+
+       return object;
+}
+
+/*
+ * Return the object to either the slab allocator or the memory pool.
+ */
+static void mem_pool_free(struct kmemleak_object *object)
+{
+       unsigned long flags;
+
+       if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
+               kmem_cache_free(object_cache, object);
+               return;
+       }
+
+       /* add the object to the memory pool free list */
+       write_lock_irqsave(&kmemleak_lock, flags);
+       list_add(&object->object_list, &mem_pool_free_list);
+       write_unlock_irqrestore(&kmemleak_lock, flags);
+}
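
mem_pool_alloc() and mem_pool_free() above implement a two-level scheme: take objects from the slab cache whenever it is usable, otherwise fall back to a fixed, statically sized pool, and keep freed pool objects on a free list for reuse. The following self-contained userspace sketch shows the same shape with the locking left out for brevity; names and sizes are illustrative, and it is not the kmemleak code.

#include <stdio.h>
#include <stdlib.h>

#define MEM_POOL_SIZE 16

struct object {
        struct object *next;            /* links freed pool objects */
        int data;
};

static struct object mem_pool[MEM_POOL_SIZE];
static int mem_pool_free_count = MEM_POOL_SIZE;
static struct object *mem_pool_free_list;
static int allocator_available;         /* 0 mimics "object_cache not ready yet" */

static struct object *pool_alloc(void)
{
        struct object *obj;

        /* try the regular allocator first, like kmem_cache_alloc() */
        if (allocator_available) {
                obj = malloc(sizeof(*obj));
                if (obj)
                        return obj;
        }

        /* fall back to the static pool: reuse freed slots, then fresh slots */
        if (mem_pool_free_list) {
                obj = mem_pool_free_list;
                mem_pool_free_list = obj->next;
                return obj;
        }
        if (mem_pool_free_count)
                return &mem_pool[--mem_pool_free_count];

        fprintf(stderr, "memory pool empty\n");
        return NULL;
}

static void pool_free(struct object *obj)
{
        /* objects outside the static pool belong to the regular allocator */
        if (obj < mem_pool || obj >= mem_pool + MEM_POOL_SIZE) {
                free(obj);
                return;
        }
        obj->next = mem_pool_free_list;
        mem_pool_free_list = obj;
}

int main(void)
{
        struct object *a = pool_alloc();
        struct object *b = pool_alloc();

        pool_free(a);
        pool_free(b);
        printf("fresh pool slots remaining: %d\n", mem_pool_free_count);
        return 0;
}
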
+
 /*
  * RCU callback to free a kmemleak_object.
  */
@@ -467,7 +476,7 @@ static void free_object_rcu(struct rcu_head *rcu)
                hlist_del(&area->node);
                kmem_cache_free(scan_area_cache, area);
        }
-       kmem_cache_free(object_cache, object);
+       mem_pool_free(object);
 }
 
 /*
@@ -485,7 +494,15 @@ static void put_object(struct kmemleak_object *object)
        /* should only get here after delete_object was called */
        WARN_ON(object->flags & OBJECT_ALLOCATED);
 
-       call_rcu(&object->rcu, free_object_rcu);
+       /*
+        * It may be too early for the RCU callbacks; however, there is no
+        * concurrent object_list traversal when !object_cache and all objects
+        * came from the memory pool. Free the object directly.
+        */
+       if (object_cache)
+               call_rcu(&object->rcu, free_object_rcu);
+       else
+               free_object_rcu(&object->rcu);
 }
 
 /*
@@ -550,7 +567,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        struct rb_node **link, *rb_parent;
        unsigned long untagged_ptr;
 
-       object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+       object = mem_pool_alloc(gfp);
        if (!object) {
                pr_warn("Cannot allocate a kmemleak_object structure\n");
                kmemleak_disable();
@@ -689,9 +706,7 @@ static void delete_object_part(unsigned long ptr, size_t size)
        /*
         * Create one or two objects that may result from the memory block
         * split. Note that partial freeing is only done by free_bootmem() and
-        * this happens before kmemleak_init() is called. The path below is
-        * only executed during early log recording in kmemleak_init(), so
-        * GFP_KERNEL is enough.
+        * this happens before kmemleak_init() is called.
         */
        start = object->pointer;
        end = object->pointer + object->size;
@@ -763,7 +778,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 {
        unsigned long flags;
        struct kmemleak_object *object;
-       struct kmemleak_scan_area *area;
+       struct kmemleak_scan_area *area = NULL;
 
        object = find_and_get_object(ptr, 1);
        if (!object) {
@@ -772,13 +787,16 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                return;
        }
 
-       area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
-       if (!area) {
-               pr_warn("Cannot allocate a scan area\n");
-               goto out;
-       }
+       if (scan_area_cache)
+               area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 
        spin_lock_irqsave(&object->lock, flags);
+       if (!area) {
+               pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
+               /* mark the object for full scan to avoid false positives */
+               object->flags |= OBJECT_FULL_SCAN;
+               goto out_unlock;
+       }
        if (size == SIZE_MAX) {
                size = object->pointer + object->size - ptr;
        } else if (ptr + size > object->pointer + object->size) {
@@ -795,7 +813,6 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
        hlist_add_head(&area->node, &object->area_list);
 out_unlock:
        spin_unlock_irqrestore(&object->lock, flags);
-out:
        put_object(object);
 }
 
@@ -845,86 +862,6 @@ static void object_no_scan(unsigned long ptr)
        put_object(object);
 }
 
-/*
- * Log an early kmemleak_* call to the early_log buffer. These calls will be
- * processed later once kmemleak is fully initialized.
- */
-static void __init log_early(int op_type, const void *ptr, size_t size,
-                            int min_count)
-{
-       unsigned long flags;
-       struct early_log *log;
-
-       if (kmemleak_error) {
-               /* kmemleak stopped recording, just count the requests */
-               crt_early_log++;
-               return;
-       }
-
-       if (crt_early_log >= ARRAY_SIZE(early_log)) {
-               crt_early_log++;
-               kmemleak_disable();
-               return;
-       }
-
-       /*
-        * There is no need for locking since the kernel is still in UP mode
-        * at this stage. Disabling the IRQs is enough.
-        */
-       local_irq_save(flags);
-       log = &early_log[crt_early_log];
-       log->op_type = op_type;
-       log->ptr = ptr;
-       log->size = size;
-       log->min_count = min_count;
-       log->trace_len = __save_stack_trace(log->trace);
-       crt_early_log++;
-       local_irq_restore(flags);
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc(struct early_log *log)
-{
-       struct kmemleak_object *object;
-       unsigned long flags;
-       int i;
-
-       if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
-               return;
-
-       /*
-        * RCU locking needed to ensure object is not freed via put_object().
-        */
-       rcu_read_lock();
-       object = create_object((unsigned long)log->ptr, log->size,
-                              log->min_count, GFP_ATOMIC);
-       if (!object)
-               goto out;
-       spin_lock_irqsave(&object->lock, flags);
-       for (i = 0; i < log->trace_len; i++)
-               object->trace[i] = log->trace[i];
-       object->trace_len = log->trace_len;
-       spin_unlock_irqrestore(&object->lock, flags);
-out:
-       rcu_read_unlock();
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc_percpu(struct early_log *log)
-{
-       unsigned int cpu;
-       const void __percpu *ptr = log->ptr;
-
-       for_each_possible_cpu(cpu) {
-               log->ptr = per_cpu_ptr(ptr, cpu);
-               early_alloc(log);
-       }
-}
-
 /**
  * kmemleak_alloc - register a newly allocated object
  * @ptr:       pointer to beginning of the object
@@ -946,8 +883,6 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
-       else if (kmemleak_early_log)
-               log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -975,8 +910,6 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                for_each_possible_cpu(cpu)
                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
                                      size, 0, gfp);
-       else if (kmemleak_early_log)
-               log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 
@@ -1001,11 +934,6 @@ void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp
                create_object((unsigned long)area->addr, size, 2, gfp);
                object_set_excess_ref((unsigned long)area,
                                      (unsigned long)area->addr);
-       } else if (kmemleak_early_log) {
-               log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
-               /* reusing early_log.size for storing area->addr */
-               log_early(KMEMLEAK_SET_EXCESS_REF,
-                         area, (unsigned long)area->addr, 0);
        }
 }
 EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
@@ -1023,8 +951,6 @@ void __ref kmemleak_free(const void *ptr)
 
        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr);
-       else if (kmemleak_early_log)
-               log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -1043,8 +969,6 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                delete_object_part((unsigned long)ptr, size);
-       else if (kmemleak_early_log)
-               log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -1065,8 +989,6 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
                for_each_possible_cpu(cpu)
                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
                                                                      cpu));
-       else if (kmemleak_early_log)
-               log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 
@@ -1117,8 +1039,6 @@ void __ref kmemleak_not_leak(const void *ptr)
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
-       else if (kmemleak_early_log)
-               log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
@@ -1137,8 +1057,6 @@ void __ref kmemleak_ignore(const void *ptr)
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr);
-       else if (kmemleak_early_log)
-               log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
@@ -1159,8 +1077,6 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 
        if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
-       else if (kmemleak_early_log)
-               log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
@@ -1179,8 +1095,6 @@ void __ref kmemleak_no_scan(const void *ptr)
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
-       else if (kmemleak_early_log)
-               log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
@@ -1408,7 +1322,8 @@ static void scan_object(struct kmemleak_object *object)
        if (!(object->flags & OBJECT_ALLOCATED))
                /* already freed object */
                goto out;
-       if (hlist_empty(&object->area_list)) {
+       if (hlist_empty(&object->area_list) ||
+           object->flags & OBJECT_FULL_SCAN) {
                void *start = (void *)object->pointer;
                void *end = (void *)(object->pointer + object->size);
                void *next;
@@ -1966,7 +1881,6 @@ static void kmemleak_disable(void)
 
        /* stop any memory operation tracing */
        kmemleak_enabled = 0;
-       kmemleak_early_log = 0;
 
        /* check whether it is too early for a kernel thread */
        if (kmemleak_initialized)
@@ -1994,20 +1908,11 @@ static int __init kmemleak_boot_config(char *str)
 }
 early_param("kmemleak", kmemleak_boot_config);
 
-static void __init print_log_trace(struct early_log *log)
-{
-       pr_notice("Early log backtrace:\n");
-       stack_trace_print(log->trace, log->trace_len, 2);
-}
-
 /*
  * Kmemleak initialization.
  */
 void __init kmemleak_init(void)
 {
-       int i;
-       unsigned long flags;
-
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
        if (!kmemleak_skip_disable) {
                kmemleak_disable();
@@ -2015,28 +1920,15 @@ void __init kmemleak_init(void)
        }
 #endif
 
+       if (kmemleak_error)
+               return;
+
        jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
        jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
 
        object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
        scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
 
-       if (crt_early_log > ARRAY_SIZE(early_log))
-               pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
-                       crt_early_log);
-
-       /* the kernel is still in UP mode, so disabling the IRQs is enough */
-       local_irq_save(flags);
-       kmemleak_early_log = 0;
-       if (kmemleak_error) {
-               local_irq_restore(flags);
-               return;
-       } else {
-               kmemleak_enabled = 1;
-               kmemleak_free_enabled = 1;
-       }
-       local_irq_restore(flags);
-
        /* register the data/bss sections */
        create_object((unsigned long)_sdata, _edata - _sdata,
                      KMEMLEAK_GREY, GFP_ATOMIC);
@@ -2047,57 +1939,6 @@ void __init kmemleak_init(void)
                create_object((unsigned long)__start_ro_after_init,
                              __end_ro_after_init - __start_ro_after_init,
                              KMEMLEAK_GREY, GFP_ATOMIC);
-
-       /*
-        * This is the point where tracking allocations is safe. Automatic
-        * scanning is started during the late initcall. Add the early logged
-        * callbacks to the kmemleak infrastructure.
-        */
-       for (i = 0; i < crt_early_log; i++) {
-               struct early_log *log = &early_log[i];
-
-               switch (log->op_type) {
-               case KMEMLEAK_ALLOC:
-                       early_alloc(log);
-                       break;
-               case KMEMLEAK_ALLOC_PERCPU:
-                       early_alloc_percpu(log);
-                       break;
-               case KMEMLEAK_FREE:
-                       kmemleak_free(log->ptr);
-                       break;
-               case KMEMLEAK_FREE_PART:
-                       kmemleak_free_part(log->ptr, log->size);
-                       break;
-               case KMEMLEAK_FREE_PERCPU:
-                       kmemleak_free_percpu(log->ptr);
-                       break;
-               case KMEMLEAK_NOT_LEAK:
-                       kmemleak_not_leak(log->ptr);
-                       break;
-               case KMEMLEAK_IGNORE:
-                       kmemleak_ignore(log->ptr);
-                       break;
-               case KMEMLEAK_SCAN_AREA:
-                       kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
-                       break;
-               case KMEMLEAK_NO_SCAN:
-                       kmemleak_no_scan(log->ptr);
-                       break;
-               case KMEMLEAK_SET_EXCESS_REF:
-                       object_set_excess_ref((unsigned long)log->ptr,
-                                             log->excess_ref);
-                       break;
-               default:
-                       kmemleak_warn("Unknown early log operation: %d\n",
-                                     log->op_type);
-               }
-
-               if (kmemleak_warning) {
-                       print_log_trace(log);
-                       kmemleak_warning = 0;
-               }
-       }
 }
 
 /*
@@ -2126,7 +1967,8 @@ static int __init kmemleak_late_init(void)
                mutex_unlock(&scan_mutex);
        }
 
-       pr_info("Kernel memory leak detector initialized\n");
+       pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
+               mem_pool_free_count);
 
        return 0;
 }
index 3dc4346411e4e36a0ced02c755e0506481bc5455..dbee2eb4dd05eec01379ad1480384c55bdf43449 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1029,24 +1029,6 @@ static u32 calc_checksum(struct page *page)
        return checksum;
 }
 
-static int memcmp_pages(struct page *page1, struct page *page2)
-{
-       char *addr1, *addr2;
-       int ret;
-
-       addr1 = kmap_atomic(page1);
-       addr2 = kmap_atomic(page2);
-       ret = memcmp(addr1, addr2, PAGE_SIZE);
-       kunmap_atomic(addr2);
-       kunmap_atomic(addr1);
-       return ret;
-}
-
-static inline int pages_identical(struct page *page1, struct page *page2)
-{
-       return !memcmp_pages(page1, page2);
-}
-
 static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                              pte_t *orig_pte)
 {
index 88babcc384b9d4362b929d1771921dda30a80b67..68ab988ad4333b43ae6102d147e94891097c0ae6 100644 (file)
@@ -107,28 +107,14 @@ static long madvise_behavior(struct vm_area_struct *vma,
        case MADV_MERGEABLE:
        case MADV_UNMERGEABLE:
                error = ksm_madvise(vma, start, end, behavior, &new_flags);
-               if (error) {
-                       /*
-                        * madvise() returns EAGAIN if kernel resources, such as
-                        * slab, are temporarily unavailable.
-                        */
-                       if (error == -ENOMEM)
-                               error = -EAGAIN;
-                       goto out;
-               }
+               if (error)
+                       goto out_convert_errno;
                break;
        case MADV_HUGEPAGE:
        case MADV_NOHUGEPAGE:
                error = hugepage_madvise(vma, &new_flags, behavior);
-               if (error) {
-                       /*
-                        * madvise() returns EAGAIN if kernel resources, such as
-                        * slab, are temporarily unavailable.
-                        */
-                       if (error == -ENOMEM)
-                               error = -EAGAIN;
-                       goto out;
-               }
+               if (error)
+                       goto out_convert_errno;
                break;
        }
 
@@ -154,15 +140,8 @@ static long madvise_behavior(struct vm_area_struct *vma,
                        goto out;
                }
                error = __split_vma(mm, vma, start, 1);
-               if (error) {
-                       /*
-                        * madvise() returns EAGAIN if kernel resources, such as
-                        * slab, are temporarily unavailable.
-                        */
-                       if (error == -ENOMEM)
-                               error = -EAGAIN;
-                       goto out;
-               }
+               if (error)
+                       goto out_convert_errno;
        }
 
        if (end != vma->vm_end) {
@@ -171,15 +150,8 @@ static long madvise_behavior(struct vm_area_struct *vma,
                        goto out;
                }
                error = __split_vma(mm, vma, end, 0);
-               if (error) {
-                       /*
-                        * madvise() returns EAGAIN if kernel resources, such as
-                        * slab, are temporarily unavailable.
-                        */
-                       if (error == -ENOMEM)
-                               error = -EAGAIN;
-                       goto out;
-               }
+               if (error)
+                       goto out_convert_errno;
        }
 
 success:
@@ -187,6 +159,14 @@ success:
         * vm_flags is protected by the mmap_sem held in write mode.
         */
        vma->vm_flags = new_flags;
+
+out_convert_errno:
+       /*
+        * madvise() returns EAGAIN if kernel resources, such as
+        * slab, are temporarily unavailable.
+        */
+       if (error == -ENOMEM)
+               error = -EAGAIN;
 out:
        return error;
 }
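
The refactor above replaces three copies of the -ENOMEM to -EAGAIN conversion with a single out_convert_errno label that every failure path jumps to. A small standalone sketch of that single-exit conversion pattern, with made-up step_one()/step_two() helpers purely to exercise the control flow:

#include <errno.h>
#include <stdio.h>

/* hypothetical helpers, only here to produce an error at a chosen step */
static int step_one(int fail) { return fail == 1 ? -ENOMEM : 0; }
static int step_two(int fail) { return fail == 2 ? -ENOMEM : 0; }

static long do_operation(int fail)
{
        int error;

        error = step_one(fail);
        if (error)
                goto out_convert_errno;

        error = step_two(fail);
        if (error)
                goto out_convert_errno;

        return 0;                       /* success: nothing to convert */

out_convert_errno:
        /*
         * Mirror madvise(): report a temporary resource shortage as -EAGAIN
         * so callers know the request may succeed if retried.
         */
        if (error == -ENOMEM)
                error = -EAGAIN;
        return error;
}

int main(void)
{
        printf("fail nowhere:    %ld\n", do_operation(0));
        printf("fail in step 1:  %ld\n", do_operation(1));
        printf("fail in step 2:  %ld\n", do_operation(2));
        return 0;
}
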
index f3c15bb07cce4be6dc9eb6143da2625828c56c4a..2156ef775d04beecd4f416d44eee76bb9012fcf2 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/lockdep.h>
 #include <linux/file.h>
 #include <linux/tracehook.h>
+#include <linux/psi.h>
 #include <linux/seq_buf.h>
 #include "internal.h"
 #include <net/sock.h>
@@ -317,6 +318,7 @@ DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 EXPORT_SYMBOL(memcg_kmem_enabled_key);
 
 struct workqueue_struct *memcg_kmem_cache_wq;
+#endif
 
 static int memcg_shrinker_map_size;
 static DEFINE_MUTEX(memcg_shrinker_map_mutex);
@@ -440,14 +442,6 @@ void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
        }
 }
 
-#else /* CONFIG_MEMCG_KMEM */
-static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
-{
-       return 0;
-}
-static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) { }
-#endif /* CONFIG_MEMCG_KMEM */
-
 /**
  * mem_cgroup_css_from_page - css of the memcg associated with a page
  * @page: page of interest
@@ -2270,21 +2264,22 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
        for_each_online_cpu(cpu) {
                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
                struct mem_cgroup *memcg;
+               bool flush = false;
 
+               rcu_read_lock();
                memcg = stock->cached;
-               if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
-                       continue;
-               if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
-                       css_put(&memcg->css);
-                       continue;
-               }
-               if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
+               if (memcg && stock->nr_pages &&
+                   mem_cgroup_is_descendant(memcg, root_memcg))
+                       flush = true;
+               rcu_read_unlock();
+
+               if (flush &&
+                   !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
                        if (cpu == curcpu)
                                drain_local_stock(&stock->work);
                        else
                                schedule_work_on(cpu, &stock->work);
                }
-               css_put(&memcg->css);
        }
        put_cpu();
        mutex_unlock(&percpu_charge_mutex);
@@ -2358,12 +2353,68 @@ static void high_work_func(struct work_struct *work)
        reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
 }
 
+/*
+ * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
+ * long enough to cause a significant slowdown in most cases, while still
+ * allowing diagnostics and tracing to proceed without becoming stuck.
+ */
+#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
+
+/*
+ * When calculating the delay, we use these on either side of the exponentiation
+ * to maintain precision and scale to a reasonable number of jiffies (see the
+ * table below).
+ *
+ * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
+ *   overage ratio to a delay.
+ * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the proposed
+ *   penalty in order to reduce it to a reasonable number of jiffies, and to
+ *   produce a reasonable delay curve.
+ *
+ * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
+ * reasonable delay curve compared to precision-adjusted overage, not
+ * penalising heavily at first, but still making sure that growth beyond the
+ * limit penalises misbehaving cgroups by slowing them down exponentially. For
+ * example, with a high of 100 megabytes:
+ *
+ *  +-------+------------------------+
+ *  | usage | time to allocate in ms |
+ *  +-------+------------------------+
+ *  | 100M  |                      0 |
+ *  | 101M  |                      6 |
+ *  | 102M  |                     25 |
+ *  | 103M  |                     57 |
+ *  | 104M  |                    102 |
+ *  | 105M  |                    159 |
+ *  | 106M  |                    230 |
+ *  | 107M  |                    313 |
+ *  | 108M  |                    409 |
+ *  | 109M  |                    518 |
+ *  | 110M  |                    639 |
+ *  | 111M  |                    774 |
+ *  | 112M  |                    921 |
+ *  | 113M  |                   1081 |
+ *  | 114M  |                   1254 |
+ *  | 115M  |                   1439 |
+ *  | 116M  |                   1638 |
+ *  | 117M  |                   1849 |
+ *  | 118M  |                   2000 |
+ *  | 119M  |                   2000 |
+ *  | 120M  |                   2000 |
+ *  +-------+------------------------+
+ */
+#define MEMCG_DELAY_PRECISION_SHIFT 20
+#define MEMCG_DELAY_SCALING_SHIFT 14
+
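
The table above follows from the same fixed-point arithmetic that mem_cgroup_handle_over_high() applies below: scale the overage ratio up by 2^MEMCG_DELAY_PRECISION_SHIFT, square it, multiply by HZ and shift back down by both constants, then clamp to 2 * HZ. The standalone program below reproduces the table assuming HZ = 1000 (so jiffies print as milliseconds) and a charge of exactly MEMCG_CHARGE_BATCH pages, i.e. it omits the per-task nr_pages scaling applied later; a worked example, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define HZ                              1000UL
#define MEMCG_DELAY_PRECISION_SHIFT     20
#define MEMCG_DELAY_SCALING_SHIFT       14
#define MEMCG_MAX_HIGH_DELAY_JIFFIES    (2UL * HZ)

static unsigned long penalty_jiffies(unsigned long usage, unsigned long high)
{
        uint64_t overage, penalty;

        if (usage <= high)
                return 0;

        /* fixed-point overage ratio, then the squared and scaled penalty */
        overage = ((uint64_t)(usage - high) << MEMCG_DELAY_PRECISION_SHIFT) / high;
        penalty = (overage * overage * HZ) >>
                  (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT);

        if (penalty > MEMCG_MAX_HIGH_DELAY_JIFFIES)
                penalty = MEMCG_MAX_HIGH_DELAY_JIFFIES;
        return (unsigned long)penalty;
}

int main(void)
{
        unsigned long high = 100;       /* read as "100M"; only the ratio matters */

        for (unsigned long usage = 100; usage <= 120; usage++)
                printf("| %3luM | %22lu |\n", usage, penalty_jiffies(usage, high));
        return 0;
}
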
 /*
  * Scheduled by try_charge() to be executed from the userland return path
  * and reclaims memory over the high limit.
  */
 void mem_cgroup_handle_over_high(void)
 {
+       unsigned long usage, high, clamped_high;
+       unsigned long pflags;
+       unsigned long penalty_jiffies, overage;
        unsigned int nr_pages = current->memcg_nr_pages_over_high;
        struct mem_cgroup *memcg;
 
@@ -2372,8 +2423,75 @@ void mem_cgroup_handle_over_high(void)
 
        memcg = get_mem_cgroup_from_mm(current->mm);
        reclaim_high(memcg, nr_pages, GFP_KERNEL);
-       css_put(&memcg->css);
        current->memcg_nr_pages_over_high = 0;
+
+       /*
+        * memory.high is breached and reclaim is unable to keep up. Throttle
+        * allocators proactively to slow down excessive growth.
+        *
+        * We use overage compared to memory.high to calculate the number of
+        * jiffies to sleep (penalty_jiffies). Ideally this value should be
+        * fairly lenient on small overages, and increasingly harsh when the
+        * memcg in question makes it clear that it has no intention of stopping
+        * its crazy behaviour, so we exponentially increase the delay based on
+        * overage amount.
+        */
+
+       usage = page_counter_read(&memcg->memory);
+       high = READ_ONCE(memcg->high);
+
+       if (usage <= high)
+               goto out;
+
+       /*
+        * Prevent division by 0 in the overage calculation by acting as if it
+        * were a threshold of 1 page.
+        */
+       clamped_high = max(high, 1UL);
+
+       overage = div_u64((u64)(usage - high) << MEMCG_DELAY_PRECISION_SHIFT,
+                         clamped_high);
+
+       penalty_jiffies = ((u64)overage * overage * HZ)
+               >> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT);
+
+       /*
+        * Factor in the task's own contribution to the overage, such that four
+        * N-sized allocations are throttled approximately the same as one
+        * 4N-sized allocation.
+        *
+        * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
+        * larger the current charge batch is than that.
+        */
+       penalty_jiffies = penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
+
+       /*
+        * Clamp the max delay per usermode return so as to still keep the
+        * application moving forwards and also permit diagnostics, albeit
+        * extremely slowly.
+        */
+       penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
+
+       /*
+        * Don't sleep if the number of jiffies this memcg owes us is so low
+        * that it's not even worth doing, in an attempt to be nice to those who
+        * go only a small amount over their memory.high value and maybe haven't
+        * been aggressively reclaimed enough yet.
+        */
+       if (penalty_jiffies <= HZ / 100)
+               goto out;
+
+       /*
+        * If we exit early, we're guaranteed to die (since
+        * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
+        * need to account for any ill-begotten jiffies to pay them off later.
+        */
+       psi_memstall_enter(&pflags);
+       schedule_timeout_killable(penalty_jiffies);
+       psi_memstall_leave(&pflags);
+
+out:
+       css_put(&memcg->css);
 }
 
 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
@@ -3512,6 +3630,9 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
                        ret = mem_cgroup_resize_max(memcg, nr_pages, true);
                        break;
                case _KMEM:
+                       pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
+                                    "Please report your usecase to linux-mm@kvack.org if you "
+                                    "depend on this functionality.\n");
                        ret = memcg_update_kmem_max(memcg, nr_pages);
                        break;
                case _TCP:
@@ -4805,11 +4926,6 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
        }
 }
 
-static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
-{
-       mem_cgroup_id_get_many(memcg, 1);
-}
-
 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
 {
        mem_cgroup_id_put_many(memcg, 1);
@@ -4954,6 +5070,11 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
                memcg->cgwb_frn[i].done =
                        __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
+       INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
+       memcg->deferred_split_queue.split_queue_len = 0;
 #endif
        idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
        return memcg;
@@ -5333,6 +5454,14 @@ static int mem_cgroup_move_account(struct page *page,
                __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
        }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       if (compound && !list_empty(page_deferred_list(page))) {
+               spin_lock(&from->deferred_split_queue.split_queue_lock);
+               list_del_init(page_deferred_list(page));
+               from->deferred_split_queue.split_queue_len--;
+               spin_unlock(&from->deferred_split_queue.split_queue_lock);
+       }
+#endif
        /*
         * It is safe to change page->mem_cgroup here because the page
         * is referenced, charged, and isolated - we can't race with
@@ -5341,6 +5470,17 @@ static int mem_cgroup_move_account(struct page *page,
 
        /* caller should have done css_get */
        page->mem_cgroup = to;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       if (compound && list_empty(page_deferred_list(page))) {
+               spin_lock(&to->deferred_split_queue.split_queue_lock);
+               list_add_tail(page_deferred_list(page),
+                             &to->deferred_split_queue.split_queue);
+               to->deferred_split_queue.split_queue_len++;
+               spin_unlock(&to->deferred_split_queue.split_queue_lock);
+       }
+#endif
+
        spin_unlock_irqrestore(&from->move_lock, flags);
 
        ret = 0;
@@ -6511,7 +6651,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
                unsigned int nr_pages = 1;
 
                if (PageTransHuge(page)) {
-                       nr_pages <<= compound_order(page);
+                       nr_pages = compound_nr(page);
                        ug->nr_huge += nr_pages;
                }
                if (PageAnon(page))
@@ -6523,7 +6663,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
                }
                ug->pgpgout++;
        } else {
-               ug->nr_kmem += 1 << compound_order(page);
+               ug->nr_kmem += compound_nr(page);
                __ClearPageKmemcg(page);
        }
 
index 650e65a46b9cc7d8cfdc4046a807f30c769ee849..2647c898990c80491b512944a890d47c90f23aca 100644 (file)
@@ -39,6 +39,7 @@ static void memfd_tag_pins(struct xa_state *xas)
        xas_for_each(xas, page, ULONG_MAX) {
                if (xa_is_value(page))
                        continue;
+               page = find_subpage(page, xas->xa_index);
                if (page_count(page) - page_mapcount(page) > 1)
                        xas_set_mark(xas, MEMFD_TAG_PINNED);
 
@@ -88,6 +89,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
                        bool clear = true;
                        if (xa_is_value(page))
                                continue;
+                       page = find_subpage(page, xas.xa_index);
                        if (page_count(page) - page_mapcount(page) != 1) {
                                /*
                                 * On the last scan, we clean up all those tags
index b1dff75640b722d4bb40f2be6b76880ef8cb12ef..b1ca51a079f27465fba8529c75a11f65f3e09b85 100644 (file)
@@ -518,7 +518,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
                 (long long)pte_val(pte), (long long)pmd_val(*pmd));
        if (page)
                dump_page(page, "bad pte");
-       pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
+       pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
                 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
        pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
                 vma->vm_file,
@@ -1026,6 +1026,9 @@ again:
                if (pte_none(ptent))
                        continue;
 
+               if (need_resched())
+                       break;
+
                if (pte_present(ptent)) {
                        struct page *page;
 
@@ -1093,7 +1096,6 @@ again:
                if (unlikely(details))
                        continue;
 
-               entry = pte_to_swp_entry(ptent);
                if (!non_swap_entry(entry))
                        rss[MM_SWAPENTS]--;
                else if (is_migration_entry(entry)) {
@@ -1124,8 +1126,11 @@ again:
        if (force_flush) {
                force_flush = 0;
                tlb_flush_mmu(tlb);
-               if (addr != end)
-                       goto again;
+       }
+
+       if (addr != end) {
+               cond_resched();
+               goto again;
        }
 
        return addr;
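
The hunk above restructures the loop so that it also breaks out when a reschedule is due, not only when a TLB flush was forced, and then resumes from the address where it stopped. A rough userspace analogue of that break-out, yield and resume shape, with sched_yield() standing in for cond_resched() and a simple batch counter standing in for need_resched(); purely illustrative:

#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#define TOTAL_ITEMS     1000000
#define BATCH_ITEMS     4096            /* pretend a resched is needed this often */

static long process_range(const int *items, size_t start, size_t end)
{
        long sum = 0;
        size_t i = start;
        size_t done_in_batch = 0;

again:
        for (; i < end; i++) {
                if (done_in_batch++ >= BATCH_ITEMS)
                        break;          /* analogue of need_resched() firing */
                sum += items[i];
        }

        if (i != end) {
                sched_yield();          /* analogue of cond_resched() */
                done_in_batch = 0;
                goto again;             /* resume exactly where we stopped */
        }
        return sum;
}

int main(void)
{
        int *items = calloc(TOTAL_ITEMS, sizeof(*items));

        if (!items)
                return 1;
        for (size_t i = 0; i < TOTAL_ITEMS; i++)
                items[i] = 1;
        printf("sum = %ld\n", process_range(items, 0, TOTAL_ITEMS));
        free(items);
        return 0;
}
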
index c73f0991316511fb5471d3382f2d719c6cbfd759..b1be791f772dc0fae4d0eef4bedc45f4312fb454 100644 (file)
@@ -632,33 +632,30 @@ static void generic_online_page(struct page *page, unsigned int order)
 #endif
 }
 
-static int online_pages_blocks(unsigned long start, unsigned long nr_pages)
-{
-       unsigned long end = start + nr_pages;
-       int order, onlined_pages = 0;
-
-       while (start < end) {
-               order = min(MAX_ORDER - 1,
-                       get_order(PFN_PHYS(end) - PFN_PHYS(start)));
-               (*online_page_callback)(pfn_to_page(start), order);
-
-               onlined_pages += (1UL << order);
-               start += (1UL << order);
-       }
-       return onlined_pages;
-}
-
 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
                        void *arg)
 {
-       unsigned long onlined_pages = *(unsigned long *)arg;
+       const unsigned long end_pfn = start_pfn + nr_pages;
+       unsigned long pfn;
+       int order;
 
-       if (PageReserved(pfn_to_page(start_pfn)))
-               onlined_pages += online_pages_blocks(start_pfn, nr_pages);
+       /*
+        * Online the pages. The callback might decide to keep some pages
+        * PG_reserved (to add them to the buddy later), but we still account
+        * them as being online/belonging to this zone ("present").
+        */
+       for (pfn = start_pfn; pfn < end_pfn; pfn += 1ul << order) {
+               order = min(MAX_ORDER - 1, get_order(PFN_PHYS(end_pfn - pfn)));
+               /* __free_pages_core() wants pfns to be aligned to the order */
+               if (WARN_ON_ONCE(!IS_ALIGNED(pfn, 1ul << order)))
+                       order = 0;
+               (*online_page_callback)(pfn_to_page(pfn), order);
+       }
 
-       online_mem_sections(start_pfn, start_pfn + nr_pages);
+       /* mark all involved sections as online */
+       online_mem_sections(start_pfn, end_pfn);
 
-       *(unsigned long *)arg = onlined_pages;
+       *(unsigned long *)arg += nr_pages;
        return 0;
 }
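
The rewritten online_pages_range() above hands the range to the callback in the largest power-of-two blocks that fit, capped at MAX_ORDER - 1, and falls back to order 0 whenever the current pfn is not aligned to the chosen order. A standalone sketch of that splitting loop, with a small helper standing in for get_order(PFN_PHYS(...)); values and names are illustrative only:

#include <stdio.h>

#define MAX_ORDER       11

/* smallest order whose block covers the remaining pages, capped at MAX_ORDER - 1 */
static int order_for_pages(unsigned long nr_pages)
{
        int order = 0;

        while ((1UL << order) < nr_pages && order < MAX_ORDER - 1)
                order++;
        return order;
}

static void online_range(unsigned long start_pfn, unsigned long nr_pages)
{
        const unsigned long end_pfn = start_pfn + nr_pages;
        unsigned long pfn;
        int order;

        for (pfn = start_pfn; pfn < end_pfn; pfn += 1UL << order) {
                order = order_for_pages(end_pfn - pfn);
                /* the callback wants order-aligned pfns; fall back to order 0 */
                if (pfn & ((1UL << order) - 1))
                        order = 0;
                printf("online pfn %#lx, order %2d (%4lu pages)\n",
                       pfn, order, 1UL << order);
        }
}

int main(void)
{
        online_range(0x1000, 4096);     /* aligned start: four MAX_ORDER - 1 blocks */
        online_range(0x1001, 4);        /* misaligned start: page-by-page */
        return 0;
}
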
 
@@ -714,8 +711,13 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
                pgdat->node_start_pfn = start_pfn;
 
        pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
-}
 
+}
+/*
+ * Associate the pfn range with the given zone, initializing the memmaps
+ * and resizing the pgdat/zone data to span the added pages. After this
+ * call, all affected pages are PG_reserved.
+ */
 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap)
 {
@@ -804,20 +806,6 @@ struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
        return default_zone_for_pfn(nid, start_pfn, nr_pages);
 }
 
-/*
- * Associates the given pfn range with the given node and the zone appropriate
- * for the given online type.
- */
-static struct zone * __meminit move_pfn_range(int online_type, int nid,
-               unsigned long start_pfn, unsigned long nr_pages)
-{
-       struct zone *zone;
-
-       zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
-       move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL);
-       return zone;
-}
-
 int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
 {
        unsigned long flags;
@@ -840,7 +828,8 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
        put_device(&mem->dev);
 
        /* associate pfn range with the zone */
-       zone = move_pfn_range(online_type, nid, pfn, nr_pages);
+       zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
+       move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);
 
        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
@@ -864,6 +853,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
        ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
                online_pages_range);
        if (ret) {
+               /* not a single memory resource was applicable */
                if (need_zonelists_rebuild)
                        zone_pcp_reset(zone);
                goto failed_addition;
@@ -877,27 +867,22 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 
        shuffle_zone(zone);
 
-       if (onlined_pages) {
-               node_states_set_node(nid, &arg);
-               if (need_zonelists_rebuild)
-                       build_all_zonelists(NULL);
-               else
-                       zone_pcp_update(zone);
-       }
+       node_states_set_node(nid, &arg);
+       if (need_zonelists_rebuild)
+               build_all_zonelists(NULL);
+       else
+               zone_pcp_update(zone);
 
        init_per_zone_wmark_min();
 
-       if (onlined_pages) {
-               kswapd_run(nid);
-               kcompactd_run(nid);
-       }
+       kswapd_run(nid);
+       kcompactd_run(nid);
 
        vm_total_pages = nr_free_pagecache_pages();
 
        writeback_set_ratelimit();
 
-       if (onlined_pages)
-               memory_notify(MEM_ONLINE, &arg);
+       memory_notify(MEM_ONLINE, &arg);
        mem_hotplug_done();
        return 0;
 
@@ -933,8 +918,11 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
                if (!pgdat)
                        return NULL;
 
+               pgdat->per_cpu_nodestats =
+                       alloc_percpu(struct per_cpu_nodestat);
                arch_refresh_nodedata(nid, pgdat);
        } else {
+               int cpu;
                /*
                 * Reset the nr_zones, order and classzone_idx before reuse.
                 * Note that kswapd will init kswapd_classzone_idx properly
@@ -943,6 +931,12 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
                pgdat->nr_zones = 0;
                pgdat->kswapd_order = 0;
                pgdat->kswapd_classzone_idx = 0;
+               for_each_online_cpu(cpu) {
+                       struct per_cpu_nodestat *p;
+
+                       p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
+                       memset(p, 0, sizeof(*p));
+               }
        }
 
        /* we can use NODE_DATA(nid) from here */
@@ -952,7 +946,6 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 
        /* init node's zones as empty zones, we don't have any present pages.*/
        free_area_init_core_hotplug(nid);
-       pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
 
        /*
         * The node we allocated has no zone fallback lists. For avoiding
@@ -1309,7 +1302,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
                head = compound_head(page);
                if (page_huge_active(head))
                        return pfn;
-               skip = (1 << compound_order(head)) - (page - head);
+               skip = compound_nr(head) - (page - head);
                pfn += skip - 1;
        }
        return 0;
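
A number of hunks in this series replace open-coded "1 << compound_order(page)" with the compound_nr() helper, which gives the number of base pages backing a compound page. Below is a minimal userspace sketch of the skip arithmetic used in scan_movable_pages() above; nr_subpages(), the pfn values and the order are illustrative stand-ins, not kernel API.

#include <stdio.h>

/* stand-in for compound_nr(): base pages in a compound page of this order */
static unsigned long nr_subpages(unsigned int order)
{
        return 1UL << order;
}

int main(void)
{
        unsigned long head_pfn = 0x1000;        /* assumed head of a 2MB huge page */
        unsigned int order = 9;                 /* 512 x 4KB base pages */
        unsigned long pfn = 0x1005;             /* scan position inside the page */

        /* same shape as: skip = compound_nr(head) - (page - head) */
        unsigned long skip = nr_subpages(order) - (pfn - head_pfn);

        printf("advance pfn by %lu to %#lx (last tail pfn)\n",
               skip - 1, pfn + skip - 1);
        return 0;
}
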
@@ -1347,7 +1340,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
                if (PageHuge(page)) {
                        struct page *head = compound_head(page);
-                       pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
+                       pfn = page_to_pfn(head) + compound_nr(head) - 1;
                        isolate_huge_page(head, &source);
                        continue;
                } else if (PageTransHuge(page))
@@ -1662,7 +1655,7 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
                phys_addr_t beginpa, endpa;
 
                beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
-               endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
+               endpa = beginpa + memory_block_size_bytes() - 1;
                pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
                        &beginpa, &endpa);
 
@@ -1800,7 +1793,7 @@ void __remove_memory(int nid, u64 start, u64 size)
 {
 
        /*
-        * trigger BUG() is some memory is not offlined prior to calling this
+        * trigger BUG() if some memory is not offlined prior to calling this
         * function
         */
        if (try_remove_memory(nid, start, size))
index f000771558d8820cc6d8ff6efea968a17acbd1b7..464406e8da9109b63d705132c3ea5f2187dd8c2e 100644 (file)
@@ -1512,10 +1512,6 @@ static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
        if (nodes_empty(*new))
                goto out_put;
 
-       nodes_and(*new, *new, node_states[N_MEMORY]);
-       if (nodes_empty(*new))
-               goto out_put;
-
        err = security_task_movememory(task);
        if (err)
                goto out_put;
index 9f4ed4e985c1fe3468df8d2f55e803a6a41bb44a..73d476d690b10555909b8ac3974a9077faec5cf0 100644 (file)
@@ -460,7 +460,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 
                for (i = 1; i < HPAGE_PMD_NR; i++) {
                        xas_next(&xas);
-                       xas_store(&xas, newpage + i);
+                       xas_store(&xas, newpage);
                }
        }
 
@@ -1892,7 +1892,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
        VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
 
        /* Avoid migrating to a node that is nearly full */
-       if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
+       if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
                return 0;
 
        if (isolate_lru_page(page))
@@ -2218,17 +2218,15 @@ again:
                pte_t pte;
 
                pte = *ptep;
-               pfn = pte_pfn(pte);
 
                if (pte_none(pte)) {
                        mpfn = MIGRATE_PFN_MIGRATE;
                        migrate->cpages++;
-                       pfn = 0;
                        goto next;
                }
 
                if (!pte_present(pte)) {
-                       mpfn = pfn = 0;
+                       mpfn = 0;
 
                        /*
                         * Only care about unaddressable device page special
@@ -2245,10 +2243,10 @@ again:
                        if (is_write_device_private_entry(entry))
                                mpfn |= MIGRATE_PFN_WRITE;
                } else {
+                       pfn = pte_pfn(pte);
                        if (is_zero_pfn(pfn)) {
                                mpfn = MIGRATE_PFN_MIGRATE;
                                migrate->cpages++;
-                               pfn = 0;
                                goto next;
                        }
                        page = vm_normal_page(migrate->vma, addr, pte);
@@ -2258,10 +2256,9 @@ again:
 
                /* FIXME support THP */
                if (!page || !page->mapping || PageTransCompound(page)) {
-                       mpfn = pfn = 0;
+                       mpfn = 0;
                        goto next;
                }
-               pfn = page_to_pfn(page);
 
                /*
                 * By getting a reference on the page we pin it and that blocks
index 6bc21fca20bc388e5527d2b8ec30e5a725747b31..f1e8c7f93e04c61f825dbb22b2a8e3e25370f031 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1358,6 +1358,9 @@ static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
        if (S_ISBLK(inode->i_mode))
                return MAX_LFS_FILESIZE;
 
+       if (S_ISSOCK(inode->i_mode))
+               return MAX_LFS_FILESIZE;
+
        /* Special "we do even unsigned file positions" case */
        if (file->f_mode & FMODE_UNSIGNED_OFFSET)
                return 0;
@@ -2274,12 +2277,9 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
        if (vma) {
                *pprev = vma->vm_prev;
        } else {
-               struct rb_node *rb_node = mm->mm_rb.rb_node;
-               *pprev = NULL;
-               while (rb_node) {
-                       *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
-                       rb_node = rb_node->rb_right;
-               }
+               struct rb_node *rb_node = rb_last(&mm->mm_rb);
+
+               *pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL;
        }
        return vma;
 }
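
For reference, rb_last() just walks right children until it runs out of tree; on the address-ordered VMA rbtree that is the highest VMA, which is the correct *pprev when addr lies above every mapping. A toy userspace sketch of the same idea on a plain binary search tree (not the kernel rbtree API):

#include <stdio.h>
#include <stddef.h>

struct node {
        int key;
        struct node *left, *right;
};

/* equivalent of both the removed hand-rolled loop and rb_last() */
static struct node *tree_last(struct node *root)
{
        if (!root)
                return NULL;
        while (root->right)
                root = root->right;
        return root;
}

int main(void)
{
        struct node lo = { 10, NULL, NULL };
        struct node hi = { 30, NULL, NULL };
        struct node root = { 20, &lo, &hi };

        printf("right-most key: %d\n", tree_last(&root)->key);  /* prints 30 */
        return 0;
}
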
index 8c943a6e1696c095d77ae65bb5d784cc09a85afd..7d70e5c78f97487d53f9dbb800817241cb6a431f 100644 (file)
@@ -271,8 +271,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
 
        tlb_flush_mmu(tlb);
 
-       /* keep the page table cache within bounds */
-       check_pgt_cache();
 #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
        tlb_batch_list_free(tlb);
 #endif
index fed1b6e9c89b4361c68100fec1bfb3e693dc9918..99b7ec318824c8689d22d4f005016fd61e3b8de8 100644 (file)
@@ -108,7 +108,7 @@ unsigned int kobjsize(const void *objp)
         * The ksize() function is only guaranteed to work for pointers
         * returned by kmalloc(). So handle arbitrary pointers here.
         */
-       return PAGE_SIZE << compound_order(page);
+       return page_size(page);
 }
 
 /**
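
Several conversions in this series, like the one above, use the page_size() helper, which evaluates to PAGE_SIZE << compound_order(page), i.e. the byte size of a possibly compound page. A trivial userspace sketch of the arithmetic, assuming 4KB base pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* stand-in for page_size(page), taking the compound order directly */
static unsigned long page_bytes(unsigned int order)
{
        return PAGE_SIZE << order;
}

int main(void)
{
        printf("order 0: %lu bytes\n", page_bytes(0));  /* 4096 */
        printf("order 9: %lu bytes\n", page_bytes(9));  /* 2097152, a 2MB THP */
        return 0;
}
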
index eda2e2a0bdc6260a936976523d6b4204d4f2a3e5..c1d9496b4c4326a1fb633234cce580b5019b9e39 100644 (file)
@@ -73,7 +73,7 @@ static inline bool is_memcg_oom(struct oom_control *oc)
 /**
  * oom_cpuset_eligible() - check task eligibility for kill
  * @start: task struct of which task to consider
- * @mask: nodemask passed to page allocator for mempolicy ooms
+ * @oc: pointer to struct oom_control
  *
  * Task eligibility is determined by whether or not a candidate task, @tsk,
  * shares the same mempolicy nodes as current if it is bound by such a policy
@@ -287,7 +287,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
            !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
                oc->totalpages = total_swap_pages;
                for_each_node_mask(nid, *oc->nodemask)
-                       oc->totalpages += node_spanned_pages(nid);
+                       oc->totalpages += node_present_pages(nid);
                return CONSTRAINT_MEMORY_POLICY;
        }
 
@@ -300,7 +300,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
        if (cpuset_limited) {
                oc->totalpages = total_swap_pages;
                for_each_node_mask(nid, cpuset_current_mems_allowed)
-                       oc->totalpages += node_spanned_pages(nid);
+                       oc->totalpages += node_present_pages(nid);
                return CONSTRAINT_CPUSET;
        }
        return CONSTRAINT_NONE;
@@ -884,12 +884,13 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
         */
        do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
        mark_oom_victim(victim);
-       pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
-               message, task_pid_nr(victim), victim->comm,
-               K(victim->mm->total_vm),
-               K(get_mm_counter(victim->mm, MM_ANONPAGES)),
-               K(get_mm_counter(victim->mm, MM_FILEPAGES)),
-               K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
+       pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
+               message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
+               K(get_mm_counter(mm, MM_ANONPAGES)),
+               K(get_mm_counter(mm, MM_FILEPAGES)),
+               K(get_mm_counter(mm, MM_SHMEMPAGES)),
+               from_kuid(&init_user_ns, task_uid(victim)),
+               mm_pgtables_bytes(mm), victim->signal->oom_score_adj);
        task_unlock(victim);
 
        /*
@@ -1068,9 +1069,10 @@ bool out_of_memory(struct oom_control *oc)
         * The OOM killer does not compensate for IO-less reclaim.
         * pagefault_out_of_memory lost its gfp context so we have to
         * make sure to exclude the 0 mask - all other users should have at least
-        * ___GFP_DIRECT_RECLAIM to get here.
+        * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
+        * invoke the OOM killer even if it is a GFP_NOFS allocation.
         */
-       if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
+       if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
                return true;
 
        /*
index ff5484fdbdf9908a9064129f2990a13f4cfc2247..3334a769eb91e1c1cc374560125c8c64e32da979 100644 (file)
@@ -670,6 +670,7 @@ out:
 
 void free_compound_page(struct page *page)
 {
+       mem_cgroup_uncharge(page);
        __free_pages_ok(page, compound_order(page));
 }
 
@@ -3954,15 +3955,23 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
        if (compaction_failed(compact_result))
                goto check_priority;
 
+       /*
+        * compaction was skipped because there are not enough order-0 pages
+        * to work with, so we retry only if it looks like reclaim can help.
+        */
+       if (compaction_needs_reclaim(compact_result)) {
+               ret = compaction_zonelist_suitable(ac, order, alloc_flags);
+               goto out;
+       }
+
        /*
         * make sure the compaction wasn't deferred or didn't bail out early
         * due to locks contention before we declare that we should give up.
-        * But do not retry if the given zonelist is not suitable for
-        * compaction.
+        * But the next retry should use a higher priority if allowed, so
+        * we don't just keep bailing out endlessly.
         */
        if (compaction_withdrawn(compact_result)) {
-               ret = compaction_zonelist_suitable(ac, order, alloc_flags);
-               goto out;
+               goto check_priority;
        }
 
        /*
@@ -6638,9 +6647,11 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void pgdat_init_split_queue(struct pglist_data *pgdat)
 {
-       spin_lock_init(&pgdat->split_queue_lock);
-       INIT_LIST_HEAD(&pgdat->split_queue);
-       pgdat->split_queue_len = 0;
+       struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
+
+       spin_lock_init(&ds_queue->split_queue_lock);
+       INIT_LIST_HEAD(&ds_queue->split_queue);
+       ds_queue->split_queue_len = 0;
 }
 #else
 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
@@ -8196,7 +8207,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                        if (!hugepage_migration_supported(page_hstate(head)))
                                goto unmovable;
 
-                       skip_pages = (1 << compound_order(head)) - (page - head);
+                       skip_pages = compound_nr(head) - (page - head);
                        iter += skip_pages - 1;
                        continue;
                }
index addcbb2ae4e4f7e8efa3bc1df75a2f77bf9a094a..dee931184788399c920646cef75e29e8fd347186 100644 (file)
@@ -24,6 +24,9 @@ struct page_owner {
        short last_migrate_reason;
        gfp_t gfp_mask;
        depot_stack_handle_t handle;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       depot_stack_handle_t free_handle;
+#endif
 };
 
 static bool page_owner_disabled = true;
@@ -102,19 +105,6 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
        return (void *)page_ext + page_owner_ops.offset;
 }
 
-void __reset_page_owner(struct page *page, unsigned int order)
-{
-       int i;
-       struct page_ext *page_ext;
-
-       for (i = 0; i < (1 << order); i++) {
-               page_ext = lookup_page_ext(page + i);
-               if (unlikely(!page_ext))
-                       continue;
-               __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
-       }
-}
-
 static inline bool check_recursive_alloc(unsigned long *entries,
                                         unsigned int nr_entries,
                                         unsigned long ip)
@@ -154,18 +144,50 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
        return handle;
 }
 
-static inline void __set_page_owner_handle(struct page_ext *page_ext,
-       depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
+void __reset_page_owner(struct page *page, unsigned int order)
 {
+       int i;
+       struct page_ext *page_ext;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       depot_stack_handle_t handle = 0;
        struct page_owner *page_owner;
 
-       page_owner = get_page_owner(page_ext);
-       page_owner->handle = handle;
-       page_owner->order = order;
-       page_owner->gfp_mask = gfp_mask;
-       page_owner->last_migrate_reason = -1;
+       if (debug_pagealloc_enabled())
+               handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
+#endif
 
-       __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+       for (i = 0; i < (1 << order); i++) {
+               page_ext = lookup_page_ext(page + i);
+               if (unlikely(!page_ext))
+                       continue;
+               __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+               if (debug_pagealloc_enabled()) {
+                       page_owner = get_page_owner(page_ext);
+                       page_owner->free_handle = handle;
+               }
+#endif
+       }
+}
+
+static inline void __set_page_owner_handle(struct page *page,
+       struct page_ext *page_ext, depot_stack_handle_t handle,
+       unsigned int order, gfp_t gfp_mask)
+{
+       struct page_owner *page_owner;
+       int i;
+
+       for (i = 0; i < (1 << order); i++) {
+               page_owner = get_page_owner(page_ext);
+               page_owner->handle = handle;
+               page_owner->order = order;
+               page_owner->gfp_mask = gfp_mask;
+               page_owner->last_migrate_reason = -1;
+               __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+               __set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+
+               page_ext = lookup_page_ext(page + i);
+       }
 }
 
 noinline void __set_page_owner(struct page *page, unsigned int order,
@@ -178,7 +200,7 @@ noinline void __set_page_owner(struct page *page, unsigned int order,
                return;
 
        handle = save_stack(gfp_mask);
-       __set_page_owner_handle(page_ext, handle, order, gfp_mask);
+       __set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
 }
 
 void __set_page_owner_migrate_reason(struct page *page, int reason)
@@ -204,8 +226,11 @@ void __split_page_owner(struct page *page, unsigned int order)
 
        page_owner = get_page_owner(page_ext);
        page_owner->order = 0;
-       for (i = 1; i < (1 << order); i++)
-               __copy_page_owner(page, page + i);
+       for (i = 1; i < (1 << order); i++) {
+               page_ext = lookup_page_ext(page + i);
+               page_owner = get_page_owner(page_ext);
+               page_owner->order = 0;
+       }
 }
 
 void __copy_page_owner(struct page *oldpage, struct page *newpage)
@@ -235,6 +260,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
+       __set_bit(PAGE_EXT_OWNER_ACTIVE, &new_ext->flags);
 }
 
 void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -294,7 +320,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                        if (unlikely(!page_ext))
                                continue;
 
-                       if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+                       if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
                                continue;
 
                        page_owner = get_page_owner(page_ext);
@@ -405,20 +431,36 @@ void __dump_page_owner(struct page *page)
        mt = gfpflags_to_migratetype(gfp_mask);
 
        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
-               pr_alert("page_owner info is not active (free page?)\n");
+               pr_alert("page_owner info is not present (never set?)\n");
                return;
        }
 
+       if (test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+               pr_alert("page_owner tracks the page as allocated\n");
+       else
+               pr_alert("page_owner tracks the page as freed\n");
+
+       pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
+                page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
+
        handle = READ_ONCE(page_owner->handle);
        if (!handle) {
-               pr_alert("page_owner info is not active (free page?)\n");
-               return;
+               pr_alert("page_owner allocation stack trace missing\n");
+       } else {
+               nr_entries = stack_depot_fetch(handle, &entries);
+               stack_trace_print(entries, nr_entries, 0);
        }
 
-       nr_entries = stack_depot_fetch(handle, &entries);
-       pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
-                page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
-       stack_trace_print(entries, nr_entries, 0);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       handle = READ_ONCE(page_owner->free_handle);
+       if (!handle) {
+               pr_alert("page_owner free stack trace missing\n");
+       } else {
+               nr_entries = stack_depot_fetch(handle, &entries);
+               pr_alert("page last free stack trace:\n");
+               stack_trace_print(entries, nr_entries, 0);
+       }
+#endif
 
        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
@@ -481,8 +523,22 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        continue;
 
+               /*
+                * Although we do have the info about past allocation of free
+                * pages, it's not relevant for current memory usage.
+                */
+               if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+                       continue;
+
                page_owner = get_page_owner(page_ext);
 
+               /*
+                * Don't print "tail" pages of high-order allocations as that
+                * would inflate the stats.
+                */
+               if (!IS_ALIGNED(pfn, 1 << page_owner->order))
+                       continue;
+
                /*
                 * Access to page_ext->handle isn't synchronous so we should
                 * be careful to access it.
@@ -562,7 +618,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
                                continue;
 
                        /* Found early allocated page */
-                       __set_page_owner_handle(page_ext, early_handle, 0, 0);
+                       __set_page_owner_handle(page, page_ext, early_handle,
+                                               0, 0);
                        count++;
                }
                cond_resched();
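
The page_owner changes above split the state into two page_ext bits: PAGE_EXT_OWNER records that owner information was ever stored for the page, while PAGE_EXT_OWNER_ACTIVE says the page is currently allocated; with CONFIG_DEBUG_PAGEALLOC a separate free_handle additionally records the freeing stack. A minimal userspace sketch of the two-bit scheme (the flag values and helpers below are illustrative only):

#include <stdio.h>

#define OWNER           (1u << 0)       /* owner info recorded at least once */
#define OWNER_ACTIVE    (1u << 1)       /* page is currently allocated */

static void on_alloc(unsigned int *flags)
{
        *flags |= OWNER | OWNER_ACTIVE;
}

static void on_free(unsigned int *flags)
{
        *flags &= ~OWNER_ACTIVE;        /* OWNER stays set: old info is kept for dumps */
}

int main(void)
{
        unsigned int flags = 0;

        on_alloc(&flags);
        on_free(&flags);
        printf("ever owned: %d, currently allocated: %d\n",
               !!(flags & OWNER), !!(flags & OWNER_ACTIVE));    /* 1, 0 */
        return 0;
}
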
index 21d4f97cb49ba4486e2c00c51e052e13370671e4..34b9181ee5d1344eb092c99596131ae5697b2402 100644 (file)
@@ -101,7 +101,7 @@ static void unpoison_page(struct page *page)
        /*
         * Page poisoning when enabled poisons each and every page
         * that is freed to buddy. Thus no extra check is done to
-        * see if a page was posioned.
+        * see if a page was poisoned.
         */
        check_poison_mem(addr, PAGE_SIZE);
        kunmap_atomic(addr);
index 11df03e71288c3fe0b78e164eca835ac4332e5ca..eff4b4520c8d5c7603efb2a5f77f3ec3d8d79a3e 100644 (file)
@@ -153,8 +153,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 
        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
-               pvmw->pte = huge_pte_offset(mm, pvmw->address,
-                                           PAGE_SIZE << compound_order(page));
+               pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
                if (!pvmw->pte)
                        return false;
 
diff --git a/mm/quicklist.c b/mm/quicklist.c
deleted file mode 100644 (file)
index 5e98ac7..0000000
--- a/mm/quicklist.c
+++ /dev/null
@@ -1,103 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Quicklist support.
- *
- * Quicklists are light weight lists of pages that have a defined state
- * on alloc and free. Pages must be in the quicklist specific defined state
- * (zero by default) when the page is freed. It seems that the initial idea
- * for such lists first came from Dave Miller and then various other people
- * improved on it.
- *
- * Copyright (C) 2007 SGI,
- *     Christoph Lameter <cl@linux.com>
- *             Generalized, added support for multiple lists and
- *             constructors / destructors.
- */
-#include <linux/kernel.h>
-
-#include <linux/gfp.h>
-#include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <linux/quicklist.h>
-
-DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);
-
-#define FRACTION_OF_NODE_MEM   16
-
-static unsigned long max_pages(unsigned long min_pages)
-{
-       unsigned long node_free_pages, max;
-       int node = numa_node_id();
-       struct zone *zones = NODE_DATA(node)->node_zones;
-       int num_cpus_on_node;
-
-       node_free_pages =
-#ifdef CONFIG_ZONE_DMA
-               zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
-#endif
-#ifdef CONFIG_ZONE_DMA32
-               zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
-#endif
-               zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
-
-       max = node_free_pages / FRACTION_OF_NODE_MEM;
-
-       num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
-       max /= num_cpus_on_node;
-
-       return max(max, min_pages);
-}
-
-static long min_pages_to_free(struct quicklist *q,
-       unsigned long min_pages, long max_free)
-{
-       long pages_to_free;
-
-       pages_to_free = q->nr_pages - max_pages(min_pages);
-
-       return min(pages_to_free, max_free);
-}
-
-/*
- * Trim down the number of pages in the quicklist
- */
-void quicklist_trim(int nr, void (*dtor)(void *),
-       unsigned long min_pages, unsigned long max_free)
-{
-       long pages_to_free;
-       struct quicklist *q;
-
-       q = &get_cpu_var(quicklist)[nr];
-       if (q->nr_pages > min_pages) {
-               pages_to_free = min_pages_to_free(q, min_pages, max_free);
-
-               while (pages_to_free > 0) {
-                       /*
-                        * We pass a gfp_t of 0 to quicklist_alloc here
-                        * because we will never call into the page allocator.
-                        */
-                       void *p = quicklist_alloc(nr, 0, NULL);
-
-                       if (dtor)
-                               dtor(p);
-                       free_page((unsigned long)p);
-                       pages_to_free--;
-               }
-       }
-       put_cpu_var(quicklist);
-}
-
-unsigned long quicklist_total_size(void)
-{
-       unsigned long count = 0;
-       int cpu;
-       struct quicklist *ql, *q;
-
-       for_each_online_cpu(cpu) {
-               ql = per_cpu(quicklist, cpu);
-               for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
-                       count += q->nr_pages;
-       }
-       return count;
-}
-
index 003377e2423232614525ae2e6cbdda9695aca75b..d9a23bb773bf7997773cb94952fbf73c313a0bf3 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -898,15 +898,13 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
         */
        mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
                                0, vma, vma->vm_mm, address,
-                               min(vma->vm_end, address +
-                                   (PAGE_SIZE << compound_order(page))));
+                               min(vma->vm_end, address + page_size(page)));
        mmu_notifier_invalidate_range_start(&range);
 
        while (page_vma_mapped_walk(&pvmw)) {
-               unsigned long cstart;
                int ret = 0;
 
-               cstart = address = pvmw.address;
+               address = pvmw.address;
                if (pvmw.pte) {
                        pte_t entry;
                        pte_t *pte = pvmw.pte;
@@ -933,7 +931,6 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        entry = pmd_wrprotect(entry);
                        entry = pmd_mkclean(entry);
                        set_pmd_at(vma->vm_mm, address, pmd, entry);
-                       cstart &= PMD_MASK;
                        ret = 1;
 #else
                        /* unexpected pmd-mapped page? */
@@ -1192,8 +1189,10 @@ void page_add_file_rmap(struct page *page, bool compound)
                }
                if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
                        goto out;
-               VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
-               __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
+               if (PageSwapBacked(page))
+                       __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
+               else
+                       __inc_node_page_state(page, NR_FILE_PMDMAPPED);
        } else {
                if (PageTransCompound(page) && page_mapping(page)) {
                        VM_WARN_ON_ONCE(!PageLocked(page));
@@ -1232,8 +1231,10 @@ static void page_remove_file_rmap(struct page *page, bool compound)
                }
                if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
                        goto out;
-               VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
-               __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
+               if (PageSwapBacked(page))
+                       __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
+               else
+                       __dec_node_page_state(page, NR_FILE_PMDMAPPED);
        } else {
                if (!atomic_add_negative(-1, &page->_mapcount))
                        goto out;
@@ -1374,8 +1375,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         */
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                                address,
-                               min(vma->vm_end, address +
-                                   (PAGE_SIZE << compound_order(page))));
+                               min(vma->vm_end, address + page_size(page)));
        if (PageHuge(page)) {
                /*
                 * If sharing is possible, start and end will be adjusted
@@ -1524,8 +1524,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
                        pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
                        if (PageHuge(page)) {
-                               int nr = 1 << compound_order(page);
-                               hugetlb_count_sub(nr, mm);
+                               hugetlb_count_sub(compound_nr(page), mm);
                                set_huge_swap_pte_at(mm, address,
                                                     pvmw.pte, pteval,
                                                     vma_mmu_pagesize(vma));
index 0f7fd4a85db6e5f3497d21c1d8d512cf121c62e1..30ce722c23fa976cbc79ed6fdadd73d556b92881 100644 (file)
@@ -609,7 +609,7 @@ static int shmem_add_to_page_cache(struct page *page,
 {
        XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
        unsigned long i = 0;
-       unsigned long nr = 1UL << compound_order(page);
+       unsigned long nr = compound_nr(page);
 
        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -631,7 +631,7 @@ static int shmem_add_to_page_cache(struct page *page,
                if (xas_error(&xas))
                        goto unlock;
 next:
-               xas_store(&xas, page + i);
+               xas_store(&xas, page);
                if (++i < nr) {
                        xas_next(&xas);
                        goto next;
@@ -1734,7 +1734,7 @@ unlock:
  * vm. If we swap it in we mark it dirty since we also free the swap
  * entry since a page cannot live in both the swap and page cache.
  *
- * fault_mm and fault_type are only supplied by shmem_fault:
+ * vmf and fault_type are only supplied by shmem_fault:
  * otherwise they are NULL.
  */
 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
@@ -1884,7 +1884,7 @@ alloc_nohuge:
        lru_cache_add_anon(page);
 
        spin_lock_irq(&info->lock);
-       info->alloced += 1 << compound_order(page);
+       info->alloced += compound_nr(page);
        inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
        shmem_recalc_inode(inode);
        spin_unlock_irq(&info->lock);
@@ -1925,7 +1925,7 @@ clear:
                struct page *head = compound_head(page);
                int i;
 
-               for (i = 0; i < (1 << compound_order(head)); i++) {
+               for (i = 0; i < compound_nr(head); i++) {
                        clear_highpage(head + i);
                        flush_dcache_page(head + i);
                }
@@ -1952,7 +1952,7 @@ clear:
         * Error recovery.
         */
 unacct:
-       shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
+       shmem_inode_unacct_blocks(inode, compound_nr(page));
 
        if (PageTransHuge(page)) {
                unlock_page(page);
index 9057b8056b07b732052d0e318c7480382508f03a..68e455f2b698cbfab04ac4a1d61df3c1a32cfe9e 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -30,6 +30,69 @@ struct kmem_cache {
        struct list_head list;  /* List of all slab caches on the system */
 };
 
+#else /* !CONFIG_SLOB */
+
+struct memcg_cache_array {
+       struct rcu_head rcu;
+       struct kmem_cache *entries[0];
+};
+
+/*
+ * This is the main placeholder for memcg-related information in kmem caches.
+ * Both the root cache and the child caches will have it. For the root cache,
+ * this will hold a dynamically allocated array large enough to hold
+ * information about the currently limited memcgs in the system. To allow the
+ * array to be accessed without taking any locks, on relocation we free the old
+ * version only after a grace period.
+ *
+ * Root and child caches hold different metadata.
+ *
+ * @root_cache:        Common to root and child caches.  NULL for root, pointer to
+ *             the root cache for children.
+ *
+ * The following fields are specific to root caches.
+ *
+ * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
+ *             used to index child caches during allocation and cleared
+ *             early during shutdown.
+ *
+ * @root_caches_node: List node for slab_root_caches list.
+ *
+ * @children:  List of all child caches.  While the child caches are also
+ *             reachable through @memcg_caches, a child cache remains on
+ *             this list until it is actually destroyed.
+ *
+ * The following fields are specific to child caches.
+ *
+ * @memcg:     Pointer to the memcg this cache belongs to.
+ *
+ * @children_node: List node for @root_cache->children list.
+ *
+ * @kmem_caches_node: List node for @memcg->kmem_caches list.
+ */
+struct memcg_cache_params {
+       struct kmem_cache *root_cache;
+       union {
+               struct {
+                       struct memcg_cache_array __rcu *memcg_caches;
+                       struct list_head __root_caches_node;
+                       struct list_head children;
+                       bool dying;
+               };
+               struct {
+                       struct mem_cgroup *memcg;
+                       struct list_head children_node;
+                       struct list_head kmem_caches_node;
+                       struct percpu_ref refcnt;
+
+                       void (*work_fn)(struct kmem_cache *);
+                       union {
+                               struct rcu_head rcu_head;
+                               struct work_struct work;
+                       };
+               };
+       };
+};
 #endif /* CONFIG_SLOB */
 
 #ifdef CONFIG_SLAB
@@ -174,6 +237,7 @@ int __kmem_cache_shrink(struct kmem_cache *);
 void __kmemcg_cache_deactivate(struct kmem_cache *s);
 void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
 void slab_kmem_cache_release(struct kmem_cache *);
+void kmem_cache_shrink_all(struct kmem_cache *s);
 
 struct seq_file;
 struct file;
index 807490fe217a97d51b4575d19db8b0e04a40e000..6491c3a418053870ae600830f82fa7f72e16a58d 100644 (file)
@@ -981,6 +981,43 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
+/**
+ * kmem_cache_shrink_all - shrink a cache and all memcg caches for root cache
+ * @s: The cache pointer
+ */
+void kmem_cache_shrink_all(struct kmem_cache *s)
+{
+       struct kmem_cache *c;
+
+       if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || !is_root_cache(s)) {
+               kmem_cache_shrink(s);
+               return;
+       }
+
+       get_online_cpus();
+       get_online_mems();
+       kasan_cache_shrink(s);
+       __kmem_cache_shrink(s);
+
+       /*
+        * We have to take the slab_mutex to protect from the memcg list
+        * modification.
+        */
+       mutex_lock(&slab_mutex);
+       for_each_memcg_cache(c, s) {
+               /*
+                * Don't need to shrink deactivated memcg caches.
+                */
+               if (s->flags & SLAB_DEACTIVATED)
+                       continue;
+               kasan_cache_shrink(c);
+               __kmem_cache_shrink(c);
+       }
+       mutex_unlock(&slab_mutex);
+       put_online_mems();
+       put_online_cpus();
+}
+
 bool slab_is_available(void)
 {
        return slab_state >= UP;
index 7f421d0ca9abbcd3a17ca467f1221465537d982e..cf377beab96212bc8e717eaabfac6b263b9108cf 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -539,7 +539,7 @@ size_t __ksize(const void *block)
 
        sp = virt_to_page(block);
        if (unlikely(!PageSlab(sp)))
-               return PAGE_SIZE << compound_order(sp);
+               return page_size(sp);
 
        align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
        m = (unsigned int *)(block - align);
index 8834563cdb4bd4bedc4c59fad15de6afa6850717..42c1b3af3c9805fd6ae0e7028aa614fac979f433 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
                return 1;
 
        start = page_address(page);
-       length = PAGE_SIZE << compound_order(page);
+       length = page_size(page);
        end = start + length;
        remainder = length % s->size;
        if (!remainder)
@@ -1074,13 +1074,14 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
        init_tracking(s, object);
 }
 
-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 {
        if (!(s->flags & SLAB_POISON))
                return;
 
        metadata_access_enable();
-       memset(addr, POISON_INUSE, PAGE_SIZE << order);
+       memset(addr, POISON_INUSE, page_size(page));
        metadata_access_disable();
 }
 
@@ -1340,8 +1341,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
                        struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
-                       void *addr, int order) {}
+static inline
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1639,7 +1640,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        struct kmem_cache_order_objects oo = s->oo;
        gfp_t alloc_gfp;
        void *start, *p, *next;
-       int idx, order;
+       int idx;
        bool shuffle;
 
        flags &= gfp_allowed_mask;
@@ -1673,7 +1674,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
        page->objects = oo_objects(oo);
 
-       order = compound_order(page);
        page->slab_cache = s;
        __SetPageSlab(page);
        if (page_is_pfmemalloc(page))
@@ -1683,7 +1683,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
        start = page_address(page);
 
-       setup_page_debug(s, start, order);
+       setup_page_debug(s, page, start);
 
        shuffle = shuffle_freelist(s, page);
 
@@ -2004,6 +2004,7 @@ static inline unsigned long next_tid(unsigned long tid)
        return tid + TID_STEP;
 }
 
+#ifdef SLUB_DEBUG_CMPXCHG
 static inline unsigned int tid_to_cpu(unsigned long tid)
 {
        return tid % TID_STEP;
@@ -2013,6 +2014,7 @@ static inline unsigned long tid_to_event(unsigned long tid)
 {
        return tid / TID_STEP;
 }
+#endif
 
 static inline unsigned int init_tid(int cpu)
 {
@@ -3930,7 +3932,7 @@ size_t __ksize(const void *object)
 
        if (unlikely(!PageSlab(page))) {
                WARN_ON(!PageCompound(page));
-               return PAGE_SIZE << compound_order(page);
+               return page_size(page);
        }
 
        return slab_ksize(page->slab_cache);
@@ -5298,7 +5300,7 @@ static ssize_t shrink_store(struct kmem_cache *s,
                        const char *buf, size_t length)
 {
        if (buf[0] == '1')
-               kmem_cache_shrink(s);
+               kmem_cache_shrink_all(s);
        else
                return -EINVAL;
        return length;
index 72f010d9bff5014a47fd0ff5703e73ce5be1cf48..bf32de9e666b5697f74cdbe319d4c783237065ab 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/export.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include "internal.h"
 #include <asm/dma.h>
@@ -470,6 +472,12 @@ struct page __init *__populate_section_memmap(unsigned long pfn,
 static void *sparsemap_buf __meminitdata;
 static void *sparsemap_buf_end __meminitdata;
 
+static inline void __meminit sparse_buffer_free(unsigned long size)
+{
+       WARN_ON(!sparsemap_buf || size == 0);
+       memblock_free_early(__pa(sparsemap_buf), size);
+}
+
 static void __init sparse_buffer_init(unsigned long size, int nid)
 {
        phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
@@ -486,7 +494,7 @@ static void __init sparse_buffer_fini(void)
        unsigned long size = sparsemap_buf_end - sparsemap_buf;
 
        if (sparsemap_buf && size > 0)
-               memblock_free_early(__pa(sparsemap_buf), size);
+               sparse_buffer_free(size);
        sparsemap_buf = NULL;
 }
 
@@ -495,11 +503,15 @@ void * __meminit sparse_buffer_alloc(unsigned long size)
        void *ptr = NULL;
 
        if (sparsemap_buf) {
-               ptr = PTR_ALIGN(sparsemap_buf, size);
+               ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
                if (ptr + size > sparsemap_buf_end)
                        ptr = NULL;
-               else
+               else {
+                       /* Free redundant aligned space */
+                       if ((unsigned long)(ptr - sparsemap_buf) > 0)
+                               sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
                        sparsemap_buf = ptr + size;
+               }
        }
        return ptr;
 }
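
The sparse_buffer_alloc() change above does two things: it aligns with roundup(), which is correct even when the requested size is not a power of two (the mask-based PTR_ALIGN()/ALIGN() arithmetic assumes it is), and it hands the skipped leading space back to memblock instead of leaking it. A userspace sketch of the alignment difference, with made-up addresses and sizes:

#include <stdio.h>

#define ALIGN_POW2(x, a)  (((x) + (a) - 1) & ~((a) - 1))        /* only valid for power-of-2 a */
#define ROUNDUP(x, y)     ((((x) + (y) - 1) / (y)) * (y))       /* valid for any y */

int main(void)
{
        unsigned long buf = 0x1000;     /* current sparsemap_buf position */
        unsigned long size = 0x3000;    /* per-section memmap size, not a power of two */

        unsigned long masked = ALIGN_POW2(buf, size);   /* 0x1000: not even a multiple of size */
        unsigned long rounded = ROUNDUP(buf, size);     /* 0x3000: correct boundary */

        printf("mask align %#lx, roundup %#lx, gap returned to memblock %#lx\n",
               masked, rounded, rounded - buf);
        return 0;
}
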
@@ -867,7 +879,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
         */
        page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
 
-       ms = __pfn_to_section(start_pfn);
+       ms = __nr_to_section(section_nr);
        set_section_nid(section_nr, nid);
        section_mark_present(ms);
 
@@ -884,9 +896,6 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 {
        int i;
 
-       if (!memmap)
-               return;
-
        /*
         * A further optimization is to have per section refcounted
         * num_poisoned_pages.  But that would need more space per memmap, so
@@ -898,7 +907,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 
        for (i = 0; i < nr_pages; i++) {
                if (PageHWPoison(&memmap[i])) {
-                       atomic_long_sub(1, &num_poisoned_pages);
+                       num_poisoned_pages_dec();
                        ClearPageHWPoison(&memmap[i]);
                }
        }
index ae300397dfdac994f0544fdbe127d184796ac00a..784dc162062004e9b9e8540095c6fa1201e223f8 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -71,12 +71,12 @@ static void __page_cache_release(struct page *page)
                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
        }
        __ClearPageWaiters(page);
-       mem_cgroup_uncharge(page);
 }
 
 static void __put_single_page(struct page *page)
 {
        __page_cache_release(page);
+       mem_cgroup_uncharge(page);
        free_unref_page(page);
 }
 
@@ -515,7 +515,6 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
        del_page_from_lru_list(page, lruvec, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);
-       add_page_to_lru_list(page, lruvec, lru);
 
        if (PageWriteback(page) || PageDirty(page)) {
                /*
@@ -523,13 +522,14 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                 * It can make readahead confusing.  But the race window
                 * is _really_ small and it's a non-critical problem.
                 */
+               add_page_to_lru_list(page, lruvec, lru);
                SetPageReclaim(page);
        } else {
                /*
                 * The page's writeback ended while it sat in the pagevec,
                 * so move the page to the tail of the inactive list.
                 */
-               list_move_tail(&page->lru, &lruvec->lists[lru]);
+               add_page_to_lru_list_tail(page, lruvec, lru);
                __count_vm_event(PGROTATED);
        }
 
@@ -844,17 +844,15 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
                get_page(page_tail);
                list_add_tail(&page_tail->lru, list);
        } else {
-               struct list_head *list_head;
                /*
                 * Head page has not yet been counted, as an hpage,
                 * so we must account for each subpage individually.
                 *
-                * Use the standard add function to put page_tail on the list,
-                * but then correct its position so they all end up in order.
+                * Put page_tail on the list at the correct position
+                * so they all end up in order.
                 */
-               add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
-               list_head = page_tail->lru.prev;
-               list_move_tail(&page_tail->lru, list_head);
+               add_page_to_lru_list_tail(page_tail, lruvec,
+                                         page_lru(page_tail));
        }
 
        if (!PageUnevictable(page))
index 8368621a0fc70cfc3743ac36b04b249aaa4c3d09..8e7ce9a9bc5ebef151adf2b1178fce24bea8503c 100644 (file)
@@ -116,7 +116,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
-       unsigned long i, nr = 1UL << compound_order(page);
+       unsigned long i, nr = compound_nr(page);
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -133,7 +133,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
                        set_page_private(page + i, entry.val + i);
-                       xas_store(&xas, page + i);
+                       xas_store(&xas, page);
                        xas_next(&xas);
                }
                address_space->nrpages += nr;
@@ -168,7 +168,7 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
 
        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, NULL);
-               VM_BUG_ON_PAGE(entry != page + i, entry);
+               VM_BUG_ON_PAGE(entry != page, entry);
                set_page_private(page + i, 0);
                xas_next(&xas);
        }
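
The xas_store() conversions above (shmem, THP migration and the swap cache) now store the head page in every index a compound page covers, rather than the individual tail pages; the wanted subpage can then be derived from the lookup index. A userspace analogue with a plain array standing in for the XArray, purely illustrative:

#include <stdio.h>

struct page { unsigned long pfn; };

int main(void)
{
        struct page head = { 0x1000 };  /* order-2 compound page: covers 4 slots */
        struct page *slots[4];
        unsigned long base_index = 8;   /* cache index of the head page */
        unsigned long index = 10;       /* lookup lands in the middle of the page */
        unsigned long offset = index - base_index;
        unsigned int i;

        for (i = 0; i < 4; i++)
                slots[i] = &head;       /* every covered slot stores the head */

        printf("slot holds head pfn %#lx, subpage pfn %#lx (offset %lu)\n",
               slots[offset]->pfn, slots[offset]->pfn + offset, offset);
        return 0;
}
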
index e6351a80f24885edd910c9a997ef6f4acbaceeef..3ad6db9a722e07a7ff4d8805441256b7556f46f2 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
 #include <linux/hugetlb.h>
 #include <linux/vmalloc.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/elf.h>
+#include <linux/elf-randomize.h>
+#include <linux/personality.h>
+#include <linux/random.h>
+#include <linux/processor.h>
+#include <linux/sizes.h>
+#include <linux/compat.h>
 
 #include <linux/uaccess.h>
 
@@ -293,7 +300,105 @@ int vma_is_stack_for_current(struct vm_area_struct *vma)
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 }
 
-#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
+#ifndef STACK_RND_MASK
+#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
+#endif
+
+unsigned long randomize_stack_top(unsigned long stack_top)
+{
+       unsigned long random_variable = 0;
+
+       if (current->flags & PF_RANDOMIZE) {
+               random_variable = get_random_long();
+               random_variable &= STACK_RND_MASK;
+               random_variable <<= PAGE_SHIFT;
+       }
+#ifdef CONFIG_STACK_GROWSUP
+       return PAGE_ALIGN(stack_top) + random_variable;
+#else
+       return PAGE_ALIGN(stack_top) - random_variable;
+#endif
+}
+
+#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+       /* Is the current task 32bit ? */
+       if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
+               return randomize_page(mm->brk, SZ_32M);
+
+       return randomize_page(mm->brk, SZ_1G);
+}
+
+unsigned long arch_mmap_rnd(void)
+{
+       unsigned long rnd;
+
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+       if (is_compat_task())
+               rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
+       else
+#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
+               rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
+
+       return rnd << PAGE_SHIFT;
+}
+
+static int mmap_is_legacy(struct rlimit *rlim_stack)
+{
+       if (current->personality & ADDR_COMPAT_LAYOUT)
+               return 1;
+
+       if (rlim_stack->rlim_cur == RLIM_INFINITY)
+               return 1;
+
+       return sysctl_legacy_va_layout;
+}
+
+/*
+ * Leave enough space between the mmap area and the stack to honour ulimit in
+ * the face of randomisation.
+ */
+#define MIN_GAP                (SZ_128M)
+#define MAX_GAP                (STACK_TOP / 6 * 5)
+
+static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+{
+       unsigned long gap = rlim_stack->rlim_cur;
+       unsigned long pad = stack_guard_gap;
+
+       /* Account for stack randomization if necessary */
+       if (current->flags & PF_RANDOMIZE)
+               pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+       /* Values close to RLIM_INFINITY can overflow. */
+       if (gap + pad > gap)
+               gap += pad;
+
+       if (gap < MIN_GAP)
+               gap = MIN_GAP;
+       else if (gap > MAX_GAP)
+               gap = MAX_GAP;
+
+       return PAGE_ALIGN(STACK_TOP - gap - rnd);
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+{
+       unsigned long random_factor = 0UL;
+
+       if (current->flags & PF_RANDOMIZE)
+               random_factor = arch_mmap_rnd();
+
+       if (mmap_is_legacy(rlim_stack)) {
+               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+               mm->get_unmapped_area = arch_get_unmapped_area;
+       } else {
+               mm->mmap_base = mmap_base(random_factor, rlim_stack);
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+       }
+}
+#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 {
        mm->mmap_base = TASK_UNMAPPED_BASE;
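
The block above moves the generic top-down mmap layout code into mm/util.c so that architectures selecting ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT stop carrying private copies. The base is derived by clamping the stack rlimit, plus the guard gap and randomization slack, between MIN_GAP and MAX_GAP, and subtracting the result from STACK_TOP. A userspace sketch of that calculation; STACK_TOP, STACK_RND_MASK and the inputs in main() are assumed, illustrative values:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define STACK_TOP       0x7ffffffff000UL        /* assumed 64-bit user VA top */
#define STACK_RND_MASK  0x3fffffUL              /* assumed per-arch constant */
#define MIN_GAP         (128UL << 20)           /* SZ_128M */
#define MAX_GAP         (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, unsigned long rlim_cur,
                               unsigned long stack_guard_gap, int randomize)
{
        unsigned long gap = rlim_cur;
        unsigned long pad = stack_guard_gap;

        if (randomize)
                pad += STACK_RND_MASK << PAGE_SHIFT;

        if (gap + pad > gap)            /* values near RLIM_INFINITY would overflow */
                gap += pad;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

int main(void)
{
        /* 8MB stack rlimit, 1MB guard gap, randomization off */
        printf("mmap_base = %#lx\n",
               mmap_base(0, 8UL << 20, 256UL * PAGE_SIZE, 0));
        return 0;
}
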
@@ -521,7 +626,7 @@ bool page_mapped(struct page *page)
                return true;
        if (PageHuge(page))
                return false;
-       for (i = 0; i < (1 << compound_order(page)); i++) {
+       for (i = 0; i < compound_nr(page); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
@@ -783,3 +888,16 @@ out_mm:
 out:
        return res;
 }
+
+int memcmp_pages(struct page *page1, struct page *page2)
+{
+       char *addr1, *addr2;
+       int ret;
+
+       addr1 = kmap_atomic(page1);
+       addr2 = kmap_atomic(page2);
+       ret = memcmp(addr1, addr2, PAGE_SIZE);
+       kunmap_atomic(addr2);
+       kunmap_atomic(addr1);
+       return ret;
+}
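
The new memcmp_pages() helper above maps both pages with kmap_atomic() and compares one PAGE_SIZE worth of data. A userspace analogue with plain buffers in place of mapped pages, contents made up for illustration:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(void)
{
        static unsigned char page1[PAGE_SIZE], page2[PAGE_SIZE];

        memset(page1, 0xaa, sizeof(page1));
        memset(page2, 0xaa, sizeof(page2));
        page2[100] = 0xbb;      /* single differing byte */

        printf("pages identical: %s\n",
               memcmp(page1, page2, PAGE_SIZE) == 0 ? "yes" : "no");    /* no */
        return 0;
}
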
index c1246d77cf753010c6da91329bf2dd00049ef31f..fcadd3e25c0c8e1964506c4027c7e59ebc5e2c8d 100644 (file)
@@ -329,8 +329,6 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 
-#define VM_LAZY_FREE   0x02
-#define VM_VM_AREA     0x04
 
 static DEFINE_SPINLOCK(vmap_area_lock);
 /* Export for kexec only */
@@ -1116,7 +1114,7 @@ retry:
 
        va->va_start = addr;
        va->va_end = addr + size;
-       va->flags = 0;
+       va->vm = NULL;
        insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 
        spin_unlock(&vmap_area_lock);
@@ -1282,7 +1280,14 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
        llist_for_each_entry_safe(va, n_va, valist, purge_list) {
                unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 
-               __free_vmap_area(va);
+               /*
+                * Finally insert or merge lazily-freed area. It is
+                * detached and there is no need to "unlink" it from
+                * anything.
+                */
+               merge_or_add_vmap_area(va,
+                       &free_vmap_area_root, &free_vmap_area_list);
+
                atomic_long_sub(nr, &vmap_lazy_nr);
 
                if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
@@ -1324,6 +1329,10 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 {
        unsigned long nr_lazy;
 
+       spin_lock(&vmap_area_lock);
+       unlink_va(va, &vmap_area_root);
+       spin_unlock(&vmap_area_lock);
+
        nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
                                PAGE_SHIFT, &vmap_lazy_nr);
 
@@ -1918,7 +1927,6 @@ void __init vmalloc_init(void)
                if (WARN_ON_ONCE(!va))
                        continue;
 
-               va->flags = VM_VM_AREA;
                va->va_start = (unsigned long)tmp->addr;
                va->va_end = va->va_start + tmp->size;
                va->vm = tmp;
@@ -2016,7 +2024,6 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
        vm->size = va->va_end - va->va_start;
        vm->caller = caller;
        va->vm = vm;
-       va->flags |= VM_VM_AREA;
        spin_unlock(&vmap_area_lock);
 }
 
@@ -2121,10 +2128,10 @@ struct vm_struct *find_vm_area(const void *addr)
        struct vmap_area *va;
 
        va = find_vmap_area((unsigned long)addr);
-       if (va && va->flags & VM_VM_AREA)
-               return va->vm;
+       if (!va)
+               return NULL;
 
-       return NULL;
+       return va->vm;
 }
 
 /**
@@ -2143,14 +2150,12 @@ struct vm_struct *remove_vm_area(const void *addr)
 
        might_sleep();
 
-       va = find_vmap_area((unsigned long)addr);
-       if (va && va->flags & VM_VM_AREA) {
+       spin_lock(&vmap_area_lock);
+       va = __find_vmap_area((unsigned long)addr);
+       if (va && va->vm) {
                struct vm_struct *vm = va->vm;
 
-               spin_lock(&vmap_area_lock);
                va->vm = NULL;
-               va->flags &= ~VM_VM_AREA;
-               va->flags |= VM_LAZY_FREE;
                spin_unlock(&vmap_area_lock);
 
                kasan_free_shadow(vm);
@@ -2158,6 +2163,8 @@ struct vm_struct *remove_vm_area(const void *addr)
 
                return vm;
        }
+
+       spin_unlock(&vmap_area_lock);
        return NULL;
 }
 
@@ -2402,7 +2409,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));
 
-       area->nr_pages = nr_pages;
        /* Please note that the recursion is strictly bounded. */
        if (array_size > PAGE_SIZE) {
                pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
@@ -2410,13 +2416,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        } else {
                pages = kmalloc_node(array_size, nested_gfp, node);
        }
-       area->pages = pages;
-       if (!area->pages) {
+
+       if (!pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }
 
+       area->pages = pages;
+       area->nr_pages = nr_pages;
+
        for (i = 0; i < area->nr_pages; i++) {
                struct page *page;
 
@@ -2851,7 +2860,7 @@ long vread(char *buf, char *addr, unsigned long count)
                if (!count)
                        break;
 
-               if (!(va->flags & VM_VM_AREA))
+               if (!va->vm)
                        continue;
 
                vm = va->vm;
@@ -2931,7 +2940,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
                if (!count)
                        break;
 
-               if (!(va->flags & VM_VM_AREA))
+               if (!va->vm)
                        continue;
 
                vm = va->vm;
@@ -3450,6 +3459,22 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
        }
 }
 
+static void show_purge_info(struct seq_file *m)
+{
+       struct llist_node *head;
+       struct vmap_area *va;
+
+       head = READ_ONCE(vmap_purge_list.first);
+       if (head == NULL)
+               return;
+
+       llist_for_each_entry(va, head, purge_list) {
+               seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
+                       (void *)va->va_start, (void *)va->va_end,
+                       va->va_end - va->va_start);
+       }
+}
+
 static int s_show(struct seq_file *m, void *p)
 {
        struct vmap_area *va;
@@ -3458,14 +3483,13 @@ static int s_show(struct seq_file *m, void *p)
        va = list_entry(p, struct vmap_area, list);
 
        /*
-        * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
-        * behalf of vmap area is being tear down or vm_map_ram allocation.
+        * s_show can encounter a race with remove_vm_area(): !vm means
+        * the vmap area is being torn down or is a vm_map_ram allocation.
         */
-       if (!(va->flags & VM_VM_AREA)) {
-               seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
+       if (!va->vm) {
+               seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
                        (void *)va->va_start, (void *)va->va_end,
-                       va->va_end - va->va_start,
-                       va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
+                       va->va_end - va->va_start);
 
                return 0;
        }
@@ -3504,6 +3528,16 @@ static int s_show(struct seq_file *m, void *p)
 
        show_numa_info(m, v);
        seq_putc(m, '\n');
+
+       /*
+        * As a final step, dump "unpurged" areas. Note that the entire
+        * "/proc/vmallocinfo" output will not be address-sorted, because
+        * the purge list is not sorted.
+        */
+       if (list_is_last(&va->list, &vmap_area_list))
+               show_purge_info(m);
+
        return 0;
 }
 
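
With these hunks, /proc/vmallocinfo prints every area that has no vm_struct as a plain "vm_map_ram" line, and appends the not-yet-purged areas from vmap_purge_list after the main (sorted) list has been walked. A userspace sketch of the two resulting line shapes only, with %lx standing in for the kernel's %pK and illustrative addresses:

#include <stdio.h>

/* Mimics the two formats used by s_show()/show_purge_info() for areas
 * without a vm_struct attached. */
static void show_unattached(unsigned long start, unsigned long end, int purged)
{
	printf("0x%lx-0x%lx %7ld %s\n", start, end, (long)(end - start),
	       purged ? "unpurged vm_area" : "vm_map_ram");
}

int main(void)
{
	show_unattached(0x100000UL, 0x102000UL, 0);
	show_unattached(0x102000UL, 0x105000UL, 1);
	return 0;
}
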
index a6c5d0b28321c383037aab176bfbfca35755a320..4911754c93b7ed956726db7906d047f3579214b3 100644 (file)
@@ -171,11 +171,22 @@ int vm_swappiness = 60;
  */
 unsigned long vm_total_pages;
 
+static void set_task_reclaim_state(struct task_struct *task,
+                                  struct reclaim_state *rs)
+{
+       /* Check for an overwrite */
+       WARN_ON_ONCE(rs && task->reclaim_state);
+
+       /* Check for the nulling of an already-nulled member */
+       WARN_ON_ONCE(!rs && !task->reclaim_state);
+
+       task->reclaim_state = rs;
+}
+
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
-#ifdef CONFIG_MEMCG_KMEM
-
+#ifdef CONFIG_MEMCG
 /*
  * We allow subsystems to populate their shrinker-related
  * LRU lists before register_shrinker_prepared() is called
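
set_task_reclaim_state() centralizes the assignment of current->reclaim_state, and its two WARN_ON_ONCE() checks encode a simple discipline: a non-NULL state may only be installed over NULL, and NULL may only replace an installed state. A userspace analogue of that discipline, with assert() in place of WARN_ON_ONCE() and hypothetical names:

#include <assert.h>
#include <stddef.h>

struct reclaim_state {
	unsigned long reclaimed_slab;
};

static __thread struct reclaim_state *current_reclaim_state;

static void set_reclaim_state(struct reclaim_state *rs)
{
	/* installing over an existing state would lose accounting */
	assert(!(rs && current_reclaim_state));
	/* clearing an already-cleared state is a caller bug too */
	assert(!(!rs && !current_reclaim_state));
	current_reclaim_state = rs;
}

static void do_reclaim(void)
{
	struct reclaim_state rs = { 0 };

	set_reclaim_state(&rs);		/* on entry to reclaim */
	/* ... reclaim work accounts freed slab pages into rs ... */
	set_reclaim_state(NULL);	/* on every exit path */
}

int main(void)
{
	do_reclaim();
	return 0;
}
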
@@ -227,30 +238,7 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
        idr_remove(&shrinker_idr, id);
        up_write(&shrinker_rwsem);
 }
-#else /* CONFIG_MEMCG_KMEM */
-static int prealloc_memcg_shrinker(struct shrinker *shrinker)
-{
-       return 0;
-}
-
-static void unregister_memcg_shrinker(struct shrinker *shrinker)
-{
-}
-#endif /* CONFIG_MEMCG_KMEM */
-
-static void set_task_reclaim_state(struct task_struct *task,
-                                  struct reclaim_state *rs)
-{
-       /* Check for an overwrite */
-       WARN_ON_ONCE(rs && task->reclaim_state);
-
-       /* Check for the nulling of an already-nulled member */
-       WARN_ON_ONCE(!rs && !task->reclaim_state);
 
-       task->reclaim_state = rs;
-}
-
-#ifdef CONFIG_MEMCG
 static bool global_reclaim(struct scan_control *sc)
 {
        return !sc->target_mem_cgroup;
@@ -305,6 +293,15 @@ static bool memcg_congested(pg_data_t *pgdat,
 
 }
 #else
+static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+{
+       return 0;
+}
+
+static void unregister_memcg_shrinker(struct shrinker *shrinker)
+{
+}
+
 static bool global_reclaim(struct scan_control *sc)
 {
        return true;
@@ -591,7 +588,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
        return freed;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        struct mem_cgroup *memcg, int priority)
 {
@@ -599,7 +596,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
        unsigned long ret, freed = 0;
        int i;
 
-       if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
+       if (!mem_cgroup_online(memcg))
                return 0;
 
        if (!down_read_trylock(&shrinker_rwsem))
@@ -625,6 +622,11 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        continue;
                }
 
+               /* Call non-slab shrinkers even though kmem is disabled */
+               if (!memcg_kmem_enabled() &&
+                   !(shrinker->flags & SHRINKER_NONSLAB))
+                       continue;
+
                ret = do_shrink_slab(&sc, shrinker, priority);
                if (ret == SHRINK_EMPTY) {
                        clear_bit(i, map->map);
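
Because the shrinker-map infrastructure now depends on CONFIG_MEMCG rather than CONFIG_MEMCG_KMEM, memcg-aware shrinkers are iterated even when kmem accounting is disabled; the new check simply skips the ones that only reclaim slab objects unless they advertise SHRINKER_NONSLAB. A condensed sketch of that gate (flag value and names are illustrative, not the kernel definitions):

#include <stdbool.h>

#define SHRINKER_NONSLAB	(1U << 2)	/* illustrative bit value */

struct shrinker {
	unsigned int flags;
};

static bool kmem_enabled;	/* stands in for memcg_kmem_enabled() */

static bool should_call_shrinker(const struct shrinker *s)
{
	/* slab-only shrinker while kmem accounting is off: nothing to do */
	if (!kmem_enabled && !(s->flags & SHRINKER_NONSLAB))
		return false;
	return true;
}

int main(void)
{
	struct shrinker slab_only = { 0 };
	struct shrinker nonslab = { SHRINKER_NONSLAB };

	return (!should_call_shrinker(&slab_only) &&
		should_call_shrinker(&nonslab)) ? 0 : 1;
}
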
@@ -661,13 +663,13 @@ unlock:
        up_read(&shrinker_rwsem);
        return freed;
 }
-#else /* CONFIG_MEMCG_KMEM */
+#else /* CONFIG_MEMCG */
 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        struct mem_cgroup *memcg, int priority)
 {
        return 0;
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG */
 
 /**
  * shrink_slab - shrink slab caches
@@ -1149,7 +1151,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                VM_BUG_ON_PAGE(PageActive(page), page);
 
-               nr_pages = 1 << compound_order(page);
+               nr_pages = compound_nr(page);
 
                /* Account the number of base pages even though THP */
                sc->nr_scanned += nr_pages;
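
compound_nr(page) is the new helper for "how many base pages does this (possibly compound) page cover", replacing the open-coded 1 << compound_order(page); the substitution is purely cosmetic. The arithmetic, as a trivial standalone check (not the kernel helper itself):

#include <assert.h>

/* Number of base pages in a compound page of the given order. */
static unsigned long nr_pages_for_order(unsigned int order)
{
	return 1UL << order;
}

int main(void)
{
	assert(nr_pages_for_order(0) == 1);	/* ordinary page */
	assert(nr_pages_for_order(9) == 512);	/* 2 MiB THP with 4 KiB pages */
	return 0;
}
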
@@ -1487,10 +1489,9 @@ free_it:
                 * Is there need to periodically free_page_list? It would
                 * appear not as the counts should be low
                 */
-               if (unlikely(PageTransHuge(page))) {
-                       mem_cgroup_uncharge(page);
+               if (unlikely(PageTransHuge(page)))
                        (*get_compound_page_dtor(page))(page);
-               } else
+               else
                        list_add(&page->lru, &free_pages);
                continue;
 
@@ -1705,7 +1706,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
                VM_BUG_ON_PAGE(!PageLRU(page), page);
 
-               nr_pages = 1 << compound_order(page);
+               nr_pages = compound_nr(page);
                total_scan += nr_pages;
 
                if (page_zonenum(page) > sc->reclaim_idx) {
@@ -1911,7 +1912,6 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&pgdat->lru_lock);
-                               mem_cgroup_uncharge(page);
                                (*get_compound_page_dtor(page))(page);
                                spin_lock_irq(&pgdat->lru_lock);
                        } else
@@ -2586,7 +2586,6 @@ static bool in_reclaim_compaction(struct scan_control *sc)
  */
 static inline bool should_continue_reclaim(struct pglist_data *pgdat,
                                        unsigned long nr_reclaimed,
-                                       unsigned long nr_scanned,
                                        struct scan_control *sc)
 {
        unsigned long pages_for_compaction;
@@ -2597,40 +2596,18 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
        if (!in_reclaim_compaction(sc))
                return false;
 
-       /* Consider stopping depending on scan and reclaim activity */
-       if (sc->gfp_mask & __GFP_RETRY_MAYFAIL) {
-               /*
-                * For __GFP_RETRY_MAYFAIL allocations, stop reclaiming if the
-                * full LRU list has been scanned and we are still failing
-                * to reclaim pages. This full LRU scan is potentially
-                * expensive but a __GFP_RETRY_MAYFAIL caller really wants to succeed
-                */
-               if (!nr_reclaimed && !nr_scanned)
-                       return false;
-       } else {
-               /*
-                * For non-__GFP_RETRY_MAYFAIL allocations which can presumably
-                * fail without consequence, stop if we failed to reclaim
-                * any pages from the last SWAP_CLUSTER_MAX number of
-                * pages that were scanned. This will return to the
-                * caller faster at the risk reclaim/compaction and
-                * the resulting allocation attempt fails
-                */
-               if (!nr_reclaimed)
-                       return false;
-       }
-
        /*
-        * If we have not reclaimed enough pages for compaction and the
-        * inactive lists are large enough, continue reclaiming
+        * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
+        * number of pages that were scanned. This returns to the caller faster,
+        * at the risk that reclaim/compaction and the resulting allocation
+        * attempt fail. In the past we tried harder for __GFP_RETRY_MAYFAIL
+        * allocations by requiring that the full LRU list had been scanned
+        * first, assuming that a zero delta of sc->nr_scanned meant a full LRU
+        * scan, but that approximation was wrong: there were corner cases where
+        * a non-zero number of pages was always scanned.
         */
-       pages_for_compaction = compact_gap(sc->order);
-       inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
-       if (get_nr_swap_pages() > 0)
-               inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
-       if (sc->nr_reclaimed < pages_for_compaction &&
-                       inactive_lru_pages > pages_for_compaction)
-               return true;
+       if (!nr_reclaimed)
+               return false;
 
        /* If compaction would go ahead or the allocation would succeed, stop */
        for (z = 0; z <= sc->reclaim_idx; z++) {
@@ -2647,7 +2624,17 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
                        ;
                }
        }
-       return true;
+
+       /*
+        * If we have not reclaimed enough pages for compaction and the
+        * inactive lists are large enough, continue reclaiming
+        */
+       pages_for_compaction = compact_gap(sc->order);
+       inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
+       if (get_nr_swap_pages() > 0)
+               inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
+
+       return inactive_lru_pages > pages_for_compaction;
 }
 
 static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
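
After the rewrite, should_continue_reclaim() boils down to three steps: give up when the last round reclaimed nothing, stop once compaction (or the allocation itself) is expected to succeed, and otherwise keep going only while the inactive lists still exceed the compaction gap. A condensed sketch of that decision, with the per-zone compaction_suitable() loop folded into a single flag (names are illustrative, not the kernel function):

#include <stdbool.h>

static bool continue_reclaim(unsigned long nr_reclaimed_this_round,
			     bool compaction_ready,
			     unsigned long inactive_lru_pages,
			     unsigned long pages_for_compaction)
{
	if (!nr_reclaimed_this_round)
		return false;	/* no progress at all: stop */
	if (compaction_ready)
		return false;	/* compaction/allocation would already succeed */
	return inactive_lru_pages > pages_for_compaction;
}

int main(void)
{
	/* reclaimed something, compaction not ready, plenty of inactive pages */
	return continue_reclaim(32, false, 4096, 1024) ? 0 : 1;
}
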
@@ -2664,10 +2651,6 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 
        do {
                struct mem_cgroup *root = sc->target_mem_cgroup;
-               struct mem_cgroup_reclaim_cookie reclaim = {
-                       .pgdat = pgdat,
-                       .priority = sc->priority,
-               };
                unsigned long node_lru_pages = 0;
                struct mem_cgroup *memcg;
 
@@ -2676,7 +2659,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                nr_reclaimed = sc->nr_reclaimed;
                nr_scanned = sc->nr_scanned;
 
-               memcg = mem_cgroup_iter(root, NULL, &reclaim);
+               memcg = mem_cgroup_iter(root, NULL, NULL);
                do {
                        unsigned long lru_pages;
                        unsigned long reclaimed;
@@ -2719,21 +2702,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                                   sc->nr_scanned - scanned,
                                   sc->nr_reclaimed - reclaimed);
 
-                       /*
-                        * Kswapd have to scan all memory cgroups to fulfill
-                        * the overall scan target for the node.
-                        *
-                        * Limit reclaim, on the other hand, only cares about
-                        * nr_to_reclaim pages to be reclaimed and it will
-                        * retry with decreasing priority if one round over the
-                        * whole hierarchy is not sufficient.
-                        */
-                       if (!current_is_kswapd() &&
-                                       sc->nr_reclaimed >= sc->nr_to_reclaim) {
-                               mem_cgroup_iter_break(root, memcg);
-                               break;
-                       }
-               } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
+               } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 
                if (reclaim_state) {
                        sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -2810,7 +2779,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                        wait_iff_congested(BLK_RW_ASYNC, HZ/10);
 
        } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
-                                        sc->nr_scanned - nr_scanned, sc));
+                                        sc));
 
        /*
         * Kswapd gives up on balancing particular nodes after too
index fd7e16ca6996cc09d9480a40d0e00f39e132d5ab..6afc892a148a0408a901695d42529a2a1be99939 100644 (file)
@@ -1158,6 +1158,8 @@ const char * const vmstat_text[] = {
        "nr_shmem",
        "nr_shmem_hugepages",
        "nr_shmem_pmdmapped",
+       "nr_file_hugepages",
+       "nr_file_pmdmapped",
        "nr_anon_transparent_hugepages",
        "nr_unstable",
        "nr_vmscan_write",
index 75b7962439ff8b21b9cacb6a542d21ad8313af3b..05bdf90646e7937ab8435327f785afbb0deaabf1 100644 (file)
@@ -41,7 +41,6 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/wait.h>
 #include <linux/zpool.h>
 #include <linux/magic.h>
 
@@ -146,8 +145,6 @@ struct z3fold_header {
  * @release_wq:        workqueue for safe page release
  * @work:      work_struct for safe page release
  * @inode:     inode for z3fold pseudo filesystem
- * @destroying: bool to stop migration once we start destruction
- * @isolated: int to count the number of pages currently in isolation
  *
  * This structure is allocated at pool creation time and maintains metadata
  * pertaining to a particular z3fold pool.
@@ -166,11 +163,8 @@ struct z3fold_pool {
        const struct zpool_ops *zpool_ops;
        struct workqueue_struct *compact_wq;
        struct workqueue_struct *release_wq;
-       struct wait_queue_head isolate_wait;
        struct work_struct work;
        struct inode *inode;
-       bool destroying;
-       int isolated;
 };
 
 /*
@@ -301,14 +295,11 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
  }
 
 /* Initializes the z3fold header of a newly allocated z3fold page */
-static struct z3fold_header *init_z3fold_page(struct page *page,
+static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
                                        struct z3fold_pool *pool, gfp_t gfp)
 {
        struct z3fold_header *zhdr = page_address(page);
-       struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);
-
-       if (!slots)
-               return NULL;
+       struct z3fold_buddy_slots *slots;
 
        INIT_LIST_HEAD(&page->lru);
        clear_bit(PAGE_HEADLESS, &page->private);
@@ -316,6 +307,12 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
        clear_bit(NEEDS_COMPACTING, &page->private);
        clear_bit(PAGE_STALE, &page->private);
        clear_bit(PAGE_CLAIMED, &page->private);
+       if (headless)
+               return zhdr;
+
+       slots = alloc_slots(pool, gfp);
+       if (!slots)
+               return NULL;
 
        spin_lock_init(&zhdr->page_lock);
        kref_init(&zhdr->refcount);
@@ -372,9 +369,10 @@ static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
  * Encodes the handle of a particular buddy within a z3fold page
  * Pool lock should be held as this function accesses first_num
  */
-static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
+static unsigned long __encode_handle(struct z3fold_header *zhdr,
+                               struct z3fold_buddy_slots *slots,
+                               enum buddy bud)
 {
-       struct z3fold_buddy_slots *slots;
        unsigned long h = (unsigned long)zhdr;
        int idx = 0;
 
@@ -391,11 +389,15 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
        if (bud == LAST)
                h |= (zhdr->last_chunks << BUDDY_SHIFT);
 
-       slots = zhdr->slots;
        slots->slot[idx] = h;
        return (unsigned long)&slots->slot[idx];
 }
 
+static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
+{
+       return __encode_handle(zhdr, zhdr->slots, bud);
+}
+
 /* Returns the z3fold page where a given handle is stored */
 static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
 {
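
The split above is the classic "double-underscore helper" refactor: __encode_handle() takes the slots structure as an explicit argument, while the original encode_handle() survives as a thin wrapper passing zhdr->slots, so existing callers are untouched and reclaim (in a later hunk) can pass a slots structure that lives on its own stack. A generic sketch of the pattern with hypothetical types and names:

struct slots {
	unsigned long slot[4];
};

struct header {
	struct slots *slots;	/* the embedded/default slots */
};

/* New low-level helper: the caller chooses which slots to write into. */
static unsigned long __encode(struct header *h, struct slots *s, unsigned int idx)
{
	s->slot[idx] = (unsigned long)h + idx;	/* illustrative encoding */
	return (unsigned long)&s->slot[idx];
}

/* Old entry point, kept as a thin wrapper over the default slots. */
static unsigned long encode(struct header *h, unsigned int idx)
{
	return __encode(h, h->slots, idx);
}

int main(void)
{
	struct slots s;
	struct header h = { &s };

	return encode(&h, 1) == (unsigned long)&s.slot[1] ? 0 : 1;
}
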
@@ -630,6 +632,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
        }
 
        if (unlikely(PageIsolated(page) ||
+                    test_bit(PAGE_CLAIMED, &page->private) ||
                     test_bit(PAGE_STALE, &page->private))) {
                z3fold_page_unlock(zhdr);
                return;
@@ -775,7 +778,6 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
                goto out_c;
        spin_lock_init(&pool->lock);
        spin_lock_init(&pool->stale_lock);
-       init_waitqueue_head(&pool->isolate_wait);
        pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
        if (!pool->unbuddied)
                goto out_pool;
@@ -815,15 +817,6 @@ out:
        return NULL;
 }
 
-static bool pool_isolated_are_drained(struct z3fold_pool *pool)
-{
-       bool ret;
-
-       spin_lock(&pool->lock);
-       ret = pool->isolated == 0;
-       spin_unlock(&pool->lock);
-       return ret;
-}
 /**
  * z3fold_destroy_pool() - destroys an existing z3fold pool
  * @pool:      the z3fold pool to be destroyed
@@ -833,22 +826,6 @@ static bool pool_isolated_are_drained(struct z3fold_pool *pool)
 static void z3fold_destroy_pool(struct z3fold_pool *pool)
 {
        kmem_cache_destroy(pool->c_handle);
-       /*
-        * We set pool-> destroying under lock to ensure that
-        * z3fold_page_isolate() sees any changes to destroying. This way we
-        * avoid the need for any memory barriers.
-        */
-
-       spin_lock(&pool->lock);
-       pool->destroying = true;
-       spin_unlock(&pool->lock);
-
-       /*
-        * We need to ensure that no pages are being migrated while we destroy
-        * these workqueues, as migration can queue work on either of the
-        * workqueues.
-        */
-       wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool));
 
        /*
         * We need to destroy pool->compact_wq before pool->release_wq,
@@ -956,7 +933,7 @@ retry:
        if (!page)
                return -ENOMEM;
 
-       zhdr = init_z3fold_page(page, pool, gfp);
+       zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
        if (!zhdr) {
                __free_page(page);
                return -ENOMEM;
@@ -1132,6 +1109,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        struct list_head *pos;
+       struct z3fold_buddy_slots slots;
        unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
 
        spin_lock(&pool->lock);
@@ -1150,16 +1128,22 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                        /* this bit could have been set by free, in which case
                         * we pass over to the next page in the pool.
                         */
-                       if (test_and_set_bit(PAGE_CLAIMED, &page->private))
+                       if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+                               page = NULL;
                                continue;
+                       }
 
-                       if (unlikely(PageIsolated(page)))
+                       if (unlikely(PageIsolated(page))) {
+                               clear_bit(PAGE_CLAIMED, &page->private);
+                               page = NULL;
                                continue;
+                       }
+                       zhdr = page_address(page);
                        if (test_bit(PAGE_HEADLESS, &page->private))
                                break;
 
-                       zhdr = page_address(page);
                        if (!z3fold_page_trylock(zhdr)) {
+                               clear_bit(PAGE_CLAIMED, &page->private);
                                zhdr = NULL;
                                continue; /* can't evict at this point */
                        }
@@ -1177,26 +1161,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 
                if (!test_bit(PAGE_HEADLESS, &page->private)) {
                        /*
-                        * We need encode the handles before unlocking, since
-                        * we can race with free that will set
-                        * (first|last)_chunks to 0
+                        * We need to encode the handles before unlocking, and
+                        * use our local slots structure because z3fold_free
+                        * can zero out zhdr->slots and we can't do much
+                        * about that
                         */
                        first_handle = 0;
                        last_handle = 0;
                        middle_handle = 0;
                        if (zhdr->first_chunks)
-                               first_handle = encode_handle(zhdr, FIRST);
+                               first_handle = __encode_handle(zhdr, &slots,
+                                                               FIRST);
                        if (zhdr->middle_chunks)
-                               middle_handle = encode_handle(zhdr, MIDDLE);
+                               middle_handle = __encode_handle(zhdr, &slots,
+                                                               MIDDLE);
                        if (zhdr->last_chunks)
-                               last_handle = encode_handle(zhdr, LAST);
+                               last_handle = __encode_handle(zhdr, &slots,
+                                                               LAST);
                        /*
                         * it's safe to unlock here because we hold a
                         * reference to this page
                         */
                        z3fold_page_unlock(zhdr);
                } else {
-                       first_handle = encode_handle(zhdr, HEADLESS);
+                       first_handle = __encode_handle(zhdr, &slots, HEADLESS);
                        last_handle = middle_handle = 0;
                }
 
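
The reason for the stack-local "slots" above: once z3fold_reclaim_page() drops the page lock, a concurrent z3fold_free() may zero zhdr->slots, so the handles are encoded into memory owned by the reclaimer before unlocking. A generic userspace sketch of that "snapshot shared state before dropping the lock" idea, with hypothetical names:

#include <pthread.h>

struct shared {
	pthread_mutex_t lock;
	unsigned long handle;	/* may be zeroed by a concurrent "free" */
};

static unsigned long snapshot_handle(struct shared *s)
{
	unsigned long local;

	pthread_mutex_lock(&s->lock);
	local = s->handle;	/* copy into caller-owned storage while protected */
	pthread_mutex_unlock(&s->lock);

	/* 'local' stays valid even if s->handle is zeroed after the unlock */
	return local;
}

int main(void)
{
	struct shared s = { PTHREAD_MUTEX_INITIALIZER, 0x42 };

	return snapshot_handle(&s) == 0x42 ? 0 : 1;
}
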
@@ -1226,9 +1214,9 @@ next:
                        spin_lock(&pool->lock);
                        list_add(&page->lru, &pool->lru);
                        spin_unlock(&pool->lock);
+                       clear_bit(PAGE_CLAIMED, &page->private);
                } else {
                        z3fold_page_lock(zhdr);
-                       clear_bit(PAGE_CLAIMED, &page->private);
                        if (kref_put(&zhdr->refcount,
                                        release_z3fold_page_locked)) {
                                atomic64_dec(&pool->pages_nr);
@@ -1243,6 +1231,7 @@ next:
                        list_add(&page->lru, &pool->lru);
                        spin_unlock(&pool->lock);
                        z3fold_page_unlock(zhdr);
+                       clear_bit(PAGE_CLAIMED, &page->private);
                }
 
                /* We started off locked to we need to lock the pool back */
@@ -1339,28 +1328,6 @@ static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
        return atomic64_read(&pool->pages_nr);
 }
 
-/*
- * z3fold_dec_isolated() expects to be called while pool->lock is held.
- */
-static void z3fold_dec_isolated(struct z3fold_pool *pool)
-{
-       assert_spin_locked(&pool->lock);
-       VM_BUG_ON(pool->isolated <= 0);
-       pool->isolated--;
-
-       /*
-        * If we have no more isolated pages, we have to see if
-        * z3fold_destroy_pool() is waiting for a signal.
-        */
-       if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
-               wake_up_all(&pool->isolate_wait);
-}
-
-static void z3fold_inc_isolated(struct z3fold_pool *pool)
-{
-       pool->isolated++;
-}
-
 static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
 {
        struct z3fold_header *zhdr;
@@ -1369,7 +1336,8 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
        VM_BUG_ON_PAGE(!PageMovable(page), page);
        VM_BUG_ON_PAGE(PageIsolated(page), page);
 
-       if (test_bit(PAGE_HEADLESS, &page->private))
+       if (test_bit(PAGE_HEADLESS, &page->private) ||
+           test_bit(PAGE_CLAIMED, &page->private))
                return false;
 
        zhdr = page_address(page);
@@ -1387,34 +1355,6 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
                spin_lock(&pool->lock);
                if (!list_empty(&page->lru))
                        list_del(&page->lru);
-               /*
-                * We need to check for destruction while holding pool->lock, as
-                * otherwise destruction could see 0 isolated pages, and
-                * proceed.
-                */
-               if (unlikely(pool->destroying)) {
-                       spin_unlock(&pool->lock);
-                       /*
-                        * If this page isn't stale, somebody else holds a
-                        * reference to it. Let't drop our refcount so that they
-                        * can call the release logic.
-                        */
-                       if (unlikely(kref_put(&zhdr->refcount,
-                                             release_z3fold_page_locked))) {
-                               /*
-                                * If we get here we have kref problems, so we
-                                * should freak out.
-                                */
-                               WARN(1, "Z3fold is experiencing kref problems\n");
-                               z3fold_page_unlock(zhdr);
-                               return false;
-                       }
-                       z3fold_page_unlock(zhdr);
-                       return false;
-               }
-
-
-               z3fold_inc_isolated(pool);
                spin_unlock(&pool->lock);
                z3fold_page_unlock(zhdr);
                return true;
@@ -1483,10 +1423,6 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
 
        queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
 
-       spin_lock(&pool->lock);
-       z3fold_dec_isolated(pool);
-       spin_unlock(&pool->lock);
-
        page_mapcount_reset(page);
        put_page(page);
        return 0;
@@ -1506,14 +1442,10 @@ static void z3fold_page_putback(struct page *page)
        INIT_LIST_HEAD(&page->lru);
        if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
                atomic64_dec(&pool->pages_nr);
-               spin_lock(&pool->lock);
-               z3fold_dec_isolated(pool);
-               spin_unlock(&pool->lock);
                return;
        }
        spin_lock(&pool->lock);
        list_add(&page->lru, &pool->lru);
-       z3fold_dec_isolated(pool);
        spin_unlock(&pool->lock);
        z3fold_page_unlock(zhdr);
 }
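
With the isolation counter and the "destroying" flag gone from z3fold, the PAGE_CLAIMED page-private bit is what keeps reclaim and page migration from working on the same page: reclaim claims a page with test_and_set_bit(), isolation refuses claimed pages, and every exit path clears the bit again. A userspace analogue of that claim/release protocol using a C11 atomic flag (hypothetical names; the kernel uses test_and_set_bit()/clear_bit() on page->private):

#include <stdatomic.h>
#include <stdbool.h>

struct zpage {
	atomic_flag claimed;	/* analogue of the PAGE_CLAIMED bit */
};

/* Returns true if we obtained exclusive ownership of the page. */
static bool claim_page(struct zpage *p)
{
	return !atomic_flag_test_and_set(&p->claimed);
}

static void release_page(struct zpage *p)
{
	atomic_flag_clear(&p->claimed);
}

static bool try_reclaim(struct zpage *p)
{
	if (!claim_page(p))
		return false;	/* someone else (e.g. migration) owns it */
	/* ... evict the page's contents ... */
	release_page(p);	/* cleared on every exit path */
	return true;
}

int main(void)
{
	struct zpage p = { ATOMIC_FLAG_INIT };

	return try_reclaim(&p) ? 0 : 1;
}
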
index a2dd9107857d44bc2573bc6976df5f5afa0306ca..863669212070433f2a7d37f05e4cc66a54260fbc 100644 (file)
@@ -238,6 +238,22 @@ const char *zpool_get_type(struct zpool *zpool)
        return zpool->driver->type;
 }
 
+/**
+ * zpool_malloc_support_movable() - Check if the zpool supports
+ * allocating movable memory
+ * @zpool:     The zpool to check
+ *
+ * This returns whether the zpool supports allocating movable memory.
+ *
+ * Implementations must guarantee this to be thread-safe.
+ *
+ * Returns: true if the zpool supports allocating movable memory, false if not
+ */
+bool zpool_malloc_support_movable(struct zpool *zpool)
+{
+       return zpool->driver->malloc_support_movable;
+}
+
 /**
  * zpool_malloc() - Allocate memory
  * @zpool:     The zpool to allocate from.
index e98bb6ab4f7e76c8b3f6d29c165b43fd6a6da964..2b2b9aae8a3c63dfc3c8a1b0551ec8c415d69567 100644 (file)
@@ -443,15 +443,16 @@ static u64 zs_zpool_total_size(void *pool)
 }
 
 static struct zpool_driver zs_zpool_driver = {
-       .type =         "zsmalloc",
-       .owner =        THIS_MODULE,
-       .create =       zs_zpool_create,
-       .destroy =      zs_zpool_destroy,
-       .malloc =       zs_zpool_malloc,
-       .free =         zs_zpool_free,
-       .map =          zs_zpool_map,
-       .unmap =        zs_zpool_unmap,
-       .total_size =   zs_zpool_total_size,
+       .type =                   "zsmalloc",
+       .owner =                  THIS_MODULE,
+       .create =                 zs_zpool_create,
+       .destroy =                zs_zpool_destroy,
+       .malloc_support_movable = true,
+       .malloc =                 zs_zpool_malloc,
+       .free =                   zs_zpool_free,
+       .map =                    zs_zpool_map,
+       .unmap =                  zs_zpool_unmap,
+       .total_size =             zs_zpool_total_size,
 };
 
 MODULE_ALIAS("zpool-zsmalloc");
@@ -476,10 +477,6 @@ static inline int get_zspage_inuse(struct zspage *zspage)
        return zspage->inuse;
 }
 
-static inline void set_zspage_inuse(struct zspage *zspage, int val)
-{
-       zspage->inuse = val;
-}
 
 static inline void mod_zspage_inuse(struct zspage *zspage, int val)
 {
index 0e22744a76cb6582389c76e37070752970114f00..46a322316e52c92fcb95f443d2e4ca3c00cb7955 100644 (file)
@@ -856,7 +856,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
        /* extract swpentry from data */
        zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
        swpentry = zhdr->swpentry; /* here */
-       zpool_unmap_handle(pool, handle);
        tree = zswap_trees[swp_type(swpentry)];
        offset = swp_offset(swpentry);
 
@@ -866,6 +865,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
        if (!entry) {
                /* entry was invalidated */
                spin_unlock(&tree->lock);
+               zpool_unmap_handle(pool, handle);
                return 0;
        }
        spin_unlock(&tree->lock);
@@ -886,15 +886,13 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
        case ZSWAP_SWAPCACHE_NEW: /* page is locked */
                /* decompress */
                dlen = PAGE_SIZE;
-               src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
-                               ZPOOL_MM_RO) + sizeof(struct zswap_header);
+               src = (u8 *)zhdr + sizeof(struct zswap_header);
                dst = kmap_atomic(page);
                tfm = *get_cpu_ptr(entry->pool->tfm);
                ret = crypto_comp_decompress(tfm, src, entry->length,
                                             dst, &dlen);
                put_cpu_ptr(entry->pool->tfm);
                kunmap_atomic(dst);
-               zpool_unmap_handle(entry->pool->zpool, entry->handle);
                BUG_ON(ret);
                BUG_ON(dlen != PAGE_SIZE);
 
@@ -940,6 +938,7 @@ fail:
        spin_unlock(&tree->lock);
 
 end:
+       zpool_unmap_handle(pool, handle);
        return ret;
 }
 
@@ -997,6 +996,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
        char *buf;
        u8 *src, *dst;
        struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
+       gfp_t gfp;
 
        /* THP isn't supported */
        if (PageTransHuge(page)) {
@@ -1070,9 +1070,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 
        /* store */
        hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
-       ret = zpool_malloc(entry->pool->zpool, hlen + dlen,
-                          __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
-                          &handle);
+       gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+       if (zpool_malloc_support_movable(entry->pool->zpool))
+               gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
+       ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
        if (ret == -ENOSPC) {
                zswap_reject_compress_poor++;
                goto put_dstmem;
index 947b8ff0227e64ad190116178a2e6c9c4a154102..bba3104f128f1abf04154085380b1ac24428dcff 100644 (file)
@@ -206,14 +206,7 @@ static int xdp_umem_map_pages(struct xdp_umem *umem)
 
 static void xdp_umem_unpin_pages(struct xdp_umem *umem)
 {
-       unsigned int i;
-
-       for (i = 0; i < umem->npgs; i++) {
-               struct page *page = umem->pgs[i];
-
-               set_page_dirty_lock(page);
-               put_page(page);
-       }
+       put_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
 
        kfree(umem->pgs);
        umem->pgs = NULL;
index c2f1af3b6a7c4ec2aed2beab304e0692fb462535..fa8fbb8fa3c823aff9cb06d25f4335b662cc93c2 100644 (file)
@@ -977,7 +977,7 @@ static int xsk_mmap(struct file *file, struct socket *sock,
        /* Matches the smp_wmb() in xsk_init_queue */
        smp_rmb();
        qpg = virt_to_head_page(q->ring);
-       if (size > (PAGE_SIZE << compound_order(qpg)))
+       if (size > page_size(qpg))
                return -EINVAL;
 
        pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
index 6a89eb019275b6f209d45d66884a766672dc88eb..e6f7cb2f81db4a2936cd6d633ec1853c85355294 100644 (file)
@@ -11,6 +11,9 @@ datafile_y = initramfs_data.cpio$(suffix_y)
 datafile_d_y = .$(datafile_y).d
 AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
 
+# clean rules do not have CONFIG_INITRAMFS_COMPRESSION.  So clean up after all
+# possible compression formats.
+clean-files += initramfs_data.cpio*
 
 # Generate builtin.o based on initramfs_data.o
 obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o