mm: fix memcg stack accounting for sub-page stacks
author Andy Lutomirski <luto@kernel.org>
Thu, 28 Jul 2016 22:48:17 +0000 (15:48 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Jul 2016 23:07:41 +0000 (16:07 -0700)
We should account for stacks regardless of stack size, and we need to
account in sub-page units if THREAD_SIZE < PAGE_SIZE.  Change the units
to kilobytes and move the accounting into account_kernel_stack().
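
A minimal userspace illustration (not part of the patch; PAGE_SIZE_EX and
THREAD_SIZE_EX below are made-up example values, not real config macros) of
why kilobyte units are needed once a stack can be smaller than a page:

    /*
     * Illustrative sketch only: with sub-page stacks, a whole-page delta
     * cannot describe the allocation, while a kilobyte delta stays exact.
     */
    #include <stdio.h>

    #define PAGE_SIZE_EX   (64UL * 1024)   /* e.g. a 64 KB-page configuration */
    #define THREAD_SIZE_EX (16UL * 1024)   /* sub-page kernel stack */

    int main(void)
    {
            /* Old scheme: whole pages; a 16 KB stack rounds down to 0. */
            unsigned long old_pages = THREAD_SIZE_EX / PAGE_SIZE_EX;

            /* New scheme: account_kernel_stack() charges THREAD_SIZE / 1024 KB. */
            long account = 1;                               /* +1 on fork */
            long new_kb = account * (THREAD_SIZE_EX / 1024);/* == 16 */

            printf("page-based delta: %lu pages (lost)\n", old_pages);
            printf("kilobyte delta:   %ld KB (exact)\n", new_kb);
            return 0;
    }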

Fixes: 12580e4b54ba8 ("mm: memcontrol: report kernel stack usage in cgroup2 memory.stat")
Link: http://lkml.kernel.org/r/9b5314e3ee5eda61b0317ec1563768602c1ef438.1468523549.git.luto@kernel.org
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
kernel/fork.c
mm/memcontrol.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5147e650287ad5f7c9a72f479b834ab0f726cf86..5d8ca6e02e396bd1eb5a00118ecf6141c43900ac 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -52,7 +52,7 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_SWAP,           /* # of pages, swapped out */
        MEM_CGROUP_STAT_NSTATS,
        /* default hierarchy stats */
-       MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS,
+       MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
        MEMCG_SLAB_RECLAIMABLE,
        MEMCG_SLAB_UNRECLAIMABLE,
        MEMCG_SOCK,
diff --git a/kernel/fork.c b/kernel/fork.c
index af3637e0ee52dbd71e0a9075fbd5aaf47304f1f3..52e725d4a866b4ac30f16b4db075b9b197e4e53d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -165,20 +165,12 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
        struct page *page = alloc_pages_node(node, THREADINFO_GFP,
                                             THREAD_SIZE_ORDER);
 
-       if (page)
-               memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
-                                           1 << THREAD_SIZE_ORDER);
-
        return page ? page_address(page) : NULL;
 }
 
 static inline void free_thread_stack(unsigned long *stack)
 {
-       struct page *page = virt_to_page(stack);
-
-       memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
-                                   -(1 << THREAD_SIZE_ORDER));
-       __free_pages(page, THREAD_SIZE_ORDER);
+       __free_pages(virt_to_page(stack), THREAD_SIZE_ORDER);
 }
 # else
 static struct kmem_cache *thread_stack_cache;
@@ -223,10 +215,15 @@ static struct kmem_cache *mm_cachep;
 
 static void account_kernel_stack(unsigned long *stack, int account)
 {
-       struct zone *zone = page_zone(virt_to_page(stack));
+       /* All stack pages are in the same zone and belong to the same memcg. */
+       struct page *first_page = virt_to_page(stack);
 
-       mod_zone_page_state(zone, NR_KERNEL_STACK_KB,
+       mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
                            THREAD_SIZE / 1024 * account);
+
+       memcg_kmem_update_page_stat(
+               first_page, MEMCG_KERNEL_STACK_KB,
+               account * (THREAD_SIZE / 1024));
 }
 
 void free_task(struct task_struct *tsk)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 13be30c3ea7831e56e437380d7da3f6804c0dd5e..c265212bec8c0f412e9f23e260a5ff0d490ea046 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5171,7 +5171,7 @@ static int memory_stat_show(struct seq_file *m, void *v)
        seq_printf(m, "file %llu\n",
                   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
        seq_printf(m, "kernel_stack %llu\n",
-                  (u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE);
+                  (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
        seq_printf(m, "slab %llu\n",
                   (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
                         stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);