mm: memcontrol: report slab usage in cgroup2 memory.stat
author Vladimir Davydov <vdavydov@virtuozzo.com>
Thu, 17 Mar 2016 21:17:35 +0000 (14:17 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 17 Mar 2016 22:09:34 +0000 (15:09 -0700)
Show how much memory is used for storing reclaimable and unreclaimable
in-kernel data structures allocated from slab caches.

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Documentation/cgroup-v2.txt
include/linux/memcontrol.h
mm/memcontrol.c
mm/slab.c
mm/slab.h
mm/slub.c
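
For illustration, a minimal userspace sketch that reads the new counters
out of a cgroup's memory.stat file; the mount point and cgroup name are
assumptions for the example, not part of this patch:

	/* Print the slab counters introduced by this patch.
	 * "/sys/fs/cgroup/example" is a hypothetical cgroup path. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		FILE *f = fopen("/sys/fs/cgroup/example/memory.stat", "r");
		char line[256];

		if (!f)
			return 1;
		/* Matches "slab", "slab_reclaimable", "slab_unreclaimable". */
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "slab", 4))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}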

index ff49cf901148d895b765800ec6ddb79c0e38ed53..e4e0c1d78ceee0efce7ced1e945e813b44950648 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -843,6 +843,11 @@ PAGE_SIZE multiple when read back.
                Amount of memory used to cache filesystem data,
                including tmpfs and shared memory.
 
+         slab
+
+               Amount of memory used for storing in-kernel data
+               structures.
+
          sock
 
                Amount of memory used in network transmission buffers
@@ -871,6 +876,16 @@ PAGE_SIZE multiple when read back.
                on the internal memory management lists used by the
                page reclaim algorithm
 
+         slab_reclaimable
+
+               Part of "slab" that might be reclaimed, such as
+               dentries and inodes.
+
+         slab_unreclaimable
+
+               Part of "slab" that cannot be reclaimed on memory
+               pressure.
+
          pgfault
 
                Total number of page faults incurred
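
With this patch applied, a memory.stat read might look as follows (values
are hypothetical and, as documented above, PAGE_SIZE multiples in bytes;
entries between "sock" and "slab_reclaimable" are elided). Note that
"slab" is exactly the sum of its two breakdown counters: with 4 KB pages,
100 reclaimable pages (409600) plus 28 unreclaimable pages (114688) gives
slab = 524288:

	anon 5242880
	file 13737984
	slab 524288
	sock 0
	...
	slab_reclaimable 409600
	slab_unreclaimable 114688
	pgfault 2310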
index f0c4bec6565bf6f69fbb8ad98230515dc3479b8f..e7af4834ffea74ef3d7bc059b2a8784a09467f86 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,6 +53,8 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_NSTATS,
        /* default hierarchy stats */
        MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
+       MEMCG_SLAB_RECLAIMABLE,
+       MEMCG_SLAB_UNRECLAIMABLE,
        MEMCG_NR_STAT,
 };
 
@@ -883,6 +885,20 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
        if (memcg_kmem_enabled())
                __memcg_kmem_put_cache(cachep);
 }
+
+/**
+ * memcg_kmem_update_page_stat - update kmem page state statistics
+ * @page: the page
+ * @idx: page state item to account
+ * @val: number of pages (positive or negative)
+ */
+static inline void memcg_kmem_update_page_stat(struct page *page,
+                               enum mem_cgroup_stat_index idx, int val)
+{
+       if (memcg_kmem_enabled() && page->mem_cgroup)
+               this_cpu_add(page->mem_cgroup->stat->count[idx], val);
+}
+
 #else
 #define for_each_memcg_cache_index(_idx)       \
        for (; NULL; )
@@ -928,6 +944,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
 }
+
+static inline void memcg_kmem_update_page_stat(struct page *page,
+                               enum mem_cgroup_stat_index idx, int val)
+{
+}
 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
 #endif /* _LINUX_MEMCONTROL_H */
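
The helper bumps the per-cpu stat counter of the page's memcg: callers
pass a positive page count when pages are charged and the matching
negative count when they are freed. A sketch of the intended calling
convention follows; the example_* functions are hypothetical:

	/* Hypothetical caller: account 2^order pages as unreclaimable
	 * kernel memory, and back the accounting out again on free. */
	static void example_account(struct page *page, int order)
	{
		memcg_kmem_update_page_stat(page, MEMCG_SLAB_UNRECLAIMABLE,
					    1 << order);
	}

	static void example_unaccount(struct page *page, int order)
	{
		memcg_kmem_update_page_stat(page, MEMCG_SLAB_UNRECLAIMABLE,
					    -(1 << order));
	}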
index 430266071c3686ed3f6e91cb469f17584fa937bd..3ad64bf464fd62a843cb60e23b5c34bf6c977619 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5106,6 +5106,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
                   (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
        seq_printf(m, "file %llu\n",
                   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
+       seq_printf(m, "slab %llu\n",
+                  (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
+                        stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
        seq_printf(m, "sock %llu\n",
                   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
 
@@ -5126,6 +5129,11 @@ static int memory_stat_show(struct seq_file *m, void *v)
                           mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
        }
 
+       seq_printf(m, "slab_reclaimable %llu\n",
+                  (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
+       seq_printf(m, "slab_unreclaimable %llu\n",
+                  (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
+
        /* Accumulated memory events */
 
        seq_printf(m, "pgfault %lu\n",
index 852fc5c79829b04a2b673e3e2e41de19d8ff55e5..56dd0df2a8ce52e9f40d1ddaf259cbaab42cfe56 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1442,9 +1442,10 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
  */
 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 {
-       const unsigned long nr_freed = (1 << cachep->gfporder);
+       int order = cachep->gfporder;
+       unsigned long nr_freed = (1 << order);
 
-       kmemcheck_free_shadow(page, cachep->gfporder);
+       kmemcheck_free_shadow(page, order);
 
        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
                sub_zone_page_state(page_zone(page),
@@ -1461,7 +1462,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += nr_freed;
-       __free_kmem_pages(page, cachep->gfporder);
+       memcg_uncharge_slab(page, order, cachep);
+       __free_pages(page, order);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
index b7934361f0263dd8fa95cebd73702468eadb1ad3..ff39a8fc3b3fcf53a9125c4da7ac1e184b84b116 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -246,12 +246,33 @@ static __always_inline int memcg_charge_slab(struct page *page,
                                             gfp_t gfp, int order,
                                             struct kmem_cache *s)
 {
+       int ret;
+
        if (!memcg_kmem_enabled())
                return 0;
        if (is_root_cache(s))
                return 0;
-       return __memcg_kmem_charge_memcg(page, gfp, order,
-                                        s->memcg_params.memcg);
+
+       ret = __memcg_kmem_charge_memcg(page, gfp, order,
+                                       s->memcg_params.memcg);
+       if (ret)
+               return ret;
+
+       memcg_kmem_update_page_stat(page,
+                       (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+                       MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
+                       1 << order);
+       return 0;
+}
+
+static __always_inline void memcg_uncharge_slab(struct page *page, int order,
+                                               struct kmem_cache *s)
+{
+       memcg_kmem_update_page_stat(page,
+                       (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+                       MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
+                       -(1 << order));
+       memcg_kmem_uncharge(page, order);
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
@@ -294,6 +315,11 @@ static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
        return 0;
 }
 
+static inline void memcg_uncharge_slab(struct page *page, int order,
+                                      struct kmem_cache *s)
+{
+}
+
 static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
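
Putting the pieces together, a slab allocator's page-level paths pair
these helpers as sketched below. alloc_slab_pages()/free_slab_pages()
are illustrative stand-ins for the real callers, kmem_getpages()/
kmem_freepages() in mm/slab.c and allocate_slab()/__free_slab() in
mm/slub.c:

	/* Illustrative pairing; NUMA placement and error paths trimmed. */
	static struct page *alloc_slab_pages(struct kmem_cache *s,
					     gfp_t gfp, int order)
	{
		struct page *page = alloc_pages(gfp, order);

		if (page && memcg_charge_slab(page, gfp, order, s)) {
			/* Charge failed: give the pages back. */
			__free_pages(page, order);
			page = NULL;
		}
		return page;
	}

	static void free_slab_pages(struct kmem_cache *s,
				    struct page *page, int order)
	{
		/* Uncharge before freeing, while page->mem_cgroup is set. */
		memcg_uncharge_slab(page, order, s);
		__free_pages(page, order);
	}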
index 6c91324f9370663749877fa2e8ea8d9c87c1df73..712d53474082a5725405b4c3ac8ca214ac95b0e4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1540,7 +1540,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
        page_mapcount_reset(page);
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += pages;
-       __free_kmem_pages(page, order);
+       memcg_uncharge_slab(page, order, s);
+       __free_pages(page, order);
 }
 
 #define need_reserve_slab_rcu                                          \