mm: memcontrol: report slab usage in cgroup2 memory.stat
index fd231c9f5f93dc1afe818b3ceddbdec53d125e6d..ff39a8fc3b3fcf53a9125c4da7ac1e184b84b116 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -125,7 +125,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 #elif defined(CONFIG_SLUB_DEBUG)
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-                         SLAB_TRACE | SLAB_DEBUG_FREE)
+                         SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
 #else
 #define SLAB_DEBUG_FLAGS (0)
 #endif
@@ -172,7 +172,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 /*
  * Generic implementation of bulk operations
  * These are useful for situations in which the allocator cannot
- * perform optimizations. In that case segments of the objecct listed
+ * perform optimizations. In that case segments of the object listed
  * may be allocated or freed using these operations.
  */
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
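The declarations above back the public kmem_cache_alloc_bulk()/kmem_cache_free_bulk() API; the generic __kmem_cache_*_bulk() versions are the fallback used when an allocator has no optimized batch path. A hedged caller-side sketch of that public API (not code from this patch; my_cache and NR_OBJS are hypothetical names):

#include <linux/errno.h>
#include <linux/slab.h>

#define NR_OBJS 16

/* Allocate and release a batch of objects from a hypothetical cache. */
static int demo_bulk(struct kmem_cache *my_cache)
{
	void *objs[NR_OBJS];

	/* Returns the number of objects allocated; anything short is failure. */
	if (kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, NR_OBJS, objs) != NR_OBJS)
		return -ENOMEM;

	/* ... use objs[0..NR_OBJS-1] ... */

	/* Hand the whole batch back to the cache in one call. */
	kmem_cache_free_bulk(my_cache, NR_OBJS, objs);
	return 0;
}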
@@ -246,12 +246,33 @@ static __always_inline int memcg_charge_slab(struct page *page,
                                             gfp_t gfp, int order,
                                             struct kmem_cache *s)
 {
+       int ret;
+
        if (!memcg_kmem_enabled())
                return 0;
        if (is_root_cache(s))
                return 0;
-       return __memcg_kmem_charge_memcg(page, gfp, order,
-                                        s->memcg_params.memcg);
+
+       ret = __memcg_kmem_charge_memcg(page, gfp, order,
+                                       s->memcg_params.memcg);
+       if (ret)
+               return ret;
+
+       memcg_kmem_update_page_stat(page,
+                       (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+                       MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
+                       1 << order);
+       return 0;
+}
+
+static __always_inline void memcg_uncharge_slab(struct page *page, int order,
+                                               struct kmem_cache *s)
+{
+       memcg_kmem_update_page_stat(page,
+                       (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+                       MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
+                       -(1 << order));
+       memcg_kmem_uncharge(page, order);
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
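The two helpers above carry the slab side of this patch: charging a new slab page now also bumps MEMCG_SLAB_RECLAIMABLE or MEMCG_SLAB_UNRECLAIMABLE by 1 << order pages, and the new memcg_uncharge_slab() undoes both the stat and the charge. A simplified, hedged sketch of how an allocator pairs them around a slab page's lifetime (modelled on SLUB's slab alloc/free paths, not lines from this diff):

/*
 * Hedged sketch: charge when a slab page comes from the page allocator,
 * uncharge when it goes back. For an order-1 slab of a
 * SLAB_RECLAIM_ACCOUNT cache this moves MEMCG_SLAB_RECLAIMABLE by
 * 2 pages in each direction.
 */
static struct page *demo_alloc_slab_page(struct kmem_cache *s,
					 gfp_t flags, int order)
{
	struct page *page = alloc_pages(flags, order);

	if (page && memcg_charge_slab(page, flags, order, s)) {
		/* Charge failed: return the pages uncounted. */
		__free_pages(page, order);
		page = NULL;
	}
	return page;
}

static void demo_free_slab_page(struct kmem_cache *s, struct page *page,
				int order)
{
	/* Drops the MEMCG_SLAB_* stat and uncharges the memcg. */
	memcg_uncharge_slab(page, order, s);
	__free_pages(page, order);
}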
@@ -294,6 +315,11 @@ static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
        return 0;
 }
 
+static inline void memcg_uncharge_slab(struct page *page, int order,
+                                      struct kmem_cache *s)
+{
+}
+
 static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
@@ -311,7 +337,8 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
         * to not do even the assignment. In that case, slab_equal_or_root
         * will also be a constant.
         */
-       if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
+       if (!memcg_kmem_enabled() &&
+           !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
                return s;
 
        page = virt_to_head_page(x);
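For context, the check above only short-circuits the lookup that the rest of cache_from_obj() performs. A hedged paraphrase of that remainder (not lines from this diff): the object is mapped back to its struct page, the owning cache is read from page->slab_cache, and slab_equal_or_root() accepts either s itself or one of its per-memcg children.

/* Hedged paraphrase of the tail of cache_from_obj(), for illustration only. */
static inline struct kmem_cache *demo_cache_from_obj(struct kmem_cache *s,
						     void *x)
{
	struct page *page = virt_to_head_page(x);
	struct kmem_cache *cachep = page->slab_cache;

	if (slab_equal_or_root(cachep, s))
		return cachep;

	/* Object belongs to neither s nor its memcg children: warn, fall back. */
	WARN_ON_ONCE(1);
	return s;
}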
@@ -360,7 +387,7 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
        lockdep_trace_alloc(flags);
        might_sleep_if(gfpflags_allow_blocking(flags));
 
-       if (should_failslab(s->object_size, flags, s->flags))
+       if (should_failslab(s, flags))
                return NULL;
 
        return memcg_kmem_get_cache(s, flags);
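Putting the hook together: it runs the lockdep and might_sleep checks, gives fault injection a chance to fail the allocation via should_failslab(), and finally returns the cache to actually allocate from, possibly a per-memcg one chosen by memcg_kmem_get_cache(). A hedged sketch of how an allocation entry point is expected to consume it (demo_fastpath_alloc() is a hypothetical stand-in for the real fast path):

/* Hypothetical stand-in for the allocator's real fast path. */
extern void *demo_fastpath_alloc(struct kmem_cache *s, gfp_t gfpflags);

static void *demo_slab_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	/* NULL here means the allocation should fail (e.g. fault injection). */
	s = slab_pre_alloc_hook(s, gfpflags);
	if (!s)
		return NULL;

	return demo_fastpath_alloc(s, gfpflags);
}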