diff --git a/mm/slab.h b/mm/slab.h
index 2eedacea439de698bdf2103f54ae936fe40de48d..ff39a8fc3b3fcf53a9125c4da7ac1e184b84b116 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -38,6 +38,10 @@ struct kmem_cache {
 #endif
 
 #include <linux/memcontrol.h>
+#include <linux/fault-inject.h>
+#include <linux/kmemcheck.h>
+#include <linux/kasan.h>
+#include <linux/kmemleak.h>
 
 /*
  * State of the slab allocator.
@@ -121,7 +125,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 #elif defined(CONFIG_SLUB_DEBUG)
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-                         SLAB_TRACE | SLAB_DEBUG_FREE)
+                         SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
 #else
 #define SLAB_DEBUG_FLAGS (0)
 #endif
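
SLAB_CONSISTENCY_CHECKS replaces the old SLAB_DEBUG_FREE name: it gates the allocator's sanity checks rather than implying they only happen on free. A minimal sketch of requesting the flag for a single cache, assuming CONFIG_SLUB_DEBUG is enabled (the cache name and object size here are made up; more commonly the checks are enabled for all caches via the slub_debug=F boot parameter):

static int __init example_init(void)
{
        struct kmem_cache *dbg_cache;

        /* Request consistency checks (formerly SLAB_DEBUG_FREE) for this cache only. */
        dbg_cache = kmem_cache_create("example_dbg", 128, 0,
                                      SLAB_CONSISTENCY_CHECKS, NULL);
        if (!dbg_cache)
                return -ENOMEM;

        return 0;
}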
@@ -168,7 +172,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 /*
  * Generic implementation of bulk operations
  * These are useful for situations in which the allocator cannot
- * perform optimizations. In that case segments of the objecct listed
+ * perform optimizations. In that case segments of the object listed
  * may be allocated or freed using these operations.
  */
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
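
These generic routines back the public kmem_cache_alloc_bulk()/kmem_cache_free_bulk() API when the allocator has no faster batched path. A hedged sketch of a caller (foo_cache and the array size are made up); the bulk allocation is all-or-nothing, returning the requested count on success and 0 on failure:

static int example_bulk_user(struct kmem_cache *foo_cache)
{
        void *objs[16];

        /* Either all 16 objects are allocated or the call returns 0. */
        if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
                return -ENOMEM;

        /* ... use objs[0] .. objs[15] ... */

        kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
        return 0;
}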
@@ -242,12 +246,33 @@ static __always_inline int memcg_charge_slab(struct page *page,
                                             gfp_t gfp, int order,
                                             struct kmem_cache *s)
 {
+       int ret;
+
        if (!memcg_kmem_enabled())
                return 0;
        if (is_root_cache(s))
                return 0;
-       return __memcg_kmem_charge_memcg(page, gfp, order,
-                                        s->memcg_params.memcg);
+
+       ret = __memcg_kmem_charge_memcg(page, gfp, order,
+                                       s->memcg_params.memcg);
+       if (ret)
+               return ret;
+
+       memcg_kmem_update_page_stat(page,
+                       (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+                       MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
+                       1 << order);
+       return 0;
+}
+
+static __always_inline void memcg_uncharge_slab(struct page *page, int order,
+                                               struct kmem_cache *s)
+{
+       memcg_kmem_update_page_stat(page,
+                       (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+                       MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
+                       -(1 << order));
+       memcg_kmem_uncharge(page, order);
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
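
memcg_charge_slab() now also bumps the per-cgroup MEMCG_SLAB_RECLAIMABLE / MEMCG_SLAB_UNRECLAIMABLE counters, and the new memcg_uncharge_slab() undoes both the stat update and the kmem charge. The allocators pair the two around the lifetime of a slab page; a rough sketch of that pairing (the function names below are illustrative, the real call sites are the page alloc/free paths in mm/slub.c and mm/slab.c):

static struct page *example_get_slab_page(struct kmem_cache *s, gfp_t flags,
                                          int order)
{
        struct page *page = alloc_pages(flags, order);

        if (!page)
                return NULL;

        /* Charge the page to the cache's memcg and account it as slab. */
        if (memcg_charge_slab(page, flags, order, s)) {
                __free_pages(page, order);
                return NULL;
        }
        return page;
}

static void example_put_slab_page(struct kmem_cache *s, struct page *page,
                                  int order)
{
        /* Undo the stat update and the kmem charge, then release the page. */
        memcg_uncharge_slab(page, order, s);
        __free_pages(page, order);
}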
@@ -290,6 +315,11 @@ static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
        return 0;
 }
 
+static inline void memcg_uncharge_slab(struct page *page, int order,
+                                      struct kmem_cache *s)
+{
+}
+
 static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
@@ -307,7 +337,8 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
         * to not do even the assignment. In that case, slab_equal_or_root
         * will also be a constant.
         */
-       if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
+       if (!memcg_kmem_enabled() &&
+           !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
                return s;
 
        page = virt_to_head_page(x);
@@ -321,6 +352,64 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
        return s;
 }
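
With kmemcg enabled, the cache passed to kmem_cache_free() may be the root cache while the object actually lives in a per-memcg child cache, so the free path resolves the real cache first. A rough sketch of that pattern (example_free() is illustrative):

static void example_free(struct kmem_cache *s, void *x)
{
        /*
         * May return the object's actual (per-memcg) cache instead of the
         * one the caller passed in; also warns if the caller used the
         * wrong cache entirely.
         */
        s = cache_from_obj(s, x);

        /* ... hand (s, x) to the allocator-specific free path ... */
}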
 
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifndef CONFIG_SLUB
+       return s->object_size;
+
+#else /* CONFIG_SLUB */
+# ifdef CONFIG_SLUB_DEBUG
+       /*
+        * Debugging requires use of the padding between object
+        * and whatever may come after it.
+        */
+       if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+               return s->object_size;
+# endif
+       /*
+        * If we have the need to store the freelist pointer
+        * back there or track user information then we can
+        * only use the space before that information.
+        */
+       if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+               return s->inuse;
+       /*
+        * Else we can use all the padding etc for the allocation
+        */
+       return s->size;
+#endif
+}
+
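
slab_ksize() reports how many bytes of an object the caller may actually use, which can be less than s->size when debugging metadata or an out-of-object freelist pointer sits behind the object. A hedged sketch of a caller in the style of SLUB's ksize() path (example_usable_size() is made up; the real logic lives in mm/slub.c):

static size_t example_usable_size(const void *object)
{
        struct page *page = virt_to_head_page(object);

        /* Large kmalloc()s come straight from the page allocator. */
        if (unlikely(!PageSlab(page)))
                return PAGE_SIZE << compound_order(page);

        return slab_ksize(page->slab_cache);
}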
+static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
+                                                    gfp_t flags)
+{
+       flags &= gfp_allowed_mask;
+       lockdep_trace_alloc(flags);
+       might_sleep_if(gfpflags_allow_blocking(flags));
+
+       if (should_failslab(s, flags))
+               return NULL;
+
+       return memcg_kmem_get_cache(s, flags);
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+                                       size_t size, void **p)
+{
+       size_t i;
+
+       flags &= gfp_allowed_mask;
+       for (i = 0; i < size; i++) {
+               void *object = p[i];
+
+               kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+               kmemleak_alloc_recursive(object, s->object_size, 1,
+                                        s->flags, flags);
+               kasan_slab_alloc(s, object);
+       }
+       memcg_kmem_put_cache(s);
+}
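
slab_pre_alloc_hook() and slab_post_alloc_hook() let SLAB and SLUB share the common entry/exit work: gfp masking, lockdep and might_sleep checks, fault injection and memcg cache selection on the way in; kmemcheck/kmemleak/kasan notification plus memcg_kmem_put_cache() on the way out. A sketch of how an allocation fast path brackets itself with them, modelled on SLUB's slab_alloc_node() (example_slab_alloc() and do_raw_alloc() are placeholders):

static __always_inline void *example_slab_alloc(struct kmem_cache *s,
                                                gfp_t gfpflags)
{
        void *object;

        /* May substitute a per-memcg cache, or return NULL on fault injection. */
        s = slab_pre_alloc_hook(s, gfpflags);
        if (!s)
                return NULL;

        object = do_raw_alloc(s, gfpflags);     /* placeholder for the real fast path */

        /* Debug/leak/KASAN hooks for the object, then memcg_kmem_put_cache(). */
        slab_post_alloc_hook(s, gfpflags, 1, &object);
        return object;
}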
+
 #ifndef CONFIG_SLOB
 /*
  * The slab lists for all objects.