projects
/
linux-2.6-block.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-block.git]
/
mm
/
slub.c
diff --git
a/mm/slub.c
b/mm/slub.c
index 7277413ebc8b9a75806fba91103f57557b248cc3..4dbb109eb8cd1f5eb03bd82e90285be1090eacd7 100644
(file)
--- a/
mm/slub.c
+++ b/
mm/slub.c
@@
-1313,7
+1313,7
@@
static inline void dec_slabs_node(struct kmem_cache *s, int node,
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
kmemleak_alloc(ptr, size, 1, flags);
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
kmemleak_alloc(ptr, size, 1, flags);
- kasan_kmalloc_large(ptr, size);
+	kasan_kmalloc_large(ptr, size, flags);
}
static inline void kfree_hook(const void *x)
}
static inline void kfree_hook(const void *x)
@@
-2596,7
+2596,7
@@
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
- kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@
-2624,7
+2624,7
@@
void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
trace_kmalloc_node(_RET_IP_, ret,
size, s->size, gfpflags, node);
trace_kmalloc_node(_RET_IP_, ret,
size, s->size, gfpflags, node);
- kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@
-3182,7
+3182,8
@@
static void early_kmem_cache_node_alloc(int node)
init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
init_tracking(kmem_cache_node, n);
#endif
init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
init_tracking(kmem_cache_node, n);
#endif
- kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
+ kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
+ GFP_KERNEL);
init_kmem_cache_node(n);
inc_slabs_node(kmem_cache_node, node, page->objects);
init_kmem_cache_node(n);
inc_slabs_node(kmem_cache_node, node, page->objects);
@@
-3561,7
+3562,7
@@
void *__kmalloc(size_t size, gfp_t flags)
trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
- kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
return ret;
}
return ret;
}
@@
-3606,7
+3607,7
@@
void *__kmalloc_node(size_t size, gfp_t flags, int node)
trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
- kasan_kmalloc(s, ret, size);
+	kasan_kmalloc(s, ret, size, flags);
return ret;
}
return ret;
}
@@
-3635,7
+3636,7
@@
size_t ksize(const void *object)
size_t size = __ksize(object);
/* We assume that ksize callers could use whole allocated area,
so we need unpoison this area. */
size_t size = __ksize(object);
/* We assume that ksize callers could use whole allocated area,
so we need unpoison this area. */
- kasan_krealloc(object, size);
+	kasan_krealloc(object, size, GFP_NOWAIT);
return size;
}
EXPORT_SYMBOL(ksize);
return size;
}
EXPORT_SYMBOL(ksize);