kasan: move _RET_IP_ to inline wrappers
author: Andrey Konovalov <andreyknvl@google.com>
Wed, 24 Feb 2021 20:05:46 +0000 (12:05 -0800)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Wed, 24 Feb 2021 21:38:31 +0000 (13:38 -0800)
Generic mm functions that call KASAN annotations that might report a bug
pass _RET_IP_ to them as an argument. This allows KASAN to include the
name of the function that called the mm function in its report's header.

Now that KASAN has inline wrappers for all of its annotations, move
_RET_IP_ to those wrappers to simplify annotation call sites.

Link: https://linux-review.googlesource.com/id/I8fb3c06d49671305ee184175a39591bc26647a67
Link: https://lkml.kernel.org/r/5c1490eddf20b436b8c4eeea83fce47687d5e4a4.1610733117.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/kasan.h
mm/mempool.c
mm/slab.c
mm/slub.c

index 0aea9e2a2a01d18d4036997967518d56774f371e..a7254186558a1bff785cc0b7401733dfb2808d78 100644 (file)
@@ -185,19 +185,18 @@ static __always_inline void * __must_check kasan_init_slab_obj(
 }
 
 bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
-static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
-                                               unsigned long ip)
+static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
 {
        if (kasan_enabled())
-               return __kasan_slab_free(s, object, ip);
+               return __kasan_slab_free(s, object, _RET_IP_);
        return false;
 }
 
 void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
-static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
+static __always_inline void kasan_slab_free_mempool(void *ptr)
 {
        if (kasan_enabled())
-               __kasan_slab_free_mempool(ptr, ip);
+               __kasan_slab_free_mempool(ptr, _RET_IP_);
 }
 
 void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
@@ -241,10 +240,10 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
 }
 
 void __kasan_kfree_large(void *ptr, unsigned long ip);
-static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
+static __always_inline void kasan_kfree_large(void *ptr)
 {
        if (kasan_enabled())
-               __kasan_kfree_large(ptr, ip);
+               __kasan_kfree_large(ptr, _RET_IP_);
 }
 
 bool kasan_save_enable_multi_shot(void);
@@ -277,12 +276,11 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
 {
        return (void *)object;
 }
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
-                                  unsigned long ip)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
 {
        return false;
 }
-static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
+static inline void kasan_slab_free_mempool(void *ptr) {}
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
                                   gfp_t flags)
 {
@@ -302,7 +300,7 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
        return (void *)object;
 }
-static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
+static inline void kasan_kfree_large(void *ptr) {}
 
 #endif /* CONFIG_KASAN */
 
index 624ed51b060f0549c398b659b1cc66d26e7ede5f..79959fac27d7b5a8dd08ea1da15e6d76f3c3a5d7 100644 (file)
@@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element)
 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-               kasan_slab_free_mempool(element, _RET_IP_);
+               kasan_slab_free_mempool(element);
        else if (pool->alloc == mempool_alloc_pages)
                kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
index 9c9e0c255c4b950ee24114f9416639ed2e66b9cd..35c68d99d460fd25cc050dd2fa9703a50844359c 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3420,7 +3420,7 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
                memset(objp, 0, cachep->object_size);
 
        /* Put the object into the quarantine, don't touch it for now. */
-       if (kasan_slab_free(cachep, objp, _RET_IP_))
+       if (kasan_slab_free(cachep, objp))
                return;
 
        /* Use KCSAN to help debug racy use-after-free. */
index 046d7194acaf7d546e830ce29695e40a680d1c5d..b2833ce85c92606f0a3c9744850cc8ebd5a3ce5e 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1528,7 +1528,7 @@ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 static __always_inline void kfree_hook(void *x)
 {
        kmemleak_free(x);
-       kasan_kfree_large(x, _RET_IP_);
+       kasan_kfree_large(x);
 }
 
 static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
@@ -1558,7 +1558,7 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
                                     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
 
        /* KASAN might put x into memory quarantine, delaying its reuse */
-       return kasan_slab_free(s, x, _RET_IP_);
+       return kasan_slab_free(s, x);
 }
 
 static inline bool slab_free_freelist_hook(struct kmem_cache *s,