kasan, slub: fix conflicts with CONFIG_SLAB_FREELIST_HARDENED
Author:     Andrey Konovalov <andreyknvl@google.com>
AuthorDate: Thu, 21 Feb 2019 06:19:28 +0000 (22:19 -0800)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Thu, 21 Feb 2019 17:01:00 +0000 (09:01 -0800)
CONFIG_SLAB_FREELIST_HARDENED hashes the freelist pointer with the address
of the object where the pointer gets stored.  With tag-based KASAN we don't
account for that when building the freelist, as we call set_freepointer()
with the first argument untagged.  This patch changes the code to properly
propagate tags throughout the loop.
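
For context, a paraphrased sketch of the relevant mm/slub.c helpers of that
era (simplified, e.g. the double-free check in set_freepointer() is omitted;
not the verbatim code): freelist_ptr() XORs the stored pointer with a
per-cache random value and with the address the pointer is stored at, so the
encode and decode sides must agree on that address exactly, including the
KASAN tag in the top byte.

	/*
	 * Paraphrased sketch, not the exact upstream code.
	 */
	static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
					 unsigned long ptr_addr)
	{
	#ifdef CONFIG_SLAB_FREELIST_HARDENED
		/* Hash the pointer with the per-cache random value and the
		 * address it is stored at. */
		return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
	#else
		return ptr;
	#endif
	}

	static inline void set_freepointer(struct kmem_cache *s, void *object,
					   void *fp)
	{
		unsigned long freeptr_addr = (unsigned long)object + s->offset;

		*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
	}

	static inline void *get_freepointer(struct kmem_cache *s, void *object)
	{
		unsigned long freeptr_addr = (unsigned long)object + s->offset;

		return freelist_ptr(s, *(void **)freeptr_addr, freeptr_addr);
	}

If set_freepointer() is called with an untagged 'object' while
get_freepointer() later runs on the tagged pointer returned by
setup_object(), the two freeptr_addr values differ in the top byte, the XORs
no longer cancel, and the decoded freelist pointer is garbage.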

Link: http://lkml.kernel.org/r/3df171559c52201376f246bf7ce3184fe21c1dc7.1549921721.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reported-by: Qian Cai <cai@lca.pw>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Kostya Serebryany <kcc@google.com>
Cc: Evgeniy Stepanov <eugenis@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slub.c

index a7e7c7f719f9d7a40c0cb72cbe2fd870b098c26f..80da3a40b74d19e4eb347e868d9f0b3b05fc2e58 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -303,11 +303,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
                __p < (__addr) + (__objects) * (__s)->size; \
                __p += (__s)->size)
 
-#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
-       for (__p = fixup_red_left(__s, __addr), __idx = 1; \
-               __idx <= __objects; \
-               __p += (__s)->size, __idx++)
-
 /* Determine object index from a given position */
 static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 {
@@ -1664,17 +1659,16 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        shuffle = shuffle_freelist(s, page);
 
        if (!shuffle) {
-               for_each_object_idx(p, idx, s, start, page->objects) {
-                       if (likely(idx < page->objects)) {
-                               next = p + s->size;
-                               next = setup_object(s, page, next);
-                               set_freepointer(s, p, next);
-                       } else
-                               set_freepointer(s, p, NULL);
-               }
                start = fixup_red_left(s, start);
                start = setup_object(s, page, start);
                page->freelist = start;
+               for (idx = 0, p = start; idx < page->objects - 1; idx++) {
+                       next = p + s->size;
+                       next = setup_object(s, page, next);
+                       set_freepointer(s, p, next);
+                       p = next;
+               }
+               set_freepointer(s, p, NULL);
        }
 
        page->inuse = page->objects;
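
The rebuilt loop walks the freelist using the pointers returned by
setup_object(), which is where the KASAN tag gets assigned.  For reference,
setup_object() at the time looked roughly like this (paraphrased sketch, not
verbatim):

	static void *setup_object(struct kmem_cache *s, struct page *page,
				  void *object)
	{
		setup_object_debug(s, page, object);
		/* Returns the (possibly tagged) pointer for the object. */
		object = kasan_init_slab_obj(s, object);
		if (unlikely(s->ctor)) {
			kasan_unpoison_object_data(s, object);
			s->ctor(object);
			kasan_poison_object_data(s, object);
		}
		return object;
	}

Because both 'start' and each 'next' come straight from setup_object(), the
address passed to set_freepointer() carries the same tag that
get_freepointer() will later see, so the CONFIG_SLAB_FREELIST_HARDENED
hashing round-trips correctly.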