mm/slab: simplify SLAB_* flag handling
author: Kevin Brodsky <kevin.brodsky@arm.com>
Fri, 24 Jan 2025 16:48:58 +0000 (16:48 +0000)
committer: Vlastimil Babka <vbabka@suse.cz>
Tue, 4 Mar 2025 07:53:50 +0000 (08:53 +0100)
SLUB is the only remaining allocator. We can therefore get rid of
the logic for allocator-specific flags:

* Merge SLAB_CACHE_FLAGS into SLAB_CORE_FLAGS.

* Remove CACHE_CREATE_MASK and instead mask out SLAB_DEBUG_FLAGS if
  !CONFIG_SLUB_DEBUG. SLAB_DEBUG_FLAGS is now defined
  unconditionally (no impact on existing code, which ignores it if
  !CONFIG_SLUB_DEBUG).

* Define SLAB_FLAGS_PERMITTED in terms of SLAB_CORE_FLAGS and
  SLAB_DEBUG_FLAGS (no functional change).

While at it, also remove misleading comments that suggest that
multiple allocators are available.

Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/slab.h
mm/slab_common.c

index e9fd9bf0bfa65b343a4ae0ecd5b4c2a325b04883..1a081f50f9477fe6d309e847ff0ce07213164c8e 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -457,39 +457,17 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
        return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
 }
 
-/* Legal flag mask for kmem_cache_create(), for various configurations */
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
-                        SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
+                        SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS | \
+                        SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
+                        SLAB_TEMPORARY | SLAB_ACCOUNT | \
+                        SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
 
-#ifdef CONFIG_SLUB_DEBUG
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
-#else
-#define SLAB_DEBUG_FLAGS (0)
-#endif
 
-#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
-                         SLAB_TEMPORARY | SLAB_ACCOUNT | \
-                         SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
-
-/* Common flags available with current configuration */
-#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
-
-/* Common flags permitted for kmem_cache_create */
-#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
-                             SLAB_RED_ZONE | \
-                             SLAB_POISON | \
-                             SLAB_STORE_USER | \
-                             SLAB_TRACE | \
-                             SLAB_CONSISTENCY_CHECKS | \
-                             SLAB_NOLEAKTRACE | \
-                             SLAB_RECLAIM_ACCOUNT | \
-                             SLAB_TEMPORARY | \
-                             SLAB_ACCOUNT | \
-                             SLAB_KMALLOC | \
-                             SLAB_NO_MERGE | \
-                             SLAB_NO_USER_FLAGS)
+#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS)
 
 bool __kmem_cache_empty(struct kmem_cache *);
 int __kmem_cache_shutdown(struct kmem_cache *);
index 4c9f0a87f733baaf0eab6cd72ea4a2c4749ae3da..58bb663dab6a66ebacd7f193ea759574a81aa934 100644 (file)
@@ -298,6 +298,8 @@ struct kmem_cache *__kmem_cache_create_args(const char *name,
                static_branch_enable(&slub_debug_enabled);
        if (flags & SLAB_STORE_USER)
                stack_depot_init();
+#else
+       flags &= ~SLAB_DEBUG_FLAGS;
 #endif
 
        mutex_lock(&slab_mutex);
@@ -307,20 +309,11 @@ struct kmem_cache *__kmem_cache_create_args(const char *name,
                goto out_unlock;
        }
 
-       /* Refuse requests with allocator specific flags */
        if (flags & ~SLAB_FLAGS_PERMITTED) {
                err = -EINVAL;
                goto out_unlock;
        }
 
-       /*
-        * Some allocators will constraint the set of valid flags to a subset
-        * of all flags. We expect them to define CACHE_CREATE_MASK in this
-        * case, and we'll just provide them with a sanitized version of the
-        * passed flags.
-        */
-       flags &= CACHE_CREATE_MASK;
-
        /* Fail closed on bad usersize of useroffset values. */
        if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
            WARN_ON(!args->usersize && args->useroffset) ||