mm: Move mm_cachep initialization to mm_init()
Author: Peter Zijlstra <peterz@infradead.org>
Tue, 25 Oct 2022 19:38:18 +0000 (21:38 +0200)
Committer: Dave Hansen <dave.hansen@linux.intel.com>
Thu, 15 Dec 2022 18:37:26 +0000 (10:37 -0800)
In order to allow using mm_alloc() much earlier, move initializing
mm_cachep into mm_init().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20221025201057.751153381@infradead.org
include/linux/sched/task.h
init/main.c
kernel/fork.c

index d6c48163c6defba399fc5e0588d1ce855418c97e..8431558641a42d5c17ec6c235de3471010fc997c 100644 (file)
@@ -65,6 +65,7 @@ extern void sched_dead(struct task_struct *p);
 void __noreturn do_task_dead(void);
 void __noreturn make_task_dead(int signr);
 
+extern void mm_cache_init(void);
 extern void proc_caches_init(void);
 
 extern void fork_init(void);
index aa21add5f7c54aa64d846b0d9f7fd3299412baf5..f1d1a549e1ccbb71b71c47a7c30b3b078eb3cafd 100644 (file)
@@ -860,6 +860,7 @@ static void __init mm_init(void)
        /* Should be run after espfix64 is set up. */
        pti_init();
        kmsan_init_runtime();
+       mm_cache_init();
 }
 
 #ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
index 08969f5aa38d5906b2f42f975054c62bc7f7bf80..451ce8063f850233f3b7f453b3e79827297a955f 100644 (file)
@@ -3015,10 +3015,27 @@ static void sighand_ctor(void *data)
        init_waitqueue_head(&sighand->signalfd_wqh);
 }
 
-void __init proc_caches_init(void)
+void __init mm_cache_init(void)
 {
        unsigned int mm_size;
 
+       /*
+        * The mm_cpumask is located at the end of mm_struct, and is
+        * dynamically sized based on the maximum CPU number this system
+        * can have, taking hotplug into account (nr_cpu_ids).
+        */
+       mm_size = sizeof(struct mm_struct) + cpumask_size();
+
+       mm_cachep = kmem_cache_create_usercopy("mm_struct",
+                       mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+                       offsetof(struct mm_struct, saved_auxv),
+                       sizeof_field(struct mm_struct, saved_auxv),
+                       NULL);
+}
+
+void __init proc_caches_init(void)
+{
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -3036,19 +3053,6 @@ void __init proc_caches_init(void)
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                        NULL);
 
-       /*
-        * The mm_cpumask is located at the end of mm_struct, and is
-        * dynamically sized based on the maximum CPU number this system
-        * can have, taking hotplug into account (nr_cpu_ids).
-        */
-       mm_size = sizeof(struct mm_struct) + cpumask_size();
-
-       mm_cachep = kmem_cache_create_usercopy("mm_struct",
-                       mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
-                       offsetof(struct mm_struct, saved_auxv),
-                       sizeof_field(struct mm_struct, saved_auxv),
-                       NULL);
        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
        mmap_init();
        nsproxy_cache_init();