Merge tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
[linux-2.6-block.git] / kernel / fork.c
index 5e6cf0dd031ceb4e5019976ead989a217dd78be1..5c372c954f3b9213fe3d2e22c4fa9816b788afa3 100644 (file)
@@ -283,8 +283,9 @@ static void free_thread_stack(struct task_struct *tsk)
 
 void thread_stack_cache_init(void)
 {
-       thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
-                                             THREAD_SIZE, 0, NULL);
+       thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
+                                       THREAD_SIZE, THREAD_SIZE, 0, 0,
+                                       THREAD_SIZE, NULL);
        BUG_ON(thread_stack_cache == NULL);
 }
 # endif
@@ -693,6 +694,21 @@ static void set_max_threads(unsigned int max_threads_suggested)
 int arch_task_struct_size __read_mostly;
 #endif
 
+static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
+{
+       /* Fetch thread_struct whitelist for the architecture. */
+       arch_thread_struct_whitelist(offset, size);
+
+       /*
+        * Handle zero-sized whitelist or empty thread_struct, otherwise
+        * adjust offset to position of thread_struct in task_struct.
+        */
+       if (unlikely(*size == 0))
+               *offset = 0;
+       else
+               *offset += offsetof(struct task_struct, thread);
+}
+
 void __init fork_init(void)
 {
        int i;
@@ -701,11 +717,14 @@ void __init fork_init(void)
 #define ARCH_MIN_TASKALIGN     0
 #endif
        int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
+       unsigned long useroffset, usersize;
 
        /* create a slab on which task_structs can be allocated */
-       task_struct_cachep = kmem_cache_create("task_struct",
+       task_struct_whitelist(&useroffset, &usersize);
+       task_struct_cachep = kmem_cache_create_usercopy("task_struct",
                        arch_task_struct_size, align,
-                       SLAB_PANIC|SLAB_ACCOUNT, NULL);
+                       SLAB_PANIC|SLAB_ACCOUNT,
+                       useroffset, usersize, NULL);
 #endif
 
        /* do the arch specific task caches init */
@@ -2248,9 +2267,11 @@ void __init proc_caches_init(void)
         * maximum number of CPU's we can ever have.  The cpumask_allocation
         * is at the end of the structure, exactly for that reason.
         */
-       mm_cachep = kmem_cache_create("mm_struct",
+       mm_cachep = kmem_cache_create_usercopy("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+                       offsetof(struct mm_struct, saved_auxv),
+                       sizeof_field(struct mm_struct, saved_auxv),
                        NULL);
        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
        mmap_init();