free_task(tsk);
}
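+
+/*
+ * Weak default: an architecture that keeps part of its task state in
+ * separate slab caches can override this hook with a strong definition
+ * that sets those caches up; this empty stub is linked otherwise.
+ */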
+void __attribute__((weak)) arch_task_cache_init(void)
+{
+}
+
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
#endif
+ /* do the arch-specific task cache init */
+ arch_task_cache_init();
+
/*
* The default maximum number of threads is set to a safe
* value: the thread structures can take up at most half
* of memory.
*/
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}
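+
+/*
+ * Weak default for duplicating a task_struct at fork time: a plain
+ * structure copy. An architecture that hangs extra state off the
+ * task_struct can override this to duplicate that state as well,
+ * returning a negative errno on failure.
+ */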
+int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
+ struct task_struct *src)
+{
+ *dst = *src;
+ return 0;
+}
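+
+/*
+ * A minimal sketch of how an architecture might override the weak
+ * hooks above; task_xstate_cachep, thread.xstate and xstate_size are
+ * illustrative names, not defined by this patch:
+ *
+ *	int arch_dup_task_struct(struct task_struct *dst,
+ *				 struct task_struct *src)
+ *	{
+ *		*dst = *src;
+ *		if (src->thread.xstate) {
+ *			dst->thread.xstate =
+ *				kmem_cache_alloc(task_xstate_cachep,
+ *						 GFP_KERNEL);
+ *			if (!dst->thread.xstate)
+ *				return -ENOMEM;
+ *			memcpy(dst->thread.xstate, src->thread.xstate,
+ *			       xstate_size);
+ *		}
+ *		return 0;
+ *	}
+ */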
+
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}
- *tsk = *orig;
+ err = arch_dup_task_struct(tsk, orig);
+ if (err)
+ goto out;
+
tsk->stack = ti;
err = prop_local_init_single(&tsk->dirties);
- if (err) {
- free_thread_info(ti);
- free_task_struct(tsk);
- return NULL;
- }
+ if (err)
+ goto out;
setup_thread_stack(tsk, orig);
#endif
tsk->splice_pipe = NULL;
return tsk;
+
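+ /*
+  * Both tsk and ti were allocated by the time either goto lands here,
+  * so unwind both allocations before reporting failure.
+  */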
+out:
+ free_thread_info(ti);
+ free_task_struct(tsk);
+ return NULL;
}
#ifdef CONFIG_MMU