fork: move the real prepare_to_copy() users to arch_dup_task_struct()
arch/sh/kernel/process.c
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/fpu.h>		/* unlazy_fpu() */
#include <asm/processor.h>	/* boot_cpu_data, task_pt_regs() */
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;

/*
 * This gets called so that we can store lazy state into memory and copy
 * the current task into the new thread; see the usage sketch after the
 * function body.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#ifdef CONFIG_SUPERH32
	/* Write any lazily-held FPU state back to the parent's task_struct. */
	unlazy_fpu(src, task_pt_regs(src));
#endif
	*dst = *src;

	/*
	 * The structure copy above duplicated the xstate pointer, so give
	 * the child its own copy of the parent's extended state.
	 */
	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}
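
/*
 * Usage sketch (illustrative, not part of this file): the generic fork
 * path in kernel/fork.c allocates the new task_struct and then lets the
 * architecture fix it up, roughly like this (names and error labels vary
 * between kernel versions):
 *
 *	tsk = alloc_task_struct_node(node);
 *	...
 *	err = arch_dup_task_struct(tsk, orig);
 *	if (err)
 *		goto out;
 */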

/* Free a task's extended state area, if one was ever allocated. */
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}

#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ti;
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif

	ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
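
/*
 * The two branches above implement the same interface with different
 * allocators: when a thread stack is smaller than a page
 * (THREAD_SHIFT < PAGE_SHIFT) a dedicated slab cache avoids wasting the
 * rest of each page, otherwise whole pages come straight from the page
 * allocator. For example (values are configuration-dependent, shown only
 * for illustration): with PAGE_SHIFT = 12 (4 KiB pages) and
 * THREAD_SHIFT = 13 (8 KiB stacks), the page-allocator branch is built
 * and each stack is an order-1 allocation, i.e. two contiguous pages.
 */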

void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}
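
/*
 * Call-site sketch (illustrative): the core kernel invokes this hook once
 * at boot from fork_init() in kernel/fork.c, after the CPU probe code has
 * set xstate_size below, so the cache is sized to exactly what this CPU
 * needs:
 *
 *	void __init fork_init(unsigned long mempages)
 *	{
 *		...
 *		arch_task_cache_init();
 *		...
 *	}
 */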

#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP 1
#else
# define HAVE_SOFTFP 0
#endif

void __cpuinit init_thread_xstate(void)
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
	else
		xstate_size = 0;
}
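
/*
 * Sizing example (illustrative): on a CPU with a hardware FPU,
 * xstate_size becomes sizeof(struct sh_fpu_hard_struct); with
 * CONFIG_SH_FPU_EMU it becomes sizeof(struct sh_fpu_soft_struct); on an
 * FPU-less CPU it stays 0, arch_task_cache_init() above skips creating
 * the cache, thread.xstate stays NULL, and arch_dup_task_struct() never
 * copies any extended state.
 */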