fork: move the real prepare_to_copy() users to arch_dup_task_struct()
[linux-2.6-block.git] / arch/sh/kernel/process.c
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>

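/*
 * Slab cache backing the per-task extended (FPU) state, and the size of
 * that state as set at boot by init_thread_xstate().  xstate_size stays
 * zero on CPUs with no FPU state to save.
 */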
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;

/*
 * This gets called so that we can store lazy FPU state into memory and
 * copy the current task's state into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#ifdef CONFIG_SUPERH32
	unlazy_fpu(src, task_pt_regs(src));
#endif
	*dst = *src;

	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}

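/*
 * Release a task's xstate area, if one was ever allocated.  Safe to call
 * more than once; called from free_thread_info() below when the thread
 * stack is torn down.
 */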
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}

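/*
 * Thread stacks are THREAD_SIZE bytes.  When that is smaller than a page,
 * back thread_info allocations with a dedicated slab cache; otherwise
 * hand out whole pages directly.
 */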
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ti;
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif

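	/*
	 * Allocate the thread_info from the slab cache on the requested
	 * NUMA node; under CONFIG_DEBUG_STACK_USAGE the stack is zeroed
	 * so unused-stack accounting can scan for untouched bytes.
	 */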
	ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}

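/*
 * Set up the thread_info cache at boot.  THREAD_SIZE alignment keeps the
 * stack-pointer masking in current_thread_info() working; SLAB_PANIC
 * because the kernel cannot get anywhere without thread stacks.
 */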
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

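/*
 * Create the xstate cache at boot.  A zero xstate_size means this CPU
 * has no FPU state to save (hard or soft), so no cache is needed.
 */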
void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}

#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP	1
#else
# define HAVE_SOFTFP	0
#endif

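/*
 * Size the per-task xstate for this CPU: the hardware FPU register file
 * if one is present, the software-emulated FPU context if
 * CONFIG_SH_FPU_EMU is enabled, and nothing at all otherwise.
 */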
void __cpuinit init_thread_xstate(void)
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
	else
		xstate_size = 0;
}