#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
/* Slab cache for the per-task extended (FPU) state; sized at boot by
 * init_thread_xstate()/arch_task_cache_init().  Globals are in .bss and
 * already zeroed, so no explicit NULL initializer (kernel style). */
struct kmem_cache *task_xstate_cachep;
unsigned int xstate_size;
8 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
12 if (src->thread.xstate) {
13 dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
15 if (!dst->thread.xstate)
17 memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
23 void free_thread_xstate(struct task_struct *tsk)
25 if (tsk->thread.xstate) {
26 kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
27 tsk->thread.xstate = NULL;
31 #if THREAD_SHIFT < PAGE_SHIFT
32 static struct kmem_cache *thread_info_cache;
34 struct thread_info *alloc_thread_info(struct task_struct *tsk)
36 struct thread_info *ti;
38 ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
39 if (unlikely(ti == NULL))
41 #ifdef CONFIG_DEBUG_STACK_USAGE
42 memset(ti, 0, THREAD_SIZE);
47 void free_thread_info(struct thread_info *ti)
49 free_thread_xstate(ti->task);
50 kmem_cache_free(thread_info_cache, ti);
53 void thread_info_cache_init(void)
55 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
56 THREAD_SIZE, SLAB_PANIC, NULL);
59 struct thread_info *alloc_thread_info(struct task_struct *tsk)
61 #ifdef CONFIG_DEBUG_STACK_USAGE
62 gfp_t mask = GFP_KERNEL | __GFP_ZERO;
64 gfp_t mask = GFP_KERNEL;
66 return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
69 void free_thread_info(struct thread_info *ti)
71 free_thread_xstate(ti->task);
72 free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
74 #endif /* THREAD_SHIFT < PAGE_SHIFT */
76 void arch_task_cache_init(void)
81 task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
82 __alignof__(union thread_xstate),
83 SLAB_PANIC | SLAB_NOTRACK, NULL);
/* Soft-FPU emulation provides an FPU save area even without hardware FPU. */
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP 1
#else
# define HAVE_SOFTFP 0
#endif
92 void __cpuinit init_thread_xstate(void)
94 if (boot_cpu_data.flags & CPU_HAS_FPU)
95 xstate_size = sizeof(struct sh_fpu_hard_struct);
97 xstate_size = sizeof(struct sh_fpu_soft_struct);