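/*
 * SuperH (sh) task state and thread stack allocation helpers.
 * (Identifying this as arch/sh/kernel/process.c is an assumption
 * based on the sh-specific symbols used below.)
 */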
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>	/* kmem_cache_{create,alloc,free} used below */

struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;

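/*
 * Copy the parent's arch-specific task state into a freshly forked
 * child.  The extended FPU state (xstate) buffer is allocated lazily,
 * so it is only cloned when the parent actually has one.
 */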
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}

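/*
 * Release a task's xstate buffer, if present.  Also called from the
 * free_thread_info() variants below when a thread stack is torn down.
 */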
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}

#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

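/*
 * With THREAD_SHIFT < PAGE_SHIFT a thread stack is smaller than a
 * page, so thread_info structures come from a dedicated slab cache
 * rather than wasting a full page per thread.
 */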
struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero the stack so the debug code can spot its high-water mark. */
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
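/*
 * Thread stacks are at least one page here, so allocate thread_info
 * directly from the page allocator.
 */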
struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

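/*
 * Create the slab cache backing the xstate buffers.  A zero
 * xstate_size (no hardware FPU and no emulator) means there is
 * nothing to cache.
 */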
void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}

#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP 1
#else
# define HAVE_SOFTFP 0
#endif

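/*
 * Choose the xstate layout for this CPU: the hardware FPU context,
 * the software FPU emulator's context, or none at all.
 */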
void __cpuinit init_thread_xstate(void)
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
	else
		xstate_size = 0;
}