x86, fpu: lazy allocation of FPU area - v5
author Suresh Siddha <suresh.b.siddha@intel.com>
Mon, 10 Mar 2008 22:28:05 +0000 (15:28 -0700)
committer Ingo Molnar <mingo@elte.hu>
Sat, 19 Apr 2008 17:19:55 +0000 (19:19 +0200)
Only allocate the FPU area when the application actually uses the FPU, i.e., in
the first lazy FPU trap. This could save memory for apps that do not use the FPU.

For example: on my system after boot, there are around 300 processes, and
only 17 of them use the FPU.
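
In outline, the first FPU use now trips the device-not-available (#NM) trap,
and the trap handler allocates thread.xstate from the slab cache before
restoring the FPU. A condensed sketch of the new flow, pieced together from
the hunks below (32/64-bit differences, the ptrace callers, and the register
initialisation body are omitted):

int init_fpu(struct task_struct *tsk)
{
	if (tsk_used_math(tsk)) {
		if (tsk == current)
			unlazy_fpu(tsk);
		return 0;
	}

	/* First use: allocate the FP/extended state area lazily. */
	if (!tsk->thread.xstate) {
		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!tsk->thread.xstate)
			return -ENOMEM;
	}

	/* ... initialise the fxsave/fsave contents ... */
	set_stopped_child_used_math(tsk);
	return 0;
}

asmlinkage void math_state_restore(void)	/* #NM trap handler */
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();		/* init_fpu() may sleep */
		if (init_fpu(tsk)) {
			do_group_exit(SIGKILL);	/* out of memory */
			return;
		}
		local_irq_disable();
	}

	clts();				/* allow FPU ops without re-trapping */
	restore_fpu(tsk);
	task_thread_info(tsk)->status |= TS_USEDFPU;
	tsk->fpu_counter++;
}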

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/kernel/i387.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/traps_32.c
arch/x86/kernel/traps_64.c
include/asm-x86/i387.h
include/asm-x86/processor.h

arch/x86/kernel/i387.c
index baf632b221d43366b1f026b28add587e249e5840..db6839b53195e1a83d9186a76bf9a05562fc39e6 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/module.h>
 #include <linux/regset.h>
 #include <linux/sched.h>
-#include <linux/bootmem.h>
 
 #include <asm/sigcontext.h>
 #include <asm/processor.h>
@@ -63,7 +62,6 @@ void __init init_thread_xstate(void)
        else
                xstate_size = sizeof(struct i387_fsave_struct);
 #endif
-       init_task.thread.xstate = alloc_bootmem(xstate_size);
 }
 
 #ifdef CONFIG_X86_64
@@ -93,12 +91,22 @@ void __cpuinit fpu_init(void)
  * value at reset if we support XMM instructions and then
  * remember the current task has used the FPU.
  */
-void init_fpu(struct task_struct *tsk)
+int init_fpu(struct task_struct *tsk)
 {
        if (tsk_used_math(tsk)) {
                if (tsk == current)
                        unlazy_fpu(tsk);
-               return;
+               return 0;
+       }
+
+       /*
+        * Memory allocation at the first usage of the FPU and other state.
+        */
+       if (!tsk->thread.xstate) {
+               tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+                                                     GFP_KERNEL);
+               if (!tsk->thread.xstate)
+                       return -ENOMEM;
        }
 
        if (cpu_has_fxsr) {
@@ -120,6 +128,7 @@ void init_fpu(struct task_struct *tsk)
         * Only the device not available exception or ptrace can call init_fpu.
         */
        set_stopped_child_used_math(tsk);
+       return 0;
 }
 
 int fpregs_active(struct task_struct *target, const struct user_regset *regset)
@@ -136,10 +145,14 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
 {
+       int ret;
+
        if (!cpu_has_fxsr)
                return -ENODEV;
 
-       init_fpu(target);
+       ret = init_fpu(target);
+       if (ret)
+               return ret;
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.xstate->fxsave, 0, -1);
@@ -154,7 +167,10 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
        if (!cpu_has_fxsr)
                return -ENODEV;
 
-       init_fpu(target);
+       ret = init_fpu(target);
+       if (ret)
+               return ret;
+
        set_stopped_child_used_math(target);
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -312,11 +328,14 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
               void *kbuf, void __user *ubuf)
 {
        struct user_i387_ia32_struct env;
+       int ret;
 
        if (!HAVE_HWFP)
                return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
 
-       init_fpu(target);
+       ret = init_fpu(target);
+       if (ret)
+               return ret;
 
        if (!cpu_has_fxsr) {
                return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -344,7 +363,10 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
        if (!HAVE_HWFP)
                return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
-       init_fpu(target);
+       ret = init_fpu(target);
+       if (ret)
+               return ret;
+
        set_stopped_child_used_math(target);
 
        if (!cpu_has_fxsr) {
arch/x86/kernel/process.c
index ead24efbcba04f7404c621815b21f0aab01d7fcf..0e613e7e7b5ec3bff1357baabb3bbb24452c48fe 100644 (file)
@@ -5,24 +5,34 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 
-static struct kmem_cache *task_xstate_cachep;
+struct kmem_cache *task_xstate_cachep;
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
        *dst = *src;
-       dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
-       if (!dst->thread.xstate)
-               return -ENOMEM;
-       WARN_ON((unsigned long)dst->thread.xstate & 15);
-       memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+       if (src->thread.xstate) {
+               dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+                                                     GFP_KERNEL);
+               if (!dst->thread.xstate)
+                       return -ENOMEM;
+               WARN_ON((unsigned long)dst->thread.xstate & 15);
+               memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+       }
        return 0;
 }
 
-void free_thread_info(struct thread_info *ti)
+void free_thread_xstate(struct task_struct *tsk)
 {
-       kmem_cache_free(task_xstate_cachep, ti->task->thread.xstate);
-       ti->task->thread.xstate = NULL;
+       if (tsk->thread.xstate) {
+               kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
+               tsk->thread.xstate = NULL;
+       }
+}
+
 
+void free_thread_info(struct thread_info *ti)
+{
+       free_thread_xstate(ti->task);
        free_pages((unsigned long)(ti), get_order(THREAD_SIZE));
 }
 
arch/x86/kernel/process_32.c
index 3890a5dd25f926241bb3e43d0d2a6014647a4bb5..7adad088e373fbf4bf5523dcf70f22665331b105 100644 (file)
@@ -521,6 +521,10 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
        regs->cs                = __USER_CS;
        regs->ip                = new_ip;
        regs->sp                = new_sp;
+       /*
+        * Free the old FP and other extended state
+        */
+       free_thread_xstate(current);
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
arch/x86/kernel/process_64.c
index b795e831afd65b226f590bbb1472e9d20f29367d..891af1a1b48a5cbf0ef303d9f1b5c5601b485869 100644 (file)
@@ -533,6 +533,10 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
        regs->ss                = __USER_DS;
        regs->flags             = 0x200;
        set_fs(USER_DS);
+       /*
+        * Free the old FP and other extended state
+        */
+       free_thread_xstate(current);
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
arch/x86/kernel/traps_32.c
index 8d136a73ce8e37d9710bdbb1505b963a32533a6a..471e694d6713193baa5a25dac995f177ef34abf2 100644 (file)
@@ -1148,9 +1148,22 @@ asmlinkage void math_state_restore(void)
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;
 
+       if (!tsk_used_math(tsk)) {
+               local_irq_enable();
+               /*
+                * does a slab alloc which can sleep
+                */
+               if (init_fpu(tsk)) {
+                       /*
+                        * ran out of memory!
+                        */
+                       do_group_exit(SIGKILL);
+                       return;
+               }
+               local_irq_disable();
+       }
+
        clts();                         /* Allow maths ops (or we recurse) */
-       if (!tsk_used_math(tsk))
-               init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
        tsk->fpu_counter++;
arch/x86/kernel/traps_64.c
index dc0cb497eec38d4a142d274f6eac7764b6ce6560..adff76ea97c4732de4b7766272c8b6ad26ace478 100644 (file)
@@ -1124,10 +1124,23 @@ asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
 asmlinkage void math_state_restore(void)
 {
        struct task_struct *me = current;
-       clts();                 /* Allow maths ops (or we recurse) */
 
-       if (!used_math())
-               init_fpu(me);
+       if (!used_math()) {
+               local_irq_enable();
+               /*
+                * does a slab alloc which can sleep
+                */
+               if (init_fpu(me)) {
+                       /*
+                        * ran out of memory!
+                        */
+                       do_group_exit(SIGKILL);
+                       return;
+               }
+               local_irq_disable();
+       }
+
+       clts();                 /* Allow maths ops (or we recurse) */
        restore_fpu_checking(&me->thread.xstate->fxsave);
        task_thread_info(me)->status |= TS_USEDFPU;
        me->fpu_counter++;
include/asm-x86/i387.h
index 382a5fa9d492a1715f1c2b378139d6ad32c46951..4be7b58b1e16ca34f85851ffa2a828e0e6313836 100644 (file)
@@ -21,7 +21,7 @@
 
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
-extern void init_fpu(struct task_struct *child);
+extern int init_fpu(struct task_struct *child);
 extern asmlinkage void math_state_restore(void);
 extern void init_thread_xstate(void);
 
include/asm-x86/processor.h
index 99d297885780084d7884b991ac32d2633b9a1f48..e6bf92ddeb21ded7de69cff3f37120df56520291 100644 (file)
@@ -366,6 +366,8 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);
 
 extern void print_cpu_info(struct cpuinfo_x86 *);
 extern unsigned int xstate_size;
+extern void free_thread_xstate(struct task_struct *);
+extern struct kmem_cache *task_xstate_cachep;
 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;