/*
 * NOTE(review): this file was extracted from a git-blame table view
 * (all lines attributed to commit 1da177e4, the initial git import);
 * the table markup has been stripped back to plain C.
 */
1 | /* |
2 | * Suspend support specific for i386. | |
3 | * | |
4 | * Distribute under GPLv2 | |
5 | * | |
6 | * Copyright (c) 2002 Pavel Machek <pavel@suse.cz> | |
7 | * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> | |
8 | */ | |
9 | ||
10 | #include <linux/config.h> | |
11 | #include <linux/kernel.h> | |
12 | #include <linux/module.h> | |
13 | #include <linux/init.h> | |
14 | #include <linux/types.h> | |
15 | #include <linux/spinlock.h> | |
16 | #include <linux/poll.h> | |
17 | #include <linux/delay.h> | |
18 | #include <linux/sysrq.h> | |
19 | #include <linux/proc_fs.h> | |
20 | #include <linux/irq.h> | |
21 | #include <linux/pm.h> | |
22 | #include <linux/device.h> | |
23 | #include <linux/suspend.h> | |
24 | #include <asm/uaccess.h> | |
25 | #include <asm/acpi.h> | |
26 | #include <asm/tlbflush.h> | |
27 | #include <asm/io.h> | |
28 | #include <asm/proto.h> | |
29 | ||
/* Per-boot-CPU snapshot of processor state filled in by
 * __save_processor_state() and consumed by __restore_processor_state(). */
struct saved_context saved_context;

/*
 * Scratch slots for the general-purpose registers.  These are plain
 * globals rather than fields of saved_context — presumably so the
 * low-level suspend/resume assembly can address them directly without
 * a struct offset; TODO confirm against the corresponding .S file.
 */
unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
unsigned long saved_context_eflags;
37 | ||
/*
 * __save_processor_state - snapshot the CPU state that is lost across
 * suspend into @ctxt.
 *
 * Saves descriptor-table registers, segment selectors, the 64-bit
 * segment-base MSRs and the control registers.  Paired with
 * __restore_processor_state(), which reloads them in the reverse order.
 * NOTE(review): presumably runs with interrupts disabled on the CPU
 * being suspended — confirm with the suspend path callers.
 */
void __save_processor_state(struct saved_context *ctxt)
{
	/* Take ownership of the FPU; this also captures FPU/XMM state. */
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
	asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
	asm volatile ("sldt %0" : "=m" (ctxt->ldt));
	asm volatile ("str %0" : "=m" (ctxt->tr));

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/* EFER should be constant for kernel version, no need to handle it. */
	/*
	 * segment registers
	 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	/* On x86-64 the fs/gs bases live in MSRs, not in the selectors. */
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	/*
	 * control registers
	 */
	asm volatile ("movq %%cr0, %0" : "=r" (ctxt->cr0));
	asm volatile ("movq %%cr2, %0" : "=r" (ctxt->cr2));
	asm volatile ("movq %%cr3, %0" : "=r" (ctxt->cr3));
	asm volatile ("movq %%cr4, %0" : "=r" (ctxt->cr4));
}
73 | ||
/*
 * save_processor_state - save this CPU's state into the global
 * saved_context, for restoration by restore_processor_state().
 */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}
78 | ||
/*
 * do_fpu_end - release FPU ownership taken by __save_processor_state().
 * Deliberately a separate (out-of-line) function: see comment below.
 */
static void
do_fpu_end(void)
{
	/* restore FPU regs if necessary */
	/* Do it out of line so that gcc does not move cr0 load to some stupid place */
	kernel_fpu_end();
	/* Re-derive the MXCSR feature mask from the freshly restored FPU. */
	mxcsr_feature_mask_init();
}
87 | ||
/*
 * __restore_processor_state - reload the CPU state captured by
 * __save_processor_state() from @ctxt.
 *
 * Restoration order is significant and is roughly the reverse of the
 * save: control registers first (cr4 before cr3 before cr0), then
 * segments and base MSRs, then the descriptor tables.  Task register
 * and LDT reload are finished off in fix_processor_context(), and the
 * FPU is released last via do_fpu_end().
 */
void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	asm volatile ("movq %0, %%cr4" :: "r" (ctxt->cr4));
	asm volatile ("movq %0, %%cr3" :: "r" (ctxt->cr3));
	asm volatile ("movq %0, %%cr2" :: "r" (ctxt->cr2));
	asm volatile ("movq %0, %%cr0" :: "r" (ctxt->cr0));

	/*
	 * segment registers
	 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	/* gs needs the swapgs-aware helper rather than a plain movw. */
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	/* Base MSRs after the selectors, so the selector loads can't
	 * clobber the bases we are restoring. */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
	asm volatile ("lldt %0" :: "m" (ctxt->ldt));

	fix_processor_context();

	do_fpu_end();
}
123 | ||
/*
 * restore_processor_state - restore this CPU's state from the global
 * saved_context filled in by save_processor_state().
 */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
128 | ||
/*
 * fix_processor_context - finish per-CPU state restoration that cannot
 * be done by plain register loads: re-establish the TSS, syscall MSRs,
 * task register, LDT and (if in use) the debug registers.
 */
void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	set_tss_desc(cpu,t);	/* This just modifies memory; should not be neccessary. But... This is neccessary, because 386 hardware has concept of busy TSS or some similar stupidity. */

	/* Force the TSS descriptor type back to "available" (9) so the
	 * ltr in load_TR_desc() below does not fault on a busy TSS. */
	cpu_gdt_table[cpu][GDT_ENTRY_TSS].type = 9;

	syscall_init();                         /* This sets MSR_*STAR and related */
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */

	/*
	 * Now maybe reload the debug registers
	 */
	if (current->thread.debugreg7){
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
	}

}
156 | ||
157 |