/*
 * Suspend and hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

static void fix_processor_context(void);

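/*
 * A single static buffer is enough: the processor state is saved and
 * restored on one CPU with interrupts disabled, before the other CPUs
 * are brought back up.
 */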
struct saved_context saved_context;

/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt - structure to store the registers contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (ie. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (ie. the one
 *	saved in the hibernation image), then its contents must be saved by this
 *	function.  In other words, if kernel A is hibernated and different
 *	kernel B is used for loading the hibernation image into memory, the
 *	kernel A's __save_processor_state() function must save all registers
 *	needed by kernel A, so that it can operate correctly after the resume
 *	regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */

	/*
	 * segment registers
	 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

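	/*
	 * Only the 16-bit selectors are captured above; on x86-64 the
	 * fs/gs base addresses live in MSRs and are saved separately here.
	 */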
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
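	/* Fixed-range MTRRs are saved so the restore path can reprogram them. */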
	mtrr_save_fixed_ranges(NULL);

	/*
	 * control registers
	 */
	rdmsrl(MSR_EFER, ctxt->efer);
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
}

void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary
	 */
	kernel_fpu_end();
}

/**
 *	__restore_processor_state - restore the contents of CPU registers saved
 *		by __save_processor_state()
 *	@ctxt - structure to load the registers contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
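	/*
	 * EFER comes back first so that mode bits such as NXE are in effect
	 * again before the paging-related registers below are rewritten.
	 */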
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);

	/*
	 * segment registers
	 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

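	/*
	 * The base MSRs must be written after the selector loads above,
	 * because reloading a selector replaces its cached base address.
	 */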
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	fix_processor_context();

	do_fpu_end();
	mtrr_ap_init();
}

void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	/*
	 * This just modifies memory; should not be necessary. But... This
	 * is necessary, because 386 hardware has concept of busy TSS or some
	 * similar stupidity.
	 */
	set_tss_desc(cpu, t);

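	/*
	 * ltr faults if the descriptor is still marked busy from before the
	 * suspend, so force it back to "available 64-bit TSS" (type 9).
	 */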
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */

	/*
	 * Now maybe reload the debug registers
	 */
	if (current->thread.debugreg7) {
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
	}
}

#ifdef CONFIG_HIBERNATION
/* Defined in arch/x86_64/kernel/suspend_asm.S */
extern int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3;

pgd_t *temp_level4_pgt;

void *relocated_restore_code;

/* Also defined in arch/x86_64/kernel/suspend_asm.S; copied below */
extern char core_restore_code;
extern char restore_registers;

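/*
 * res_phys_pud_init() fills one pud's worth of the temporary direct mapping
 * with executable 2MB large pages; the page-table pages themselves come from
 * get_safe_page(), so they cannot collide with data of the image.
 */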
static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		unsigned long paddr;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end)
			break;

		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!pmd)
			return -ENOMEM;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end)
				break;
			pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
	}
	return 0;
}

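/*
 * The restore loop cannot rely on the page tables it was started under: the
 * pages holding them may themselves be overwritten while the image is copied
 * back into place.  So a throwaway set of page tables is built from safe
 * pages, mapping all of physical memory plus the kernel text mapping.
 */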
static int set_up_temporary_mappings(void)
{
	unsigned long start, end, next;
	int error;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* It is safe to reuse the original kernel mapping */
	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
		init_level4_pgt[pgd_index(__START_KERNEL_map)]);

	/* Set up the direct mapping from scratch */
	start = (unsigned long)pfn_to_kaddr(0);
	end = (unsigned long)pfn_to_kaddr(end_pfn);

	for (; start < end; start = next) {
		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!pud)
			return -ENOMEM;
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
			return error;
		set_pgd(temp_level4_pgt + pgd_index(start),
			mk_kernel_pgd(__pa(pud)));
	}
	return 0;
}

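/*
 * core_restore_code may itself occupy a page that the image kernel wants
 * back, so it is copied to a safe page before being executed.  On success
 * restore_image() does not return here; it jumps to restore_jump_address
 * in the restored image kernel.
 */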
int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	if ((error = set_up_temporary_mappings()))
		return error;

	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
	memcpy(relocated_restore_code, &core_restore_code,
	       &restore_registers - &core_restore_code);

	restore_image();
	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

struct restore_data_record {
	unsigned long jump_address;
	unsigned long cr3;
	unsigned long magic;
};

#define RESTORE_MAGIC	0x0123456789ABCDEFUL

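/*
 * The magic value lets the boot kernel reject an image header that was not
 * written with this layout before it trusts jump_address and cr3.
 */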
/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = restore_jump_address;
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	restore_cr3 = rdr->cr3;
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}
#endif /* CONFIG_HIBERNATION */