arch/x86/power/hibernate_asm_64.S

/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2.
 *
 * Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright 2005 Andi Kleen <ak@suse.de>
 * Copyright 2004 Pavel Machek <pavel@suse.cz>
 *
 * swsusp_arch_resume must not use any stack or any nonlocal variables while
 * copying pages:
 *
 * It is rewriting one kernel image with another.  What is a stack page in
 * the "old" image could very well be a data page in the "new" image, and
 * overwriting your own stack out from under yourself is a bad idea.
 */

	.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>

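/*
 * Note: saved_context begins with an embedded struct pt_regs, so the
 * pt_regs_* offsets generated by asm-offsets.c index directly into it.
 */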
ENTRY(swsusp_arch_suspend)
	movq	$saved_context, %rax
	movq	%rsp, pt_regs_sp(%rax)
	movq	%rbp, pt_regs_bp(%rax)
	movq	%rsi, pt_regs_si(%rax)
	movq	%rdi, pt_regs_di(%rax)
	movq	%rbx, pt_regs_bx(%rax)
	movq	%rcx, pt_regs_cx(%rax)
	movq	%rdx, pt_regs_dx(%rax)
	movq	%r8, pt_regs_r8(%rax)
	movq	%r9, pt_regs_r9(%rax)
	movq	%r10, pt_regs_r10(%rax)
	movq	%r11, pt_regs_r11(%rax)
	movq	%r12, pt_regs_r12(%rax)
	movq	%r13, pt_regs_r13(%rax)
	movq	%r14, pt_regs_r14(%rax)
	movq	%r15, pt_regs_r15(%rax)
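	/* EFLAGS has no direct mov form; save it via the stack */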
	pushfq
	popq	pt_regs_flags(%rax)

	/* save the address of restore_registers */
	movq	$restore_registers, %rax
	movq	%rax, restore_jump_address(%rip)
	/* save cr3 */
	movq	%cr3, %rax
	movq	%rax, restore_cr3(%rip)
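	/*
	 * These two values are picked up by arch_hibernation_header_save()
	 * and stored in the image header, so that the boot kernel can hand
	 * them back to restore_image at resume time.
	 */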

	call swsusp_save
	ret

ENTRY(restore_image)
	/* switch to temporary page tables */
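	/*
	 * temp_level4_pgt is built in safe pages by
	 * set_up_temporary_mappings(), so the copy loop cannot clobber it;
	 * subtracting __PAGE_OFFSET converts its virtual address into the
	 * physical address that %cr3 requires.
	 */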
	movq	$__PAGE_OFFSET, %rdx
	movq	temp_level4_pgt(%rip), %rax
	subq	%rdx, %rax
	movq	%rax, %cr3
	/* Flush TLB */
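	/*
	 * Clearing CR4.PGE flushes global TLB entries, which an ordinary
	 * write to %cr3 would leave in place.
	 */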
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(X86_CR4_PGE), %rdx
	movq	%rdx, %cr4;  # turn off PGE
	movq	%cr3, %rcx;  # flush TLB
	movq	%rcx, %cr3;
	movq	%rax, %cr4;  # turn PGE back on

	/* prepare to jump to the image kernel */
	movq	restore_jump_address(%rip), %rax
	movq	restore_cr3(%rip), %rbx

	/* prepare to copy image data to their original locations */
	movq	restore_pblist(%rip), %rdx
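	/*
	 * core_restore_code was copied into a safe page by
	 * swsusp_arch_resume(); jump to that copy so the loop below cannot
	 * overwrite the instructions it is executing.
	 */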
	movq	relocated_restore_code(%rip), %rcx
	jmpq	*%rcx

	/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
loop:
	testq	%rdx, %rdx
	jz	done

	/* get addresses from the pbe and copy the page */
	movq	pbe_address(%rdx), %rsi
	movq	pbe_orig_address(%rdx), %rdi
	movq	$(PAGE_SIZE >> 3), %rcx
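	/* %rcx = PAGE_SIZE / 8: rep movsq copies the page 8 bytes at a time */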
	rep
	movsq

	/* progress to the next pbe */
	movq	pbe_next(%rdx), %rdx
	jmp	loop
done:
	/* jump to the restore_registers address from the image header */
	jmpq	*%rax
	/*
	 * NOTE: This assumes that the boot kernel's text mapping covers the
	 * image kernel's page containing restore_registers and the address of
	 * this page is the same as in the image kernel's text mapping (it
	 * should always be true, because the text mapping is linear, starting
	 * from 0, and is supposed to cover the entire kernel text for every
	 * kernel).
	 *
	 * code below belongs to the image kernel
	 */

ENTRY(restore_registers)
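	/*
	 * Entered from core_restore_code with the boot kernel's temporary
	 * page tables still active; %rbx holds the image kernel's %cr3
	 * value picked up in restore_image.
	 */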
	/* go back to the original page tables */
	movq	%rbx, %cr3

	/* Flush TLB, including "global" things (vmalloc) */
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(X86_CR4_PGE), %rdx
	movq	%rdx, %cr4;  # turn off PGE
	movq	%cr3, %rcx;  # flush TLB
	movq	%rcx, %cr3
	movq	%rax, %cr4;  # turn PGE back on

	/* We don't restore %rax, it must be 0 anyway */
	movq	$saved_context, %rax
	movq	pt_regs_sp(%rax), %rsp
	movq	pt_regs_bp(%rax), %rbp
	movq	pt_regs_si(%rax), %rsi
	movq	pt_regs_di(%rax), %rdi
	movq	pt_regs_bx(%rax), %rbx
	movq	pt_regs_cx(%rax), %rcx
	movq	pt_regs_dx(%rax), %rdx
	movq	pt_regs_r8(%rax), %r8
	movq	pt_regs_r9(%rax), %r9
	movq	pt_regs_r10(%rax), %r10
	movq	pt_regs_r11(%rax), %r11
	movq	pt_regs_r12(%rax), %r12
	movq	pt_regs_r13(%rax), %r13
	movq	pt_regs_r14(%rax), %r14
	movq	pt_regs_r15(%rax), %r15
	pushq	pt_regs_flags(%rax)
	popfq

	xorq	%rax, %rax

	/* tell the hibernation core that we've just restored the memory */
	movq	%rax, in_suspend(%rip)

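	/*
	 * %rsp was restored from saved_context, so this ret returns to the
	 * caller of swsusp_arch_suspend in the image kernel, making it look
	 * as though swsusp_arch_suspend has just returned 0.
	 */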
	ret