Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* Copyright 2004,2005 Pavel Machek <pavel@suse.cz>, Andi Kleen <ak@suse.de>, Rafael J. Wysocki <rjw@sisk.pl> |
2 | * | |
3 | * Distribute under GPLv2. | |
4 | * | |
d158cbdf RW |
5 | * swsusp_arch_resume must not use any stack or any nonlocal variables while |
6 | * copying pages: | |
1da177e4 LT |
7 | * |
8 | * It's rewriting one kernel image with another. What is stack in "old" | |
9 | * image could very well be data page in "new" image, and overwriting | |
10 | * your own stack under you is a bad idea. | |
11 | */ | |
12 | ||
13 | .text | |
14 | #include <linux/linkage.h> | |
15 | #include <asm/segment.h> | |
16 | #include <asm/page.h> | |
e2d5df93 | 17 | #include <asm/asm-offsets.h> |
1da177e4 LT |
18 | |
19 | ENTRY(swsusp_arch_suspend) | |
/*
 * swsusp_arch_suspend: snapshot the CPU context before hibernation.
 * Saves every general-purpose register plus rflags into 'saved_context'
 * using the pt_regs_* offsets generated by asm-offsets, records the
 * address resume must jump back to (restore_registers) and the current
 * cr3, then calls swsusp_save to copy memory.  Returns swsusp_save's
 * result in %rax.
 */
0de80bcc | 20 | movq $saved_context, %rax
65ea5b03 PA |
21 | movq %rsp, pt_regs_sp(%rax)
22 | movq %rbp, pt_regs_bp(%rax) | |
23 | movq %rsi, pt_regs_si(%rax) | |
24 | movq %rdi, pt_regs_di(%rax) | |
25 | movq %rbx, pt_regs_bx(%rax) | |
26 | movq %rcx, pt_regs_cx(%rax) | |
27 | movq %rdx, pt_regs_dx(%rax) | |
0de80bcc RW |
28 | movq %r8, pt_regs_r8(%rax)
29 | movq %r9, pt_regs_r9(%rax) | |
30 | movq %r10, pt_regs_r10(%rax) | |
31 | movq %r11, pt_regs_r11(%rax) | |
32 | movq %r12, pt_regs_r12(%rax) | |
33 | movq %r13, pt_regs_r13(%rax) | |
34 | movq %r14, pt_regs_r14(%rax) | |
35 | movq %r15, pt_regs_r15(%rax) | |
/* rflags has no direct mov form: push it, then pop into the save area */
36 | pushfq | |
65ea5b03 | 37 | popq pt_regs_flags(%rax)
1da177e4 | 38 | |
d158cbdf RW |
39 | /* save the address of restore_registers */
40 | movq $restore_registers, %rax | |
41 | movq %rax, restore_jump_address(%rip) | |
c30bb68c RW |
42 | /* save cr3 */
43 | movq %cr3, %rax | |
44 | movq %rax, restore_cr3(%rip) | |
d158cbdf | 45 | |
1da177e4 LT |
46 | call swsusp_save
47 | ret | |
48 | ||
3dd08325 RW |
49 | ENTRY(restore_image)
/*
 * restore_image: entry point for resuming from a hibernation image.
 * Switches to a safe temporary page table (temp_level4_pgt holds a
 * virtual address; subtracting __PAGE_OFFSET yields the physical
 * address cr3 requires), flushes the TLB, loads into registers all
 * state the relocated copy loop needs (%rax = restore_registers
 * address, %rbx = image kernel's cr3, %rdx = head of the pbe list),
 * then jumps to the relocated copy of core_restore_code so the copy
 * loop cannot be overwritten while it runs.
 */
50 | /* switch to temporary page tables */ | |
51 | movq $__PAGE_OFFSET, %rdx | |
52 | movq temp_level4_pgt(%rip), %rax | |
53 | subq %rdx, %rax | |
54 | movq %rax, %cr3 | |
55 | /* Flush TLB */ | |
1da177e4 LT |
56 | movq mmu_cr4_features(%rip), %rax
57 | movq %rax, %rdx | |
/* clearing CR4.PGE invalidates global TLB entries as well */
58 | andq $~(1<<7), %rdx # PGE | |
59 | movq %rdx, %cr4; # turn off PGE | |
60 | movq %cr3, %rcx; # flush TLB | |
61 | movq %rcx, %cr3; | |
62 | movq %rax, %cr4; # turn PGE back on | |
63 | ||
d158cbdf RW |
64 | /* prepare to jump to the image kernel */
65 | movq restore_jump_address(%rip), %rax | |
c30bb68c | 66 | movq restore_cr3(%rip), %rbx
d158cbdf RW |
67 | |
68 | /* prepare to copy image data to their original locations */ | |
75534b50 | 69 | movq restore_pblist(%rip), %rdx
d158cbdf RW |
70 | movq relocated_restore_code(%rip), %rcx
71 | jmpq *%rcx | |
72 | ||
73 | /* code below has been relocated to a safe page */ | |
74 | ENTRY(core_restore_code) | |
/*
 * core_restore_code: runs from a relocated safe page (reached via
 * jmpq *%rcx in restore_image).  Walks the pbe list in %rdx, copying
 * each saved page (PAGE_SIZE bytes, moved as PAGE_SIZE>>3 qwords with
 * rep movsq) from pbe_address back to pbe_orig_address.  Per the file
 * header it must not use the stack or non-local variables while
 * copying — all state lives in registers (%rdx = current pbe,
 * %rax = restore_registers address, %rbx = image kernel cr3).
 */
1da177e4 LT |
75 | loop:
/* %rdx == NULL terminates the pbe list */
76 | testq %rdx, %rdx | |
77 | jz done | |
78 | ||
79 | /* get addresses from the pbe and copy the page */ | |
80 | movq pbe_address(%rdx), %rsi | |
81 | movq pbe_orig_address(%rdx), %rdi | |
82 | movq $(PAGE_SIZE >> 3), %rcx
1da177e4 LT |
83 | rep
84 | movsq | |
85 | ||
86 | /* progress to the next pbe */ | |
87 | movq pbe_next(%rdx), %rdx | |
88 | jmp loop | |
89 | done: | |
d158cbdf RW |
90 | /* jump to the restore_registers address from the image header */
91 | jmpq *%rax | |
92 | /* | |
93 | * NOTE: This assumes that the boot kernel's text mapping covers the | |
94 | * image kernel's page containing restore_registers and the address of | |
95 | * this page is the same as in the image kernel's text mapping (it | |
96 | * should always be true, because the text mapping is linear, starting | |
97 | * from 0, and is supposed to cover the entire kernel text for every | |
98 | * kernel). | |
99 | * | |
100 | * code below belongs to the image kernel | |
101 | */ | |
102 | ||
103 | ENTRY(restore_registers) | |
/*
 * restore_registers: executed in the image kernel after all pages have
 * been copied back (reached via jmpq *%rax from core_restore_code).
 * Restores the image kernel's cr3 from %rbx (loaded in restore_image),
 * flushes the TLB including global entries, reloads every saved
 * general-purpose register and rflags from 'saved_context', zeroes
 * %rax and clears in_suspend to tell the hibernation core that memory
 * has been restored, then returns to swsusp_arch_suspend's caller.
 */
3dd08325 | 104 | /* go back to the original page tables */
c30bb68c | 105 | movq %rbx, %cr3
1ab60e0f | 106 | |
1da177e4 LT |
107 | /* Flush TLB, including "global" things (vmalloc) */
108 | movq mmu_cr4_features(%rip), %rax | |
109 | movq %rax, %rdx | |
110 | andq $~(1<<7), %rdx; # PGE | |
111 | movq %rdx, %cr4; # turn off PGE | |
112 | movq %cr3, %rcx; # flush TLB | |
113 | movq %rcx, %cr3 | |
114 | movq %rax, %cr4; # turn PGE back on | |
115 | ||
0de80bcc RW |
116 | /* We don't restore %rax, it must be 0 anyway */
117 | movq $saved_context, %rax
65ea5b03 PA |
/* %rsp comes back first; from here the saved kernel stack is valid again */
118 | movq pt_regs_sp(%rax), %rsp
119 | movq pt_regs_bp(%rax), %rbp | |
120 | movq pt_regs_si(%rax), %rsi | |
121 | movq pt_regs_di(%rax), %rdi | |
122 | movq pt_regs_bx(%rax), %rbx | |
123 | movq pt_regs_cx(%rax), %rcx | |
124 | movq pt_regs_dx(%rax), %rdx | |
0de80bcc RW |
125 | movq pt_regs_r8(%rax), %r8
126 | movq pt_regs_r9(%rax), %r9 | |
127 | movq pt_regs_r10(%rax), %r10 | |
128 | movq pt_regs_r11(%rax), %r11 | |
129 | movq pt_regs_r12(%rax), %r12 | |
130 | movq pt_regs_r13(%rax), %r13 | |
131 | movq pt_regs_r14(%rax), %r14 | |
132 | movq pt_regs_r15(%rax), %r15 | |
/* rflags restored the same way it was saved: via the (now valid) stack */
65ea5b03 | 133 | pushq pt_regs_flags(%rax)
0de80bcc | 134 | popfq
1da177e4 LT |
135 | |
136 | xorq %rax, %rax | |
137 | ||
d158cbdf RW |
138 | /* tell the hibernation core that we've just restored the memory */
139 | movq %rax, in_suspend(%rip) | |
140 | ||
141 | ret