/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 *	https://lkml.org/lkml/2010/6/18/4
 *	https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 *	https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * License terms: GNU General Public License (GPL) version 2
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/irqflags.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/virt.h>
/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen when the kernel is relocated by KASLR, so we clear it
 * explicitly on the resume path instead.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;
/* Find a symbol's alias in the linear map */
#define LMADDR(x)	phys_to_virt(virt_to_phys(x))

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())

/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];
/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};
/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;
} resume_hdr;
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);

	return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
}
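
/*
 * The CPU context itself is saved by __cpu_suspend_enter() and restored via
 * cpu_resume(), so these hooks only need to check that we are down to a
 * single CPU.
 */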
void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1 = virt_to_phys(swapper_pg_dir);
	hdr->reenter_kernel = _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);
int arch_hibernation_header_restore(void *addr)
{
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);
/*
 * Copies length bytes, starting at src_start, into a new page,
 * performs cache maintenance, then maps the page at the specified low
 * address as executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr,
				 void *(*allocator)(gfp_t mask),
				 gfp_t mask)
{
	int rc = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long dst = (unsigned long)allocator(mask);

	if (!dst) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy((void *)dst, src_start, length);
	flush_icache_range(dst, dst + length);

	pgd = pgd_offset_raw(allocator(mask), dst_addr);
	if (pgd_none(*pgd)) {
		pud = allocator(mask);
		if (!pud) {
			rc = -ENOMEM;
			goto out;
		}
		pgd_populate(&init_mm, pgd, pud);
	}

	pud = pud_offset(pgd, dst_addr);
	if (pud_none(*pud)) {
		pmd = allocator(mask);
		if (!pmd) {
			rc = -ENOMEM;
			goto out;
		}
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, dst_addr);
	if (pmd_none(*pmd)) {
		pte = allocator(mask);
		if (!pte) {
			rc = -ENOMEM;
			goto out;
		}
		pmd_populate_kernel(&init_mm, pmd, pte);
	}

	pte = pte_offset_kernel(pmd, dst_addr);
	set_pte(pte, __pte(virt_to_phys((void *)dst) |
			   pgprot_val(PAGE_KERNEL_EXEC)));
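
	/*
	 * dst_addr is now mapped to the freshly copied page with executable
	 * kernel permissions, in a new set of tables rooted at pgd.
	 */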
	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves). Elsewhere we only (un)install the idmap with preemption
	 * disabled, so T0SZ should be as required regardless.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys((void *)dst);

out:
	return rc;
}
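
/*
 * int swsusp_arch_suspend(void)
 *
 * Return:
 *	0 on success
 */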
int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	local_dbg_save(flags);

	if (__cpu_suspend_enter(&state)) {
		ret = swsusp_save();
	} else {
		/* Clean kernel to PoC for secondary core startup */
		__flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START);

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory.
		 */
		in_suspend = 0;

		__cpu_suspend_exit();
	}

	local_dbg_restore(flags);

	return ret;
}
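
/*
 * The copy_*() helpers below duplicate the kernel's linear-map page tables
 * into pages allocated with get_safe_page(), clearing the read-only bits so
 * that the restore code can write to every page while the original ttbr1
 * tables are being overwritten.
 */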
static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
		    unsigned long end)
{
	pte_t *src_pte;
	pte_t *dst_pte;
	unsigned long addr = start;

	dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
	if (!dst_pte)
		return -ENOMEM;
	pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
	dst_pte = pte_offset_kernel(dst_pmd, start);

	src_pte = pte_offset_kernel(src_pmd, start);
	do {
		if (!pte_none(*src_pte))
			/*
			 * Resume will overwrite areas that may be marked
			 * read only (code, rodata). Clear the RDONLY bit from
			 * the temporary mappings we use during restore.
			 */
			set_pte(dst_pte, __pte(pte_val(*src_pte) & ~PTE_RDONLY));
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	return 0;
}
static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
		    unsigned long end)
{
	pmd_t *src_pmd;
	pmd_t *dst_pmd;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(*dst_pud)) {
		dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmd)
			return -ENOMEM;
		pud_populate(&init_mm, dst_pud, dst_pmd);
	}
	dst_pmd = pmd_offset(dst_pud, start);

	src_pmd = pmd_offset(src_pud, start);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*src_pmd))
			continue;
		if (pmd_table(*src_pmd)) {
			if (copy_pte(dst_pmd, src_pmd, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmd,
				__pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);

	return 0;
}
static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pud;
	pud_t *src_pud;
	unsigned long next;
	unsigned long addr = start;

	if (pgd_none(*dst_pgd)) {
		dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pud)
			return -ENOMEM;
		pgd_populate(&init_mm, dst_pgd, dst_pud);
	}
	dst_pud = pud_offset(dst_pgd, start);

	src_pud = pud_offset(src_pgd, start);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*src_pud))
			continue;
		if (pud_table(*(src_pud))) {
			if (copy_pmd(dst_pud, src_pud, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pud,
				__pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pud++, src_pud++, addr = next, addr != end);

	return 0;
}
static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
			    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgd = pgd_offset_k(start);

	dst_pgd = pgd_offset_raw(dst_pgd, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*src_pgd))
			continue;
		if (copy_pud(dst_pgd, src_pgd, addr, next))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	return 0;
}
/*
 * Set up, then resume from the hibernate image using swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate core;
 * we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc = 0;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	void *lm_restore_pblist;
	phys_addr_t phys_hibernate_exit;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!tmp_pg_dir) {
		pr_err("Failed to allocate memory for temporary page tables.");
		rc = -ENOMEM;
		goto out;
	}
	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
	if (rc)
		goto out;
	/*
	 * Since we only copied the linear map, we need to find restore_pblist's
	 * linear map address.
	 */
	lm_restore_pblist = LMADDR(restore_pblist);

	/*
	 * We need a zero page that is zero before & after resume in order to
	 * break before make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.");
		rc = -ENOMEM;
		goto out;
	}
	/*
	 * Locate the exit code in the bottom-but-one page, so that *NULL
	 * still has disastrous effects.
	 */
	hibernate_exit = (void *)PAGE_SIZE;
	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit,
				   (void *)get_safe_page, GFP_ATOMIC);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.");
		goto out;
	}
	/*
	 * The hibernate exit text contains a set of el2 vectors that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);
	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = phys_hibernate_exit;	/* base */
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;	/* offset */

		__hyp_set_vectors(el2_vectors);
	}
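
	/*
	 * Everything the relocated exit code needs is passed as arguments:
	 * the temporary linear-map tables, the saved kernel's ttbr1 and entry
	 * point, the linear-map alias of the restore list, the hyp-stub
	 * vectors for el2, and a zero page for break-before-make on ttbr1.
	 */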
	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, lm_restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

out:
	return rc;
}
static int check_boot_cpu_online_pm_callback(struct notifier_block *nb,
					     unsigned long action, void *ptr)
{
	if (action == PM_HIBERNATION_PREPARE &&
	    cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return notifier_from_errno(-ENODEV);
	}

	return NOTIFY_OK;
}
static int __init check_boot_cpu_online_init(void)
{
	/*
	 * Set this pm_notifier callback with a lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be
	 * called earlier to disable cpu hotplug before the cpu online check.
	 */
	pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX);

	return 0;
}
core_initcall(check_boot_cpu_online_init);