#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

12 | extern int __cpu_suspend(unsigned long); | |
13 | /* | |
14 | * This is called by __cpu_suspend() to save the state, and do whatever | |
15 | * flushing is required to ensure that when the CPU goes to sleep we have | |
16 | * the necessary data available when the caches are not searched. | |
17 | * | |
18 | * @arg: Argument to pass to suspend operations | |
19 | * @ptr: CPU context virtual address | |
20 | * @save_ptr: address of the location where the context physical address | |
21 | * must be saved | |
22 | */ | |
23 | int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr, | |
24 | phys_addr_t *save_ptr) | |
25 | { | |
26 | int cpu = smp_processor_id(); | |
27 | ||
28 | *save_ptr = virt_to_phys(ptr); | |
29 | ||
30 | cpu_do_suspend(ptr); | |
31 | /* | |
32 | * Only flush the context that must be retrieved with the MMU | |
33 | * off. VA primitives ensure the flush is applied to all | |
34 | * cache levels so context is pushed to DRAM. | |
35 | */ | |
36 | __flush_dcache_area(ptr, sizeof(*ptr)); | |
37 | __flush_dcache_area(save_ptr, sizeof(*save_ptr)); | |
38 | ||
39 | return cpu_ops[cpu]->cpu_suspend(arg); | |
40 | } | |
41 | ||
65c021bb LP |
42 | /* |
43 | * This hook is provided so that cpu_suspend code can restore HW | |
44 | * breakpoints as early as possible in the resume path, before reenabling | |
45 | * debug exceptions. Code cannot be run from a CPU PM notifier since by the | |
46 | * time the notifier runs debug exceptions might have been enabled already, | |
47 | * with HW breakpoints registers content still in an unknown state. | |
48 | */ | |
49 | void (*hw_breakpoint_restore)(void *); | |
50 | void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) | |
51 | { | |
52 | /* Prevent multiple restore hook initializations */ | |
53 | if (WARN_ON(hw_breakpoint_restore)) | |
54 | return; | |
55 | hw_breakpoint_restore = hw_bp_restore; | |
56 | } | |
57 | ||
/**
 * cpu_suspend
 *
 * @arg: argument to pass to the finisher function
 *
 * Returns 0 if the CPU suspended and resumed successfully, -EOPNOTSUPP
 * if the platform provides no suspend backend, or the error reported by
 * the cpu_ops suspend method.
 */
int cpu_suspend(unsigned long arg)
{
	struct mm_struct *mm = current->active_mm;
	int ret, cpu = smp_processor_id();
	unsigned long flags;

	/*
	 * If cpu_ops have not been registered or suspend
	 * has not been initialized, cpu_suspend call fails early.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
		return -EOPNOTSUPP;

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	local_dbg_save(flags);

	/*
	 * mm context saved on the stack, it will be restored when
	 * the cpu comes out of reset through the identity mapped
	 * page tables, so that the thread address space is properly
	 * set-up on function return.
	 */
	ret = __cpu_suspend(arg);
	if (ret == 0) {
		/* Back from reset: reinstall the task's page tables. */
		cpu_switch_mm(mm->pgd, mm);
		flush_tlb_all();

		/*
		 * Restore per-cpu offset before any kernel
		 * subsystem relying on it has a chance to run.
		 */
		set_my_cpu_offset(per_cpu_offset(cpu));

		/*
		 * Restore HW breakpoint registers to sane values
		 * before debug exceptions are possibly reenabled
		 * through local_dbg_restore.
		 */
		if (hw_breakpoint_restore)
			hw_breakpoint_restore(NULL);
	}

	/*
	 * Restore pstate flags. OS lock and mdscr have been already
	 * restored, so from this point onwards, debugging is fully
	 * reenabled if it was enabled when core started shutdown.
	 */
	local_dbg_restore(flags);

	return ret;
}
118 | ||
119 | extern struct sleep_save_sp sleep_save_sp; | |
120 | extern phys_addr_t sleep_idmap_phys; | |
121 | ||
122 | static int cpu_suspend_init(void) | |
123 | { | |
124 | void *ctx_ptr; | |
125 | ||
126 | /* ctx_ptr is an array of physical addresses */ | |
127 | ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL); | |
128 | ||
129 | if (WARN_ON(!ctx_ptr)) | |
130 | return -ENOMEM; | |
131 | ||
132 | sleep_save_sp.save_ptr_stash = ctx_ptr; | |
133 | sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); | |
134 | sleep_idmap_phys = virt_to_phys(idmap_pg_dir); | |
135 | __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp)); | |
136 | __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys)); | |
137 | ||
138 | return 0; | |
139 | } | |
140 | early_initcall(cpu_suspend_init); |