Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
f6b0fa02 | 2 | #include <linux/linkage.h> |
941aefac | 3 | #include <linux/threads.h> |
f6b0fa02 RK |
4 | #include <asm/asm-offsets.h> |
5 | #include <asm/assembler.h> | |
6 | #include <asm/glue-cache.h> | |
7 | #include <asm/glue-proc.h> | |
f6b0fa02 RK |
8 | .text |
9 | ||
7604537b LP |
10 | /* |
11 | * Implementation of MPIDR hash algorithm through shifting | |
12 | * and OR'ing. | |
13 | * | |
14 | * @dst: register containing hash result | |
15 | * @rs0: register containing affinity level 0 bit shift | |
16 | * @rs1: register containing affinity level 1 bit shift | |
17 | * @rs2: register containing affinity level 2 bit shift | |
18 | * @mpidr: register containing MPIDR value | |
19 | * @mask: register containing MPIDR mask | |
20 | * | |
21 | * Pseudo C-code: | |
22 | * | |
23 | *u32 dst; | |
24 | * | |
25 | *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 mpidr, u32 mask) { | |
26 | * u32 aff0, aff1, aff2; | |
27 | * u32 mpidr_masked = mpidr & mask; | |
28 | * aff0 = mpidr_masked & 0xff; | |
29 | * aff1 = mpidr_masked & 0xff00; | |
30 | * aff2 = mpidr_masked & 0xff0000; | |
31 | * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2); | |
32 | *} | |
33 | * Input registers: rs0, rs1, rs2, mpidr, mask | |
34 | * Output register: dst | |
35 | * Note: input and output registers must be disjoint register sets | |
36 | (eg: a macro instance with mpidr = r1 and dst = r1 is invalid) | |
37 | */ | |
@ NOTE: \mpidr and \mask are reused as scratch below, so both are
@ clobbered on exit — another reason the register sets must be disjoint.
38 | .macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask | |
39 | and \mpidr, \mpidr, \mask @ mask out MPIDR bits | |
40 | and \dst, \mpidr, #0xff @ mask=aff0 | |
@ ARM()/THUMB() pick the per-ISA encoding: ARM mode folds the variable
@ shift into the data-processing operand, Thumb-2 needs a separate lsr.
41 | ARM( mov \dst, \dst, lsr \rs0 ) @ dst=aff0>>rs0 | |
42 | THUMB( lsr \dst, \dst, \rs0 ) | |
43 | and \mask, \mpidr, #0xff00 @ mask = aff1 | |
44 | ARM( orr \dst, \dst, \mask, lsr \rs1 ) @ dst|=(aff1>>rs1) | |
45 | THUMB( lsr \mask, \mask, \rs1 ) | |
46 | THUMB( orr \dst, \dst, \mask ) | |
47 | and \mask, \mpidr, #0xff0000 @ mask = aff2 | |
48 | ARM( orr \dst, \dst, \mask, lsr \rs2 ) @ dst|=(aff2>>rs2) | |
49 | THUMB( lsr \mask, \mask, \rs2 ) | |
50 | THUMB( orr \dst, \dst, \mask ) | |
51 | .endm | |
52 | ||
f6b0fa02 | 53 | /* |
abda1bd5 RK |
54 | * Save CPU state for a suspend. This saves the CPU general purpose |
55 | * registers, and allocates space on the kernel stack to save the CPU | |
56 | * specific registers and some other data for resume. | |
57 | * r0 = suspend function arg0 | |
58 | * r1 = suspend function | |
71a8986d | 59 | * r2 = MPIDR value the resuming CPU will use |
f6b0fa02 | 60 | */ |
2c74a0ce | 61 | ENTRY(__cpu_suspend) |
e8856a87 | 62 | stmfd sp!, {r4 - r11, lr} |
f6b0fa02 RK |
63 | #ifdef MULTI_CPU |
@ MULTI_CPU: the per-CPU sleep-state size comes from the processor
@ vector table; otherwise it is the link-time constant cpu_suspend_size.
64 | ldr r10, =processor | |
abda1bd5 | 65 | ldr r4, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state |
941aefac | 66 | #else |
abda1bd5 | 67 | ldr r4, =cpu_suspend_size |
3fd431bd | 68 | #endif |
abda1bd5 RK |
69 | mov r5, sp @ current virtual SP |
70 | add r4, r4, #12 @ Space for pgd, virt sp, phys resume fn | |
71 | sub sp, sp, r4 @ allocate CPU state on stack | |
abda1bd5 | 72 | ldr r3, =sleep_save_sp |
71a8986d | 73 | stmfd sp!, {r0, r1} @ save suspend func arg and pointer |
7604537b | 74 | ldr r3, [r3, #SLEEP_SAVE_SP_VIRT] |
71a8986d NP |
@ SMP: index sleep_save_sp by this CPU's MPIDR hash (r2 = MPIDR from
@ caller); UP: ALT_UP_B patches in a branch straight to 1: (index 0).
75 | ALT_SMP(ldr r0, =mpidr_hash) |
76 | ALT_UP_B(1f) | |
77 | /* This ldmia relies on the memory layout of the mpidr_hash struct */ | |
78 | ldmia r0, {r1, r6-r8} @ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts | |
79 | compute_mpidr_hash r0, r6, r7, r8, r2, r1 | |
80 | add r3, r3, r0, lsl #2 @ r3 = &sleep_save_sp_ptr[hash] | |
81 | 1: mov r2, r5 @ virtual SP | |
82 | mov r1, r4 @ size of save block | |
83 | add r0, sp, #8 @ pointer to save block | |
abda1bd5 | 84 | bl __cpu_suspend_save |
@ If the suspend fn ever returns, it returns through lr, i.e. into
@ cpu_suspend_abort below, which unwinds and reports failure.
14327c66 | 85 | badr lr, cpu_suspend_abort |
3799bbe5 | 86 | ldmfd sp!, {r0, pc} @ call suspend fn |
2c74a0ce | 87 | ENDPROC(__cpu_suspend) |
f6b0fa02 RK |
88 | .ltorg |
89 | ||
@ Reached only if the suspend function returns (lr was pointed here by
@ __cpu_suspend). Discards the save block and returns non-zero so the
@ caller of __cpu_suspend can tell the suspend was aborted.
29cb3cd2 | 90 | cpu_suspend_abort: |
de8e71ca | 91 | ldmia sp!, {r1 - r3} @ pop phys pgd, virt SP, phys resume fn |
f5fa68d9 RK |
@ Keep the suspend fn's own non-zero return code if it set one,
@ otherwise force 1 so the result is never mistaken for success (0).
92 | teq r0, #0 |
93 | moveq r0, #1 @ force non-zero value | |
29cb3cd2 RK |
94 | mov sp, r2 @ restore virtual SP saved above | |
95 | ldmfd sp!, {r4 - r11, pc} @ restore regs pushed by __cpu_suspend | |
96 | ENDPROC(cpu_suspend_abort) | |
97 | ||
f6b0fa02 RK |
98 | /* |
99 | * r0 = control register value | |
f6b0fa02 | 100 | */ |
62b2d07c | 101 | .align 5 |
@ Lives in .idmap.text: it must be identity-mapped because enabling the
@ MMU changes the address map while this code is executing.
e6eadc67 | 102 | .pushsection .idmap.text,"ax" |
f6b0fa02 | 103 | ENTRY(cpu_resume_mmu) |
f6b0fa02 | 104 | ldr r3, =cpu_resume_after_mmu @ virtual target, jumped to via r3 |
d675d0bc | 105 | instr_sync |
e8ce0eb5 RK |
106 | mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc |
107 | mrc p15, 0, r0, c0, c0, 0 @ read id reg | |
d675d0bc | 108 | instr_sync |
e8ce0eb5 RK |
@ NOTE(review): the CP15 read-back plus two no-op movs look like the
@ classic settle delay before branching after an MMU enable — confirm
@ against the ARM ARM; do not reorder or remove.
109 | mov r0, r0 |
110 | mov r0, r0 | |
6ebbf2ce | 111 | ret r3 @ jump to virtual address |
62b2d07c | 112 | ENDPROC(cpu_resume_mmu) |
e6eadc67 | 113 | .popsection |
@ First code executed at virtual addresses after resume. Pops the
@ registers __cpu_suspend pushed and returns 0 (success) to its caller.
f6b0fa02 | 114 | cpu_resume_after_mmu: |
14cd8fd5 | 115 | bl cpu_init @ restore the und/abt/irq banked regs |
29cb3cd2 | 116 | mov r0, #0 @ return zero on success |
5fa94c81 | 117 | ldmfd sp!, {r4 - r11, pc} |
f6b0fa02 RK |
118 | ENDPROC(cpu_resume_after_mmu) |
119 | ||
@ Resume entry points. This code runs with the MMU off, at the code's
@ physical alias, so every data reference below is PC-relative (adr plus
@ a self-relative offset word; see _sleep_save_sp / mpidr_hash_ptr).
d0776aff | 120 | .text |
f6b0fa02 | 121 | .align |
2678bb9f | 122 | |
ca70ea43 MS |
123 | #ifdef CONFIG_MCPM |
124 | .arm | |
125 | THUMB( .thumb ) | |
@ MCPM entry: skips the hyp-stub install / SVC-mode setup in cpu_resume.
126 | ENTRY(cpu_resume_no_hyp) | |
127 | ARM_BE8(setend be) @ ensure we are in BE mode | |
128 | b no_hyp | |
129 | #endif | |
130 | ||
2678bb9f | 131 | #ifdef CONFIG_MMU |
32e55a77 SB |
132 | .arm |
@ ARM-state entry shim that falls through into cpu_resume, switching a
@ Thumb-2 kernel into Thumb state first.
133 | ENTRY(cpu_resume_arm) |
9ce93bdd | 134 | THUMB( badr r9, 1f ) @ Kernel is entered in ARM. |
32e55a77 SB |
135 | THUMB( bx r9 ) @ If this is a Thumb-2 kernel, |
136 | THUMB( .thumb ) @ switch to Thumb now. | |
137 | THUMB(1: ) | |
2678bb9f RK |
138 | #endif |
139 | ||
f6b0fa02 | 140 | ENTRY(cpu_resume) |
97bcb0fe | 141 | ARM_BE8(setend be) @ ensure we are in BE mode |
0e0779da LP |
142 | #ifdef CONFIG_ARM_VIRT_EXT |
143 | bl __hyp_stub_install_secondary | |
144 | #endif | |
145 | safe_svcmode_maskall r1 |
ca70ea43 | 146 | no_hyp: |
7604537b LP |
147 | mov r1, #0 @ default save-slot index 0 (UP case) |
@ SMP: read this CPU's MPIDR and recompute the same hash index that
@ __cpu_suspend used, to locate this CPU's save slot.
148 | ALT_SMP(mrc p15, 0, r0, c0, c0, 5) | |
149 | ALT_UP_B(1f) | |
150 | adr r2, mpidr_hash_ptr | |
151 | ldr r3, [r2] | |
152 | add r2, r2, r3 @ r2 = struct mpidr_hash phys address | |
153 | /* | |
154 | * This ldmia relies on the memory layout of | |
155 | * struct mpidr_hash. | |
156 | */ | |
157 | ldmia r2, { r3-r6 } @ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts | |
158 | compute_mpidr_hash r1, r4, r5, r6, r0, r3 | |
159 | 1: | |
@ r0 = phys &sleep_save_sp via the self-relative offset word below.
160 | adr r0, _sleep_save_sp | |
d0776aff AB |
161 | ldr r2, [r0] |
162 | add r0, r0, r2 |
7604537b LP |
163 | ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] |
164 | ldr r0, [r0, r1, lsl #2] @ r0 = this CPU's saved state (by hash) | |
165 | ||
de8e71ca RK |
166 | @ load phys pgd, stack, resume fn |
@ ARM mode can load sp and pc in one ldmia; Thumb-2 cannot, so it loads
@ into r2/r3 and moves/branches explicitly.
167 | ARM( ldmia r0!, {r1, sp, pc} ) | |
168 | THUMB( ldmia r0!, {r1, r2, r3} ) | |
169 | THUMB( mov sp, r2 ) | |
170 | THUMB( bx r3 ) | |
f6b0fa02 RK |
171 | ENDPROC(cpu_resume) |
172 | ||
2678bb9f | 173 | #ifdef CONFIG_MMU |
32e55a77 | 174 | ENDPROC(cpu_resume_arm) |
ca70ea43 MS |
175 | #endif |
176 | #ifdef CONFIG_MCPM |
177 | ENDPROC(cpu_resume_no_hyp) | |
2678bb9f | 178 | #endif |
f6b0fa02 | 179 | |
7604537b | 180 | .align 2 |
d0776aff AB |
@ Self-relative (".long sym - .") offset words: cpu_resume runs with the
@ MMU off at physical addresses, so it cannot use absolute virtual
@ addresses; adr + this offset yields the symbol's physical address.
181 | _sleep_save_sp: |
182 | .long sleep_save_sp - . |
7604537b LP |
183 | mpidr_hash_ptr: |
184 | .long mpidr_hash - . @ mpidr_hash struct offset |
185 | ||
d0776aff | 186 | .data |
1abd3502 | 187 | .align 2 |
7604537b LP |
@ Storage for struct sleep_save_sp (virt + phys pointers to the per-CPU
@ save-pointer array), written by __cpu_suspend_save, read by both the
@ suspend and resume paths above.
188 | .type sleep_save_sp, #object |
189 | ENTRY(sleep_save_sp) | |
7604537b | 190 | .space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp |