powerpc/64s: Move hash MMU support code under CONFIG_PPC_64S_HASH_MMU
arch/powerpc/kernel/entry_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cache.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * System calls.
 */
	.section	".text"

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
1:	nop;			\
	patch_site 1b, patch__call_flush_branch_caches1; \
1:	nop;			\
	patch_site 1b, patch__call_flush_branch_caches2; \
1:	nop;			\
	patch_site 1b, patch__call_flush_branch_caches3

.macro nops number
	.rept \number
	nop
	.endr
.endm

.balign 32
.global flush_branch_caches
flush_branch_caches:
	/* Save LR into r9 */
	mflr	r9

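	/*
	 * Each bl .+4 below pushes an entry onto the hardware
	 * return-address (link stack) predictor, so 64 calls overwrite
	 * anything previously trained into it.
	 */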
	// Flush the link stack
	.rept 64
	bl	.+4
	.endr
	b	1f
	nops	6

	.balign 32
	/* Restore LR */
1:	mtlr	r9

	// If we're just flushing the link stack, return here
3:	nop
	patch_site 3b patch__flush_link_stack_return

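	/*
	 * Flush the branch target count cache. PPC_BCCTR_FLUSH is a
	 * bcctr encoding that processors with the count-cache flush
	 * assist (given appropriate firmware settings) treat as a
	 * flush hint; the long loop of aligned bcctr ops below is the
	 * software fallback that displaces every entry.
	 */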
	li	r9,0x7fff
	mtctr	r9

	PPC_BCCTR_FLUSH

2:	nop
	patch_site 2b patch__flush_count_cache_return

	nops	3

	.rept 278
	.balign 32
	PPC_BCCTR_FLUSH
	nops	7
	.endr

	blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via interrupt_return.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check_amr r9, r10

	FLUSH_COUNT_CACHE	/* Clobbers r9, ctr */

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable (MMIO) stores must also be ordered across an
	 * involuntary preemption. The smp_mb__after_spinlock() in
	 * __schedule() is implemented as hwsync on powerpc, which
	 * orders MMIO too. So long as there is an hwsync in the
	 * context switch path, it will be executed on the source CPU
	 * after the task has performed all MMIO ops on that CPU, and
	 * on the destination CPU before the task performs any MMIO
	 * ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * whose larx/stcx will clear any reservation held by the
	 * task being switched out.
	 */
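	/*
	 * Without that, a stcx. issued by the incoming task could
	 * spuriously succeed against a reservation left behind by the
	 * outgoing task's larx, corrupting an atomic sequence.
	 */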
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * Cancel all explicit user streams as they will have no use after
	 * context switch and will stop the HW from creating streams itself.
	 */
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */
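	/*
	 * Switch the stack protector canary along with 'current': on
	 * 64-bit the canary is accessed relative to r13, so it lives
	 * in the PACA and must follow the task.
	 */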
#if defined(CONFIG_STACKPROTECTOR)
	ld	r6, TASK_CANARY(r6)
	std	r6, PACA_CANARY(r13)
#endif

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
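	/*
	 * Skip the SLB update when the new stack is in the bolted
	 * kernel region (ESID c00000000) or shares a segment with the
	 * current stack; the cror folds both tests into cr0.eq so a
	 * single beq covers them.
	 */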
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/*
	 * Update the last bolted SLB. No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
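	/*
	 * The ESID is cleared first and written back last so the
	 * shadow buffer never holds a valid entry paired with a stale
	 * VSID while it is being updated.
	 */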
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/*
	 * No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	isync
	slbie	r6
BEGIN_FTR_SECTION
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_64S_HASH_MMU */

	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
	/*
	 * Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	 * because we don't need to leave the 288-byte ABI gap at the
	 * top of the kernel stack.
	 */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU, paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active there, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_NVGPRS(r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32-bit mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/*
	 * Because RTAS is running in 32-bit mode, it clobbers the high order
	 * half of all registers that it saves. We therefore save those
	 * registers RTAS might touch to the stack. (r0, r3-r13 are caller
	 * saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_NVGPRS(r1)			/* Save the non-volatiles */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/*
	 * Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/*
	 * There is no way it is acceptable to get here with interrupts
	 * enabled, so check it with the asm equivalent of WARN_ON: the
	 * tdeqi traps if the soft-mask state is IRQS_ENABLED, and the
	 * bug table entry turns that trap into a warning.
	 */
	lbz	r0,PACAIRQSOFTMASK(r13)
1:	tdeqi	r0,IRQS_ENABLED
	EMIT_WARN_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1
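	/*
	 * The rldicl/rotldi pair above clears MSR_EE without a scratch
	 * mask: rotating left by 48 moves the EE bit to the MSB, where
	 * the rldicl mask drops it, and the second rotate (16 more, 64
	 * in total) returns every other bit to its original position.
	 */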

	/*
	 * Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

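	/*
	 * Compute two MSR values below: r0 keeps the current MSR with
	 * EE, SE, BE and RI cleared, and is used at __enter_rtas to
	 * hard-disable before the rfid; r6 further clears SF, IR, DR,
	 * the FP bits and LE, giving the 32-bit real-mode big-endian
	 * MSR that RTAS executes with.
	 */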
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9

__enter_rtas:
	sync			/* disable interrupts so SRR0/1 */
	mtmsrd	r0		/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/*
	 * Clear RI and set SF before anything.
	 */
	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sldi	r0,r0,(MSR_SF_LG - MSR_RI_LG)
	or	r6,r6,r0
	sync
	mtmsrd	r6

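	/*
	 * The sldi above turns the MSR_RI mask into an MSR_SF mask by
	 * shifting it up by the difference in bit positions: SF is set
	 * to get 64-bit mode back, while RI stays clear so any stray
	 * interrupt while SRR0/1 are live is flagged as unrecoverable.
	 */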
	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2		/* convert to realmode address */

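	/*
	 * bcl 20,31,$+4 is the position-independent "get PC" idiom: it
	 * loads LR with the address of the following instruction, and
	 * this particular form is recognized by the hardware so it does
	 * not disturb the link stack predictor.
	 */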
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

	.align	3
1:	.8byte	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_NVGPRS(r1)			/* Restore the non-volatiles */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,SWITCH_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/*
	 * Because PROM is running in 32-bit mode, it clobbers the high order
	 * half of all registers that it saves. We therefore save those
	 * registers PROM might touch to the stack. (r0, r3-r13 are caller
	 * saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_NVGPRS(r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/*
	 * Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF.
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_NVGPRS(r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,SWITCH_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr