powerpc/32: Add KASAN support
[linux-2.6-block.git] / arch / powerpc / kernel / head_32.S
CommitLineData
14cf11af
PM
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14cf11af
PM
12 *
13 * This file contains the low-level support and setup for the
14 * PowerPC platform, including trap and interrupt dispatch.
15 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
e7039845 24#include <linux/init.h>
b3b8dc6c 25#include <asm/reg.h>
14cf11af
PM
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/pgtable.h>
29#include <asm/cputable.h>
30#include <asm/cache.h>
31#include <asm/thread_info.h>
32#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h>
ec2b36b9 34#include <asm/ptrace.h>
5e696617 35#include <asm/bug.h>
dd84c217 36#include <asm/kvm_book3s_asm.h>
9445aa1a 37#include <asm/export.h>
2c86cd18 38#include <asm/feature-fixups.h>
14cf11af 39
14cf11af
PM
40/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
41#define LOAD_BAT(n, reg, RA, RB) \
42 /* see the comment for clear_bats() -- Cort */ \
43 li RA,0; \
44 mtspr SPRN_IBAT##n##U,RA; \
45 mtspr SPRN_DBAT##n##U,RA; \
46 lwz RA,(n*16)+0(reg); \
47 lwz RB,(n*16)+4(reg); \
48 mtspr SPRN_IBAT##n##U,RA; \
49 mtspr SPRN_IBAT##n##L,RB; \
50 beq 1f; \
51 lwz RA,(n*16)+8(reg); \
52 lwz RB,(n*16)+12(reg); \
53 mtspr SPRN_DBAT##n##U,RA; \
54 mtspr SPRN_DBAT##n##L,RB; \
551:
14cf11af 56
e7039845 57 __HEAD
b3b8dc6c
PM
58 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
59 .stabs "head_32.S",N_SO,0,0,0f
14cf11af 600:
748a7683 61_ENTRY(_stext);
14cf11af
PM
62
63/*
64 * _start is defined this way because the XCOFF loader in the OpenFirmware
65 * on the powermac expects the entry point to be a procedure descriptor.
66 */
748a7683 67_ENTRY(_start);
14cf11af
PM
68 /*
69 * These are here for legacy reasons, the kernel used to
70 * need to look like a coff function entry for the pmac
71 * but we're always started by some kind of bootloader now.
72 * -- Cort
73 */
74 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
75 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
76 nop
77
78/* PMAC
79 * Enter here with the kernel text, data and bss loaded starting at
80 * 0, running with virtual == physical mapping.
81 * r5 points to the prom entry point (the client interface handler
82 * address). Address translation is turned on, with the prom
83 * managing the hash table. Interrupts are disabled. The stack
84 * pointer (r1) points to just below the end of the half-meg region
85 * from 0x380000 - 0x400000, which is mapped in already.
86 *
87 * If we are booted from MacOS via BootX, we enter with the kernel
88 * image loaded somewhere, and the following values in registers:
89 * r3: 'BooX' (0x426f6f58)
90 * r4: virtual address of boot_infos_t
91 * r5: 0
92 *
14cf11af
PM
93 * PREP
94 * This is jumped to on prep systems right after the kernel is relocated
95 * to its proper place in memory by the boot loader. The expected layout
96 * of the regs is:
97 * r3: ptr to residual data
98 * r4: initrd_start or if no initrd then 0
99 * r5: initrd_end - unused if r4 is 0
100 * r6: Start of command line string
101 * r7: End of command line string
102 *
103 * This just gets a minimal mmu environment setup so we can call
104 * start_here() to do the real work.
105 * -- Cort
106 */
107
108 .globl __start
109__start:
110/*
111 * We have to do any OF calls before we map ourselves to KERNELBASE,
112 * because OF may have I/O devices mapped into that area
113 * (particularly on CHRP).
114 */
9b6b563c
PM
115 cmpwi 0,r5,0
116 beq 1f
2bda347b 117
28794d34 118#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
2bda347b
BH
119 /* find out where we are now */
120 bcl 20,31,$+4
1210: mflr r8 /* r8 = runtime addr here */
122 addis r8,r8,(_stext - 0b)@ha
123 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
9b6b563c 124 bl prom_init
28794d34
BH
125#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
126
127 /* We never return. We also hit that trap if trying to boot
128 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
9b6b563c
PM
129 trap
130
d7f39454
BH
131/*
132 * Check for BootX signature when supporting PowerMac and branch to
133 * appropriate trampoline if it's present
134 */
135#ifdef CONFIG_PPC_PMAC
1361: lis r31,0x426f
137 ori r31,r31,0x6f58
138 cmpw 0,r3,r31
139 bne 1f
140 bl bootx_init
141 trap
142#endif /* CONFIG_PPC_PMAC */
143
6dece0eb 1441: mr r31,r3 /* save device tree ptr */
14cf11af
PM
145 li r24,0 /* cpu # */
146
147/*
148 * early_init() does the early machine identification and does
149 * the necessary low-level setup and clears the BSS
150 * -- Cort <cort@fsmlabs.com>
151 */
152 bl early_init
153
14cf11af
PM
154/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
155 * the physical address we are running at, returned by early_init()
156 */
157 bl mmu_off
158__after_mmu_off:
14cf11af
PM
159 bl clear_bats
160 bl flush_tlbs
161
162 bl initial_bats
f21f49ea 163#if defined(CONFIG_BOOTX_TEXT)
51d3082f
BH
164 bl setup_disp_bat
165#endif
c374e00e
SW
166#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
167 bl setup_cpm_bat
168#endif
d1d56f8c
AH
169#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
170 bl setup_usbgecko_bat
171#endif
14cf11af
PM
172
173/*
174 * Call setup_cpu for CPU 0 and initialize 6xx Idle
175 */
176 bl reloc_offset
177 li r24,0 /* cpu# */
178 bl call_setup_cpu /* Call setup_cpu for this CPU */
d7cceda9 179#ifdef CONFIG_PPC_BOOK3S_32
14cf11af
PM
180 bl reloc_offset
181 bl init_idle_6xx
d7cceda9 182#endif /* CONFIG_PPC_BOOK3S_32 */
14cf11af
PM
183
184
14cf11af
PM
185/*
186 * We need to run with _start at physical address 0.
187 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
188 * the exception vectors at 0 (and therefore this copy
189 * overwrites OF's exception vectors with our own).
9b6b563c 190 * The MMU is off at this point.
14cf11af
PM
191 */
192 bl reloc_offset
193 mr r26,r3
194 addis r4,r3,KERNELBASE@h /* current address of _start */
ccdcef72
DF
195 lis r5,PHYSICAL_START@h
196 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
14cf11af 197 bne relocate_kernel
14cf11af
PM
198/*
199 * we now have the 1st 16M of ram mapped with the bats.
200 * prep needs the mmu to be turned on here, but pmac already has it on.
201 * this shouldn't bother the pmac since it just gets turned on again
202 * as we jump to our code at KERNELBASE. -- Cort
203 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
204 * off, and in other cases, we now turn it off before changing BATs above.
205 */
206turn_on_mmu:
207 mfmsr r0
208 ori r0,r0,MSR_DR|MSR_IR
209 mtspr SPRN_SRR1,r0
210 lis r0,start_here@h
211 ori r0,r0,start_here@l
212 mtspr SPRN_SRR0,r0
213 SYNC
214 RFI /* enables MMU */
215
216/*
217 * We need __secondary_hold as a place to hold the other cpus on
218 * an SMP machine, even when we are running a UP kernel.
219 */
220 . = 0xc0 /* for prep bootloader */
221 li r3,1 /* MTX only has 1 cpu */
222 .globl __secondary_hold
223__secondary_hold:
224 /* tell the master we're here */
bbd0abda 225 stw r3,__secondary_hold_acknowledge@l(0)
14cf11af
PM
226#ifdef CONFIG_SMP
227100: lwz r4,0(0)
228 /* wait until we're told to start */
229 cmpw 0,r4,r3
230 bne 100b
231 /* our cpu # was at addr 0 - go */
232 mr r24,r3 /* cpu # */
233 b __secondary_start
234#else
235 b .
236#endif /* CONFIG_SMP */
237
bbd0abda
PM
238 .globl __secondary_hold_spinloop
239__secondary_hold_spinloop:
240 .long 0
241 .globl __secondary_hold_acknowledge
242__secondary_hold_acknowledge:
243 .long -1
244
14cf11af
PM
245/*
246 * Exception entry code. This code runs with address translation
247 * turned off, i.e. using physical addresses.
248 * We assume sprg3 has the physical address of the current
249 * task's thread_struct.
250 */
251#define EXCEPTION_PROLOG \
ee43eb78
BH
252 mtspr SPRN_SPRG_SCRATCH0,r10; \
253 mtspr SPRN_SPRG_SCRATCH1,r11; \
14cf11af
PM
254 mfcr r10; \
255 EXCEPTION_PROLOG_1; \
256 EXCEPTION_PROLOG_2
257
258#define EXCEPTION_PROLOG_1 \
259 mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
260 andi. r11,r11,MSR_PR; \
261 tophys(r11,r1); /* use tophys(r1) if kernel */ \
262 beq 1f; \
ee43eb78 263 mfspr r11,SPRN_SPRG_THREAD; \
8c1fc5ab 264 lwz r11,TASK_STACK-THREAD(r11); \
14cf11af
PM
265 addi r11,r11,THREAD_SIZE; \
266 tophys(r11,r11); \
2671: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
268
269
270#define EXCEPTION_PROLOG_2 \
14cf11af
PM
271 stw r10,_CCR(r11); /* save registers */ \
272 stw r12,GPR12(r11); \
273 stw r9,GPR9(r11); \
ee43eb78 274 mfspr r10,SPRN_SPRG_SCRATCH0; \
14cf11af 275 stw r10,GPR10(r11); \
ee43eb78 276 mfspr r12,SPRN_SPRG_SCRATCH1; \
14cf11af
PM
277 stw r12,GPR11(r11); \
278 mflr r10; \
279 stw r10,_LINK(r11); \
280 mfspr r12,SPRN_SRR0; \
281 mfspr r9,SPRN_SRR1; \
282 stw r1,GPR1(r11); \
283 stw r1,0(r11); \
284 tovirt(r1,r11); /* set new kernel sp */ \
285 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
286 MTMSRD(r10); /* (except for mach check in rtas) */ \
287 stw r0,GPR0(r11); \
ec2b36b9
BH
288 lis r10,STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */ \
289 addi r10,r10,STACK_FRAME_REGS_MARKER@l; \
f78541dc 290 stw r10,8(r11); \
14cf11af
PM
291 SAVE_4GPRS(3, r11); \
292 SAVE_2GPRS(7, r11)
293
294/*
295 * Note: code which follows this uses cr0.eq (set if from kernel),
296 * r11, r12 (SRR0), and r9 (SRR1).
297 *
298 * Note2: once we have set r1 we are in a position to take exceptions
299 * again, and we could thus set MSR:RI at that point.
300 */
301
302/*
303 * Exception vectors.
304 */
305#define EXCEPTION(n, label, hdlr, xfer) \
306 . = n; \
dd84c217 307 DO_KVM n; \
14cf11af
PM
308label: \
309 EXCEPTION_PROLOG; \
310 addi r3,r1,STACK_FRAME_OVERHEAD; \
311 xfer(n, hdlr)
312
313#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
314 li r10,trap; \
d73e0c99 315 stw r10,_TRAP(r11); \
14cf11af
PM
316 li r10,MSR_KERNEL; \
317 copyee(r10, r9); \
318 bl tfer; \
319i##n: \
320 .long hdlr; \
321 .long ret
322
323#define COPY_EE(d, s) rlwimi d,s,0,16,16
324#define NOCOPY(d, s)
325
326#define EXC_XFER_STD(n, hdlr) \
327 EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
328 ret_from_except_full)
329
330#define EXC_XFER_LITE(n, hdlr) \
331 EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
332 ret_from_except)
333
334#define EXC_XFER_EE(n, hdlr) \
335 EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
336 ret_from_except_full)
337
338#define EXC_XFER_EE_LITE(n, hdlr) \
339 EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
340 ret_from_except)
341
342/* System reset */
343/* core99 pmac starts the seconary here by changing the vector, and
dc1c1ca3 344 putting it back to what it was (unknown_exception) when done. */
dc1c1ca3 345 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
14cf11af
PM
346
347/* Machine check */
348/*
349 * On CHRP, this is complicated by the fact that we could get a
350 * machine check inside RTAS, and we have no guarantee that certain
351 * critical registers will have the values we expect. The set of
352 * registers that might have bad values includes all the GPRs
353 * and all the BATs. We indicate that we are in RTAS by putting
354 * a non-zero value, the address of the exception frame to use,
0df977ea
CL
355 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
356 * and uses its value if it is non-zero.
14cf11af
PM
357 * (Other exception handlers assume that r1 is a valid kernel stack
358 * pointer when we take an exception from supervisor mode.)
359 * -- paulus.
360 */
361 . = 0x200
dd84c217 362 DO_KVM 0x200
ee43eb78
BH
363 mtspr SPRN_SPRG_SCRATCH0,r10
364 mtspr SPRN_SPRG_SCRATCH1,r11
14cf11af
PM
365 mfcr r10
366#ifdef CONFIG_PPC_CHRP
0df977ea
CL
367 mfspr r11, SPRN_SPRG_THREAD
368 lwz r11, RTAS_SP(r11)
369 cmpwi cr1, r11, 0
370 bne cr1, 7f
14cf11af
PM
371#endif /* CONFIG_PPC_CHRP */
372 EXCEPTION_PROLOG_1
3737: EXCEPTION_PROLOG_2
374 addi r3,r1,STACK_FRAME_OVERHEAD
375#ifdef CONFIG_PPC_CHRP
14cf11af
PM
376 bne cr1,1f
377#endif
dc1c1ca3 378 EXC_XFER_STD(0x200, machine_check_exception)
14cf11af
PM
379#ifdef CONFIG_PPC_CHRP
3801: b machine_check_in_rtas
381#endif
382
383/* Data access exception. */
384 . = 0x300
dd84c217 385 DO_KVM 0x300
14cf11af
PM
386DataAccess:
387 EXCEPTION_PROLOG
14cf11af 388 mfspr r10,SPRN_DSISR
4ee7084e 389 stw r10,_DSISR(r11)
a68c31fc
CL
390#ifdef CONFIG_PPC_KUAP
391 andis. r0,r10,(DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
392#else
f23ab3ef 393 andis. r0,r10,(DSISR_BAD_FAULT_32S|DSISR_DABRMATCH)@h
a68c31fc 394#endif
14cf11af
PM
395 bne 1f /* if not, try to put a PTE */
396 mfspr r4,SPRN_DAR /* into the hash table */
397 rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
4a3a224c 398BEGIN_MMU_FTR_SECTION
14cf11af 399 bl hash_page
4a3a224c 400END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
4ee7084e 4011: lwz r5,_DSISR(r11) /* get DSISR value */
14cf11af 402 mfspr r4,SPRN_DAR
a546498f 403 EXC_XFER_LITE(0x300, handle_page_fault)
14cf11af 404
14cf11af
PM
405
406/* Instruction access exception. */
407 . = 0x400
dd84c217 408 DO_KVM 0x400
14cf11af
PM
409InstructionAccess:
410 EXCEPTION_PROLOG
b4c001dc 411 andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
14cf11af
PM
412 beq 1f /* if so, try to put a PTE */
413 li r3,0 /* into the hash table */
414 mr r4,r12 /* SRR0 is fault address */
4a3a224c 415BEGIN_MMU_FTR_SECTION
14cf11af 416 bl hash_page
4a3a224c 417END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
14cf11af 4181: mr r4,r12
b4c001dc 419 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
a546498f 420 EXC_XFER_LITE(0x400, handle_page_fault)
14cf11af 421
14cf11af
PM
422/* External interrupt */
423 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
424
425/* Alignment exception */
426 . = 0x600
dd84c217 427 DO_KVM 0x600
14cf11af
PM
428Alignment:
429 EXCEPTION_PROLOG
430 mfspr r4,SPRN_DAR
431 stw r4,_DAR(r11)
432 mfspr r5,SPRN_DSISR
433 stw r5,_DSISR(r11)
434 addi r3,r1,STACK_FRAME_OVERHEAD
dc1c1ca3 435 EXC_XFER_EE(0x600, alignment_exception)
14cf11af
PM
436
437/* Program check exception */
dc1c1ca3 438 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
14cf11af
PM
439
440/* Floating-point unavailable */
441 . = 0x800
dd84c217 442 DO_KVM 0x800
14cf11af 443FPUnavailable:
aa42c69c
KP
444BEGIN_FTR_SECTION
445/*
446 * Certain Freescale cores don't have a FPU and treat fp instructions
447 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
448 */
449 b ProgramCheck
450END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
14cf11af 451 EXCEPTION_PROLOG
6f3d8e69
MN
452 beq 1f
453 bl load_up_fpu /* if from user, just load it up */
454 b fast_exception_return
4551: addi r3,r1,STACK_FRAME_OVERHEAD
8dad3f92 456 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
14cf11af
PM
457
458/* Decrementer */
459 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
460
dc1c1ca3
SR
461 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
462 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
14cf11af
PM
463
464/* System call */
465 . = 0xc00
dd84c217 466 DO_KVM 0xc00
14cf11af
PM
467SystemCall:
468 EXCEPTION_PROLOG
469 EXC_XFER_EE_LITE(0xc00, DoSyscall)
470
471/* Single step - not used on 601 */
dc1c1ca3
SR
472 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
473 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
14cf11af
PM
474
475/*
476 * The Altivec unavailable trap is at 0x0f20. Foo.
477 * We effectively remap it to 0x3000.
478 * We include an altivec unavailable exception vector even if
479 * not configured for Altivec, so that you can't panic a
480 * non-altivec kernel running on a machine with altivec just
481 * by executing an altivec instruction.
482 */
483 . = 0xf00
dd84c217 484 DO_KVM 0xf00
555d97ac 485 b PerformanceMonitor
14cf11af
PM
486
487 . = 0xf20
dd84c217 488 DO_KVM 0xf20
14cf11af
PM
489 b AltiVecUnavailable
490
14cf11af
PM
491/*
492 * Handle TLB miss for instruction on 603/603e.
493 * Note: we get an alternate set of r0 - r3 to use automatically.
494 */
495 . = 0x1000
496InstructionTLBMiss:
497/*
00fcb147 498 * r0: scratch
14cf11af
PM
499 * r1: linux style pte ( later becomes ppc hardware pte )
500 * r2: ptr to linux-style pte
501 * r3: scratch
502 */
14cf11af
PM
503 /* Get PTE (linux-style) and check access */
504 mfspr r3,SPRN_IMISS
a8a12199 505#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
8a13c4f9
KG
506 lis r1,PAGE_OFFSET@h /* check if kernel address */
507 cmplw 0,r1,r3
a8a12199 508#endif
93c4a162 509 mfspr r2, SPRN_SPRG_PGDIR
84de6ab0
CL
510#ifdef CONFIG_SWAP
511 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
512#else
451b3ec0 513 li r1,_PAGE_PRESENT | _PAGE_EXEC
84de6ab0 514#endif
a8a12199 515#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
8a13c4f9 516 bge- 112f
2c12393f
CL
517 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
518 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
a8a12199 519#endif
93c4a162 520112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
14cf11af
PM
521 lwz r2,0(r2) /* get pmd entry */
522 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
523 beq- InstructionAddressInvalid /* return if no mapping */
524 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
eb3436a0
KG
525 lwz r0,0(r2) /* get linux-style pte */
526 andc. r1,r1,r0 /* check access & ~permission */
14cf11af 527 bne- InstructionAddressInvalid /* return if access not permitted */
14cf11af 528 /* Convert linux-style PTE to low word of PPC-style PTE */
f342adca
CL
529 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
530 ori r1, r1, 0xe06 /* clear out reserved bits */
531 andc r1, r0, r1 /* PP = user? 1 : 0 */
345953cf
KG
532BEGIN_FTR_SECTION
533 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
534END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
14cf11af 535 mtspr SPRN_RPA,r1
14cf11af
PM
536 tlbli r3
537 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
538 mtcrf 0x80,r3
539 rfi
540InstructionAddressInvalid:
541 mfspr r3,SPRN_SRR1
542 rlwinm r1,r3,9,6,6 /* Get load/store bit */
543
544 addis r1,r1,0x2000
545 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
14cf11af
PM
546 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
547 or r2,r2,r1
548 mtspr SPRN_SRR1,r2
549 mfspr r1,SPRN_IMISS /* Get failing address */
550 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
551 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
552 xor r1,r1,r2
553 mtspr SPRN_DAR,r1 /* Set fault address */
554 mfmsr r0 /* Restore "normal" registers */
555 xoris r0,r0,MSR_TGPR>>16
556 mtcrf 0x80,r3 /* Restore CR0 */
557 mtmsr r0
558 b InstructionAccess
559
560/*
561 * Handle TLB miss for DATA Load operation on 603/603e
562 */
563 . = 0x1100
564DataLoadTLBMiss:
565/*
00fcb147 566 * r0: scratch
14cf11af
PM
567 * r1: linux style pte ( later becomes ppc hardware pte )
568 * r2: ptr to linux-style pte
569 * r3: scratch
570 */
14cf11af
PM
571 /* Get PTE (linux-style) and check access */
572 mfspr r3,SPRN_DMISS
8a13c4f9
KG
573 lis r1,PAGE_OFFSET@h /* check if kernel address */
574 cmplw 0,r1,r3
93c4a162 575 mfspr r2, SPRN_SPRG_PGDIR
84de6ab0
CL
576#ifdef CONFIG_SWAP
577 li r1, _PAGE_PRESENT | _PAGE_ACCESSED
578#else
451b3ec0 579 li r1, _PAGE_PRESENT
84de6ab0 580#endif
8a13c4f9 581 bge- 112f
2c12393f
CL
582 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
583 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
93c4a162 584112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
14cf11af
PM
585 lwz r2,0(r2) /* get pmd entry */
586 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
587 beq- DataAddressInvalid /* return if no mapping */
588 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
eb3436a0
KG
589 lwz r0,0(r2) /* get linux-style pte */
590 andc. r1,r1,r0 /* check access & ~permission */
14cf11af 591 bne- DataAddressInvalid /* return if access not permitted */
14cf11af
PM
592 /*
593 * NOTE! We are assuming this is not an SMP system, otherwise
594 * we would need to update the pte atomically with lwarx/stwcx.
595 */
14cf11af 596 /* Convert linux-style PTE to low word of PPC-style PTE */
f342adca 597 rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
f68e7927 598 rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
eb3436a0 599 rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
a4bd6a93 600 ori r1,r1,0xe04 /* clear out reserved bits */
f342adca 601 andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
345953cf
KG
602BEGIN_FTR_SECTION
603 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
604END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
14cf11af 605 mtspr SPRN_RPA,r1
2319f123
KG
606 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
607 mtcrf 0x80,r2
608BEGIN_MMU_FTR_SECTION
609 li r0,1
ee43eb78 610 mfspr r1,SPRN_SPRG_603_LRU
2319f123
KG
611 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
612 slw r0,r0,r2
613 xor r1,r0,r1
614 srw r0,r1,r2
ee43eb78 615 mtspr SPRN_SPRG_603_LRU,r1
2319f123
KG
616 mfspr r2,SPRN_SRR1
617 rlwimi r2,r0,31-14,14,14
618 mtspr SPRN_SRR1,r2
619END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
14cf11af 620 tlbld r3
14cf11af
PM
621 rfi
622DataAddressInvalid:
623 mfspr r3,SPRN_SRR1
624 rlwinm r1,r3,9,6,6 /* Get load/store bit */
625 addis r1,r1,0x2000
626 mtspr SPRN_DSISR,r1
14cf11af
PM
627 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
628 mtspr SPRN_SRR1,r2
629 mfspr r1,SPRN_DMISS /* Get failing address */
630 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
631 beq 20f /* Jump if big endian */
632 xori r1,r1,3
63320: mtspr SPRN_DAR,r1 /* Set fault address */
634 mfmsr r0 /* Restore "normal" registers */
635 xoris r0,r0,MSR_TGPR>>16
636 mtcrf 0x80,r3 /* Restore CR0 */
637 mtmsr r0
638 b DataAccess
639
640/*
641 * Handle TLB miss for DATA Store on 603/603e
642 */
643 . = 0x1200
644DataStoreTLBMiss:
645/*
00fcb147 646 * r0: scratch
14cf11af
PM
647 * r1: linux style pte ( later becomes ppc hardware pte )
648 * r2: ptr to linux-style pte
649 * r3: scratch
650 */
14cf11af
PM
651 /* Get PTE (linux-style) and check access */
652 mfspr r3,SPRN_DMISS
8a13c4f9
KG
653 lis r1,PAGE_OFFSET@h /* check if kernel address */
654 cmplw 0,r1,r3
93c4a162 655 mfspr r2, SPRN_SPRG_PGDIR
84de6ab0
CL
656#ifdef CONFIG_SWAP
657 li r1, _PAGE_RW | _PAGE_PRESENT | _PAGE_ACCESSED
658#else
659 li r1, _PAGE_RW | _PAGE_PRESENT
660#endif
8a13c4f9 661 bge- 112f
2c12393f
CL
662 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
663 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
93c4a162 664112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
14cf11af
PM
665 lwz r2,0(r2) /* get pmd entry */
666 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
667 beq- DataAddressInvalid /* return if no mapping */
668 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
eb3436a0
KG
669 lwz r0,0(r2) /* get linux-style pte */
670 andc. r1,r1,r0 /* check access & ~permission */
14cf11af 671 bne- DataAddressInvalid /* return if access not permitted */
14cf11af
PM
672 /*
673 * NOTE! We are assuming this is not an SMP system, otherwise
674 * we would need to update the pte atomically with lwarx/stwcx.
675 */
14cf11af 676 /* Convert linux-style PTE to low word of PPC-style PTE */
f342adca
CL
677 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
678 li r1,0xe06 /* clear out reserved bits & PP msb */
679 andc r1,r0,r1 /* PP = user? 1: 0 */
345953cf
KG
680BEGIN_FTR_SECTION
681 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
682END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
14cf11af 683 mtspr SPRN_RPA,r1
2319f123
KG
684 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
685 mtcrf 0x80,r2
686BEGIN_MMU_FTR_SECTION
687 li r0,1
ee43eb78 688 mfspr r1,SPRN_SPRG_603_LRU
2319f123
KG
689 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
690 slw r0,r0,r2
691 xor r1,r0,r1
692 srw r0,r1,r2
ee43eb78 693 mtspr SPRN_SPRG_603_LRU,r1
2319f123
KG
694 mfspr r2,SPRN_SRR1
695 rlwimi r2,r0,31-14,14,14
696 mtspr SPRN_SRR1,r2
697END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
14cf11af 698 tlbld r3
14cf11af
PM
699 rfi
700
701#ifndef CONFIG_ALTIVEC
dc1c1ca3 702#define altivec_assist_exception unknown_exception
14cf11af
PM
703#endif
704
dc1c1ca3 705 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
14cf11af 706 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
dc1c1ca3 707 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
dc1c1ca3 708 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
14cf11af 709 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
dc1c1ca3 710 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
dc1c1ca3
SR
711 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
712 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
713 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
714 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
715 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
716 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
717 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
14cf11af 718 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
dc1c1ca3
SR
719 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
720 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
721 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
722 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
723 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
724 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
725 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
726 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
727 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
728 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
729 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
730 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
731 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
732 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
30726013 733 EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_EE)
14cf11af
PM
734
735 . = 0x3000
736
737AltiVecUnavailable:
738 EXCEPTION_PROLOG
739#ifdef CONFIG_ALTIVEC
37f9ef55
BH
740 beq 1f
741 bl load_up_altivec /* if from user, just load it up */
742 b fast_exception_return
14cf11af 743#endif /* CONFIG_ALTIVEC */
37f9ef55 7441: addi r3,r1,STACK_FRAME_OVERHEAD
dc1c1ca3 745 EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
14cf11af 746
555d97ac
AF
747PerformanceMonitor:
748 EXCEPTION_PROLOG
749 addi r3,r1,STACK_FRAME_OVERHEAD
750 EXC_XFER_STD(0xf00, performance_monitor_exception)
751
14cf11af
PM
752
753/*
754 * This code is jumped to from the startup code to copy
ccdcef72 755 * the kernel image to physical address PHYSICAL_START.
14cf11af
PM
756 */
757relocate_kernel:
758 addis r9,r26,klimit@ha /* fetch klimit */
759 lwz r25,klimit@l(r9)
760 addis r25,r25,-KERNELBASE@h
ccdcef72 761 lis r3,PHYSICAL_START@h /* Destination base address */
14cf11af
PM
762 li r6,0 /* Destination offset */
763 li r5,0x4000 /* # bytes of memory to copy */
764 bl copy_and_flush /* copy the first 0x4000 bytes */
765 addi r0,r3,4f@l /* jump to the address of 4f */
766 mtctr r0 /* in copy and do the rest. */
767 bctr /* jump to the copy */
7684: mr r5,r25
769 bl copy_and_flush /* copy the rest */
770 b turn_on_mmu
771
772/*
773 * Copy routine used to copy the kernel to start at physical address 0
774 * and flush and invalidate the caches as needed.
775 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
776 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
777 */
748a7683 778_ENTRY(copy_and_flush)
14cf11af
PM
779 addi r5,r5,-4
780 addi r6,r6,-4
7dffb720 7814: li r0,L1_CACHE_BYTES/4
14cf11af
PM
782 mtctr r0
7833: addi r6,r6,4 /* copy a cache line */
784 lwzx r0,r6,r4
785 stwx r0,r6,r3
786 bdnz 3b
787 dcbst r6,r3 /* write it to memory */
788 sync
789 icbi r6,r3 /* flush the icache line */
790 cmplw 0,r6,r5
791 blt 4b
792 sync /* additional sync needed on g4 */
793 isync
794 addi r5,r5,4
795 addi r6,r6,4
796 blr
797
14cf11af 798#ifdef CONFIG_SMP
ee0339f2
JL
799 .globl __secondary_start_mpc86xx
800__secondary_start_mpc86xx:
801 mfspr r3, SPRN_PIR
802 stw r3, __secondary_hold_acknowledge@l(0)
803 mr r24, r3 /* cpu # */
804 b __secondary_start
805
14cf11af
PM
806 .globl __secondary_start_pmac_0
807__secondary_start_pmac_0:
808 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
809 li r24,0
810 b 1f
811 li r24,1
812 b 1f
813 li r24,2
814 b 1f
815 li r24,3
8161:
817 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
818 set to map the 0xf0000000 - 0xffffffff region */
819 mfmsr r0
820 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
821 SYNC
822 mtmsr r0
823 isync
824
825 .globl __secondary_start
826__secondary_start:
14cf11af
PM
827 /* Copy some CPU settings from CPU 0 */
828 bl __restore_cpu_setup
829
830 lis r3,-KERNELBASE@h
831 mr r4,r24
14cf11af 832 bl call_setup_cpu /* Call setup_cpu for this CPU */
d7cceda9 833#ifdef CONFIG_PPC_BOOK3S_32
14cf11af
PM
834 lis r3,-KERNELBASE@h
835 bl init_idle_6xx
d7cceda9 836#endif /* CONFIG_PPC_BOOK3S_32 */
14cf11af 837
4e67bfd7 838 /* get current's stack and current */
7c19c2e5
CL
839 lis r2,secondary_current@ha
840 tophys(r2,r2)
841 lwz r2,secondary_current@l(r2)
ed1cd6de
CL
842 tophys(r1,r2)
843 lwz r1,TASK_STACK(r1)
14cf11af
PM
844
845 /* stack */
846 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
847 li r0,0
848 tophys(r3,r1)
849 stw r0,0(r3)
850
851 /* load up the MMU */
852 bl load_up_mmu
853
854 /* ptr to phys current thread */
855 tophys(r4,r2)
856 addi r4,r4,THREAD /* phys address of our thread_struct */
ee43eb78 857 mtspr SPRN_SPRG_THREAD,r4
4622a2d4
CL
858 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
859 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
860 mtspr SPRN_SPRG_PGDIR, r4
14cf11af
PM
861
862 /* enable MMU and jump to start_secondary */
863 li r4,MSR_KERNEL
14cf11af
PM
864 lis r3,start_secondary@h
865 ori r3,r3,start_secondary@l
866 mtspr SPRN_SRR0,r3
867 mtspr SPRN_SRR1,r4
868 SYNC
869 RFI
870#endif /* CONFIG_SMP */
871
dd84c217
AG
872#ifdef CONFIG_KVM_BOOK3S_HANDLER
873#include "../kvm/book3s_rmhandlers.S"
874#endif
875
14cf11af
PM
876/*
877 * Those generic dummy functions are kept for CPUs not
d7cceda9 878 * included in CONFIG_PPC_BOOK3S_32
14cf11af 879 */
d7cceda9 880#if !defined(CONFIG_PPC_BOOK3S_32)
748a7683 881_ENTRY(__save_cpu_setup)
14cf11af 882 blr
748a7683 883_ENTRY(__restore_cpu_setup)
14cf11af 884 blr
d7cceda9 885#endif /* !defined(CONFIG_PPC_BOOK3S_32) */
14cf11af
PM
886
887
 888/*
 889 * Load stuff into the MMU. Intended to be called with
 890 * IR=0 and DR=0.
 891 *
 892 * Flushes the TLB, loads SDR1 (hash table base/size) from the _SDR1
 893 * variable, programs all 16 segment registers for kernel context 0
 894 * (user segments first, then kernel segments with Kp set), and loads
 895 * the BAT registers from the BATS array.  Clobbers r0, r3-r6 and ctr.
 896 */
 892load_up_mmu:
 893 sync /* Force all PTE updates to finish */
 894 isync
 895 tlbia /* Clear all TLB entries */
 896 sync /* wait for tlbia/tlbie to finish */
 897 TLBSYNC /* ... on all CPUs */
 898 /* Load the SDR1 register (hash table base & size) */
 899 lis r6,_SDR1@ha
 900 tophys(r6,r6)
 901 lwz r6,_SDR1@l(r6)
 902 mtspr SPRN_SDR1,r6
f342adca 903 li r0, NUM_USER_SEGMENTS /* load up user segment register values */
14cf11af 904 mtctr r0 /* for context 0 */
f342adca 905 li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
31ed2b13
CL
906#ifdef CONFIG_PPC_KUEP
 907 oris r3, r3, SR_NX@h /* Set Nx */
a68c31fc
CL
908#endif
 909#ifdef CONFIG_PPC_KUAP
 910 oris r3, r3, SR_KS@h /* Set Ks */
31ed2b13 911#endif
14cf11af
PM
912 li r4,0
 9133: mtsrin r3,r4
 914 addi r3,r3,0x111 /* increment VSID */
 915 addis r4,r4,0x1000 /* address of next segment */
 916 bdnz 3b
 /* Note: local label "3" is reused below for the kernel-segment loop. */
31ed2b13
CL
917 li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
 918 mtctr r0 /* for context 0 */
 /* Kernel segments: undo the user-mode Nx/Ks bits and set Kp instead. */
 919 rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
a68c31fc 920 rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
f342adca 921 oris r3, r3, SR_KP@h /* Kp = 1 */
31ed2b13
CL
9223: mtsrin r3, r4
 923 addi r3, r3, 0x111 /* increment VSID */
 924 addis r4, r4, 0x1000 /* address of next segment */
 925 bdnz 3b
187a0067 926
14cf11af
PM
927/* Load the BAT registers with the values set up by MMU_init.
 928 MMU_init takes care of whether we're on a 601 or not. */
 929 mfpvr r3
 930 srwi r3,r3,16
 931 cmpwi r3,1
 932 lis r3,BATS@ha
 933 addi r3,r3,BATS@l
 934 tophys(r3,r3)
 935 LOAD_BAT(0,r3,r4,r5)
 936 LOAD_BAT(1,r3,r4,r5)
 937 LOAD_BAT(2,r3,r4,r5)
 938 LOAD_BAT(3,r3,r4,r5)
7c03d653 939BEGIN_MMU_FTR_SECTION
ee0339f2
JL
940 LOAD_BAT(4,r3,r4,r5)
 941 LOAD_BAT(5,r3,r4,r5)
 942 LOAD_BAT(6,r3,r4,r5)
 943 LOAD_BAT(7,r3,r4,r5)
7c03d653 944END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
14cf11af
PM
945 blr
946
 947/*
 948 * This is where the main kernel code starts.
 949 *
 950 * Boot-CPU path: set r2 = init_task, SPRG_THREAD/SPRG_PGDIR, the
 951 * initial kernel stack, run early init (machine_init,
 952 * __save_cpu_setup, MMU_init), then drop translation to run
 953 * load_up_mmu and finally RFI into start_kernel with the MMU on.
 954 */
 950start_here:
 951 /* ptr to current */
 952 lis r2,init_task@h
 953 ori r2,r2,init_task@l
 954 /* Set up for using our exception vectors */
 955 /* ptr to phys current thread */
 956 tophys(r4,r2)
 957 addi r4,r4,THREAD /* init task's THREAD */
ee43eb78 958 mtspr SPRN_SPRG_THREAD,r4
4622a2d4
CL
959 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
 960 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
 961 mtspr SPRN_SPRG_PGDIR, r4
14cf11af
PM
962
 963 /* stack */
 964 lis r1,init_thread_union@ha
 965 addi r1,r1,init_thread_union@l
 966 li r0,0
 967 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
 968/*
187a0067 969 * Do early platform-specific initialization,
14cf11af
PM
970 * and set up the MMU.
 971 */
2edb16ef
CL
972#ifdef CONFIG_KASAN
 973 bl kasan_early_init
 974#endif
6dece0eb
SW
975 li r3,0
 /* r4 = r31, saved by earlier boot code (not visible here) --
  * presumably the boot/device-tree pointer; verify against _start. */
 976 mr r4,r31
14cf11af 977 bl machine_init
22c841c9 978 bl __save_cpu_setup
14cf11af
PM
979 bl MMU_init
 980
14cf11af
PM
981/*
 982 * Go back to running unmapped so we can load up new values
 983 * for SDR1 (hash table pointer) and the segment registers
 984 * and change to using our exception vectors.
 985 */
 986 lis r4,2f@h
 987 ori r4,r4,2f@l
 988 tophys(r4,r4)
 989 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
14cf11af
PM
990 mtspr SPRN_SRR0,r4
 991 mtspr SPRN_SRR1,r3
 992 SYNC
 993 RFI
 994/* Load up the kernel context */
 9952: bl load_up_mmu
 996
 997#ifdef CONFIG_BDI_SWITCH
 998 /* Add helper information for the Abatron bdiGDB debugger.
 999 * We do this here because we know the mmu is disabled, and
 1000 * will be enabled for real in just a few instructions.
 1001 */
 1002 lis r5, abatron_pteptrs@h
 1003 ori r5, r5, abatron_pteptrs@l
 1004 stw r5, 0xf0(r0) /* This must match your Abatron config */
 1005 lis r6, swapper_pg_dir@h
 1006 ori r6, r6, swapper_pg_dir@l
 1007 tophys(r5, r5)
 1008 stw r6, 0(r5)
 1009#endif /* CONFIG_BDI_SWITCH */
 1010
 1011/* Now turn on the MMU for real! */
 1012 li r4,MSR_KERNEL
14cf11af
PM
1013 lis r3,start_kernel@h
 1014 ori r3,r3,start_kernel@l
 1015 mtspr SPRN_SRR0,r3
 1016 mtspr SPRN_SRR1,r4
 1017 SYNC
 1018 RFI
1019
 1020/*
5e696617
BH
1021 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 1022 *
14cf11af
PM
1023 * Set up the segment registers for a new context.
 1024 *
 1025 * r4 = next; reads next->context.id and next->pgd.  A negative
 1026 * context id is a bug and traps at label 4: below.  Also updates
 1027 * SPRN_SPRG_PGDIR with the physical address of the new pgd.
 1028 */
5e696617
BH
1025_ENTRY(switch_mmu_context)
 1026 lwz r3,MMCONTEXTID(r4)
 1027 cmpwi cr0,r3,0
 1028 blt- 4f
14cf11af
PM
1029 mulli r3,r3,897 /* multiply context by skew factor */
 1030 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
31ed2b13
CL
1031#ifdef CONFIG_PPC_KUEP
 1032 oris r3, r3, SR_NX@h /* Set Nx */
a68c31fc
CL
1033#endif
 1034#ifdef CONFIG_PPC_KUAP
 1035 oris r3, r3, SR_KS@h /* Set Ks */
31ed2b13 1036#endif
14cf11af
PM
1037 li r0,NUM_USER_SEGMENTS
 1038 mtctr r0
 1039
93c4a162 1040 lwz r4, MM_PGD(r4)
14cf11af
PM
1041#ifdef CONFIG_BDI_SWITCH
 1042 /* Context switch the PTE pointer for the Abatron BDI2000.
 1043 * The PGDIR is passed as second argument.
 1044 */
40058337
CL
1045 lis r5, abatron_pteptrs@ha
 1046 stw r4, abatron_pteptrs@l + 0x4(r5)
14cf11af 1047#endif
93c4a162
CL
1048 tophys(r4, r4)
 1049 mtspr SPRN_SPRG_PGDIR, r4
14cf11af
PM
1050 li r4,0
 1051 isync
 /* Write the new VSID into each of the NUM_USER_SEGMENTS user SRs. */
 10523:
14cf11af
PM
1053 mtsrin r3,r4
 1054 addi r3,r3,0x111 /* next VSID */
 1055 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
 1056 addis r4,r4,0x1000 /* address of next segment */
 1057 bdnz 3b
 1058 sync
 1059 isync
 1060 blr
5e696617
BH
/* Negative context id: report a BUG and return without touching SRs. */
 10614: trap
 1062 EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
 1063 blr
9445aa1a 1064EXPORT_SYMBOL(switch_mmu_context)
14cf11af
PM
1065
 1066/*
 1067 * An undocumented "feature" of 604e requires that the v bit
 1068 * be cleared before changing BAT values.
 1069 *
 1070 * Also, newer IBM firmware does not clear bat3 and 4 so
 1071 * this makes sure it's done.
 1072 * -- Cort
 1073 */
 1074clear_bats:
 /* r10 = 0 is written to every BAT register to clear the valid bits. */
 1075 li r10,0
 1076 mfspr r9,SPRN_PVR
 1077 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
 /* On 601 (PVR 1) the DBAT writes are skipped -- only IBATs are cleared. */
 1078 cmpwi r9, 1
 1079 beq 1f
 1080
 1081 mtspr SPRN_DBAT0U,r10
 1082 mtspr SPRN_DBAT0L,r10
 1083 mtspr SPRN_DBAT1U,r10
 1084 mtspr SPRN_DBAT1L,r10
 1085 mtspr SPRN_DBAT2U,r10
 1086 mtspr SPRN_DBAT2L,r10
 1087 mtspr SPRN_DBAT3U,r10
 1088 mtspr SPRN_DBAT3L,r10
 10891:
 1090 mtspr SPRN_IBAT0U,r10
 1091 mtspr SPRN_IBAT0L,r10
 1092 mtspr SPRN_IBAT1U,r10
 1093 mtspr SPRN_IBAT1L,r10
 1094 mtspr SPRN_IBAT2U,r10
 1095 mtspr SPRN_IBAT2L,r10
 1096 mtspr SPRN_IBAT3U,r10
 1097 mtspr SPRN_IBAT3L,r10
7c03d653 1098BEGIN_MMU_FTR_SECTION
14cf11af
PM
1099 /* Here's a tweak: at this point, CPU setup has
 1100 * not been called yet, so HIGH_BAT_EN may not be
 1101 * set in HID0 for the 745x processors. However, it
 1102 * seems that doesn't affect our ability to actually
 1103 * write to these SPRs.
 1104 */
 1105 mtspr SPRN_DBAT4U,r10
 1106 mtspr SPRN_DBAT4L,r10
 1107 mtspr SPRN_DBAT5U,r10
 1108 mtspr SPRN_DBAT5L,r10
 1109 mtspr SPRN_DBAT6U,r10
 1110 mtspr SPRN_DBAT6L,r10
 1111 mtspr SPRN_DBAT7U,r10
 1112 mtspr SPRN_DBAT7L,r10
 1113 mtspr SPRN_IBAT4U,r10
 1114 mtspr SPRN_IBAT4L,r10
 1115 mtspr SPRN_IBAT5U,r10
 1116 mtspr SPRN_IBAT5L,r10
 1117 mtspr SPRN_IBAT6U,r10
 1118 mtspr SPRN_IBAT6L,r10
 1119 mtspr SPRN_IBAT7U,r10
 1120 mtspr SPRN_IBAT7L,r10
7c03d653 1121END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
14cf11af
PM
1122 blr
1123
5e04ae85
CL
/*
 * update_bats: reload all BAT registers from the BATS array while
 * running translation-off.  Saves MSR in r6 and LR in r7, RFIs to the
 * physical copy of 1: with IR/DR (and RI/EE) cleared, clears then
 * reloads the BATs, and RFIs back to the caller with the original MSR.
 * Clobbers r0, r3-r5.
 */
 1124_ENTRY(update_bats)
 1125 lis r4, 1f@h
 1126 ori r4, r4, 1f@l
 1127 tophys(r4, r4)
 1128 mfmsr r6
 1129 mflr r7
 1130 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
 /* Drop RI and EE first so the transition is not interrupted. */
 1131 rlwinm r0, r6, 0, ~MSR_RI
 1132 rlwinm r0, r0, 0, ~MSR_EE
 1133 mtmsr r0
 1134 mtspr SPRN_SRR0, r4
 1135 mtspr SPRN_SRR1, r3
 1136 SYNC
 1137 RFI
 11381: bl clear_bats
 1139 lis r3, BATS@ha
 1140 addi r3, r3, BATS@l
 1141 tophys(r3, r3)
 1142 LOAD_BAT(0, r3, r4, r5)
 1143 LOAD_BAT(1, r3, r4, r5)
 1144 LOAD_BAT(2, r3, r4, r5)
 1145 LOAD_BAT(3, r3, r4, r5)
 1146BEGIN_MMU_FTR_SECTION
 1147 LOAD_BAT(4, r3, r4, r5)
 1148 LOAD_BAT(5, r3, r4, r5)
 1149 LOAD_BAT(6, r3, r4, r5)
 1150 LOAD_BAT(7, r3, r4, r5)
 1151END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 1152 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
 1153 mtmsr r3
 /* Return to the caller (r7 = saved LR) with the saved MSR (r6). */
 1154 mtspr SPRN_SRR0, r7
 1155 mtspr SPRN_SRR1, r6
 1156 SYNC
 1157 RFI
1158
14cf11af
PM
/*
 * flush_tlbs: issue tlbie for every 4KB page of the low 4MB
 * (r10 counts down from 0x400000 to 0x1000), then sync.
 * NOTE(review): presumably 4MB of EAs indexes every TLB congruence
 * class on the supported cores -- confirm per-CPU TLB geometry.
 * Clobbers r10.
 */
 1159flush_tlbs:
 1160 lis r10, 0x40
 11611: addic. r10, r10, -0x1000
 1162 tlbie r10
9acd57ca 1163 bgt 1b
14cf11af
PM
1164 sync
 1165 blr
1166
/*
 * mmu_off: turn address translation off and continue at __after_mmu_off.
 * Expects r3 to hold the physical address of _start (set by earlier
 * boot code -- not visible here; confirm against the file head).
 * Returns immediately via blr if IR/DR are already clear.
 */
 1167mmu_off:
 1168 addi r4, r3, __after_mmu_off - _start
 1169 mfmsr r3
 1170 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
 1171 beqlr
 /* Clear exactly the IR/DR bits that were set, then RFI to r4. */
 1172 andc r3,r3,r0
 1173 mtspr SPRN_SRR0,r4
 1174 mtspr SPRN_SRR1,r3
 1175 sync
 1176 RFI
1177
14cf11af 1178/*
4a5cbf17
BH
1179 * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
 1180 * (we keep one for debugging) and on others, we use one 256M BAT.
14cf11af
PM
1181 */
 1182initial_bats:
 /* r11 = kernel virtual base; branch to 4: handles non-601 CPUs. */
ccdcef72 1183 lis r11,PAGE_OFFSET@h
14cf11af
PM
1184 mfspr r9,SPRN_PVR
 1185 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
 1186 cmpwi 0,r9,1
 1187 bne 4f
 1188 ori r11,r11,4 /* set up BAT registers for 601 */
 1189 li r8,0x7f /* valid, block length = 8MB */
14cf11af
PM
1190 mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */
 1191 mtspr SPRN_IBAT0L,r8 /* lower BAT register */
4a5cbf17
BH
/* Map the next two 8MB blocks contiguously via IBAT1/IBAT2. */
 1192 addis r11,r11,0x800000@h
 1193 addis r8,r8,0x800000@h
 1194 mtspr SPRN_IBAT1U,r11
 1195 mtspr SPRN_IBAT1L,r8
 1196 addis r11,r11,0x800000@h
 1197 addis r8,r8,0x800000@h
 1198 mtspr SPRN_IBAT2U,r11
 1199 mtspr SPRN_IBAT2L,r8
14cf11af
PM
1200 isync
 1201 blr
14cf11af
PM
1202
 /* Non-601: one 256MB BAT pair, identical IBAT0/DBAT0 mapping. */
 12034: tophys(r8,r11)
 1204#ifdef CONFIG_SMP
 1205 ori r8,r8,0x12 /* R/W access, M=1 */
 1206#else
 1207 ori r8,r8,2 /* R/W access */
 1208#endif /* CONFIG_SMP */
14cf11af 1209 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
14cf11af 1210
14cf11af
PM
1211 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
 1212 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
 1213 mtspr SPRN_IBAT0L,r8
 1214 mtspr SPRN_IBAT0U,r11
 1215 isync
 1216 blr
1217
14cf11af 1218
f21f49ea 1219#ifdef CONFIG_BOOTX_TEXT
51d3082f
BH
1220setup_disp_bat:
 1221 /*
 1222 * setup the display bat prepared for us in prom.c
 1223 *
 1224 * disp_BAT holds the upper (r11) / lower (r8) BAT pair; a zero
 1225 * upper word means there is nothing to map, so just return.
 1226 * Clobbers r3 (via reloc_offset), r8, r9, r11.
 1227 */
 1224 mflr r8
 1225 bl reloc_offset
 1226 mtlr r8
 1227 addis r8,r3,disp_BAT@ha
 1228 addi r8,r8,disp_BAT@l
 1229 cmpwi cr0,r8,0
 1230 beqlr
 1231 lwz r11,0(r8)
 1232 lwz r8,4(r8)
 1233 mfspr r9,SPRN_PVR
 1234 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
 1235 cmpwi 0,r9,1
 1236 beq 1f
 1237 mtspr SPRN_DBAT3L,r8
 1238 mtspr SPRN_DBAT3U,r11
 1239 blr
 /* 601: the BAT is written through the IBAT3 registers instead. */
 12401: mtspr SPRN_IBAT3L,r8
 1241 mtspr SPRN_IBAT3U,r11
 1242 blr
f21f49ea 1243#endif /* CONFIG_BOOTX_TEXT */
51d3082f 1244
c374e00e
SW
 1245#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
/*
 * setup_cpm_bat: map 1MB at 0xf0000000 through DBAT1 for early CPM
 * debug IO.  0x002a is the same uncached/guarded/read-write lower-BAT
 * encoding used by setup_usbgecko_bat below.  Clobbers r8, r11.
 */
 1246setup_cpm_bat:
 1247 lis r8, 0xf000
 1248 ori r8, r8, 0x002a
 1249 mtspr SPRN_DBAT1L, r8
 1250
 1251 lis r11, 0xf000
 1252 ori r11, r11, (BL_1M << 2) | 2
 1253 mtspr SPRN_DBAT1U, r11
 1254
 1255 blr
 1256#endif
1257
d1d56f8c
AH
 1258#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
/*
 * setup_usbgecko_bat: map the platform IO window through DBAT1 -- a
 * 128KB uncached/guarded mapping at virtual 0xfffe0000.  The physical
 * base depends on the platform (GameCube vs Wii).  Clobbers r8, r11.
 */
 1259setup_usbgecko_bat:
 1260 /* prepare a BAT for early io */
 1261#if defined(CONFIG_GAMECUBE)
 1262 lis r8, 0x0c00
 1263#elif defined(CONFIG_WII)
 1264 lis r8, 0x0d00
 1265#else
 1266#error Invalid platform for USB Gecko based early debugging.
 1267#endif
 1268 /*
 1269 * The virtual address used must match the virtual address
 1270 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
 1271 */
 1272 lis r11, 0xfffe /* top 128K */
 1273 ori r8, r8, 0x002a /* uncached, guarded, rw */
 1274 ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */
 1275 mtspr SPRN_DBAT1L, r8
 1276 mtspr SPRN_DBAT1U, r11
 1277 blr
 1278#endif
1279
14cf11af
PM
 1280#ifdef CONFIG_8260
 1281/* Jump into the system reset for the rom.
 1282 * We first disable the MMU, and then jump to the ROM reset address.
 1283 *
 1284 * r3 is the board info structure, r4 is the location for starting.
 1285 * I use this for building a small kernel that can load other kernels,
 1286 * rather than trying to write or rely on a rom monitor that can tftp load.
 1287 */
 1288 .globl m8260_gorom
 1289m8260_gorom:
 /* Mask external interrupts before touching HID0 and the MMU. */
 1290 mfmsr r0
 1291 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
 1292 sync
 1293 mtmsr r0
 1294 sync
 /* Disable both instruction and data caches via HID0. */
 1295 mfspr r11, SPRN_HID0
 1296 lis r10, 0
 1297 ori r10,r10,HID0_ICE|HID0_DCE
 1298 andc r11, r11, r10
 1299 mtspr SPRN_HID0, r11
 1300 isync
 /* rfi to the physical copy of 2: with only ME|RI set (IR/DR off). */
 1301 li r5, MSR_ME|MSR_RI
 1302 lis r6,2f@h
 1303 addis r6,r6,-KERNELBASE@h
 1304 ori r6,r6,2f@l
 1305 mtspr SPRN_SRR0,r6
 1306 mtspr SPRN_SRR1,r5
 1307 isync
 1308 sync
 1309 rfi
 13102:
 /* Finally branch to the ROM entry address supplied in r4. */
 1311 mtlr r4
 1312 blr
 1313#endif
1314
1315
 1316/*
 1317 * We put a few things here that have to be page-aligned.
 1318 * This stuff goes at the beginning of the data segment,
 1319 * which is page-aligned.
 1320 */
 1321 .data
 1322 .globl sdata
 1323sdata:
 /* empty_zero_page: one zero-filled 4096-byte page, exported for
  * generic kernel use. */
 1324 .globl empty_zero_page
 1325empty_zero_page:
 1326 .space 4096
9445aa1a 1327EXPORT_SYMBOL(empty_zero_page)
14cf11af
PM
1328
 /* swapper_pg_dir: the kernel's root page table (PGD_TABLE_SIZE bytes);
  * its physical address is loaded into SPRN_SPRG_PGDIR at boot. */
 1329 .globl swapper_pg_dir
 1330swapper_pg_dir:
bee86f14 1331 .space PGD_TABLE_SIZE
14cf11af 1332
14cf11af
PM
1333/* Room for two PTE pointers, usually the kernel and current user pointers
 1334 * to their respective root page table.
 1335 */
 1336abatron_pteptrs:
 1337 .space 8