powerpc/32s: Setup the early hash table at all time.
[linux-block.git] / arch / powerpc / kernel / head_32.S
CommitLineData
2874c5fd 1/* SPDX-License-Identifier: GPL-2.0-or-later */
14cf11af
PM
2/*
3 * PowerPC version
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14cf11af
PM
13 *
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
14cf11af
PM
17 */
18
e7039845 19#include <linux/init.h>
65fddcfc 20#include <linux/pgtable.h>
b3b8dc6c 21#include <asm/reg.h>
14cf11af
PM
22#include <asm/page.h>
23#include <asm/mmu.h>
14cf11af
PM
24#include <asm/cputable.h>
25#include <asm/cache.h>
26#include <asm/thread_info.h>
27#include <asm/ppc_asm.h>
28#include <asm/asm-offsets.h>
ec2b36b9 29#include <asm/ptrace.h>
5e696617 30#include <asm/bug.h>
dd84c217 31#include <asm/kvm_book3s_asm.h>
9445aa1a 32#include <asm/export.h>
2c86cd18 33#include <asm/feature-fixups.h>
14cf11af 34
8a23fdec
CL
35#include "head_32.h"
36
14cf11af
PM
/*
 * LOAD_BAT(n, reg, RA, RB): program IBAT/DBAT pair `n` from the BAT
 * table pointed to by `reg` (four words per entry: IBATU, IBATL,
 * DBATU, DBATL, i.e. 16 bytes per BAT index).  The upper (U) words
 * are written with 0 first so the BAT is invalid while the pair is
 * being updated.  Clobbers RA and RB.
 */
37#define LOAD_BAT(n, reg, RA, RB) \
38 /* see the comment for clear_bats() -- Cort */ \
39 li RA,0; \
40 mtspr SPRN_IBAT##n##U,RA; \
41 mtspr SPRN_DBAT##n##U,RA; \
42 lwz RA,(n*16)+0(reg); \
43 lwz RB,(n*16)+4(reg); \
44 mtspr SPRN_IBAT##n##U,RA; \
45 mtspr SPRN_IBAT##n##L,RB; \
14cf11af
PM
46 lwz RA,(n*16)+8(reg); \
47 lwz RB,(n*16)+12(reg); \
48 mtspr SPRN_DBAT##n##U,RA; \
39097b9c 49 mtspr SPRN_DBAT##n##L,RB
14cf11af 50
e7039845 51 __HEAD
b3b8dc6c
PM
52 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
53 .stabs "head_32.S",N_SO,0,0,0f
14cf11af 540:
748a7683 55_ENTRY(_stext);
14cf11af
PM
56
57/*
58 * _start is defined this way because the XCOFF loader in the OpenFirmware
59 * on the powermac expects the entry point to be a procedure descriptor.
60 */
748a7683 61_ENTRY(_start);
14cf11af
PM
62 /*
63 * These are here for legacy reasons, the kernel used to
64 * need to look like a coff function entry for the pmac
65 * but we're always started by some kind of bootloader now.
66 * -- Cort
67 */
68 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
69 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
70 nop
71
72/* PMAC
73 * Enter here with the kernel text, data and bss loaded starting at
74 * 0, running with virtual == physical mapping.
75 * r5 points to the prom entry point (the client interface handler
76 * address). Address translation is turned on, with the prom
77 * managing the hash table. Interrupts are disabled. The stack
78 * pointer (r1) points to just below the end of the half-meg region
79 * from 0x380000 - 0x400000, which is mapped in already.
80 *
81 * If we are booted from MacOS via BootX, we enter with the kernel
82 * image loaded somewhere, and the following values in registers:
83 * r3: 'BooX' (0x426f6f58)
84 * r4: virtual address of boot_infos_t
85 * r5: 0
86 *
14cf11af
PM
87 * PREP
88 * This is jumped to on prep systems right after the kernel is relocated
89 * to its proper place in memory by the boot loader. The expected layout
90 * of the regs is:
91 * r3: ptr to residual data
92 * r4: initrd_start or if no initrd then 0
93 * r5: initrd_end - unused if r4 is 0
94 * r6: Start of command line string
95 * r7: End of command line string
96 *
97 * This just gets a minimal mmu environment setup so we can call
98 * start_here() to do the real work.
99 * -- Cort
100 */
101
102 .globl __start
103__start:
104/*
105 * We have to do any OF calls before we map ourselves to KERNELBASE,
106 * because OF may have I/O devices mapped into that area
107 * (particularly on CHRP).
108 */
9b6b563c
PM
109 cmpwi 0,r5,0
110 beq 1f
2bda347b 111
28794d34 112#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
2bda347b
BH
113 /* find out where we are now */
114 bcl 20,31,$+4
1150: mflr r8 /* r8 = runtime addr here */
116 addis r8,r8,(_stext - 0b)@ha
117 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
9b6b563c 118 bl prom_init
28794d34
BH
119#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
120
121 /* We never return. We also hit that trap if trying to boot
122 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
9b6b563c
PM
123 trap
124
d7f39454
BH
125/*
126 * Check for BootX signature when supporting PowerMac and branch to
127 * appropriate trampoline if it's present
128 */
129#ifdef CONFIG_PPC_PMAC
1301: lis r31,0x426f
131 ori r31,r31,0x6f58
132 cmpw 0,r3,r31
133 bne 1f
134 bl bootx_init
135 trap
136#endif /* CONFIG_PPC_PMAC */
137
6dece0eb 1381: mr r31,r3 /* save device tree ptr */
14cf11af
PM
139 li r24,0 /* cpu # */
140
141/*
142 * early_init() does the early machine identification and does
143 * the necessary low-level setup and clears the BSS
144 * -- Cort <cort@fsmlabs.com>
145 */
146 bl early_init
147
14cf11af
PM
148/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
149 * the physical address we are running at, returned by early_init()
150 */
151 bl mmu_off
152__after_mmu_off:
14cf11af
PM
153 bl clear_bats
154 bl flush_tlbs
155
156 bl initial_bats
215b8237 157 bl load_segment_registers
69a1593a 158BEGIN_MMU_FTR_SECTION
215b8237 159 bl early_hash_table
69a1593a 160END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
f21f49ea 161#if defined(CONFIG_BOOTX_TEXT)
51d3082f
BH
162 bl setup_disp_bat
163#endif
c374e00e
SW
164#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
165 bl setup_cpm_bat
166#endif
d1d56f8c
AH
167#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
168 bl setup_usbgecko_bat
169#endif
14cf11af
PM
170
171/*
172 * Call setup_cpu for CPU 0 and initialize 6xx Idle
173 */
174 bl reloc_offset
175 li r24,0 /* cpu# */
176 bl call_setup_cpu /* Call setup_cpu for this CPU */
d7cceda9 177#ifdef CONFIG_PPC_BOOK3S_32
14cf11af
PM
178 bl reloc_offset
179 bl init_idle_6xx
d7cceda9 180#endif /* CONFIG_PPC_BOOK3S_32 */
14cf11af
PM
181
182
14cf11af
PM
183/*
184 * We need to run with _start at physical address 0.
185 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
186 * the exception vectors at 0 (and therefore this copy
187 * overwrites OF's exception vectors with our own).
9b6b563c 188 * The MMU is off at this point.
14cf11af
PM
189 */
190 bl reloc_offset
191 mr r26,r3
192 addis r4,r3,KERNELBASE@h /* current address of _start */
ccdcef72
DF
193 lis r5,PHYSICAL_START@h
194 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
14cf11af 195 bne relocate_kernel
14cf11af
PM
196/*
197 * we now have the 1st 16M of ram mapped with the bats.
198 * prep needs the mmu to be turned on here, but pmac already has it on.
199 * this shouldn't bother the pmac since it just gets turned on again
200 * as we jump to our code at KERNELBASE. -- Cort
201 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
202 * off, and in other cases, we now turn it off before changing BATs above.
203 */
/*
 * turn_on_mmu: enable address translation and jump to start_here.
 * SRR1 is loaded with the current MSR plus IR|DR|RI and SRR0 with
 * the (virtual) address of start_here, so the RFI turns the MMU on
 * atomically with the branch.  Clobbers r0.
 */
204turn_on_mmu:
205 mfmsr r0
215b8237 206 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
14cf11af
PM
207 mtspr SPRN_SRR1,r0
208 lis r0,start_here@h
209 ori r0,r0,start_here@l
210 mtspr SPRN_SRR0,r0
14cf11af
PM
211 RFI /* enables MMU */
212
213/*
214 * We need __secondary_hold as a place to hold the other cpus on
215 * an SMP machine, even when we are running a UP kernel.
216 */
/*
 * __secondary_hold: parking loop for secondary CPUs, placed at the
 * fixed offset 0xc0 expected by the prep bootloader.
 *
 * Handshake (visible below): the secondary writes its cpu # (r3)
 * into __secondary_hold_acknowledge to tell the master it arrived,
 * then spins reading physical address 0 until the master stores
 * this CPU's number there, at which point it branches to
 * __secondary_start with r24 = cpu #.  On !SMP kernels the CPU
 * just parks forever (b .).
 */
217 . = 0xc0 /* for prep bootloader */
218 li r3,1 /* MTX only has 1 cpu */
219 .globl __secondary_hold
220__secondary_hold:
221 /* tell the master we're here */
bbd0abda 222 stw r3,__secondary_hold_acknowledge@l(0)
14cf11af
PM
223#ifdef CONFIG_SMP
224100: lwz r4,0(0)
225 /* wait until we're told to start */
226 cmpw 0,r4,r3
227 bne 100b
228 /* our cpu # was at addr 0 - go */
229 mr r24,r3 /* cpu # */
230 b __secondary_start
231#else
232 b .
233#endif /* CONFIG_SMP */
234
bbd0abda
PM
/* Release/ack words polled by the master CPU; -1 = no ack yet. */
235 .globl __secondary_hold_spinloop
236__secondary_hold_spinloop:
237 .long 0
238 .globl __secondary_hold_acknowledge
239__secondary_hold_acknowledge:
240 .long -1
241
14cf11af
PM
242/* System reset */
243/* core99 pmac starts the secondary here by changing the vector, and
dc1c1ca3 244 putting it back to what it was (unknown_exception) when done. */
dc1c1ca3 245 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
14cf11af
PM
246
247/* Machine check */
248/*
249 * On CHRP, this is complicated by the fact that we could get a
250 * machine check inside RTAS, and we have no guarantee that certain
251 * critical registers will have the values we expect. The set of
252 * registers that might have bad values includes all the GPRs
253 * and all the BATs. We indicate that we are in RTAS by putting
254 * a non-zero value, the address of the exception frame to use,
0df977ea
CL
255 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
256 * and uses its value if it is non-zero.
14cf11af
PM
257 * (Other exception handlers assume that r1 is a valid kernel stack
258 * pointer when we take an exception from supervisor mode.)
259 * -- paulus.
260 */
261 . = 0x200
dd84c217 262 DO_KVM 0x200
cd08f109 263MachineCheck:
1f1c4d01 264 EXCEPTION_PROLOG_0
14cf11af 265#ifdef CONFIG_PPC_CHRP
0df977ea
CL
266 mfspr r11, SPRN_SPRG_THREAD
267 lwz r11, RTAS_SP(r11)
268 cmpwi cr1, r11, 0
269 bne cr1, 7f
14cf11af 270#endif /* CONFIG_PPC_CHRP */
cd08f109 271 EXCEPTION_PROLOG_1 for_rtas=1
14cf11af
PM
2727: EXCEPTION_PROLOG_2
273 addi r3,r1,STACK_FRAME_OVERHEAD
274#ifdef CONFIG_PPC_CHRP
232ca1ee
CL
275#ifdef CONFIG_VMAP_STACK
276 mfspr r4, SPRN_SPRG_THREAD
277 tovirt(r4, r4)
278 lwz r4, RTAS_SP(r4)
279 cmpwi cr1, r4, 0
14cf11af 280#endif
232ca1ee 281 beq cr1, machine_check_tramp
32746dfe 282 twi 31, 0, 0
232ca1ee
CL
283#else
284 b machine_check_tramp
14cf11af
PM
285#endif
286
287/* Data access exception. */
288 . = 0x300
dd84c217 289 DO_KVM 0x300
14cf11af 290DataAccess:
232ca1ee
CL
291#ifdef CONFIG_VMAP_STACK
292 mtspr SPRN_SPRG_SCRATCH0,r10
293 mfspr r10, SPRN_SPRG_THREAD
294BEGIN_MMU_FTR_SECTION
295 stw r11, THR11(r10)
296 mfspr r10, SPRN_DSISR
297 mfcr r11
298#ifdef CONFIG_PPC_KUAP
299 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
300#else
301 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
302#endif
303 mfspr r10, SPRN_SPRG_THREAD
304 beq hash_page_dsi
305.Lhash_page_dsi_cont:
306 mtcr r11
307 lwz r11, THR11(r10)
308END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
309 mtspr SPRN_SPRG_SCRATCH1,r11
310 mfspr r11, SPRN_DAR
311 stw r11, DAR(r10)
312 mfspr r11, SPRN_DSISR
313 stw r11, DSISR(r10)
314 mfspr r11, SPRN_SRR0
315 stw r11, SRR0(r10)
316 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
317 stw r11, SRR1(r10)
318 mfcr r10
319 andi. r11, r11, MSR_PR
320
321 EXCEPTION_PROLOG_1
322 b handle_page_fault_tramp_1
323#else /* CONFIG_VMAP_STACK */
cd08f109 324 EXCEPTION_PROLOG handle_dar_dsisr=1
2e15001e
CL
325 get_and_save_dar_dsisr_on_stack r4, r5, r11
326BEGIN_MMU_FTR_SECTION
a68c31fc 327#ifdef CONFIG_PPC_KUAP
2e15001e 328 andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
a68c31fc 329#else
2e15001e 330 andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
a68c31fc 331#endif
2e15001e 332 bne handle_page_fault_tramp_2 /* if not, try to put a PTE */
40bb0e90 333 rlwinm r3, r5, 32 - 15, 21, 21 /* DSISR_STORE -> _PAGE_RW */
14cf11af 334 bl hash_page
2e15001e
CL
335 b handle_page_fault_tramp_1
336FTR_SECTION_ELSE
337 b handle_page_fault_tramp_2
338ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
232ca1ee 339#endif /* CONFIG_VMAP_STACK */
14cf11af
PM
340
341/* Instruction access exception. */
342 . = 0x400
dd84c217 343 DO_KVM 0x400
14cf11af 344InstructionAccess:
232ca1ee
CL
345#ifdef CONFIG_VMAP_STACK
346 mtspr SPRN_SPRG_SCRATCH0,r10
347 mtspr SPRN_SPRG_SCRATCH1,r11
348 mfspr r10, SPRN_SPRG_THREAD
349 mfspr r11, SPRN_SRR0
350 stw r11, SRR0(r10)
351 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
352 stw r11, SRR1(r10)
353 mfcr r10
354BEGIN_MMU_FTR_SECTION
355 andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
356 bne hash_page_isi
357.Lhash_page_isi_cont:
358 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
359END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
360 andi. r11, r11, MSR_PR
361
362 EXCEPTION_PROLOG_1
363 EXCEPTION_PROLOG_2
364#else /* CONFIG_VMAP_STACK */
14cf11af 365 EXCEPTION_PROLOG
b4c001dc 366 andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
14cf11af
PM
367 beq 1f /* if so, try to put a PTE */
368 li r3,0 /* into the hash table */
369 mr r4,r12 /* SRR0 is fault address */
4a3a224c 370BEGIN_MMU_FTR_SECTION
14cf11af 371 bl hash_page
4a3a224c 372END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
232ca1ee 373#endif /* CONFIG_VMAP_STACK */
14cf11af 3741: mr r4,r12
b4c001dc 375 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
1ca9db5b 376 stw r4, _DAR(r11)
a546498f 377 EXC_XFER_LITE(0x400, handle_page_fault)
14cf11af 378
14cf11af
PM
379/* External interrupt */
380 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
381
382/* Alignment exception */
383 . = 0x600
dd84c217 384 DO_KVM 0x600
14cf11af 385Alignment:
cd08f109 386 EXCEPTION_PROLOG handle_dar_dsisr=1
c9c84fd9 387 save_dar_dsisr_on_stack r4, r5, r11
14cf11af 388 addi r3,r1,STACK_FRAME_OVERHEAD
232ca1ee 389 b alignment_exception_tramp
14cf11af
PM
390
391/* Program check exception */
dc1c1ca3 392 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
14cf11af
PM
393
394/* Floating-point unavailable */
395 . = 0x800
dd84c217 396 DO_KVM 0x800
14cf11af 397FPUnavailable:
aa42c69c
KP
398BEGIN_FTR_SECTION
399/*
400 * Certain Freescale cores don't have a FPU and treat fp instructions
401 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
402 */
403 b ProgramCheck
404END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
14cf11af 405 EXCEPTION_PROLOG
6f3d8e69
MN
406 beq 1f
407 bl load_up_fpu /* if from user, just load it up */
408 b fast_exception_return
4091: addi r3,r1,STACK_FRAME_OVERHEAD
642770dd 410 EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
14cf11af
PM
411
412/* Decrementer */
413 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
414
642770dd
CL
415 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
416 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
14cf11af
PM
417
418/* System call */
419 . = 0xc00
dd84c217 420 DO_KVM 0xc00
14cf11af 421SystemCall:
b86fb888 422 SYSCALL_ENTRY 0xc00
14cf11af 423
dc1c1ca3 424 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
642770dd 425 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
14cf11af
PM
426
427/*
428 * The Altivec unavailable trap is at 0x0f20. Foo.
429 * We effectively remap it to 0x3000.
430 * We include an altivec unavailable exception vector even if
431 * not configured for Altivec, so that you can't panic a
432 * non-altivec kernel running on a machine with altivec just
433 * by executing an altivec instruction.
434 */
435 . = 0xf00
dd84c217 436 DO_KVM 0xf00
555d97ac 437 b PerformanceMonitor
14cf11af
PM
438
439 . = 0xf20
dd84c217 440 DO_KVM 0xf20
14cf11af
PM
441 b AltiVecUnavailable
442
14cf11af
PM
443/*
444 * Handle TLB miss for instruction on 603/603e.
445 * Note: we get an alternate set of r0 - r3 to use automatically.
446 */
447 . = 0x1000
448InstructionTLBMiss:
449/*
00fcb147 450 * r0: scratch
14cf11af
PM
451 * r1: linux style pte ( later becomes ppc hardware pte )
452 * r2: ptr to linux-style pte
453 * r3: scratch
454 */
14cf11af
PM
455 /* Get PTE (linux-style) and check access */
456 mfspr r3,SPRN_IMISS
a8a12199 457#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
f1a1f7a1 458 lis r1, TASK_SIZE@h /* check if kernel address */
8a13c4f9 459 cmplw 0,r1,r3
a8a12199 460#endif
93c4a162 461 mfspr r2, SPRN_SPRG_PGDIR
84de6ab0
CL
462#ifdef CONFIG_SWAP
463 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
464#else
451b3ec0 465 li r1,_PAGE_PRESENT | _PAGE_EXEC
84de6ab0 466#endif
a8a12199 467#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
f1a1f7a1 468 bgt- 112f
2c12393f
CL
469 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
470 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
a8a12199 471#endif
93c4a162 472112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
14cf11af
PM
473 lwz r2,0(r2) /* get pmd entry */
474 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
475 beq- InstructionAddressInvalid /* return if no mapping */
476 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
eb3436a0
KG
477 lwz r0,0(r2) /* get linux-style pte */
478 andc. r1,r1,r0 /* check access & ~permission */
14cf11af 479 bne- InstructionAddressInvalid /* return if access not permitted */
14cf11af 480 /* Convert linux-style PTE to low word of PPC-style PTE */
40bb0e90 481 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
f342adca
CL
482 ori r1, r1, 0xe06 /* clear out reserved bits */
483 andc r1, r0, r1 /* PP = user? 1 : 0 */
345953cf
KG
484BEGIN_FTR_SECTION
485 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
486END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
14cf11af 487 mtspr SPRN_RPA,r1
14cf11af
PM
488 tlbli r3
489 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
490 mtcrf 0x80,r3
491 rfi
492InstructionAddressInvalid:
493 mfspr r3,SPRN_SRR1
494 rlwinm r1,r3,9,6,6 /* Get load/store bit */
495
496 addis r1,r1,0x2000
497 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
14cf11af
PM
498 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
499 or r2,r2,r1
500 mtspr SPRN_SRR1,r2
501 mfspr r1,SPRN_IMISS /* Get failing address */
502 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
503 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
504 xor r1,r1,r2
505 mtspr SPRN_DAR,r1 /* Set fault address */
506 mfmsr r0 /* Restore "normal" registers */
507 xoris r0,r0,MSR_TGPR>>16
508 mtcrf 0x80,r3 /* Restore CR0 */
509 mtmsr r0
510 b InstructionAccess
511
512/*
513 * Handle TLB miss for DATA Load operation on 603/603e
514 */
515 . = 0x1100
516DataLoadTLBMiss:
517/*
00fcb147 518 * r0: scratch
14cf11af
PM
519 * r1: linux style pte ( later becomes ppc hardware pte )
520 * r2: ptr to linux-style pte
521 * r3: scratch
522 */
14cf11af
PM
523 /* Get PTE (linux-style) and check access */
524 mfspr r3,SPRN_DMISS
f1a1f7a1 525 lis r1, TASK_SIZE@h /* check if kernel address */
8a13c4f9 526 cmplw 0,r1,r3
93c4a162 527 mfspr r2, SPRN_SPRG_PGDIR
84de6ab0
CL
528#ifdef CONFIG_SWAP
529 li r1, _PAGE_PRESENT | _PAGE_ACCESSED
530#else
451b3ec0 531 li r1, _PAGE_PRESENT
84de6ab0 532#endif
f1a1f7a1 533 bgt- 112f
2c12393f
CL
534 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
535 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
93c4a162 536112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
14cf11af
PM
537 lwz r2,0(r2) /* get pmd entry */
538 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
539 beq- DataAddressInvalid /* return if no mapping */
540 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
eb3436a0
KG
541 lwz r0,0(r2) /* get linux-style pte */
542 andc. r1,r1,r0 /* check access & ~permission */
14cf11af 543 bne- DataAddressInvalid /* return if access not permitted */
14cf11af
PM
544 /*
545 * NOTE! We are assuming this is not an SMP system, otherwise
546 * we would need to update the pte atomically with lwarx/stwcx.
547 */
14cf11af 548 /* Convert linux-style PTE to low word of PPC-style PTE */
40bb0e90
CL
549 rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
550 rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
551 rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
a4bd6a93 552 ori r1,r1,0xe04 /* clear out reserved bits */
f342adca 553 andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
345953cf
KG
554BEGIN_FTR_SECTION
555 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
556END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
14cf11af 557 mtspr SPRN_RPA,r1
2319f123
KG
558 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
559 mtcrf 0x80,r2
560BEGIN_MMU_FTR_SECTION
561 li r0,1
ee43eb78 562 mfspr r1,SPRN_SPRG_603_LRU
2319f123
KG
563 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
564 slw r0,r0,r2
565 xor r1,r0,r1
566 srw r0,r1,r2
ee43eb78 567 mtspr SPRN_SPRG_603_LRU,r1
2319f123
KG
568 mfspr r2,SPRN_SRR1
569 rlwimi r2,r0,31-14,14,14
570 mtspr SPRN_SRR1,r2
571END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
14cf11af 572 tlbld r3
14cf11af
PM
573 rfi
574DataAddressInvalid:
575 mfspr r3,SPRN_SRR1
576 rlwinm r1,r3,9,6,6 /* Get load/store bit */
577 addis r1,r1,0x2000
578 mtspr SPRN_DSISR,r1
14cf11af
PM
579 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
580 mtspr SPRN_SRR1,r2
581 mfspr r1,SPRN_DMISS /* Get failing address */
582 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
583 beq 20f /* Jump if big endian */
584 xori r1,r1,3
58520: mtspr SPRN_DAR,r1 /* Set fault address */
586 mfmsr r0 /* Restore "normal" registers */
587 xoris r0,r0,MSR_TGPR>>16
588 mtcrf 0x80,r3 /* Restore CR0 */
589 mtmsr r0
590 b DataAccess
591
592/*
593 * Handle TLB miss for DATA Store on 603/603e
594 */
595 . = 0x1200
596DataStoreTLBMiss:
597/*
00fcb147 598 * r0: scratch
14cf11af
PM
599 * r1: linux style pte ( later becomes ppc hardware pte )
600 * r2: ptr to linux-style pte
601 * r3: scratch
602 */
14cf11af
PM
603 /* Get PTE (linux-style) and check access */
604 mfspr r3,SPRN_DMISS
f1a1f7a1 605 lis r1, TASK_SIZE@h /* check if kernel address */
8a13c4f9 606 cmplw 0,r1,r3
93c4a162 607 mfspr r2, SPRN_SPRG_PGDIR
84de6ab0 608#ifdef CONFIG_SWAP
415480dc 609 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
84de6ab0 610#else
415480dc 611 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
84de6ab0 612#endif
f1a1f7a1 613 bgt- 112f
2c12393f
CL
614 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
615 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
93c4a162 616112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
14cf11af
PM
617 lwz r2,0(r2) /* get pmd entry */
618 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
619 beq- DataAddressInvalid /* return if no mapping */
620 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
eb3436a0
KG
621 lwz r0,0(r2) /* get linux-style pte */
622 andc. r1,r1,r0 /* check access & ~permission */
14cf11af 623 bne- DataAddressInvalid /* return if access not permitted */
14cf11af
PM
624 /*
625 * NOTE! We are assuming this is not an SMP system, otherwise
626 * we would need to update the pte atomically with lwarx/stwcx.
627 */
14cf11af 628 /* Convert linux-style PTE to low word of PPC-style PTE */
40bb0e90 629 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
f342adca
CL
630 li r1,0xe06 /* clear out reserved bits & PP msb */
631 andc r1,r0,r1 /* PP = user? 1: 0 */
345953cf
KG
632BEGIN_FTR_SECTION
633 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
634END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
14cf11af 635 mtspr SPRN_RPA,r1
2319f123
KG
636 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
637 mtcrf 0x80,r2
638BEGIN_MMU_FTR_SECTION
639 li r0,1
ee43eb78 640 mfspr r1,SPRN_SPRG_603_LRU
2319f123
KG
641 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
642 slw r0,r0,r2
643 xor r1,r0,r1
644 srw r0,r1,r2
ee43eb78 645 mtspr SPRN_SPRG_603_LRU,r1
2319f123
KG
646 mfspr r2,SPRN_SRR1
647 rlwimi r2,r0,31-14,14,14
648 mtspr SPRN_SRR1,r2
649END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
14cf11af 650 tlbld r3
14cf11af
PM
651 rfi
652
653#ifndef CONFIG_ALTIVEC
dc1c1ca3 654#define altivec_assist_exception unknown_exception
69eeff02
ME
655#endif
656
657#ifndef CONFIG_TAU_INT
658#define TAUException unknown_exception
14cf11af
PM
659#endif
660
642770dd
CL
661 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
662 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
663 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
664 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
14cf11af 665 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
642770dd
CL
666 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
667 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
668 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
669 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
670 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
671 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
672 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
673 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
674 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
675 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
676 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
677 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
678 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
679 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
680 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
681 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
682 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
683 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
684 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
685 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
686 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
687 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
688 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
689 EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)
14cf11af
PM
690
691 . = 0x3000
692
232ca1ee
CL
693machine_check_tramp:
694 EXC_XFER_STD(0x200, machine_check_exception)
695
696alignment_exception_tramp:
697 EXC_XFER_STD(0x600, alignment_exception)
698
2e15001e 699handle_page_fault_tramp_1:
232ca1ee
CL
700#ifdef CONFIG_VMAP_STACK
701 EXCEPTION_PROLOG_2 handle_dar_dsisr=1
702#endif
2e15001e
CL
703 lwz r4, _DAR(r11)
704 lwz r5, _DSISR(r11)
705 /* fall through */
706handle_page_fault_tramp_2:
707 EXC_XFER_LITE(0x300, handle_page_fault)
708
232ca1ee
CL
709#ifdef CONFIG_VMAP_STACK
710.macro save_regs_thread thread
711 stw r0, THR0(\thread)
712 stw r3, THR3(\thread)
713 stw r4, THR4(\thread)
714 stw r5, THR5(\thread)
715 stw r6, THR6(\thread)
716 stw r8, THR8(\thread)
717 stw r9, THR9(\thread)
718 mflr r0
719 stw r0, THLR(\thread)
720 mfctr r0
721 stw r0, THCTR(\thread)
722.endm
723
724.macro restore_regs_thread thread
725 lwz r0, THLR(\thread)
726 mtlr r0
727 lwz r0, THCTR(\thread)
728 mtctr r0
729 lwz r0, THR0(\thread)
730 lwz r3, THR3(\thread)
731 lwz r4, THR4(\thread)
732 lwz r5, THR5(\thread)
733 lwz r6, THR6(\thread)
734 lwz r8, THR8(\thread)
735 lwz r9, THR9(\thread)
736.endm
737
738hash_page_dsi:
739 save_regs_thread r10
740 mfdsisr r3
741 mfdar r4
742 mfsrr0 r5
743 mfsrr1 r9
744 rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
745 bl hash_page
746 mfspr r10, SPRN_SPRG_THREAD
747 restore_regs_thread r10
748 b .Lhash_page_dsi_cont
749
750hash_page_isi:
751 mr r11, r10
752 mfspr r10, SPRN_SPRG_THREAD
753 save_regs_thread r10
754 li r3, 0
755 lwz r4, SRR0(r10)
756 lwz r9, SRR1(r10)
757 bl hash_page
758 mfspr r10, SPRN_SPRG_THREAD
759 restore_regs_thread r10
760 mr r10, r11
761 b .Lhash_page_isi_cont
762
763 .globl fast_hash_page_return
764fast_hash_page_return:
765 andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
766 mfspr r10, SPRN_SPRG_THREAD
767 restore_regs_thread r10
768 bne 1f
769
770 /* DSI */
771 mtcr r11
772 lwz r11, THR11(r10)
773 mfspr r10, SPRN_SPRG_SCRATCH0
232ca1ee
CL
774 RFI
775
7761: /* ISI */
777 mtcr r11
778 mfspr r11, SPRN_SPRG_SCRATCH1
779 mfspr r10, SPRN_SPRG_SCRATCH0
232ca1ee
CL
780 RFI
781
cd08f109
CL
782stack_overflow:
783 vmap_stack_overflow_exception
232ca1ee 784#endif
cd08f109 785
14cf11af
PM
786AltiVecUnavailable:
787 EXCEPTION_PROLOG
788#ifdef CONFIG_ALTIVEC
37f9ef55
BH
789 beq 1f
790 bl load_up_altivec /* if from user, just load it up */
791 b fast_exception_return
14cf11af 792#endif /* CONFIG_ALTIVEC */
37f9ef55 7931: addi r3,r1,STACK_FRAME_OVERHEAD
642770dd 794 EXC_XFER_LITE(0xf20, altivec_unavailable_exception)
14cf11af 795
555d97ac
AF
796PerformanceMonitor:
797 EXCEPTION_PROLOG
798 addi r3,r1,STACK_FRAME_OVERHEAD
799 EXC_XFER_STD(0xf00, performance_monitor_exception)
800
14cf11af
PM
801
802/*
803 * This code is jumped to from the startup code to copy
ccdcef72 804 * the kernel image to physical address PHYSICAL_START.
14cf11af
PM
805 */
/*
 * relocate_kernel: copy the kernel image down to PHYSICAL_START.
 * On entry (set up by the caller above): r26 = physical offset we
 * are currently running at (from reloc_offset), r4 = current
 * address of _start.  r25 is computed as klimit - KERNELBASE, the
 * number of bytes to copy.  The first 0x4000 bytes (vectors + this
 * code) are copied first; we then branch to label 4 *in the copy*
 * so the rest of the image can be copied by already-relocated code,
 * and finally fall into turn_on_mmu.
 */
806relocate_kernel:
807 addis r9,r26,klimit@ha /* fetch klimit */
808 lwz r25,klimit@l(r9)
809 addis r25,r25,-KERNELBASE@h
ccdcef72 810 lis r3,PHYSICAL_START@h /* Destination base address */
14cf11af
PM
811 li r6,0 /* Destination offset */
812 li r5,0x4000 /* # bytes of memory to copy */
813 bl copy_and_flush /* copy the first 0x4000 bytes */
814 addi r0,r3,4f@l /* jump to the address of 4f */
815 mtctr r0 /* in copy and do the rest. */
816 bctr /* jump to the copy */
8174: mr r5,r25
818 bl copy_and_flush /* copy the rest */
819 b turn_on_mmu
820
821/*
822 * Copy routine used to copy the kernel to start at physical address 0
823 * and flush and invalidate the caches as needed.
824 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
825 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
826 */
748a7683 827_ENTRY(copy_and_flush)
14cf11af
PM
828 addi r5,r5,-4
829 addi r6,r6,-4
7dffb720 8304: li r0,L1_CACHE_BYTES/4
14cf11af
PM
831 mtctr r0
8323: addi r6,r6,4 /* copy a cache line */
833 lwzx r0,r6,r4
834 stwx r0,r6,r3
835 bdnz 3b
836 dcbst r6,r3 /* write it to memory */
837 sync
838 icbi r6,r3 /* flush the icache line */
839 cmplw 0,r6,r5
840 blt 4b
841 sync /* additional sync needed on g4 */
842 isync
843 addi r5,r5,4
844 addi r6,r6,4
845 blr
846
14cf11af 847#ifdef CONFIG_SMP
ee0339f2
JL
848 .globl __secondary_start_mpc86xx
849__secondary_start_mpc86xx:
850 mfspr r3, SPRN_PIR
851 stw r3, __secondary_hold_acknowledge@l(0)
852 mr r24, r3 /* cpu # */
853 b __secondary_start
854
14cf11af
PM
855 .globl __secondary_start_pmac_0
856__secondary_start_pmac_0:
857 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
858 li r24,0
859 b 1f
860 li r24,1
861 b 1f
862 li r24,2
863 b 1f
864 li r24,3
8651:
866 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
867 set to map the 0xf0000000 - 0xffffffff region */
868 mfmsr r0
869 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
14cf11af
PM
870 mtmsr r0
871 isync
872
873 .globl __secondary_start
874__secondary_start:
14cf11af
PM
875 /* Copy some CPU settings from CPU 0 */
876 bl __restore_cpu_setup
877
878 lis r3,-KERNELBASE@h
879 mr r4,r24
14cf11af 880 bl call_setup_cpu /* Call setup_cpu for this CPU */
d7cceda9 881#ifdef CONFIG_PPC_BOOK3S_32
14cf11af
PM
882 lis r3,-KERNELBASE@h
883 bl init_idle_6xx
d7cceda9 884#endif /* CONFIG_PPC_BOOK3S_32 */
14cf11af 885
4e67bfd7 886 /* get current's stack and current */
7c19c2e5
CL
887 lis r2,secondary_current@ha
888 tophys(r2,r2)
889 lwz r2,secondary_current@l(r2)
ed1cd6de
CL
890 tophys(r1,r2)
891 lwz r1,TASK_STACK(r1)
14cf11af
PM
892
893 /* stack */
894 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
895 li r0,0
896 tophys(r3,r1)
897 stw r0,0(r3)
898
899 /* load up the MMU */
b7f8b440 900 bl load_segment_registers
14cf11af
PM
901 bl load_up_mmu
902
903 /* ptr to phys current thread */
904 tophys(r4,r2)
905 addi r4,r4,THREAD /* phys address of our thread_struct */
ee43eb78 906 mtspr SPRN_SPRG_THREAD,r4
4622a2d4
CL
907 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
908 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
909 mtspr SPRN_SPRG_PGDIR, r4
14cf11af
PM
910
911 /* enable MMU and jump to start_secondary */
912 li r4,MSR_KERNEL
14cf11af
PM
913 lis r3,start_secondary@h
914 ori r3,r3,start_secondary@l
915 mtspr SPRN_SRR0,r3
916 mtspr SPRN_SRR1,r4
14cf11af
PM
917 RFI
918#endif /* CONFIG_SMP */
919
dd84c217
AG
920#ifdef CONFIG_KVM_BOOK3S_HANDLER
921#include "../kvm/book3s_rmhandlers.S"
922#endif
923
14cf11af
PM
 924/*
925 * Those generic dummy functions are kept for CPUs not
d7cceda9 926 * included in CONFIG_PPC_BOOK3S_32
14cf11af 927 */
d7cceda9 928#if !defined(CONFIG_PPC_BOOK3S_32)
 /* No-op stubs: non-BOOK3S_32 CPUs have no setup state to save/restore,
  * but callers unconditionally `bl` these symbols. */
748a7683 929_ENTRY(__save_cpu_setup)
14cf11af 930 blr
748a7683 931_ENTRY(__restore_cpu_setup)
14cf11af 932 blr
d7cceda9 933#endif /* !defined(CONFIG_PPC_BOOK3S_32) */
14cf11af 934
14cf11af
PM
 935/*
936 * Load stuff into the MMU. Intended to be called with
937 * IR=0 and DR=0.
938 */
215b8237
CL
 939early_hash_table:
 /*
  * Point SDR1 at the boot-time hash table `early_hash` and record its
  * virtual address in the `Hash` variable, after flushing stale TLB
  * state, so hash faults can be serviced before MMU_init runs.
  */
940 sync /* Force all PTE updates to finish */
941 isync
942 tlbia /* Clear all TLB entries */
943 sync /* wait for tlbia/tlbie to finish */
944 TLBSYNC /* ... on all CPUs */
945 /* Load the SDR1 register (hash table base & size) */
946 lis r6, early_hash - PAGE_OFFSET@h
947 ori r6, r6, 3 /* 256kB table */
948 mtspr SPRN_SDR1, r6
69a1593a
CL
 949 lis r6, early_hash@h
950 lis r3, Hash@ha
951 stw r6, Hash@l(r3)
215b8237 952 blr
215b8237 953
14cf11af
PM
954load_up_mmu:
 /*
  * Load the final MMU state computed by MMU_init: flush the TLB,
  * install the hash table pointer saved in _SDR1, and load all BAT
  * registers from the BATS array. Called with IR=0 and DR=0, so
  * memory operands go through physical aliases (tophys).
  */
955 sync /* Force all PTE updates to finish */
956 isync
957 tlbia /* Clear all TLB entries */
958 sync /* wait for tlbia/tlbie to finish */
959 TLBSYNC /* ... on all CPUs */
960 /* Load the SDR1 register (hash table base & size) */
961 lis r6,_SDR1@ha
962 tophys(r6,r6)
963 lwz r6,_SDR1@l(r6)
964 mtspr SPRN_SDR1,r6
187a0067 965
8b14e1df 966/* Load the BAT registers with the values set up by MMU_init. */
14cf11af
PM
 967 lis r3,BATS@ha
968 addi r3,r3,BATS@l
969 tophys(r3,r3)
970 LOAD_BAT(0,r3,r4,r5)
971 LOAD_BAT(1,r3,r4,r5)
972 LOAD_BAT(2,r3,r4,r5)
973 LOAD_BAT(3,r3,r4,r5)
 /* BATs 4-7 exist only on CPUs with MMU_FTR_USE_HIGH_BATS (745x etc.) */
7c03d653 974BEGIN_MMU_FTR_SECTION
ee0339f2
JL
 975 LOAD_BAT(4,r3,r4,r5)
976 LOAD_BAT(5,r3,r4,r5)
977 LOAD_BAT(6,r3,r4,r5)
978 LOAD_BAT(7,r3,r4,r5)
7c03d653 979END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
14cf11af
PM
 980 blr
981
2c637d2d 982_GLOBAL(load_segment_registers)
 /*
  * Initialise all 16 segment registers for context 0: the first
  * NUM_USER_SEGMENTS get the user protection bits (Nx/Ks when
  * KUEP/KUAP are enabled), the remaining kernel segments get Kp=1
  * with Nx/Ks cleared. VSIDs step by 0x111 per segment; r4 walks
  * the effective address space in 256MB (0x10000000) steps.
  */
215b8237
CL
 983 li r0, NUM_USER_SEGMENTS /* load up user segment register values */
984 mtctr r0 /* for context 0 */
985 li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
986#ifdef CONFIG_PPC_KUEP
987 oris r3, r3, SR_NX@h /* Set Nx */
988#endif
989#ifdef CONFIG_PPC_KUAP
990 oris r3, r3, SR_KS@h /* Set Ks */
991#endif
992 li r4, 0
9933: mtsrin r3, r4
994 addi r3, r3, 0x111 /* increment VSID */
995 addis r4, r4, 0x1000 /* address of next segment */
996 bdnz 3b
997 li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
998 mtctr r0 /* for context 0 */
999 rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
1000 rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
1001 oris r3, r3, SR_KP@h /* Kp = 1 */
10023: mtsrin r3, r4
1003 addi r3, r3, 0x111 /* increment VSID */
1004 addis r4, r4, 0x1000 /* address of next segment */
1005 bdnz 3b
1006 blr
1007
14cf11af
PM
 1008/*
1009 * This is where the main kernel code starts.
1010 */
1011start_here:
 /*
  * Boot-CPU C entry path: set up `current` (init_task), SPRGs, and the
  * init stack, call machine_init/MMU_init with translation still off,
  * then RFI to a real-mode trampoline (2:) that loads the final MMU
  * state before RFI-ing into start_kernel with the MMU enabled.
  * On entry r31 holds the value passed through to machine_init as r4.
  */
1012 /* ptr to current */
1013 lis r2,init_task@h
1014 ori r2,r2,init_task@l
1015 /* Set up for using our exception vectors */
1016 /* ptr to phys current thread */
1017 tophys(r4,r2)
1018 addi r4,r4,THREAD /* init task's THREAD */
ee43eb78 1019 mtspr SPRN_SPRG_THREAD,r4
4622a2d4
CL
 1020 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
1021 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
1022 mtspr SPRN_SPRG_PGDIR, r4
14cf11af
PM
 1023
1024 /* stack */
1025 lis r1,init_thread_union@ha
1026 addi r1,r1,init_thread_union@l
1027 li r0,0
1028 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
1029/*
187a0067 1030 * Do early platform-specific initialization,
14cf11af
PM
 1031 * and set up the MMU.
1032 */
2edb16ef
CL
 1033#ifdef CONFIG_KASAN
1034 bl kasan_early_init
1035#endif
6dece0eb
SW
 1036 li r3,0
1037 mr r4,r31
14cf11af 1038 bl machine_init
22c841c9 1039 bl __save_cpu_setup
14cf11af 1040 bl MMU_init
72f208c6 1041 bl MMU_init_hw_patch
14cf11af 1042
14cf11af
PM
 1043/*
1044 * Go back to running unmapped so we can load up new values
1045 * for SDR1 (hash table pointer) and the segment registers
1046 * and change to using our exception vectors.
1047 */
1048 lis r4,2f@h
1049 ori r4,r4,2f@l
1050 tophys(r4,r4)
1051 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
94dd54c5
CL
 1052
1053 .align 4
14cf11af
PM
 1054 mtspr SPRN_SRR0,r4
1055 mtspr SPRN_SRR1,r3
14cf11af
PM
 1056 RFI
1057/* Load up the kernel context */
10582: bl load_up_mmu
1059
1060#ifdef CONFIG_BDI_SWITCH
1061 /* Add helper information for the Abatron bdiGDB debugger.
1062 * We do this here because we know the mmu is disabled, and
1063 * will be enabled for real in just a few instructions.
1064 */
1065 lis r5, abatron_pteptrs@h
1066 ori r5, r5, abatron_pteptrs@l
b51ba4fe 1067 stw r5, 0xf0(0) /* This must match your Abatron config */
14cf11af
PM
 1068 lis r6, swapper_pg_dir@h
1069 ori r6, r6, swapper_pg_dir@l
1070 tophys(r5, r5)
1071 stw r6, 0(r5)
1072#endif /* CONFIG_BDI_SWITCH */
1073
1074/* Now turn on the MMU for real! */
1075 li r4,MSR_KERNEL
14cf11af
PM
 1076 lis r3,start_kernel@h
1077 ori r3,r3,start_kernel@l
1078 mtspr SPRN_SRR0,r3
1079 mtspr SPRN_SRR1,r4
14cf11af
PM
 1080 RFI
1081
 1082/*
5e696617
BH
 1083 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
1084 *
14cf11af
PM
 1085 * Set up the segment registers for a new context.
1086 */
5e696617
BH
 1087_ENTRY(switch_mmu_context)
 /* r4 = next mm; derive the base VSID from next->context.id.
  * A negative context id is a bug — trap at 4: below. */
1088 lwz r3,MMCONTEXTID(r4)
1089 cmpwi cr0,r3,0
1090 blt- 4f
14cf11af
PM
 1091 mulli r3,r3,897 /* multiply context by skew factor */
1092 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
31ed2b13
CL
 1093#ifdef CONFIG_PPC_KUEP
1094 oris r3, r3, SR_NX@h /* Set Nx */
a68c31fc
CL
 1095#endif
1096#ifdef CONFIG_PPC_KUAP
1097 oris r3, r3, SR_KS@h /* Set Ks */
31ed2b13 1098#endif
14cf11af
PM
 1099 li r0,NUM_USER_SEGMENTS
1100 mtctr r0
1101
 /* Publish the next mm's pgd (physical) in SPRN_SPRG_PGDIR so the
  * hash-fault handler walks the right page tables. */
93c4a162 1102 lwz r4, MM_PGD(r4)
14cf11af
PM
 1103#ifdef CONFIG_BDI_SWITCH
1104 /* Context switch the PTE pointer for the Abatron BDI2000.
1105 * The PGDIR is passed as second argument.
1106 */
40058337
CL
 1107 lis r5, abatron_pteptrs@ha
1108 stw r4, abatron_pteptrs@l + 0x4(r5)
14cf11af 1109#endif
93c4a162
CL
 1110 tophys(r4, r4)
1111 mtspr SPRN_SPRG_PGDIR, r4
14cf11af
PM
 1112 li r4,0
1113 isync
11143:
14cf11af
PM
 1115 mtsrin r3,r4
1116 addi r3,r3,0x111 /* next VSID */
1117 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
1118 addis r4,r4,0x1000 /* address of next segment */
1119 bdnz 3b
1120 sync
1121 isync
1122 blr
5e696617
BH
 11234: trap
1124 EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
1125 blr
9445aa1a 1126EXPORT_SYMBOL(switch_mmu_context)
14cf11af
PM
1127
 1128/*
1129 * An undocumented "feature" of 604e requires that the v bit
1130 * be cleared before changing BAT values.
1131 *
1132 * Also, newer IBM firmware does not clear bat3 and 4 so
1133 * this makes sure it's done.
1134 * -- Cort
1135 */
1136clear_bats:
 /* Zero every I/D BAT pair (clears the valid bits in the upper regs),
  * leaving the BAT map empty. Clobbers r10 only. */
1137 li r10,0
14cf11af
PM
 1138
1139 mtspr SPRN_DBAT0U,r10
1140 mtspr SPRN_DBAT0L,r10
1141 mtspr SPRN_DBAT1U,r10
1142 mtspr SPRN_DBAT1L,r10
1143 mtspr SPRN_DBAT2U,r10
1144 mtspr SPRN_DBAT2L,r10
1145 mtspr SPRN_DBAT3U,r10
1146 mtspr SPRN_DBAT3L,r10
14cf11af
PM
 1147 mtspr SPRN_IBAT0U,r10
1148 mtspr SPRN_IBAT0L,r10
1149 mtspr SPRN_IBAT1U,r10
1150 mtspr SPRN_IBAT1L,r10
1151 mtspr SPRN_IBAT2U,r10
1152 mtspr SPRN_IBAT2L,r10
1153 mtspr SPRN_IBAT3U,r10
1154 mtspr SPRN_IBAT3L,r10
7c03d653 1155BEGIN_MMU_FTR_SECTION
14cf11af
PM
 1156 /* Here's a tweak: at this point, CPU setup have
1157 * not been called yet, so HIGH_BAT_EN may not be
1158 * set in HID0 for the 745x processors. However, it
1159 * seems that doesn't affect our ability to actually
1160 * write to these SPRs.
1161 */
1162 mtspr SPRN_DBAT4U,r10
1163 mtspr SPRN_DBAT4L,r10
1164 mtspr SPRN_DBAT5U,r10
1165 mtspr SPRN_DBAT5L,r10
1166 mtspr SPRN_DBAT6U,r10
1167 mtspr SPRN_DBAT6L,r10
1168 mtspr SPRN_DBAT7U,r10
1169 mtspr SPRN_DBAT7L,r10
1170 mtspr SPRN_IBAT4U,r10
1171 mtspr SPRN_IBAT4L,r10
1172 mtspr SPRN_IBAT5U,r10
1173 mtspr SPRN_IBAT5L,r10
1174 mtspr SPRN_IBAT6U,r10
1175 mtspr SPRN_IBAT6L,r10
1176 mtspr SPRN_IBAT7U,r10
1177 mtspr SPRN_IBAT7L,r10
7c03d653 1178END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
14cf11af
PM
 1179 blr
1180
5e04ae85
CL
1181_ENTRY(update_bats)
 /*
  * Reload all BAT registers from the BATS array with translation
  * disabled: save MSR (r6) and LR (r7), RFI to the real-mode code at
  * 1: with RI/EE cleared, clear and reload the BATs, then RFI back to
  * the caller via the saved SRR0/SRR1. Safe to call with the MMU on.
  */
1182 lis r4, 1f@h
1183 ori r4, r4, 1f@l
1184 tophys(r4, r4)
1185 mfmsr r6
1186 mflr r7
1187 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
1188 rlwinm r0, r6, 0, ~MSR_RI
1189 rlwinm r0, r0, 0, ~MSR_EE
1190 mtmsr r0
94dd54c5
CL
 1191
1192 .align 4
5e04ae85
CL
 1193 mtspr SPRN_SRR0, r4
1194 mtspr SPRN_SRR1, r3
5e04ae85
CL
 1195 RFI
11961: bl clear_bats
1197 lis r3, BATS@ha
1198 addi r3, r3, BATS@l
1199 tophys(r3, r3)
1200 LOAD_BAT(0, r3, r4, r5)
1201 LOAD_BAT(1, r3, r4, r5)
1202 LOAD_BAT(2, r3, r4, r5)
1203 LOAD_BAT(3, r3, r4, r5)
1204BEGIN_MMU_FTR_SECTION
1205 LOAD_BAT(4, r3, r4, r5)
1206 LOAD_BAT(5, r3, r4, r5)
1207 LOAD_BAT(6, r3, r4, r5)
1208 LOAD_BAT(7, r3, r4, r5)
1209END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 /* Return: restore original LR/MSR through SRR0/SRR1 and RFI back */
1210 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
1211 mtmsr r3
1212 mtspr SPRN_SRR0, r7
1213 mtspr SPRN_SRR1, r6
5e04ae85
CL
 1214 RFI
1215
14cf11af
PM
1216flush_tlbs:
 /* Invalidate TLB entries by issuing tlbie for each 4kB page from
  * 0x3ff000 down to 0 (r10 starts at 0x400000 and is decremented
  * before each tlbie). Clobbers r10. */
1217 lis r10, 0x40
12181: addic. r10, r10, -0x1000
1219 tlbie r10
9acd57ca 1220 bgt 1b
14cf11af
PM
 1221 sync
1222 blr
1223
1224mmu_off:
 /*
  * Turn translation off and continue at __after_mmu_off.
  * In: r3 = runtime offset of _start (used to form the physical
  * continuation address in r4). Returns immediately (blr) if IR/DR
  * are already clear; otherwise clears them from MSR and RFIs to the
  * physical address of __after_mmu_off.
  */
1225 addi r4, r3, __after_mmu_off - _start
1226 mfmsr r3
1227 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1228 beqlr
1229 andc r3,r3,r0
94dd54c5
CL
 1230
1231 .align 4
14cf11af
PM
 1232 mtspr SPRN_SRR0,r4
1233 mtspr SPRN_SRR1,r3
1234 sync
1235 RFI
1236
8b14e1df 1237/* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
14cf11af 1238initial_bats:
 /* IBAT0/DBAT0: PAGE_OFFSET -> physical 0, 256MB, R/W supervisor
  * access (M=1 for coherency on SMP). Clobbers r8, r11. */
ccdcef72 1239 lis r11,PAGE_OFFSET@h
39097b9c 1240 tophys(r8,r11)
14cf11af
PM
 1241#ifdef CONFIG_SMP
1242 ori r8,r8,0x12 /* R/W access, M=1 */
1243#else
1244 ori r8,r8,2 /* R/W access */
1245#endif /* CONFIG_SMP */
14cf11af 1246 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
14cf11af 1247
8b14e1df 1248 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx have valid */
14cf11af
PM
 1249 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
1250 mtspr SPRN_IBAT0L,r8
1251 mtspr SPRN_IBAT0U,r11
1252 isync
1253 blr
1254
f21f49ea 1255#ifdef CONFIG_BOOTX_TEXT
51d3082f
BH
 1256setup_disp_bat:
1257 /*
1258 * setup the display bat prepared for us in prom.c
1259 */
 /* Load the upper/lower BAT words stashed in disp_BAT (relocated via
  * reloc_offset) into DBAT3 so the BootX framebuffer stays mapped.
  * Clobbers r3, r8, r11. */
1260 mflr r8
1261 bl reloc_offset
1262 mtlr r8
1263 addis r8,r3,disp_BAT@ha
1264 addi r8,r8,disp_BAT@l
1265 cmpwi cr0,r8,0
1266 beqlr
1267 lwz r11,0(r8)
1268 lwz r8,4(r8)
51d3082f
BH
 1269 mtspr SPRN_DBAT3L,r8
1270 mtspr SPRN_DBAT3U,r11
51d3082f 1271 blr
f21f49ea 1272#endif /* CONFIG_BOOTX_TEXT */
51d3082f 1273
c374e00e
SW
1274#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
1275setup_cpm_bat:
 /* DBAT1: identity-map 1MB at 0xf0000000, uncached/guarded R/W, for
  * early CPM debug console I/O. Clobbers r8, r11. */
1276 lis r8, 0xf000
1277 ori r8, r8, 0x002a
1278 mtspr SPRN_DBAT1L, r8
1279
1280 lis r11, 0xf000
1281 ori r11, r11, (BL_1M << 2) | 2
1282 mtspr SPRN_DBAT1U, r11
1283
1284 blr
1285#endif
1286
d1d56f8c
AH
1287#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
1288setup_usbgecko_bat:
 /* DBAT1: map the top 128K of virtual space (0xfffe0000) to the
  * platform's EXI/USB Gecko I/O region, uncached/guarded R/W.
  * Clobbers r8, r11. */
1289 /* prepare a BAT for early io */
1290#if defined(CONFIG_GAMECUBE)
1291 lis r8, 0x0c00
1292#elif defined(CONFIG_WII)
1293 lis r8, 0x0d00
1294#else
1295#error Invalid platform for USB Gecko based early debugging.
1296#endif
1297 /*
1298 * The virtual address used must match the virtual address
1299 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
1300 */
1301 lis r11, 0xfffe /* top 128K */
1302 ori r8, r8, 0x002a /* uncached, guarded ,rw */
1303 ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */
1304 mtspr SPRN_DBAT1L, r8
1305 mtspr SPRN_DBAT1U, r11
1306 blr
1307#endif
1308
14cf11af
PM
1309#ifdef CONFIG_8260
1310/* Jump into the system reset for the rom.
1311 * We first disable the MMU, and then jump to the ROM reset address.
1312 *
1313 * r3 is the board info structure, r4 is the location for starting.
1314 * I use this for building a small kernel that can load other kernels,
1315 * rather than trying to write or rely on a rom monitor that can tftp load.
1316 */
1317 .globl m8260_gorom
1318m8260_gorom:
 /* Mask external interrupts before touching HID0 */
1319 mfmsr r0
1320 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
1321 sync
1322 mtmsr r0
1323 sync
 /* Disable the instruction and data caches via HID0 */
1324 mfspr r11, SPRN_HID0
1325 lis r10, 0
1326 ori r10,r10,HID0_ICE|HID0_DCE
1327 andc r11, r11, r10
1328 mtspr SPRN_HID0, r11
1329 isync
 /* rfi to the physical address of 2: with only ME|RI set (MMU off),
  * then branch to the start address the caller passed in r4 */
1330 li r5, MSR_ME|MSR_RI
1331 lis r6,2f@h
1332 addis r6,r6,-KERNELBASE@h
1333 ori r6,r6,2f@l
1334 mtspr SPRN_SRR0,r6
1335 mtspr SPRN_SRR1,r5
1336 isync
1337 sync
1338 rfi
13392:
1340 mtlr r4
1341 blr
1342#endif
1343
1344
 1345/*
1346 * We put a few things here that have to be page-aligned.
1347 * This stuff goes at the beginning of the data segment,
1348 * which is page-aligned.
1349 */
1350 .data
1351 .globl sdata
1352sdata:
 /* empty_zero_page: one page of zeros, exported for generic code */
1353 .globl empty_zero_page
1354empty_zero_page:
1355 .space 4096
9445aa1a 1356EXPORT_SYMBOL(empty_zero_page)
14cf11af
PM
 1357
 /* swapper_pg_dir: the kernel's initial page directory (PGD) */
1358 .globl swapper_pg_dir
1359swapper_pg_dir:
bee86f14 1360 .space PGD_TABLE_SIZE
14cf11af 1361
14cf11af
PM
 1362/* Room for two PTE pointers, usually the kernel and current user pointers
1363 * to their respective root page table.
1364 */
1365abatron_pteptrs:
1366 .space 8