arch/powerpc/kernel/head_44x.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Kernel execution entry point code.
 *
 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *	Rewritten for PReP
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 * Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 * Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 * Copyright 2002-2005 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 */

#include <linux/init.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <linux/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/synch.h>
#include <asm/export.h>
#include <asm/code-patching-asm.h>
#include "head_booke.h"

/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	__HEAD
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
	mr	r31,r3		/* save device tree ptr */
	li	r24,0		/* CPU number */

#ifdef CONFIG_RELOCATABLE
/*
 * Relocate ourselves to the current runtime address.
 * This is called only by the Boot CPU.
 * "relocate" is called with our current runtime virtual
 * address.
 * r21 will be loaded with the physical runtime address of _stext
 */
	bl	0f			/* Get our runtime address */
0:	mflr	r21			/* Make it accessible */
	addis	r21,r21,(_stext - 0b)@ha
	addi	r21,r21,(_stext - 0b)@l	/* Get our current runtime base */

	/*
	 * We have the runtime (virtual) address of our base.
	 * We calculate our shift of offset from a 256M page.
	 * We could map the 256M page we belong to at PAGE_OFFSET and
	 * get going from there.
	 */
	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	rlwinm	r6,r21,0,4,31		/* r6 = PHYS_START % 256M */
	rlwinm	r5,r4,0,4,31		/* r5 = KERNELBASE % 256M */
	subf	r3,r5,r6		/* r3 = r6 - r5 */
	add	r3,r4,r3		/* Required Virtual Address */
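	/*
	 * Worked example (illustrative numbers, not taken from any
	 * particular board): with KERNELBASE = 0xc0000000 (256 MB
	 * aligned, so r5 = 0) and the kernel running 4 MB into its
	 * 256 MB page (r6 = 0x00400000), r3 becomes 0xc0400000, the
	 * virtual address that "relocate" is asked to run us at.
	 */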

	bl	relocate
#endif

	bl	init_cpu_state

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD		/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init

#ifdef CONFIG_RELOCATABLE
	/*
	 * Relocatable kernel support based on processing of dynamic
	 * relocation entries.
	 *
	 * r25 will contain RPN/ERPN for the start address of memory
	 * r21 will contain the current offset of _stext
	 */
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)

	/*
	 * Compute the kernstart_addr.
	 * kernstart_addr => (r6,r8)
	 * kernstart_addr & ~0xfffffff => (r6,r7)
	 */
	rlwinm	r6,r25,0,28,31	/* ERPN. Bits 32-35 of Address */
	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
	rlwinm	r8,r21,0,4,31	/* r8 = (_stext & 0xfffffff) */
	or	r8,r7,r8	/* Compute the lower 32bit of kernstart_addr */

	/* Store kernstart_addr */
	stw	r6,0(r3)	/* higher 32bit */
	stw	r8,4(r3)	/* lower 32bit */

	/*
	 * Compute the virt_phys_offset :
	 * virt_phys_offset = stext.run - kernstart_addr
	 *
	 * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
	 * When we relocate, we have :
	 *
	 *	(kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
	 *
	 * hence:
	 *	virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
	 *
	 */

	/* KERNELBASE&~0xfffffff => (r4,r5) */
	li	r4, 0		/* higher 32bit */
	lis	r5,KERNELBASE@h
	rlwinm	r5,r5,0,0,3	/* Align to 256M, lower 32bit */

	/*
	 * 64bit subtraction.
	 */
	subfc	r5,r7,r5
	subfe	r4,r6,r4
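	/*
	 * Two-word subtraction: subfc subtracts the low 32-bit words and
	 * records the borrow in XER[CA]; subfe then subtracts the high
	 * words including that borrow, leaving the 64-bit
	 * virt_phys_offset in the (r4,r5) pair stored below.
	 */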

	/* Store virt_phys_offset */
	lis	r3,virt_phys_offset@ha
	la	r3,virt_phys_offset@l(r3)

	stw	r4,0(r3)
	stw	r5,4(r3)

#elif defined(CONFIG_DYNAMIC_MEMSTART)
	/*
	 * Mapping based, page aligned dynamic kernel loading.
	 *
	 * r25 will contain RPN/ERPN for the start address of memory
	 *
	 * Add the difference between KERNELBASE and PAGE_OFFSET to the
	 * start of physical memory to get kernstart_addr.
	 */
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)

	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	lis	r5,PAGE_OFFSET@h
	ori	r5,r5,PAGE_OFFSET@l
	subf	r4,r5,r4

	rlwinm	r6,r25,0,28,31	/* ERPN */
	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
	add	r7,r7,r4

	stw	r6,0(r3)
	stw	r7,4(r3)
#endif

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Clear the Machine Check Syndrome Register */
	li	r0,0
	mtspr	SPRN_MCSR,r0

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
			   machine_check_exception)
	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

	/* Data Storage Interrupt */
	DATA_STORAGE_EXCEPTION

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
		  do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
		  FloatingPointUnavailable, unknown_exception, EXC_XFER_STD)
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	SYSCALL_ENTRY	0xc00 BOOKE_INTERRUPT_SYSCALL

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
		  AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_STD)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
		  unknown_exception, EXC_XFER_STD)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30
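	/*
	 * The rlwimi above rotates ESR left by 10 bits so that ESR[ST]
	 * (the "store operation" bit) lands in bit 30, the _PAGE_RW
	 * position, and inserts only that bit into the mask in r13.
	 * The mask is later ANDC-ed against the PTE low word: a store
	 * fault therefore also requires write permission, while a load
	 * only needs _PAGE_PRESENT | _PAGE_ACCESSED.
	 */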

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */
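	/*
	 * Two-level software walk: the rlwinm above turns the top bits
	 * of the faulting address into a word-aligned index into the
	 * pgdir, lwzx fetches the pgd/pmd entry, and the rlwinm. keeps
	 * its upper bits as the page-table base while testing for an
	 * unpopulated entry.  The rlwimi below then merges in the PTE
	 * index to form the address of the two-word (64-bit) PTE.
	 */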

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	patch_site 0f, patch__tlb_44x_hwater_D
	/* Compare with watermark (instruction gets patched) */
0:	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_DEAR

	/* Jump to common tlb load */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	patch_site 0f, patch__tlb_44x_hwater_I
	/* Compare with watermark (instruction gets patched) */
0:	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_SRR0

	/* Jump to common TLB load point */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - TLB index
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_44x:
	/* Combine RPN & ERPN and write WS 0 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,PPC44x_TLB_XLAT

	/*
	 * Create WS1. This is the faulting address (EPN),
	 * page size, and valid flag.
	 */
	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
	/* Insert valid and page size */
	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */

	/* And WS 2 */
	li	r10,0xf85			/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
	and	r11,r12,r10			/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER		/* User page ? */
	beq	1f				/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
1:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */
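	/*
	 * Word 2 notes: the low bits of the Linux PTE are laid out to
	 * line up with TLB word 2, so the 0xf85 mask simply keeps the
	 * storage attribute bits plus SX/SR.  SW (supervisor write) is
	 * taken from _PAGE_DIRTY rather than _PAGE_RW via the rlwimi
	 * into the mask, so the first store to a clean page traps to
	 * the Data Storage handler, which can then mark the PTE dirty.
	 * For user pages the S permission bits are mirrored into the
	 * U positions.
	 */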

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi					/* Force context change */

/* TLB error interrupts for 476
 */
#ifdef CONFIG_PPC_47x
	START_EXCEPTION(DataTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * latch V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */
	lwz	r11,0(r12)		/* Get high word of pte entry */

	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common tlb load */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11,SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13,SPRN_SPRG_RSCRATCH3
	mfspr	r12,SPRN_SPRG_RSCRATCH2
	mfspr	r11,SPRN_SPRG_RSCRATCH1
	mfspr	r10,SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Load PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * latch V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */

	lwz	r11,0(r12)		/* Get high word of pte entry */
	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common TLB load point */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - free to use
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - free to use
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_47x:
	/* Combine RPN & ERPN and write WS 1 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,1

	/* And make up word 2 */
	li	r10,0xf85		/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30	/* DIRTY -> SW position */
	and	r11,r12,r10		/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER	/* User page ? */
	beq	1f			/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
1:	tlbwe	r11,r13,2

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi

#endif /* CONFIG_PPC_47x */

	/* Debug Interrupt */
	/*
	 * This statement needs to exist at the end of the IVPR
	 * definition just in case you end up taking a debug
	 * exception within another exception.
	 */
	DEBUG_CRIT_EXCEPTION

interrupt_end:

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores
 */
_GLOBAL(__fixup_440A_mcheck)
	li	r3,MachineCheckA@l
	mtspr	SPRN_IVOR1,r3
	sync
	blr

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * Init CPU state. This is called at boot time or for secondary CPUs
 * to setup initial TLB entries, setup IVORs, etc...
 *
 */
_GLOBAL(init_cpu_state)
	mflr	r22
#ifdef CONFIG_PPC_47x
	/* We use the PVR to differentiate 44x cores from 476 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476FPE@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	head_start_47x
#endif /* CONFIG_PPC_47x */

/*
 * In case the firmware didn't do it, we apply some workarounds
 * that are good for all 440 core variants here
 */
	mfspr	r3,SPRN_CCR0
	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
	isync
	mtspr	SPRN_CCR0,r3
	isync
	sync

/*
 * Set up the initial MMU state for 44x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 *
 * We first invalidate all TLB entries but the one
 * we are running from.  We then load the KERNELBASE
 * mappings so we can begin to use kernel addresses
 * natively and so the interrupt vector locations are
 * permanently pinned (necessary since Book E
 * implementations always have translation enabled).
 *
 * TODO: Use the known TLB entry we are running from to
 *	 determine which physical region we are located
 *	 in.  This can be used to determine where in RAM
 *	 (on a shared CPU system) or PCI memory space
 *	 (on a DRAMless system) we are located.
 *	 For now, we assume a perfect world which means
 *	 we are located at the base of DRAM (physical 0).
 */

/*
 * Search TLB for entry that we are currently using.
 * Invalidate all entries but the one we are using.
 */
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	wmmucr			/* If not, leave STS=0 */
	oris	r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
wmmucr:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	bl	invstr			/* Find our address */
invstr:	mflr	r5			/* Make it accessible */
	tlbsx	r23,0,r5		/* Find entry we are in */
	li	r4,0			/* Start at TLB entry 0 */
	li	r3,0			/* Set PAGEID inval value */
1:	cmpw	r23,r4			/* Is this our entry? */
	beq	skpinv			/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID	/* If not, inval the entry */
skpinv:	addi	r4,r4,1			/* Increment */
	cmpwi	r4,64			/* Are we done? */
	bne	1b			/* If not, repeat */
	isync				/* If so, context change */
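	/*
	 * The loop above walks all 64 UTLB entries of the 440 (hence
	 * the compare against 64), writing an invalid PAGEID word to
	 * every slot except the one found by tlbsx, i.e. the entry
	 * this code is currently executing through.
	 */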

/*
 * Configure and load pinned entry into TLB slot 63.
 */
#ifdef CONFIG_NONSTATIC_KERNEL
	/*
	 * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
	 * entries of the initial mapping set by the boot loader.
	 * The XLAT entry is stored in r25
	 */

	/* Read the XLAT entry for our current mapping */
	tlbre	r25,r23,PPC44x_TLB_XLAT

	lis	r3,KERNELBASE@h
	ori	r3,r3,KERNELBASE@l

	/* Use our current RPN entry */
	mr	r4,r25
#else

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */
#endif

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

	/* pageid fields */
	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

	/* xlat fields */
	clrrwi	r4,r4,10		/* Mask off the real page number */
					/* ERPN is 0 for first 4GB page */

	/* attrib fields */
	/* Added guarded bit to protect against speculative loads/stores */
	li	r5,0
	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	li	r0,63			/* TLB slot 63 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
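	/*
	 * A 440 TLB entry is written one word at a time: PAGEID (EPN,
	 * size, valid bit), XLAT (RPN/ERPN) and ATTRIB (storage
	 * attributes and permission bits).  Slot 63 holds the pinned
	 * 256 MB kernel mapping; the patched watermark compare in the
	 * TLB miss handlers keeps the replacement index below the
	 * pinned slots so this entry is not evicted.
	 */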

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* If necessary, invalidate original entry we used */
3:	cmpwi	r23,63
	beq	4f
	li	r6,0
	tlbwe	r6,r23,PPC44x_TLB_PAGEID
	isync

4:
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* pageid fields */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K

	/* xlat fields */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* attrib fields */
	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
	li	r0,62			/* TLB slot 62 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID
	tlbwe	r4,r0,PPC44x_TLB_XLAT
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError44x);
	SET_IVOR(14, InstructionTLBError44x);
	SET_IVOR(15, DebugCrit);

	b	head_start_common

#ifdef CONFIG_PPC_47x

#ifdef CONFIG_SMP

/* Entry point for secondary 47x processors */
_GLOBAL(start_secondary_47x)
	mr	r24,r3		/* CPU number */

	bl	init_cpu_state

	/* Now we need to bolt the rest of kernel memory which
	 * is done in C code. We must be careful because our task
	 * struct or our stack can (and will probably) be out
	 * of reach of the initial 256M TLB entry, so we use a
	 * small temporary stack in .bss for that. This works
	 * because only one CPU at a time can be in this code
	 */
	lis	r1,temp_boot_stack@h
	ori	r1,r1,temp_boot_stack@l
	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)
	bl	mmu_init_secondary

	/* Now we can get our task struct and real stack pointer */

	/* Get current's stack and current */
	lis	r2,secondary_current@ha
	lwz	r2,secondary_current@l(r2)
	lwz	r1,TASK_STACK(r2)

	/* Current stack pointer */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)

	/* Kernel stack for exception entry in SPRG3 */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	b	start_secondary

#endif /* CONFIG_SMP */

/*
 * Set up the initial MMU state for 47x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 */

head_start_47x:
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	1f			/* If not, leave STS=0 */
	oris	r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */
1:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bl	1f
1:	mflr	r23
	tlbsx	r23,0,r23
	tlbre	r24,r23,0
	tlbre	r25,r23,1
	tlbre	r26,r23,2

/*
 * Cleanup time
 */

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

clear_all_utlb_entries:

	#; Set initial values.

	addis	r3,0,0x8000
	addi	r4,0,0
	addi	r5,0,0
	b	clear_utlb_entry

	#; Align the loop to speed things up.

	.align	6

clear_utlb_entry:

	tlbwe	r4,r3,0
	tlbwe	r5,r3,1
	tlbwe	r5,r3,2
	addis	r3,r3,0x2000
	cmpwi	r3,0
	bne	clear_utlb_entry
	addis	r3,0,0x8000
	addis	r4,r4,0x100
	cmpwi	r4,0
	bne	clear_utlb_entry
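	#; The nested loop above appears to step r3 through the four
	#; way-select values and r4 through 256 distinct EPNs (spaced
	#; 0x01000000 apart), so an invalid (all-zero) entry is written
	#; into every way of every UTLB congruence class on the 476.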

	#; Restore original entry.

	oris	r23,r23,0x8000	/* specify the way */
	tlbwe	r24,r23,0
	tlbwe	r25,r23,1
	tlbwe	r26,r23,2

/*
 * Configure and load pinned entry into TLB for the kernel core
 */

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Word 0 */
	clrrwi	r3,r3,12		/* Mask off the effective page number */
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M

	/* Word 1 - use r25.  RPN is the same as the original entry */

	/* Word 2 */
	li	r5,0
	ori	r5,r5,PPC47x_TLB2_S_RWX
#ifdef CONFIG_SMP
	ori	r5,r5,PPC47x_TLB2_M
#endif

	/* We write to way 0 and bolted 0 */
	lis	r0,0x8800
	tlbwe	r3,r0,0
	tlbwe	r25,r0,1
	tlbwe	r5,r0,2

/*
 * Configure SSPCR, ISPCR and USPCR for now to search everything; we can
 * fix them up later
 */
	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
	mtspr	SPRN_SSPCR,r3
	mtspr	SPRN_USPCR,r3
	LOAD_REG_IMMEDIATE(r3, 0x12345670)
	mtspr	SPRN_ISPCR,r3

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* Invalidate original entry we used */
3:
	rlwinm	r24,r24,0,21,19	/* clear the "valid" bit */
	tlbwe	r24,r23,0
	addi	r24,0,0
	tlbwe	r24,r23,1
	tlbwe	r24,r23,2
	isync			/* Clear out the shadow TLB entries */

#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* Word 0 */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M

	/* Word 1 */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* Word 2 */
	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)

	/* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
	 * congruence class as the kernel; we need to make sure of it at
	 * some point
	 */
	lis	r0,0x8d00
	tlbwe	r3,r0,0
	tlbwe	r4,r0,1
	tlbwe	r5,r0,2

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheckA);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError47x);
	SET_IVOR(14, InstructionTLBError47x);
	SET_IVOR(15, DebugCrit);

	/* We configure icbi to invalidate 128 bytes at a time since the
	 * current 32-bit kernel code isn't too happy with icache != dcache
	 * block size. We also disable the BTAC as this can cause errors
	 * in some circumstances (see IBM Erratum 47).
	 */
	mfspr	r3,SPRN_CCR0
	oris	r3,r3,0x0020
	ori	r3,r3,0x0040
	mtspr	SPRN_CCR0,r3
	isync

#endif /* CONFIG_PPC_47x */

/*
 * Here we are back to code that is common between 44x and 47x
 *
 * We proceed to further kernel initialization and return to the
 * main kernel entry
 */
head_start_common:
	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/*
	 * If the kernel was loaded at a non-zero 256 MB page, we need to
	 * mask off the most significant 4 bits to get the relative address
	 * from the start of physical memory
	 */
	rlwinm	r22,r22,0,4,31
	addis	r22,r22,PAGE_OFFSET@h
	mtlr	r22
	isync
	blr

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	PAGE_SHIFT
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)

/*
 * To support >32-bit physical addresses, we use an 8KB pgdir.
 */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_SMP
	.align	12
temp_boot_stack:
	.space	1024
#endif /* CONFIG_SMP */