| 1 | /* |
| 2 | * Boot code and exception vectors for Book3E processors |
| 3 | * |
| 4 | * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp. |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. |
| 10 | */ |
| 11 | |
| 12 | #include <linux/threads.h> |
| 13 | #include <asm/reg.h> |
| 14 | #include <asm/page.h> |
| 15 | #include <asm/ppc_asm.h> |
| 16 | #include <asm/asm-offsets.h> |
| 17 | #include <asm/cputable.h> |
| 18 | #include <asm/setup.h> |
| 19 | #include <asm/thread_info.h> |
| 20 | #include <asm/reg_a2.h> |
| 21 | #include <asm/exception-64e.h> |
| 22 | #include <asm/bug.h> |
| 23 | #include <asm/irqflags.h> |
| 24 | #include <asm/ptrace.h> |
| 25 | #include <asm/ppc-opcode.h> |
| 26 | #include <asm/mmu.h> |
| 27 | #include <asm/hw_irq.h> |
| 28 | #include <asm/kvm_asm.h> |
| 29 | #include <asm/kvm_booke_hv_asm.h> |
| 30 | |
/* XXX This will ultimately add space for a special exception save
 * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
 * when taking special interrupts. For now we don't support that,
 * special interrupts from within a non-standard level will probably
 * blow you up
 */
/* Frame size allocated on the dedicated crit/debug/mcheck stacks. */
#define	SPECIAL_EXC_FRAME_SIZE	INT_FRAME_SIZE
| 38 | |
/* Exception prolog code for all exceptions.
 *
 * n:        trap number used to build labels (exc_<n>_bad_stack etc.)
 * intnum:   BOOKE_INTERRUPT_* number, passed to the KVM hook
 * type:     exception class token (GEN/CRIT/DBG/MC/GDBELL) used to
 *           select the SPRG scratch, PACA save area, SRR pair and
 *           kernel-stack policy via token pasting
 * addition: extra per-exception code (see PROLOG_ADDITION_* below)
 *
 * On exit: r13 = PACA, r1 = exception stack frame, r10 = SRR0,
 * r11 = SRR1; original r1/r10/r11/CR are stashed in the PACA.
 */
#define EXCEPTION_PROLOG(n, intnum, type, addition)	\
	mtspr	SPRN_SPRG_##type##_SCRATCH,r13;	/* get spare registers */   \
	mfspr	r13,SPRN_SPRG_PACA;	/* get PACA */			    \
	std	r10,PACA_EX##type+EX_R10(r13);				    \
	std	r11,PACA_EX##type+EX_R11(r13);				    \
	PROLOG_STORE_RESTORE_SCRATCH_##type;				    \
	mfcr	r10;			/* save CR */			    \
	mfspr	r11,SPRN_##type##_SRR1;/* what are we coming from */	    \
	DO_KVM	intnum,SPRN_##type##_SRR1;    /* KVM hook */		    \
	stw	r10,PACA_EX##type+EX_CR(r13);	/* save old CR in the PACA */ \
	addition;			/* additional code for that exc. */ \
	std	r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \
	type##_SET_KSTACK;		/* get special stack if necessary */\
	andi.	r10,r11,MSR_PR;		/* save stack pointer */	    \
	beq	1f;			/* branch around if supervisor */   \
	ld	r1,PACAKSAVE(r13);	/* get kernel stack coming from usr */\
1:	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
	bge-	cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
	mfspr	r10,SPRN_##type##_SRR0;	/* read SRR0 before touching stack */
| 59 | |
/* Exception type-specific macros.
 *
 * For each class, <type>_SET_KSTACK establishes the kernel stack
 * (GEN/GDBELL reuse the current r1; CRIT/DBG/MC switch to dedicated
 * per-CPU stacks from the PACA), and SPRN_<type>_SRR0/1 name the save
 * registers that class uses (SRR, CSRR, DSRR, MCSRR or GSRR pairs).
 */
#define	GEN_SET_KSTACK						\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */
#define SPRN_GEN_SRR0	SPRN_SRR0
#define SPRN_GEN_SRR1	SPRN_SRR1

#define	GDBELL_SET_KSTACK	GEN_SET_KSTACK
#define SPRN_GDBELL_SRR0	SPRN_GSRR0
#define SPRN_GDBELL_SRR1	SPRN_GSRR1

#define CRIT_SET_KSTACK						\
	ld	r1,PACA_CRIT_STACK(r13);			\
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE;
#define SPRN_CRIT_SRR0	SPRN_CSRR0
#define SPRN_CRIT_SRR1	SPRN_CSRR1

#define DBG_SET_KSTACK						\
	ld	r1,PACA_DBG_STACK(r13);				\
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE;
#define SPRN_DBG_SRR0	SPRN_DSRR0
#define SPRN_DBG_SRR1	SPRN_DSRR1

#define MC_SET_KSTACK						\
	ld	r1,PACA_MC_STACK(r13);				\
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE;
#define SPRN_MC_SRR0	SPRN_MCSRR0
#define SPRN_MC_SRR1	SPRN_MCSRR1

/* Per-class prolog wrappers: select the class token and paste the
 * matching PROLOG_ADDITION_<x>_<class> variant for "addition".
 */
#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))

#define CRIT_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))

#define DBG_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))

#define MC_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))

#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))
| 102 | |
/*
 * Store user-visible scratch in PACA exception slots and restore proper value
 *
 * Only CRIT needs this: its SPRG scratch register is also SPRG3, which
 * userspace can read, so we save the stashed r13 to the PACA and put
 * the sanitized PACA_SPRG3 value back in the SPRG.  The other classes
 * use scratch SPRGs with no user-visible alias, hence empty macros.
 */
#define PROLOG_STORE_RESTORE_SCRATCH_GEN
#define PROLOG_STORE_RESTORE_SCRATCH_GDBELL
#define PROLOG_STORE_RESTORE_SCRATCH_DBG
#define PROLOG_STORE_RESTORE_SCRATCH_MC

#define PROLOG_STORE_RESTORE_SCRATCH_CRIT				    \
	mfspr	r10,SPRN_SPRG_CRIT_SCRATCH;	/* get r13 */		    \
	std	r10,PACA_EXCRIT+EX_R13(r13);				    \
	ld	r11,PACA_SPRG3(r13);					    \
	mtspr	SPRN_SPRG_CRIT_SCRATCH,r11;

/* Variants of the "addition" argument for the prolog.
 *
 * NONE:     nothing extra.
 * MASKABLE: branch out of line if interrupts are soft-disabled.
 * 1REG/2REGS: free up r14 (and r15) by stashing them in the class's
 *             PACA save area for handlers that need extra registers.
 */
#define PROLOG_ADDITION_NONE_GEN(n)
#define PROLOG_ADDITION_NONE_GDBELL(n)
#define PROLOG_ADDITION_NONE_CRIT(n)
#define PROLOG_ADDITION_NONE_DBG(n)
#define PROLOG_ADDITION_NONE_MC(n)

#define PROLOG_ADDITION_MASKABLE_GEN(n)					    \
	lbz	r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */	    \
	cmpwi	cr0,r10,0;		/* yes -> go out of line */	    \
	beq	masked_interrupt_book3e_##n

#define PROLOG_ADDITION_2REGS_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);				    \
	std	r15,PACA_EXGEN+EX_R15(r13)

#define PROLOG_ADDITION_1REG_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);

#define PROLOG_ADDITION_2REGS_CRIT(n)					    \
	std	r14,PACA_EXCRIT+EX_R14(r13);				    \
	std	r15,PACA_EXCRIT+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_DBG(n)					    \
	std	r14,PACA_EXDBG+EX_R14(r13);				    \
	std	r15,PACA_EXDBG+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_MC(n)					    \
	std	r14,PACA_EXMC+EX_R14(r13);				    \
	std	r15,PACA_EXMC+EX_R15(r13)
| 148 | |
| 149 | |
/* Core exception code for all exceptions except TLB misses.
 * XXX: Needs to make SPRN_SPRG_GEN depend on exception type
 *
 * Builds the full pt_regs frame on the stack established by the
 * prolog: volatile GPRs, NIP/MSR (from r10/r11 left by the prolog),
 * LR/CTR/XER/CCR, the soft-IRQ state, the exception-frame marker, and
 * trap number (n)+1 to flag that non-volatile GPRs are NOT saved.
 * "ints" is one of the INTS_* macros below.  Note r13 is the PACA
 * throughout; the interrupted r13 is recovered from the GEN scratch
 * SPRG (see the XXX above — wrong for non-GEN classes unless the
 * caller mashed the scratch first, as the debug handlers do).
 */
#define EXCEPTION_COMMON(n, excf, ints)					    \
exc_##n##_common:							    \
	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
	std	r10,_NIP(r1);		/* save SRR0 to stackframe */	    \
	std	r11,_MSR(r1);		/* save SRR1 to stackframe */	    \
	beq	2f;			/* if from kernel mode */	    \
	ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */	    \
2:	ld	r3,excf+EX_R10(r13);	/* get back r10 */		    \
	ld	r4,excf+EX_R11(r13);	/* get back r11 */		    \
	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 */		    \
	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	    \
	mflr	r6;			/* save LR in stackframe */	    \
	mfctr	r7;			/* save CTR in stackframe */	    \
	mfspr	r8,SPRN_XER;		/* save XER in stackframe */	    \
	ld	r9,excf+EX_R1(r13);	/* load orig r1 back from PACA */   \
	lwz	r10,excf+EX_CR(r13);	/* load orig CR back from PACA */   \
	lbz	r11,PACASOFTIRQEN(r13);	/* get current IRQ softe */	    \
	ld	r12,exception_marker@toc(r2);				    \
	li	r0,0;							    \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	std	r6,_LINK(r1);						    \
	std	r7,_CTR(r1);						    \
	std	r8,_XER(r1);						    \
	li	r3,(n)+1;		/* indicate partial regs in trap */ \
	std	r9,0(r1);		/* store stack frame back link */   \
	std	r10,_CCR(r1);		/* store orig CR in stackframe */   \
	std	r9,GPR1(r1);		/* store stack frame back link */   \
	std	r11,SOFTE(r1);		/* and save it to stackframe */	    \
	std	r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */	    \
	std	r3,_TRAP(r1);		/* set trap number */		    \
	std	r0,RESULT(r1);		/* clear regs->result */	    \
	ints;
| 192 | |
/* Variants for the "ints" argument. This one does nothing when we want
 * to keep interrupts in their original state
 */
#define INTS_KEEP

/* This second version is meant for exceptions that don't immediately
 * hard-enable. We set a bit in paca->irq_happened to ensure that
 * a subsequent call to arch_local_irq_restore() will properly
 * hard-enable and avoid the fast-path
 */
#define	INTS_DISABLE	SOFT_DISABLE_INTS(r3,r4)

/* This is called by exceptions that used INTS_KEEP (that did not touch
 * irq indicators in the PACA). This will restore MSR:EE to its previous
 * value
 *
 * XXX In the long run, we may want to open-code it in order to separate the
 * load from the wrtee, thus limiting the latency caused by the dependency
 * but at this point, I'll favor code clarity until we have a near to final
 * implementation
 */
#define INTS_RESTORE_HARD						    \
	ld	r11,_MSR(r1);						    \
	wrtee	r11;
| 217 | |
/* XXX FIXME: Restore r14/r15 when necessary */
/* Target of the prolog's bad-stack check: record the trap number in
 * the PACA and funnel to the common bad-stack handler.
 */
#define BAD_STACK_TRAMPOLINE(n)						    \
exc_##n##_bad_stack:							    \
	li	r1,(n);			/* get exception number */	    \
	sth	r1,PACA_TRAP_SAVE(r13);	/* store trap */		    \
	b	bad_stack_book3e;	/* bad stack error */

/* WARNING: If you change the layout of this stub, make sure you check
 * 	    the debug exception handler which handles single stepping
 * 	    into exceptions from userspace, and the MM code in
 * 	    arch/powerpc/mm/tlb_nohash.c which patches the branch here
 * 	    and would need to be updated if that branch is moved
 *
 * Each stub occupies a fixed 0x20-byte slot at interrupt_base_book3e+loc.
 */
#define	EXCEPTION_STUB(loc, label)					\
	. = interrupt_base_book3e + loc;				\
	nop;	/* To make debug interrupts happy */			\
	b	exc_##label##_book3e;

/* Timer-interrupt acknowledge: write the corresponding status bit to
 * TSR to clear it (decrementer / fixed-interval).  ACK_NONE for
 * sources that need no ack.  r is a scratch register.
 */
#define ACK_NONE(r)
#define ACK_DEC(r)							\
	lis	r,TSR_DIS@h;						\
	mtspr	SPRN_TSR,r
#define ACK_FIT(r)							\
	lis	r,TSR_FIS@h;						\
	mtspr	SPRN_TSR,r
| 243 | |
/* Used by asynchronous interrupt that may happen in the idle loop.
 *
 * This checks if the thread was in the idle loop, and if yes, returns
 * to the caller rather than the PC. This is to avoid a race if
 * interrupts happen before the wait instruction.
 *
 * Clobbers r7-r11; rewrites _NIP in the frame with the saved LR and
 * clears _TLF_NAPPING in the thread_info local flags.
 */
#define CHECK_NAPPING()							\
	CURRENT_THREAD_INFO(r11, r1);					\
	ld	r10,TI_LOCAL_FLAGS(r11);				\
	andi.	r9,r10,_TLF_NAPPING;					\
	beq+	1f;							\
	ld	r8,_LINK(r1);						\
	rlwinm	r7,r10,0,~_TLF_NAPPING;					\
	std	r8,_NIP(r1);						\
	std	r7,TI_LOCAL_FLAGS(r11);					\
1:


/* Full handler for a maskable (soft-disable aware) GEN-class interrupt:
 * prolog with the soft-disable check, common frame build with interrupts
 * disabled, hardware ack, idle-loop fixup, then call hdlr(regs).
 */
#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack)		\
	START_EXCEPTION(label);						\
	NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
	EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE)		\
	ack(r8);							\
	CHECK_NAPPING();						\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr;							\
	b	.ret_from_except_lite;

/* This value is used to mark exception frames on the stack. */
	.section	".toc","aw"
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
| 276 | |
| 277 | |
/*
 * And here we have the exception vectors !
 *
 * This is the hardware vector area: IVPR points at the 4k-aligned base
 * and each IVORn selects one fixed 0x20-byte stub below (the trailing
 * comment on each line is the architectural vector it corresponds to).
 * The stubs just branch to the real handlers further down.
 */

	.text
	.balign	0x1000
	.globl interrupt_base_book3e
interrupt_base_book3e:					/* fake trap */
	EXCEPTION_STUB(0x000, machine_check)		/* 0x0200 */
	EXCEPTION_STUB(0x020, critical_input)		/* 0x0580 */
	EXCEPTION_STUB(0x040, debug_crit)		/* 0x0d00 */
	EXCEPTION_STUB(0x060, data_storage)		/* 0x0300 */
	EXCEPTION_STUB(0x080, instruction_storage)	/* 0x0400 */
	EXCEPTION_STUB(0x0a0, external_input)		/* 0x0500 */
	EXCEPTION_STUB(0x0c0, alignment)		/* 0x0600 */
	EXCEPTION_STUB(0x0e0, program)			/* 0x0700 */
	EXCEPTION_STUB(0x100, fp_unavailable)		/* 0x0800 */
	EXCEPTION_STUB(0x120, system_call)		/* 0x0c00 */
	EXCEPTION_STUB(0x140, ap_unavailable)		/* 0x0f20 */
	EXCEPTION_STUB(0x160, decrementer)		/* 0x0900 */
	EXCEPTION_STUB(0x180, fixed_interval)		/* 0x0980 */
	EXCEPTION_STUB(0x1a0, watchdog)			/* 0x09f0 */
	EXCEPTION_STUB(0x1c0, data_tlb_miss)
	EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
	EXCEPTION_STUB(0x200, altivec_unavailable)	/* 0x0f20 */
	EXCEPTION_STUB(0x220, altivec_assist)		/* 0x1700 */
	EXCEPTION_STUB(0x260, perfmon)
	EXCEPTION_STUB(0x280, doorbell)
	EXCEPTION_STUB(0x2a0, doorbell_crit)
	EXCEPTION_STUB(0x2c0, guest_doorbell)
	EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
	EXCEPTION_STUB(0x300, hypercall)
	EXCEPTION_STUB(0x320, ehpriv)

	.globl interrupt_end_book3e
interrupt_end_book3e:
| 314 | |
/* Critical Input Interrupt
 * Not yet implemented: the prolog runs (on the crit stack) and then we
 * hang.  The commented-out sequence sketches the intended handler.
 */
	START_EXCEPTION(critical_input);
	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
			      PROLOG_ADDITION_NONE)
//	EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE)
//	bl	special_reg_save_crit
//	CHECK_NAPPING();
//	addi	r3,r1,STACK_FRAME_OVERHEAD
//	bl	.critical_exception
//	b	ret_from_crit_except
	b	.

/* Machine Check Interrupt
 * Not yet implemented either: prolog on the mcheck stack, then hang.
 */
	START_EXCEPTION(machine_check);
	MC_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_MACHINE_CHECK,
			    PROLOG_ADDITION_NONE)
//	EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE)
//	bl	special_reg_save_mc
//	addi	r3,r1,STACK_FRAME_OVERHEAD
//	CHECK_NAPPING();
//	bl	.machine_check_exception
//	b	ret_from_mc_except
	b	.
| 338 | |
/* Data Storage Interrupt
 * r14/r15 were freed by the 2REGS prolog addition; load the fault
 * address (DEAR) and error syndrome (ESR) into them for the common
 * page-fault path.
 */
	START_EXCEPTION(data_storage)
	NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE)
	b	storage_fault_common

/* Instruction Storage Interrupt
 * Fault address is the interrupted PC (r10 = SRR0 from the prolog);
 * no error code.
 */
	START_EXCEPTION(instruction_storage);
	NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
				PROLOG_ADDITION_2REGS)
	li	r15,0
	mr	r14,r10
	EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE)
	b	storage_fault_common

/* External Input Interrupt */
	MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
			   external_input, .do_IRQ, ACK_NONE)
| 360 | |
/* Alignment
 * Collect DEAR/ESR like the DSI, keep interrupts in their original
 * state, and continue out of line (no room in the handler slot).
 */
	START_EXCEPTION(alignment);
	NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP)
	b	alignment_more	/* no room, go out of line */

/* Program Interrupt
 * ESR (stashed in r14 by the 1REG prolog path) goes into _DSISR so the
 * C handler can distinguish illegal/privileged/trap causes.
 */
	START_EXCEPTION(program);
	NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
				PROLOG_ADDITION_1REG)
	mfspr	r14,SPRN_ESR
	EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE)
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXGEN+EX_R14(r13)	/* restore stashed r14 */
	bl	.save_nvgprs
	bl	.program_check_exception
	b	.ret_from_except

/* Floating Point Unavailable Interrupt
 * From userspace: just load up the FP state and return fast.
 * From kernel: FP use is a bug — report it.
 */
	START_EXCEPTION(fp_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP)
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	.load_up_fpu
	b	fast_exception_return
1:	INTS_DISABLE
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	b	.ret_from_except
| 399 | |
/* Altivec Unavailable Interrupt
 * Same shape as FP unavailable: lazy-load VMX state for userspace on
 * CPUs with the feature, otherwise (or from kernel) raise the generic
 * unavailable exception.
 */
	START_EXCEPTION(altivec_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x200, PACA_EXGEN, INTS_KEEP)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	INTS_DISABLE
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except

/* AltiVec Assist */
	START_EXCEPTION(altivec_assist);
	NORMAL_EXCEPTION_PROLOG(0x220, BOOKE_INTERRUPT_ALTIVEC_ASSIST,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x220, PACA_EXGEN, INTS_DISABLE)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bl	.altivec_assist_exception
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#else
	bl	.unknown_exception
#endif
	b	.ret_from_except
| 437 | |
| 438 | |
/* Decrementer Interrupt */
	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
			   decrementer, .timer_interrupt, ACK_DEC)

/* Fixed Interval Timer Interrupt */
	MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
			   fixed_interval, .unknown_exception, ACK_FIT)

/* Watchdog Timer Interrupt
 * Not yet implemented: prolog on the crit stack, then hang.
 */
	START_EXCEPTION(watchdog);
	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
			      PROLOG_ADDITION_NONE)
//	EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE)
//	bl	special_reg_save_crit
//	CHECK_NAPPING();
//	addi	r3,r1,STACK_FRAME_OVERHEAD
//	bl	.unknown_exception
//	b	ret_from_crit_except
	b	.

/* System Call Interrupt
 * Minimal entry: set up r9/r11/r12/r13 with the values the generic
 * system_call_common path expects and branch there.
 */
	START_EXCEPTION(system_call)
	mr	r9,r13			/* keep a copy of userland r13 */
	mfspr	r11,SPRN_SRR0		/* get return address */
	mfspr	r12,SPRN_SRR1		/* get previous MSR */
	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
	b	system_call_common

/* Auxiliary Processor Unavailable Interrupt */
	START_EXCEPTION(ap_unavailable);
	NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception
	b	.ret_from_except
| 476 | |
/* Debug exception as a critical interrupt */
	START_EXCEPTION(debug_crit);
	CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			      PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the CSRR1 value and clearing the debug status.
	 */

	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

	/* r10 = CSRR0 from the prolog: was the step taken inside the
	 * exception stub area [interrupt_base, interrupt_end)?
	 */
	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
	LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the CSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_CSRR1,r11
	lwz	r10,PACA_EXCRIT+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXCRIT+EX_R1(r13)
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXCRIT+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXCRIT+EX_R11(r13)
	ld	r13,PACA_EXCRIT+EX_R13(r13)
	rfci

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     quite save properly an interrupted kernel state yet
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash up things to make it look like we are coming on a
	 * normal exception
	 * (put the saved userland r13 in the GEN scratch, which is where
	 * EXCEPTION_COMMON will look for it)
	 */
	ld	r15,PACA_EXCRIT+EX_R13(r13)
	mtspr	SPRN_SPRG_GEN_SCRATCH,r15
	mfspr	r14,SPRN_DBSR
	EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE)
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r14
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	bl	.save_nvgprs
	bl	.DebugException
	b	.ret_from_except

kernel_dbg_exc:
	b	.	/* NYI */
| 542 | |
/* Debug exception as a debug interrupt
 * Same logic as debug_crit above, but using the DBG class: DSRR0/1
 * save registers, the debug stack, PACA_EXDBG save area and rfdi.
 */
	START_EXCEPTION(debug_debug);
	DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			     PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the DSRR1 value and clearing the debug status.
	 */

	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
	LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the DSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_DSRR1,r11
	lwz	r10,PACA_EXDBG+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXDBG+EX_R1(r13)
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXDBG+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXDBG+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_DBG_SCRATCH
	rfdi

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     quite save properly an interrupted kernel state yet
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash up things to make it look like we are coming on a
	 * normal exception
	 * (move userland r13 from the DBG scratch to the GEN scratch for
	 * EXCEPTION_COMMON)
	 */
	mfspr	r15,SPRN_SPRG_DBG_SCRATCH
	mtspr	SPRN_SPRG_GEN_SCRATCH,r15
	mfspr	r14,SPRN_DBSR
	EXCEPTION_COMMON(0xd08, PACA_EXDBG, INTS_DISABLE)
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r14
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	bl	.save_nvgprs
	bl	.DebugException
	b	.ret_from_except
| 605 | |
/* Performance Monitor Interrupt */
	START_EXCEPTION(perfmon);
	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.performance_monitor_exception
	b	.ret_from_except_lite

/* Doorbell interrupt */
	MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
			   doorbell, .doorbell_exception, ACK_NONE)

/* Doorbell critical Interrupt
 * Not yet implemented: prolog on the crit stack, then hang.
 */
	START_EXCEPTION(doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
			      PROLOG_ADDITION_NONE)
//	EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE)
//	bl	special_reg_save_crit
//	CHECK_NAPPING();
//	addi	r3,r1,STACK_FRAME_OVERHEAD
//	bl	.doorbell_critical_exception
//	b	ret_from_crit_except
	b	.

/*
 *	Guest doorbell interrupt
 *	This general exception use GSRRx save/restore registers
 */
	START_EXCEPTION(guest_doorbell);
	GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
			        PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x2c0, PACA_EXGEN, INTS_KEEP)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.save_nvgprs
	INTS_RESTORE_HARD
	bl	.unknown_exception
	b	.ret_from_except

/* Guest Doorbell critical Interrupt
 * Not yet implemented: prolog on the crit stack, then hang.
 */
	START_EXCEPTION(guest_doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
			      PROLOG_ADDITION_NONE)
//	EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE)
//	bl	special_reg_save_crit
//	CHECK_NAPPING();
//	addi	r3,r1,STACK_FRAME_OVERHEAD
//	bl	.guest_doorbell_critical_exception
//	b	ret_from_crit_except
	b	.

/* Hypervisor call */
	START_EXCEPTION(hypercall);
	NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
			        PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.save_nvgprs
	INTS_RESTORE_HARD
	bl	.unknown_exception
	b	.ret_from_except

/* Embedded Hypervisor privileged */
	START_EXCEPTION(ehpriv);
	NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
			        PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.save_nvgprs
	INTS_RESTORE_HARD
	bl	.unknown_exception
	b	.ret_from_except
| 677 | |
/*
 * An interrupt came in while soft-disabled; We mark paca->irq_happened
 * accordingly and if the interrupt is level sensitive, we hard disable
 *
 * paca_irq:  PACA_IRQ_* bit to record in irq_happened.
 * full_mask: 1 for level-sensitive sources — also clear MSR_EE in the
 *            saved SRR1 so the interrupt stays hard-masked on return.
 * Entered from PROLOG_ADDITION_MASKABLE with r10/r11/CR/r13 stashed in
 * PACA_EXGEN / the GEN scratch; restores them all and rfi's back.
 */

.macro masked_interrupt_book3e paca_irq full_mask
	lbz	r10,PACAIRQHAPPENED(r13)
	ori	r10,r10,\paca_irq
	stb	r10,PACAIRQHAPPENED(r13)

	.if \full_mask == 1
	rldicl	r10,r11,48,1		/* clear MSR_EE */
	rotldi	r11,r10,16
	mtspr	SPRN_SRR1,r11
	.endif

	lwz	r11,PACA_EXGEN+EX_CR(r13)
	mtcr	r11
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
	rfi
	b	.
.endm

/* Per-source out-of-line targets for the soft-disabled case.  The
 * trap number in the label matches PROLOG_ADDITION_MASKABLE_GEN(n).
 */
masked_interrupt_book3e_0x500:
	// XXX When adding support for EPR, use PACA_IRQ_EE_EDGE
	masked_interrupt_book3e PACA_IRQ_EE 1

masked_interrupt_book3e_0x900:
	ACK_DEC(r10);
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x980:
	ACK_FIT(r10);
	/* NOTE(review): records PACA_IRQ_DEC for the FIT as well —
	 * presumably intentional (replayed as a timer), but confirm */
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x280:
masked_interrupt_book3e_0x2c0:
	masked_interrupt_book3e PACA_IRQ_DBELL 0
| 718 | |
/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
 * to indicate the kind of interrupt. MSR:EE is already off.
 * We generate a stackframe like if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable them.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about.
	 */
	mflr	r10			/* "SRR0": return address */
	mfmsr	r11			/* "SRR1": current MSR... */
	mfcr	r4
	mtspr	SPRN_SPRG_GEN_SCRATCH,r13;
	std	r1,PACA_EXGEN+EX_R1(r13);
	stw	r4,PACA_EXGEN+EX_CR(r13);
	ori	r11,r11,MSR_EE		/* ...with EE forced on (see above) */
	subi	r1,r1,INT_FRAME_SIZE;	/* frame as if GEN_SET_KSTACK ran */
	cmpwi	cr0,r3,0x500
	beq	exc_0x500_common
	cmpwi	cr0,r3,0x900
	beq	exc_0x900_common
	cmpwi	cr0,r3,0x280
	beq	exc_0x280_common
	blr
| 749 | |
| 750 | |
| 751 | /* |
| 752 | * This is called from 0x300 and 0x400 handlers after the prologs with |
| 753 | * r14 and r15 containing the fault address and error code, with the |
| 754 | * original values stashed away in the PACA |
| 755 | */ |
| 756 | storage_fault_common: |
| 757 | std r14,_DAR(r1) |
| 758 | std r15,_DSISR(r1) |
| 759 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 760 | mr r4,r14 |
| 761 | mr r5,r15 |
| 762 | ld r14,PACA_EXGEN+EX_R14(r13) |
| 763 | ld r15,PACA_EXGEN+EX_R15(r13) |
| 764 | bl .do_page_fault |
| 765 | cmpdi r3,0 |
| 766 | bne- 1f |
| 767 | b .ret_from_except_lite |
| 768 | 1: bl .save_nvgprs |
| 769 | mr r5,r3 |
| 770 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 771 | ld r4,_DAR(r1) |
| 772 | bl .bad_page_fault |
| 773 | b .ret_from_except |
| 774 | |
| 775 | /* |
| 776 | * Alignment exception doesn't fit entirely in the 0x100 bytes so it |
| 777 | * continues here. |
| 778 | */ |
| 779 | alignment_more: |
| 780 | std r14,_DAR(r1) |
| 781 | std r15,_DSISR(r1) |
| 782 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 783 | ld r14,PACA_EXGEN+EX_R14(r13) |
| 784 | ld r15,PACA_EXGEN+EX_R15(r13) |
| 785 | bl .save_nvgprs |
| 786 | INTS_RESTORE_HARD |
| 787 | bl .alignment_exception |
| 788 | b .ret_from_except |
| 789 | |
/*
 * We branch here from entry_64.S for the last stage of the exception
 * return code path. MSR:EE is expected to be off at that point
 */
_GLOBAL(exception_return_book3e)
	b	1f

/* This is the return from load_up_fpu fast path which could do with
 * less GPR restores in fact, but for now we have a single return path
 */
	.globl fast_exception_return
fast_exception_return:
	wrteei	0			/* hard-disable before unwinding */
1:	mr	r0,r13			/* keep PACA pointer in r0 */
	ld	r10,_MSR(r1)
	REST_4GPRS(2, r1)
	andi.	r6,r10,MSR_PR		/* returning to userspace? */
	REST_2GPRS(6, r1)
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r10, r11)
	ld	r0,GPR13(r1)		/* user return: r0 = userland r13 */

1:	stdcx.	r0,0,r1		/* to clear the reservation */

	ld	r8,_CCR(r1)
	ld	r9,_LINK(r1)
	ld	r10,_CTR(r1)
	ld	r11,_XER(r1)
	mtcr	r8
	mtlr	r9
	mtctr	r10
	mtxer	r11
	REST_2GPRS(8, r1)
	ld	r10,GPR10(r1)
	ld	r11,GPR11(r1)
	ld	r12,GPR12(r1)
	mtspr	SPRN_SPRG_GEN_SCRATCH,r0	/* park outgoing r13 */

	/* Stage NIP/MSR into the SRRs via r10/r11, bouncing r10/r11
	 * through the PACA so they can be restored after r1 is gone.
	 */
	std	r10,PACA_EXGEN+EX_R10(r13);
	std	r11,PACA_EXGEN+EX_R11(r13);
	ld	r10,_NIP(r1)
	ld	r11,_MSR(r1)
	ld	r0,GPR0(r1)
	ld	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
	rfi
| 840 | |
| 841 | /* |
| 842 | * Trampolines used when spotting a bad kernel stack pointer in |
| 843 | * the exception entry code. |
| 844 | * |
| 845 | * TODO: move some bits like SRR0 read to trampoline, pass PACA |
| 846 | * index around, etc... to handle crit & mcheck |
| 847 | */ |
| 848 | BAD_STACK_TRAMPOLINE(0x000) |
| 849 | BAD_STACK_TRAMPOLINE(0x100) |
| 850 | BAD_STACK_TRAMPOLINE(0x200) |
| 851 | BAD_STACK_TRAMPOLINE(0x220) |
| 852 | BAD_STACK_TRAMPOLINE(0x260) |
| 853 | BAD_STACK_TRAMPOLINE(0x280) |
| 854 | BAD_STACK_TRAMPOLINE(0x2a0) |
| 855 | BAD_STACK_TRAMPOLINE(0x2c0) |
| 856 | BAD_STACK_TRAMPOLINE(0x2e0) |
| 857 | BAD_STACK_TRAMPOLINE(0x300) |
| 858 | BAD_STACK_TRAMPOLINE(0x310) |
| 859 | BAD_STACK_TRAMPOLINE(0x320) |
| 860 | BAD_STACK_TRAMPOLINE(0x400) |
| 861 | BAD_STACK_TRAMPOLINE(0x500) |
| 862 | BAD_STACK_TRAMPOLINE(0x600) |
| 863 | BAD_STACK_TRAMPOLINE(0x700) |
| 864 | BAD_STACK_TRAMPOLINE(0x800) |
| 865 | BAD_STACK_TRAMPOLINE(0x900) |
| 866 | BAD_STACK_TRAMPOLINE(0x980) |
| 867 | BAD_STACK_TRAMPOLINE(0x9f0) |
| 868 | BAD_STACK_TRAMPOLINE(0xa00) |
| 869 | BAD_STACK_TRAMPOLINE(0xb00) |
| 870 | BAD_STACK_TRAMPOLINE(0xc00) |
| 871 | BAD_STACK_TRAMPOLINE(0xd00) |
| 872 | BAD_STACK_TRAMPOLINE(0xd08) |
| 873 | BAD_STACK_TRAMPOLINE(0xe00) |
| 874 | BAD_STACK_TRAMPOLINE(0xf00) |
| 875 | BAD_STACK_TRAMPOLINE(0xf20) |
| 876 | |
.globl bad_stack_book3e
bad_stack_book3e:
	/* Switch to the per-CPU emergency stack and build a full pt_regs
	 * frame there, then report and spin forever in kernel_bad_stack()
	 */
	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
	mfspr	r10,SPRN_SRR0;		/* read SRR0 before touching stack */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE	/* carve frame plus 64B headroom */
	std	r10,_NIP(r1)
	std	r11,_MSR(r1)
	ld	r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */
	lwz	r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */
	std	r10,GPR1(r1)		/* the bad stack pointer itself */
	std	r11,_CCR(r1)
	mfspr	r10,SPRN_DEAR		/* fault address / syndrome ... */
	mfspr	r11,SPRN_ESR
	std	r10,_DAR(r1)		/* ... into the DAR/DSISR slots */
	std	r11,_DSISR(r1)
	/* NOTE(review): the trailing backslashes below are harmless
	 * leftovers from a macro body and are kept as-is
	 */
	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
	ld	r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */		    \
	ld	r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */		    \
	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_10GPRS(14,r1)		/* save the non-volatiles r14-r31 */
	SAVE_8GPRS(24,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)	/* trap number stashed by prolog */
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE	/* fake a NULL-terminated backchain */
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)		/* kernel TOC for the C call below */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b			/* should not return; spin if it does */
| 923 | |
| 924 | /* |
| 925 | * Setup the initial TLB for a core. This current implementation |
| 926 | * assume that whatever we are running off will not conflict with |
| 927 | * the new mapping at PAGE_OFFSET. |
| 928 | */ |
| 929 | _GLOBAL(initial_tlb_book3e) |
| 930 | |
| 931 | /* Look for the first TLB with IPROT set */ |
| 932 | mfspr r4,SPRN_TLB0CFG |
| 933 | andi. r3,r4,TLBnCFG_IPROT |
| 934 | lis r3,MAS0_TLBSEL(0)@h |
| 935 | bne found_iprot |
| 936 | |
| 937 | mfspr r4,SPRN_TLB1CFG |
| 938 | andi. r3,r4,TLBnCFG_IPROT |
| 939 | lis r3,MAS0_TLBSEL(1)@h |
| 940 | bne found_iprot |
| 941 | |
| 942 | mfspr r4,SPRN_TLB2CFG |
| 943 | andi. r3,r4,TLBnCFG_IPROT |
| 944 | lis r3,MAS0_TLBSEL(2)@h |
| 945 | bne found_iprot |
| 946 | |
| 947 | lis r3,MAS0_TLBSEL(3)@h |
| 948 | mfspr r4,SPRN_TLB3CFG |
| 949 | /* fall through */ |
| 950 | |
| 951 | found_iprot: |
| 952 | andi. r5,r4,TLBnCFG_HES |
| 953 | bne have_hes |
| 954 | |
	mflr	r8		/* save LR */
/* 1. Find the index of the entry we're executing in
 *
 * r3 = MAS0_TLBSEL (for the iprot array)
 * r4 = SPRN_TLBnCFG
 */
	bl	invstr				/* Find our address */
invstr:	mflr	r6				/* Make it accessible */
	/* Build MAS6 from the current MSR[IS] and PID so tlbsx searches
	 * the address space we are actually executing in
	 */
	mfmsr	r7
	rlwinm	r5,r7,27,31,31			/* extract MSR[IS] */
	mfspr	r7,SPRN_PID
	slwi	r7,r7,16
	or	r7,r7,r5
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* search MSR[IS], SPID=PID */

	mfspr	r3,SPRN_MAS0
	rlwinm	r5,r3,16,20,31			/* Extract MAS0(Entry) */

	mfspr	r7,SPRN_MAS1			/* Insure IPROT set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r4 = SPRN_TLBnCFG
 * r5 = ESEL of entry we are running in
 */
	andi.	r4,r4,TLBnCFG_N_ENTRY		/* Extract # entries */
	li	r6,0				/* Set Entry counter to 0 */
1:	mr	r7,r3				/* Set MAS0(TLBSEL) */
	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
	cmpw	r5,r6
	beq	skpinv				/* Dont update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1				/* Increment */
	cmpw	r6,r4				/* Are we done? */
	bne	1b				/* If not, repeat */

	/* Invalidate all TLBs */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

/* 3. Setup a temp mapping and jump to it
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r5 = ESEL of entry we are running in
 */
	andi.	r7,r5,0x1	/* Find an entry not used and is non-zero */
	addi	r7,r7,0x1	/* r7 = (our ESEL & 1) + 1, so r7 != our ESEL */
	mr	r4,r3		/* Base MAS0 on the array we are running from */
	mtspr	SPRN_MAS0,r4
	tlbre

	rlwimi	r4,r7,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r7) */
	mtspr	SPRN_MAS0,r4

	mfspr	r7,SPRN_MAS1
	xori	r6,r7,MAS1_TS	/* Setup TMP mapping in the other Address space */
	mtspr	SPRN_MAS1,r6

	tlbwe

	/* rfi into the other address space at label 2 below */
	mfmsr	r6
	xori	r6,r6,MSR_IS
	mtspr	SPRN_SRR1,r6
	bl	1f		/* Find our address */
1:	mflr	r6
	addi	r6,r6,(2f - 1b)
	mtspr	SPRN_SRR0,r6
	rfi
2:

/* 4. Clear out PIDs & Search info
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	li	r6,0
	mtspr	SPRN_MAS6,r6
	mtspr	SPRN_PID,r6

/* 5. Invalidate mapping we started in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	mtspr	SPRN_MAS0,r3
	tlbre
	mfspr	r6,SPRN_MAS1
	rlwinm	r6,r6,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r6
	tlbwe

	/* Invalidate TLB1 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

/* The mapping only needs to be cache-coherent on SMP */
#ifdef CONFIG_SMP
#define M_IF_SMP	MAS2_M
#else
#define M_IF_SMP	0
#endif

/* 6. Setup KERNELBASE mapping in TLB[0]
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	rlwinm	r3,r3,0,16,3	/* clear ESEL */
	mtspr	SPRN_MAS0,r3
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r6

	/* EPN = PAGE_OFFSET, coherent (M) only on SMP */
	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_SMP)
	mtspr	SPRN_MAS2,r6

	/* Keep the RPN from the entry we read, set kernel RWX permissions */
	rlwinm	r5,r5,0,0,25
	ori	r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
	mtspr	SPRN_MAS3,r5
	/* NOTE(review): r5 is recomputed below but not read again before
	 * the tlbwe — looks dead, kept as-is
	 */
	li	r5,-1
	rlwinm	r5,r5,0,0,25

	tlbwe

/* 7. Jump to KERNELBASE mapping
 *
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 */
	/* Now we branch the new virtual address mapped by this entry */
	LOAD_REG_IMMEDIATE(r6,2f)
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r7
	rfi			/* start execution out of TLB1[0] entry */
2:

/* 8. Clear out the temp mapping
 *
 * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 */
	mtspr	SPRN_MAS0,r4
	tlbre
	mfspr	r5,SPRN_MAS1
	rlwinm	r5,r5,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r5
	tlbwe

	/* Invalidate TLB1 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	/* We translate LR and return */
	tovirt(r8,r8)
	mtlr	r8
	blr
| 1128 | |
have_hes:
	/* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the
	 * kernel linear mapping. We also set MAS8 once for all here though
	 * that will have to be made dependent on whether we are running under
	 * a hypervisor I suppose.
	 */

	/* BEWARE, MAGIC
	 * This code is called as an ordinary function on the boot CPU. But to
	 * avoid duplication, this code is also used in SCOM bringup of
	 * secondary CPUs. We read the code between the initial_tlb_code_start
	 * and initial_tlb_code_end labels one instruction at a time and RAM it
	 * into the new core via SCOM. That doesn't process branches, so there
	 * must be none between those two labels. It also means if this code
	 * ever takes any parameters, the SCOM code must also be updated to
	 * provide them.
	 */
	.globl a2_tlbinit_code_start
a2_tlbinit_code_start:

	ori	r11,r3,MAS0_WQ_ALLWAYS
	oris	r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
	mtspr	SPRN_MAS0,r11
	/* Valid, protected, 1GB page at EPN=PAGE_OFFSET, coherent (M),
	 * RPN=0 with kernel RWX permissions, MAS8 (LPID etc...) zeroed
	 */
	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
	ori	r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
	mtspr	SPRN_MAS1,r3
	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)
	mtspr	SPRN_MAS2,r3
	li	r3,MAS3_SR | MAS3_SW | MAS3_SX
	mtspr	SPRN_MAS7_MAS3,r3
	li	r3,0
	mtspr	SPRN_MAS8,r3

	/* Write the TLB entry */
	tlbwe

	.globl a2_tlbinit_after_linear_map
a2_tlbinit_after_linear_map:

	/* Now we branch the new virtual address mapped by this entry */
	LOAD_REG_IMMEDIATE(r3,1f)
	mtctr	r3
	bctr

1:	/* We are now running at PAGE_OFFSET, clean the TLB of everything
	 * else (including IPROTed things left by firmware)
	 * r4 = TLBnCFG
	 * r3 = current address (more or less)
	 */

	/* Locate our own entry: tlbsx with MAS6 = 0 (PID 0, AS 0) */
	li	r5,0
	mtspr	SPRN_MAS6,r5
	tlbsx	0,r3

	rlwinm	r9,r4,0,TLBnCFG_N_ENTRY	/* r9 = total number of entries */
	rlwinm	r10,r4,8,0xff
	addi	r10,r10,-1	/* Get inner loop mask */

	li	r3,1		/* way counter, starts at 1 to skip our way */

	/* Template MAS1: same as ours but with VALID and IPROT cleared */
	mfspr	r5,SPRN_MAS1
	rlwinm	r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))

	mfspr	r6,SPRN_MAS2
	rldicr	r6,r6,0,51	/* Extract EPN */

	mfspr	r7,SPRN_MAS0
	rlwinm	r7,r7,0,0xffff0fff	/* Clear HES and WQ */

	rlwinm	r8,r7,16,0xfff	/* Extract ESEL */

	/* Walk every way/EPN combination, writing the invalid template.
	 * Starting the way counter at our own ESEL + 1 (mod mask) keeps
	 * the entry we run from for last within each EPN — presumably so
	 * we never wipe it while still needed; TODO confirm
	 */
2:	add	r4,r3,r8
	and	r4,r4,r10

	rlwimi	r7,r4,16,MAS0_ESEL_MASK

	mtspr	SPRN_MAS0,r7
	mtspr	SPRN_MAS1,r5
	mtspr	SPRN_MAS2,r6
	tlbwe

	addi	r3,r3,1
	and.	r4,r3,r10

	bne	3f
	addis	r6,r6,(1<<30)@h	/* ways wrapped: advance EPN by 1GB */
3:
	cmpw	r3,r9	/* Check if we are done */
	blt	2b

	.globl a2_tlbinit_after_iprot_flush
a2_tlbinit_after_iprot_flush:

#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
	/* Now establish early debug mappings if applicable */
	/* Restore the MAS0 we used for linear mapping load */
	mtspr	SPRN_MAS0,r11

	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
	ori	r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT)
	mtspr	SPRN_MAS1,r3
	LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G)
	mtspr	SPRN_MAS2,r3
	LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW)
	mtspr	SPRN_MAS7_MAS3,r3
	/* re-use the MAS8 value from the linear mapping */
	tlbwe
#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */

	/* Flush the entries we just invalidated above */
	PPC_TLBILX(0,0,R0)
	sync
	isync

	.globl a2_tlbinit_code_end
a2_tlbinit_code_end:

	/* We translate LR and return */
	mflr	r3
	tovirt(r3,r3)
	mtlr	r3
	blr
| 1250 | |
| 1251 | /* |
| 1252 | * Main entry (boot CPU, thread 0) |
| 1253 | * |
| 1254 | * We enter here from head_64.S, possibly after the prom_init trampoline |
| 1255 | * with r3 and r4 already saved to r31 and 30 respectively and in 64 bits |
| 1256 | * mode. Anything else is as it was left by the bootloader |
| 1257 | * |
| 1258 | * Initial requirements of this port: |
| 1259 | * |
| 1260 | * - Kernel loaded at 0 physical |
| 1261 | * - A good lump of memory mapped 0:0 by UTLB entry 0 |
| 1262 | * - MSR:IS & MSR:DS set to 0 |
| 1263 | * |
| 1264 | * Note that some of the above requirements will be relaxed in the future |
| 1265 | * as the kernel becomes smarter at dealing with different initial conditions |
| 1266 | * but for now you have to be careful |
| 1267 | */ |
| 1268 | _GLOBAL(start_initialization_book3e) |
| 1269 | mflr r28 |
| 1270 | |
| 1271 | /* First, we need to setup some initial TLBs to map the kernel |
| 1272 | * text, data and bss at PAGE_OFFSET. We don't have a real mode |
| 1273 | * and always use AS 0, so we just set it up to match our link |
| 1274 | * address and never use 0 based addresses. |
| 1275 | */ |
| 1276 | bl .initial_tlb_book3e |
| 1277 | |
| 1278 | /* Init global core bits */ |
| 1279 | bl .init_core_book3e |
| 1280 | |
| 1281 | /* Init per-thread bits */ |
| 1282 | bl .init_thread_book3e |
| 1283 | |
| 1284 | /* Return to common init code */ |
| 1285 | tovirt(r28,r28) |
| 1286 | mtlr r28 |
| 1287 | blr |
| 1288 | |
| 1289 | |
| 1290 | /* |
| 1291 | * Secondary core/processor entry |
| 1292 | * |
| 1293 | * This is entered for thread 0 of a secondary core, all other threads |
| 1294 | * are expected to be stopped. It's similar to start_initialization_book3e |
| 1295 | * except that it's generally entered from the holding loop in head_64.S |
| 1296 | * after CPUs have been gathered by Open Firmware. |
| 1297 | * |
| 1298 | * We assume we are in 32 bits mode running with whatever TLB entry was |
| 1299 | * set for us by the firmware or POR engine. |
| 1300 | */ |
| 1301 | _GLOBAL(book3e_secondary_core_init_tlb_set) |
| 1302 | li r4,1 |
| 1303 | b .generic_secondary_smp_init |
| 1304 | |
/* Secondary core init: optional TLB setup (skipped when r4 != 0),
 * then core and per-thread init, then return at a virtual address
 */
_GLOBAL(book3e_secondary_core_init)
	mflr	r28		/* save return address across the bl's below */

	/* Do we need to setup initial TLB entry ? */
	cmplwi	r4,0
	bne	2f

	/* Setup TLB for this core */
	bl	.initial_tlb_book3e

	/* We can return from the above running at a different
	 * address, so recalculate r2 (TOC)
	 */
	bl	.relative_toc

	/* Init global core bits */
2:	bl	.init_core_book3e

	/* Init per-thread bits */
3:	bl	.init_thread_book3e

	/* Return to common init code at proper virtual address.
	 *
	 * Due to various previous assumptions, we know we entered this
	 * function at either the final PAGE_OFFSET mapping or using a
	 * 1:1 mapping at 0, so we don't bother doing a complicated check
	 * here, we just ensure the return address has the right top bits.
	 *
	 * Note that if we ever want to be smarter about where we can be
	 * started from, we have to be careful that by the time we reach
	 * the code below we may already be running at a different location
	 * than the one we were called from since initial_tlb_book3e can
	 * have moved us already.
	 */
	cmpdi	cr0,r28,0	/* negative (top bits set) = already virtual */
	blt	1f
	lis	r3,PAGE_OFFSET@highest	/* 1:1 entry: OR in PAGE_OFFSET bits */
	sldi	r3,r3,32
	or	r28,r28,r3
1:	mtlr	r28
	blr
| 1346 | |
/* Secondary (non-0) thread entry: per-thread init only, re-using the
 * 3: tail of book3e_secondary_core_init above (no TLB or core setup)
 */
_GLOBAL(book3e_secondary_thread_init)
	mflr	r28
	b	3b
| 1350 | |
/* Per-core init: point IVPR at this kernel's exception vector base */
_STATIC(init_core_book3e)
	/* Establish the interrupt vector base */
	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
	mtspr	SPRN_IVPR,r3
	sync
	blr
| 1357 | |
/* Per-thread init: set EPCR interrupt modes, mask interrupts, and
 * quiesce the timer facilities
 */
_STATIC(init_thread_book3e)
	/* EPCR: ICM | GICM — presumably selects 64-bit (computation) mode
	 * for interrupts; TODO confirm against the Book3E EPCR definition
	 */
	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
	mtspr	SPRN_EPCR,r3

	/* Make sure interrupts are off */
	wrteei	0

	/* disable all timers and clear out status (TSR bits are
	 * write-1-to-clear, so writing back the read value clears them)
	 */
	li	r3,0
	mtspr	SPRN_TCR,r3
	mfspr	r3,SPRN_TSR
	mtspr	SPRN_TSR,r3

	blr
| 1372 | |
/* Program the base set of IVORs with the offsets (from IVPR) of the
 * exception vectors defined earlier in this file
 */
_GLOBAL(__setup_base_ivors)
	SET_IVOR(0, 0x020) /* Critical Input */
	SET_IVOR(1, 0x000) /* Machine Check */
	SET_IVOR(2, 0x060) /* Data Storage */
	SET_IVOR(3, 0x080) /* Instruction Storage */
	SET_IVOR(4, 0x0a0) /* External Input */
	SET_IVOR(5, 0x0c0) /* Alignment */
	SET_IVOR(6, 0x0e0) /* Program */
	SET_IVOR(7, 0x100) /* FP Unavailable */
	SET_IVOR(8, 0x120) /* System Call */
	SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */
	SET_IVOR(10, 0x160) /* Decrementer */
	SET_IVOR(11, 0x180) /* Fixed Interval Timer */
	SET_IVOR(12, 0x1a0) /* Watchdog Timer */
	SET_IVOR(13, 0x1c0) /* Data TLB Error */
	SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
	SET_IVOR(15, 0x040) /* Debug */

	sync

	blr
| 1394 | |
/* Program the AltiVec IVORs (only meaningful on CPUs that have them) */
_GLOBAL(setup_altivec_ivors)
	SET_IVOR(32, 0x200) /* AltiVec Unavailable */
	SET_IVOR(33, 0x220) /* AltiVec Assist */
	blr
| 1399 | |
/* Program the performance monitor IVOR */
_GLOBAL(setup_perfmon_ivor)
	SET_IVOR(35, 0x260) /* Performance Monitor */
	blr
| 1403 | |
/* Program the doorbell IVORs (CPUs with message-passing doorbells) */
_GLOBAL(setup_doorbell_ivors)
	SET_IVOR(36, 0x280) /* Processor Doorbell */
	SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
	blr
| 1408 | |
/* Program the embedded-hypervisor IVORs (guest doorbells, hv syscall
 * and privilege traps)
 */
_GLOBAL(setup_ehv_ivors)
	SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
	SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
	SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
	SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
	blr