Commit | Line | Data |
---|---|---|
2874c5fd | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
34d97e07 BH |
2 | /* |
3 | * Copyright 2010 IBM Corp, Benjamin Herrenschmidt <benh@kernel.crashing.org> | |
4 | * | |
6556fd1a | 5 | * Generic idle routine for 64 bits e500 processors |
34d97e07 BH |
6 | */ |
7 | ||
8 | #include <linux/threads.h> | |
9 | #include <asm/reg.h> | |
10 | #include <asm/ppc_asm.h> | |
11 | #include <asm/asm-offsets.h> | |
12 | #include <asm/ppc-opcode.h> | |
13 | #include <asm/processor.h> | |
14 | #include <asm/thread_info.h> | |
f070986a | 15 | #include <asm/epapr_hcalls.h> |
c2e480ba | 16 | #include <asm/hw_irq.h> |
34d97e07 BH |
17 | |
18 | /* 64-bit version only for now */ | |
f070986a SY |
19 | .macro BOOK3E_IDLE name loop |
20 | _GLOBAL(\name) | |
34d97e07 BH |
21 | /* Save LR for later */ |
22 | mflr r0 | |
23 | std r0,16(r1) | |
24 | ||
25 | /* Hard disable interrupts */ | |
26 | wrteei 0 | |
27 | ||
28 | /* Now check if an interrupt came in while we were soft disabled | |
7230c564 | 29 | * since we may otherwise lose it (doorbells etc...). |
34d97e07 | 30 | */ |
7230c564 | 31 | lbz r3,PACAIRQHAPPENED(r13) |
34d97e07 | 32 | cmpwi cr0,r3,0 |
9b81c021 | 33 | bne 2f |
34d97e07 | 34 | |
7230c564 | 35 | /* Now we are going to mark ourselves as soft and hard enabled in |
34d97e07 BH |
36 | * order to be able to take interrupts while asleep. We inform lockdep |
37 | * of that. We don't actually turn interrupts on just yet, though. | |
38 | */ | |
39 | #ifdef CONFIG_TRACE_IRQFLAGS | |
40 | stdu r1,-128(r1) | |
b1576fec | 41 | bl trace_hardirqs_on |
7230c564 | 42 | addi r1,r1,128 |
34d97e07 | 43 | #endif |
c2e480ba | 44 | li r0,IRQS_ENABLED |
4e26bc4a | 45 | stb r0,PACAIRQSOFTMASK(r13) |
34d97e07 BH |
46 | |
47 | /* Interrupts will make us return to LR, so get something we want | |
48 | * in there | |
49 | */ | |
50 | bl 1f | |
51 | ||
7230c564 | 52 | /* And return (interrupts are on) */ |
34d97e07 BH |
53 | ld r0,16(r1) |
54 | mtlr r0 | |
55 | blr | |
56 | ||
57 | 1: /* Let's set the _TLF_NAPPING flag so interrupts make us return | |
58 | * to the right spot | |
59 | */ | |
c911d2e1 | 60 | ld r11, PACACURRENT(r13) |
34d97e07 BH |
61 | ld r10,TI_LOCAL_FLAGS(r11) |
62 | ori r10,r10,_TLF_NAPPING | |
63 | std r10,TI_LOCAL_FLAGS(r11) | |
64 | ||
65 | /* We can now re-enable hard interrupts and go to sleep */ | |
66 | wrteei 1 | |
f070986a SY |
67 | \loop |
68 | ||
9b81c021 NP |
69 | 2: |
70 | lbz r10,PACAIRQHAPPENED(r13) | |
71 | ori r10,r10,PACA_IRQ_HARD_DIS | |
72 | stb r10,PACAIRQHAPPENED(r13) | |
73 | blr | |
f070986a SY |
74 | .endm |
75 | ||
76 | .macro BOOK3E_IDLE_LOOP | |
77 | 1: | |
dabeb572 | 78 | PPC_WAIT_v203 |
34d97e07 | 79 | b 1b |
f070986a SY |
80 | .endm |
81 | ||
82 | /* epapr_ev_idle_start below is patched with the proper hcall | |
83 | opcodes during kernel initialization */ | |
84 | .macro EPAPR_EV_IDLE_LOOP | |
85 | idle_loop: | |
86 | LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE)) | |
87 | ||
88 | .global epapr_ev_idle_start | |
89 | epapr_ev_idle_start: | |
90 | li r3, -1 | |
91 | nop | |
92 | nop | |
93 | nop | |
94 | b idle_loop | |
95 | .endm | |
96 | ||
97 | BOOK3E_IDLE epapr_ev_idle EPAPR_EV_IDLE_LOOP | |
98 | ||
6556fd1a | 99 | BOOK3E_IDLE e500_idle BOOK3E_IDLE_LOOP |