/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low-level cache management functions
 * used for sleep and CPU speed changes on Apple machines.
 * (In fact the only thing that is Apple-specific is that we assume
 * that we can read from ROM at physical address 0xfff00000.)
 *
 * Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
 * Benjamin Herrenschmidt (benh@kernel.crashing.org)
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/feature-fixups.h>

/*
 * Flush and disable all data caches (dL1, L2, L3). This is used
 * when going to sleep, when doing a PMU based cpufreq transition,
 * or when "offlining" a CPU on SMP machines. This code is overly
 * paranoid, but I've had enough issues with various CPU revs and
 * bugs that I decided it was worth being overcautious.
 */

_GLOBAL(flush_disable_caches)
#ifndef CONFIG_PPC_BOOK3S_32
	blr
#else
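	/* Pick the flush routine at runtime: the 745x variant if the
	 * CPU has the 7450 feature set, the 75x variant if it at
	 * least has an L2CR, and otherwise just flush and disable
	 * the L1.
	 */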
BEGIN_FTR_SECTION
	b	flush_disable_745x
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
	b	flush_disable_75x
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	b	__flush_disable_L1

/* This is the code for G3 and 74[01]0 */
flush_disable_75x:
	mflr	r10

	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop DST streams */
BEGIN_FTR_SECTION
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Stop DPM */
	mfspr	r8,SPRN_HID0	/* Save SPRN_HID0 in r8 */
	rlwinm	r4,r8,0,12,10	/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4	/* Disable DPM */
	sync

	/* Disp-flush L1. We have a weird problem here that I never
	 * totally figured out. On 750FX, using the ROM for the flush
	 * results in a non-working flush. We use that workaround for
	 * now until I finally understand what's going on. --BenH
	 */
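	/* A "disp-flush" (displacement flush) simply loads enough
	 * sequential cache lines to displace everything currently in
	 * the cache, forcing any dirty lines out to memory.
	 */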

	/* ROM base by default */
	lis	r4,0xfff0
	mfpvr	r3
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x7000
	bne+	1f
	/* RAM base on 750FX */
	li	r4,0
	/* Note: load the line count via r0, not r4, so the base
	 * address chosen above isn't clobbered before the loop.
	 */
1:	li	r0,0x4000
	mtctr	r0
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

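	/* The sequence below turns the L1 caches off, turns them back
	 * on with the invalidate bits set, then clears the invalidate
	 * bits, so both L1 caches come back up empty.
	 */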
	/* Disable / invalidate / enable L1 data */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,~(HID0_DCE | HID0_ICE)
	mtspr	SPRN_HID0,r3
	sync
	isync
	ori	r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3,(HID0_DCI|HID0_ICFI)
	mtspr	SPRN_HID0,r3
	sync

	/* Get the current L2CR value into r5 */
	mfspr	r5,SPRN_L2CR
	/* Set to data-only (pre-745x bit) */
	oris	r3,r5,L2CR_L2DO@h
	b	2f
	/* When disabling L2, code must be in L1 */
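	/* The branch dance below (b 2f / b 3f / b 1b) appears to pull
	 * the whole cache-line-aligned block into the L1 icache before
	 * the mtspr executes, so no instruction fetch touches the L2
	 * while it is being reconfigured.
	 */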
	.balign	32
1:	mtspr	SPRN_L2CR,r3
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	/* disp-flush L2. The interesting thing here is that the L2 can be
	 * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
	 * but that is probably fine. We disp-flush over 4Mb to be safe
	 */
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync
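	/* Second pass over the same 4Mb range with dcbf, to force out
	 * anything the displacement reads above may have left dirty.
	 */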
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	dcbf	0,r4
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* now disable L2 */
	rlwinm	r5,r5,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign	32
1:	mtspr	SPRN_L2CR,r5
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
	oris	r4,r5,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync

	/* Wait for the invalidation to complete */
1:	mfspr	r3,SPRN_L2CR
	rlwinm.	r0,r3,0,31,31	/* test L2IP, invalidate-in-progress */
	bne	1b

	/* Clear L2I */
	xoris	r4,r4,L2CR_L2I@h
	sync
	mtspr	SPRN_L2CR,r4
	sync

	/* now disable the L1 data and instruction caches */
	mfspr	r0,SPRN_HID0
	rlwinm	r0,r0,0,~(HID0_DCE|HID0_ICE)
	mtspr	SPRN_HID0,r0
	sync
	isync

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mfspr	r0,SPRN_HID0
	rlwimi	r0,r8,0,11,11	/* Turn back HID0[DPM] */
	mtspr	SPRN_HID0,r0
	sync

	/* restore DR and EE */
	sync
	mtmsr	r11
	isync

	mtlr	r10
	blr

/* This code is for 745x processors */
flush_disable_745x:
	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop prefetch streams */
	DSSALL
	sync

	/* Disable L2 prefetching */
	mfspr	r0,SPRN_MSSCR0
	rlwinm	r0,r0,0,0,29	/* clear the L2 prefetch enable bits */
	mtspr	SPRN_MSSCR0,r0
	sync
	isync
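	/* The repeated dcbf of a single line below appears to be part
	 * of the recommended sequence for making sure prefetches have
	 * actually stopped before we start flushing.
	 */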
	lis	r4,0
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4

	/* Due to a bug with the HW flush on some CPU revs, we occasionally
	 * experience data corruption. I'm adding a displacement flush along
	 * with a dcbf loop over a few Mb to "help". The problem isn't totally
	 * fixed by this in theory, but at least, in practice, I couldn't reproduce
	 * it even with a big hammer...
	 */

	lis	r4,0x0002
	mtctr	r4
	li	r4,0
1:
	lwz	r0,0(r4)
	addi	r4,r4,32	/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32	/* Go to start of next cache line */
	bdnz	1b

	/* Flush and disable the L1 data cache */
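	/* The LDSTCR way-lock bits let us lock individual ways of the
	 * L1 data cache. With all but one way locked, the reads below
	 * are forced through the single unlocked way, displacing its
	 * contents; we then repeat for each of the 8 ways in turn.
	 */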
	mfspr	r6,SPRN_LDSTCR
	lis	r3,0xfff0	/* read from ROM for displacement flush */
	li	r4,0xfe		/* start with only way 0 unlocked */
	li	r5,128		/* 128 lines in each way */
1:	mtctr	r5
	rlwimi	r6,r4,0,24,31
	mtspr	SPRN_LDSTCR,r6
	sync
	isync
2:	lwz	r0,0(r3)	/* touch each cache line */
	addi	r3,r3,32
	bdnz	2b
	rlwinm	r4,r4,1,24,30	/* move on to the next way */
	ori	r4,r4,1
	cmpwi	r4,0xff		/* all done? */
	bne	1b
	/* now unlock the L1 data cache */
	li	r4,0
	rlwimi	r6,r4,0,24,31
	sync
	mtspr	SPRN_LDSTCR,r6
	sync
	isync

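	/* Setting both the instruction-only and data-only bits in L2CR
	 * effectively locks the L2 against new allocation, so nothing
	 * refills it while the hardware flush runs.
	 */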
	/* Flush the L2 cache using the hardware assist */
	mfspr	r3,SPRN_L2CR
	cmpwi	r3,0		/* check if it is enabled first */
	bge	4f		/* L2E is the sign bit: clear means disabled */
	oris	r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
	b	2f
	/* When disabling/locking L2, code must be in L1 */
	.balign	32
1:	mtspr	SPRN_L2CR,r0	/* lock the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	ori	r0,r3,L2CR_L2HWF_745x
	sync
	mtspr	SPRN_L2CR,r0	/* set the hardware flush bit */
3:	mfspr	r0,SPRN_L2CR	/* wait for it to go to 0 */
	andi.	r0,r0,L2CR_L2HWF_745x
	bne	3b
	sync
	rlwinm	r3,r3,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign	32
1:	mtspr	SPRN_L2CR,r3	/* disable the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	oris	r4,r3,L2CR_L2I@h	/* now invalidate it */
	mtspr	SPRN_L2CR,r4
	sync
	isync
1:	mfspr	r4,SPRN_L2CR	/* wait for the invalidate to complete */
	andis.	r0,r4,L2CR_L2I@h
	bne	1b
	sync

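	/* CPUs with an L3 (some 745x) get the same treatment through
	 * L3CR: lock, hardware flush, disable, then invalidate.
	 */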
BEGIN_FTR_SECTION
	/* Flush the L3 cache using the hardware assist */
4:	mfspr	r3,SPRN_L3CR
	cmpwi	r3,0		/* check if it is enabled */
	bge	6f		/* L3E is the sign bit: clear means disabled */
	oris	r0,r3,L3CR_L3IO@h
	ori	r0,r0,L3CR_L3DO
	sync
	mtspr	SPRN_L3CR,r0	/* lock the L3 cache */
	sync
	isync
	ori	r0,r0,L3CR_L3HWF
	sync
	mtspr	SPRN_L3CR,r0	/* set the hardware flush bit */
5:	mfspr	r0,SPRN_L3CR	/* wait for it to go to zero */
	andi.	r0,r0,L3CR_L3HWF
	bne	5b
	rlwinm	r3,r3,0,~L3CR_L3E
	sync
	mtspr	SPRN_L3CR,r3	/* disable the L3 cache */
	sync
	ori	r4,r3,L3CR_L3I
	mtspr	SPRN_L3CR,r4
1:	mfspr	r4,SPRN_L3CR
	andi.	r0,r4,L3CR_L3I
	bne	1b
	sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)

6:	mfspr	r0,SPRN_HID0	/* now disable the L1 data cache */
	rlwinm	r0,r0,0,~HID0_DCE
	mtspr	SPRN_HID0,r0
	sync
	isync
	mtmsr	r11		/* restore DR and EE */
	isync
	blr
#endif /* CONFIG_PPC_BOOK3S_32 */