1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * This file contains kexec low-level functions.
5 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
6 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
7 * PPC44x port. Copyright (C) 2011, IBM Corporation
8 * Author: Suzuki Poulose <suzuki@in.ibm.com>
14 #include <asm/ppc_asm.h>
15 #include <asm/kexec.h>
20 * Must be relocatable PIC code callable as a C function.
/*
 * NOTE(review): this excerpt keeps the original file's line numbers as a
 * leading column, and many intermediate source lines are elided.  Branch
 * targets such as "skip", several "1:"/"2:" labels, and the mtmsr/rfi
 * (context-synchronizing) sequences referenced below are NOT visible here,
 * so comments added in this pass state only what the visible instructions
 * establish and hedge everything else.
 *
 * relocate_new_kernel -- position-independent trampoline used by kexec.
 * On 44x/47x the MMU cannot simply be switched off, so the code rebuilds
 * a 1:1 mapping first, then walks the kexec indirection page list copying
 * the new kernel into place, and finally jumps to its entry point.
 */
22 .globl relocate_new_kernel
25 /* r4 = reboot_code_buffer */
26 /* r5 = start_address */
/* r3 presumably carries the indirection page list (elided) -- TODO confirm */
28 #ifdef CONFIG_PPC_85xx
/* 85xx: reuse the shared entry-mapping helper to build the kexec mapping */
34 #define ENTRY_MAPPING_KEXEC_SETUP
35 #include <kernel/85xx_entry_mapping.S>
36 #undef ENTRY_MAPPING_KEXEC_SETUP
43 #elif defined(CONFIG_44x)
45 /* Save our parameters */
/*
 * Compare the PVR (read into r3 on an elided line, presumably via mfspr)
 * against the known 476 variants; the branches taken on a match are
 * elided -- presumably into the CONFIG_PPC_47x path further down.
 */
51 /* Check for 47x cores */
54 cmplwi cr0,r3,PVR_476FPE@h
56 cmplwi cr0,r3,PVR_476@h
58 cmplwi cr0,r3,PVR_476_ISS@h
60 #endif /* CONFIG_PPC_47x */
63 * Code for setting up 1:1 mapping for PPC440x for KEXEC
65 * We cannot switch off the MMU on PPC44x.
67 * 1) Invalidate all the mappings except the one we are running from.
68 * 2) Create a tmp mapping for our code in the other address space(TS) and
69 * jump to it. Invalidate the entry we started in.
70 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS.
71 * 4) Jump to the 1:1 mapping in original TS.
72 * 5) Invalidate the tmp mapping.
74 * - Based on the kexec support code for FSL BookE
79 * Load the PID with kernel PID (0).
80 * Also load our MSR_IS and TID to MMUCR for TLB search.
87 oris r3,r3,PPC44x_MMUCR_STS@h
93 * Invalidate all the TLB entries except the current entry
94 * where we are running from
/* bcl 20,31 is the standard PIC "get current address into LR" idiom */
96 bcl 20,31,$+4 /* Find our address */
97 0: mflr r5 /* Make it accessible */
/* r23 = index of the TLB entry mapping this code; it must survive the loop */
98 tlbsx r23,0,r5 /* Find entry we are in */
99 li r4,0 /* Start at TLB entry 0 */
100 li r3,0 /* Set PAGEID inval value */
101 1: cmpw r23,r4 /* Is this our entry? */
102 beq skip /* If so, skip the inval */
103 tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
/* "skip" label itself is elided; visible loop covers all 64 44x TLB entries */
105 addi r4,r4,1 /* Increment */
106 cmpwi r4,64 /* Are we done? */
107 bne 1b /* If not, repeat */
/*
 * Pick a scratch TLB index (1 or 2) that differs from the low bit of our
 * own entry index, so the tmp mapping never overwrites the entry we are
 * currently executing from.
 */
110 /* Create a temp mapping and jump to it */
111 andi. r6, r23, 1 /* Find the index to use */
112 addi r24, r6, 1 /* r24 will contain 1 or 2 */
114 mfmsr r9 /* get the MSR */
115 rlwinm r5, r9, 27, 31, 31 /* Extract the MSR[IS] */
116 xori r7, r5, 1 /* Use the other address space */
118 /* Read the current mapping entries */
119 tlbre r3, r23, PPC44x_TLB_PAGEID
120 tlbre r4, r23, PPC44x_TLB_XLAT
121 tlbre r5, r23, PPC44x_TLB_ATTRIB
123 /* Save our current XLAT entry */
/*
 * Decode the page size field of our PAGEID word into a byte count in r10
 * by rotating 1 left; the compare/branch structure between the cases is
 * partially elided (branches after each cmpwi are not visible).
 */
126 /* Extract the TLB PageSize */
127 li r10, 1 /* r10 will hold PageSize */
128 rlwinm r11, r3, 0, 24, 27 /* bits 24-27 */
130 /* XXX: As of now we use 256M, 4K pages */
131 cmpwi r11, PPC44x_TLB_256M
133 rotlwi r10, r10, 28 /* r10 = 256M */
136 cmpwi r11, PPC44x_TLB_4K
138 rotlwi r10, r10, 12 /* r10 = 4K */
141 rotlwi r10, r10, 10 /* r10 = 1K */
145 * Write out the tmp 1:1 mapping for this code in other address space
146 * Fixup EPN = RPN , TS=other address space
148 insrwi r3, r7, 1, 23 /* Bit 23 is TS for PAGEID field */
150 /* Write out the tmp mapping entries */
151 tlbwe r3, r24, PPC44x_TLB_PAGEID
152 tlbwe r4, r24, PPC44x_TLB_XLAT
153 tlbwe r5, r24, PPC44x_TLB_ATTRIB
155 subi r11, r10, 1 /* PageOffset Mask = PageSize - 1 */
156 not r10, r11 /* Mask for PageNum */
/*
 * The actual TS switch (presumably mtspr SRR0/SRR1 + rfi) is elided;
 * only the MSR image fixup and target-address computation are visible.
 */
158 /* Switch to other address space in MSR */
159 insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */
163 addi r8, r8, (2f-1b) /* Find the target offset */
165 /* Jump to the tmp mapping */
171 /* Invalidate the entry we were executing from */
173 tlbwe r3, r23, PPC44x_TLB_PAGEID
175 /* attribute fields. rwx for SUPERVISOR mode */
177 ori r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
/* 8 x 256M pages = the full 0-2GiB 1:1 map; TLB indices 3..10 are used */
179 /* Create 1:1 mapping in 256M pages */
180 xori r7, r7, 1 /* Revert back to Original TS */
182 li r8, 0 /* PageNumber */
183 li r6, 3 /* TLB Index, start at 3 */
186 rotlwi r3, r8, 28 /* Create EPN (bits 0-3) */
187 mr r4, r3 /* RPN = EPN */
188 ori r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
189 insrwi r3, r7, 1, 23 /* Set TS from r7 */
191 tlbwe r3, r6, PPC44x_TLB_PAGEID /* PageID field : EPN, V, SIZE */
192 tlbwe r4, r6, PPC44x_TLB_XLAT /* Address translation : RPN */
193 tlbwe r5, r6, PPC44x_TLB_ATTRIB /* Attributes */
195 addi r8, r8, 1 /* Increment PN */
196 addi r6, r6, 1 /* Increment TLB Index */
197 cmpwi r8, 8 /* Are we done ? */
/* conditional branch closing this loop is elided */
201 /* Jump to the new mapping 1:1 */
203 insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */
/*
 * Translate our current virtual PC into the physical (1:1) address:
 * keep the in-page offset, substitute the physical page number saved
 * from the XLAT word (r25, loaded on an elided line -- TODO confirm).
 */
207 and r8, r8, r11 /* Get our offset within page */
210 and r5, r25, r10 /* Get our target PageNum */
211 or r8, r8, r5 /* Target jump address */
217 /* Invalidate the tmp entry we used */
219 tlbwe r3, r24, PPC44x_TLB_PAGEID
/*
 * PPC 47x variant of the same 5-step plan.  47x TLB entries are 3 words
 * (tlbre/tlbwe word index 0/1/2) and the way is encoded in the effective
 * address operand rather than a flat entry index.
 */
223 #ifdef CONFIG_PPC_47x
225 /* 1:1 mapping for 47x */
230 * Load the kernel pid (0) to PID and also to MMUCR[TID].
231 * Also set the MSR IS->MMUCR STS
234 mtspr SPRN_PID, r3 /* Set PID */
235 mfmsr r4 /* Get MSR */
236 andi. r4, r4, MSR_IS@l /* TS=1? */
237 beq 1f /* If not, leave STS=0 */
238 oris r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */
239 1: mtspr SPRN_MMUCR, r3 /* Put MMUCR */
242 /* Find the entry we are running from */
/* r24/r25/r26 = the three TLB words of our own mapping; kept live below */
246 tlbre r24, r23, 0 /* TLB Word 0 */
247 tlbre r25, r23, 1 /* TLB Word 1 */
248 tlbre r26, r23, 2 /* TLB Word 2 */
252 * Invalidates all the tlb entries by writing to 256 RPNs(r4)
253 * of 4k page size in all 4 ways (0-3 in r3).
254 * This would invalidate the entire UTLB including the one we are
255 * running from. However the shadow TLB entries would help us
256 * to continue the execution, until we flush them (rfi/isync).
258 addis r3, 0, 0x8000 /* specify the way */
259 addi r4, 0, 0 /* TLB Word0 = (EPN=0, VALID = 0) */
263 /* Align the loop to speed things up. from head_44x.S */
/* inner loop body (tlbwe) is elided; only the stride updates are visible */
271 addis r3, r3, 0x2000 /* Increment the way */
275 addis r4, r4, 0x100 /* Increment the EPN */
279 /* Create the entries in the other address space */
281 rlwinm r7, r5, 27, 31, 31 /* Get the TS (Bit 26) from MSR */
282 xori r7, r7, 1 /* r7 = !TS */
284 insrwi r24, r7, 1, 21 /* Change the TS in the saved TLB word 0 */
287 * write out the TLB entries for the tmp mapping
288 * Use way '0' so that we could easily invalidate it later.
290 lis r3, 0x8000 /* Way '0' */
/* the tlbwe sequence and the TS-switching rfi are elided here */
296 /* Update the msr to the new TS */
308 * Now we are in the tmp address space.
309 * Create a 1:1 mapping for 0-2GiB in the original TS.
313 li r4, 0 /* TLB Word 0 */
314 li r5, 0 /* TLB Word 1 */
316 ori r6, r6, PPC47x_TLB2_S_RWX /* TLB word 2 */
318 li r8, 0 /* PageIndex */
320 xori r7, r7, 1 /* revert back to original TS */
323 rotlwi r5, r8, 28 /* RPN = PageIndex * 256M */
324 /* ERPN = 0 as we don't use memory above 2G */
326 mr r4, r5 /* EPN = RPN */
327 ori r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
328 insrwi r4, r7, 1, 21 /* Insert the TS to Word 0 */
330 tlbwe r4, r3, 0 /* Write out the entries */
/* words 1 and 2 tlbwe plus the loop-increment lines are elided */
334 cmpwi r8, 8 /* Have we completed ? */
337 /* make sure we complete the TLB write up */
341 * Prepare to jump to the 1:1 mapping.
342 * 1) Extract page size of the tmp mapping
343 * DSIZ = TLB_Word0[22:27]
344 * 2) Calculate the physical address of the address
347 rlwinm r10, r24, 0, 22, 27
349 cmpwi r10, PPC47x_TLB0_4K
351 li r10, 0x1000 /* r10 = 4k */
355 /* Defaults to 256M */
360 addi r4, r4, (2f-1b) /* virtual address of 2f */
362 subi r11, r10, 1 /* offsetmask = Pagesize - 1 */
363 not r10, r11 /* Pagemask = ~(offsetmask) */
/* physical target = (phys page from saved TLB word 1) | (in-page offset) */
365 and r5, r25, r10 /* Physical page */
366 and r6, r4, r11 /* offset within the current page */
368 or r5, r5, r6 /* Physical address for 2f */
370 /* Switch the TS in MSR to the original one */
379 /* Invalidate the tmp mapping */
380 lis r3, 0x8000 /* Way '0' */
382 clrrwi r24, r24, 12 /* Clear the valid bit */
387 /* Make sure we complete the TLB write and flush the shadow TLB */
/*
 * Common tail (all sub-archs): normalize MSR, then run the kexec copy
 * loop with translation off.  The mtspr SRR0/SRR1 + rfi that performs
 * the single-step MMU-off jump to "1:" is elided.
 */
395 /* Restore the parameters */
405 * Set Machine Status Register to a known status,
406 * switch the MMU off and jump to 1: in a single step.
410 ori r8, r8, MSR_RI|MSR_ME
412 addi r8, r4, 1f - relocate_new_kernel
419 /* from this point address translation is turned off */
420 /* and interrupts are disabled */
422 /* set a new stack at the bottom of our page... */
423 /* (not really needed now) */
424 addi r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
428 li r6, 0 /* checksum */
/*
 * Indirection-page walk: each word's low bits are kexec IND_* flags,
 * the rest is a page-aligned address.  r8 = current destination page,
 * r3 = current indirection page, r9 = current source page.
 */
432 0: /* top, read another word for the indirection page */
436 /* is it a destination page? (r8) */
437 rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
440 rlwinm r8, r0, 0, 0, 19 /* clear kexec flags, page align */
443 2: /* is it an indirection page? (r3) */
444 rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
447 rlwinm r3, r0, 0, 0, 19 /* clear kexec flags, page align */
451 2: /* are we done? */
452 rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
456 2: /* is it a source page? (r9) */
457 rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
460 rlwinm r9, r0, 0, 0, 19 /* clear kexec flags, page align */
/* word-at-a-time page copy; the store side and loop close are elided */
467 lwzu r0, 4(r9) /* do the copy */
481 /* To be certain of avoiding problems with self-modifying code
482 * execute a serializing instruction here.
487 mfspr r3, SPRN_PIR /* current core we are running on */
488 mr r4, r5 /* load physical address of chunk called */
490 /* jump to the entry point, usually the setup routine */
/* byte size of this trampoline, exported so kexec can copy it wholesale */
496 relocate_new_kernel_end:
498 .globl relocate_new_kernel_size
499 relocate_new_kernel_size:
500 .long relocate_new_kernel_end - relocate_new_kernel