/*
 * PPC64 code to handle Linux booting another kernel.
 *
 * Copyright (C) 2004-2005, IBM Corp.
 *
 * Created by: Milton D Miller II
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */


#include <linux/kexec.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/init_task.h>
#include <linux/errno.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/current.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h>	/* _end */
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>

int default_machine_kexec_prepare(struct kimage *image)
{
	int i;
	unsigned long begin, end;	/* limits of segment */
	unsigned long low, high;	/* limits of blocked memory range */
	struct device_node *node;
	const unsigned long *basep;
	const unsigned int *sizep;

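	/*
	 * kexec_sequence() relies on the platform's hpte_clear_all hook
	 * to tear down the hash table before jumping to the new kernel;
	 * without it we cannot kexec, so refuse the load up front.
	 */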
	if (!ppc_md.hpte_clear_all)
		return -ENOENT;

	/*
	 * Since we use the kernel fault handlers and paging code to
	 * handle virtual mode, we must make sure no destination
	 * overlaps kernel static data or bss.
	 */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;

	/*
	 * For non-LPAR, we absolutely cannot overwrite the MMU hash
	 * table, since we are still using the bolted entries in it to
	 * do the copy. Check that here.
	 *
	 * It is safe if the end is below the start of the blocked
	 * region (end <= low), or if the beginning is after the
	 * end of the blocked region (begin >= high). Use the
	 * boolean identity !(a || b) === (!a && !b).
	 */
	if (htab_address) {
		low = __pa(htab_address);
		high = low + htab_size_bytes;

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	/* We also should not overwrite the TCE tables */
	for (node = of_find_node_by_type(NULL, "pci"); node != NULL;
	     node = of_find_node_by_type(node, "pci")) {
		basep = of_get_property(node, "linux,tce-base", NULL);
		sizep = of_get_property(node, "linux,tce-size", NULL);
		if (basep == NULL || sizep == NULL)
			continue;

		low = *basep;
		high = low + (*sizep);

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	return 0;
}

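/*
 * Mask of the entry-type flag bits used in the kexec indirection list;
 * the individual IND_* values are defined in include/linux/kexec.h.
 */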
#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)

static void copy_segments(unsigned long ind)
{
	unsigned long entry;
	unsigned long *ptr;
	void *dest;
	void *addr;

	/*
	 * We rely on kexec_load to create a list that properly
	 * initializes these pointers before they are used.
	 * We will still crash if the list is wrong, but at least
	 * the compiler will be quiet.
	 */
	ptr = NULL;
	dest = NULL;

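	/*
	 * Each entry is a physical page address with its type encoded in
	 * the low flag bits: switch destination, follow an indirection
	 * page, copy a source page, or stop at IND_DONE.
	 */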
	for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
		addr = __va(entry & PAGE_MASK);

		switch (entry & IND_FLAGS) {
		case IND_DESTINATION:
			dest = addr;
			break;
		case IND_INDIRECTION:
			ptr = addr;
			break;
		case IND_SOURCE:
			copy_page(dest, addr);
			dest += PAGE_SIZE;
			break;
		}
	}
}

void kexec_copy_flush(struct kimage *image)
{
	long i, nr_segments = image->nr_segments;
	struct kexec_segment ranges[KEXEC_SEGMENT_MAX];

	/* save the ranges on the stack to efficiently flush the icache */
	memcpy(ranges, image->segment, sizeof(ranges));

	/*
	 * After this call we may not use anything allocated in dynamic
	 * memory, including *image.
	 *
	 * Only globals and the stack are allowed.
	 */
	copy_segments(image->head);

	/*
	 * We need to clear the icache for all destination pages at some
	 * point, including ones that were already in place on the
	 * original copy.
	 */
	for (i = 0; i < nr_segments; i++)
		flush_icache_range((unsigned long)__va(ranges[i].mem),
			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}

#ifdef CONFIG_SMP

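/*
 * Handshake flag: secondaries spin in kexec_smp_down() until the
 * kexecing CPU sets this after confirming all CPUs have interrupts
 * disabled.
 */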
static int kexec_all_irq_disabled = 0;

static void kexec_smp_down(void *arg)
{
	local_irq_disable();
	mb(); /* make sure our irqs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
	while (kexec_all_irq_disabled == 0)
		cpu_relax();
	mb(); /* make sure all irqs are disabled before this */
	hw_breakpoint_disable();
	/*
	 * Now that every CPU has IRQs off, we can clear out any pending
	 * IPIs and be sure that no more will come in after this.
	 */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);

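	/*
	 * Drop into real mode and spin until we are handed an entry
	 * point into the new kernel; this call does not return.
	 */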
	kexec_smp_wait();
	/* NOTREACHED */
}

static void kexec_prepare_cpus_wait(int wait_state)
{
	int my_cpu, i, notified = -1;

	hw_breakpoint_disable();
	my_cpu = get_cpu();
	/* Make sure each CPU has at least made it to the state we need.
	 *
	 * FIXME: There is a (slim) chance of a problem if not all of the CPUs
	 * are correctly onlined. If somehow we start a CPU on boot with RTAS
	 * start-cpu, but somehow that CPU doesn't write callin_cpu_map[] in
	 * time, the boot CPU will time out. If it does eventually execute
	 * stuff, the secondary will start up (paca[].cpu_start was written) and
	 * get into a peculiar state. If the platform supports
	 * smp_ops->take_timebase(), the secondary CPU will probably be spinning
	 * in there. If not (i.e. pseries), the secondary will continue on and
	 * try to online itself/idle/etc. If it survives that, we need to find
	 * these possible-but-not-online-but-should-be CPUs and chaperone them
	 * into kexec_smp_wait().
	 */
	for_each_online_cpu(i) {
		if (i == my_cpu)
			continue;

		while (paca[i].kexec_state < wait_state) {
			barrier();
			if (i != notified) {
				printk(KERN_INFO "kexec: waiting for cpu %d "
				       "(physical %d) to enter %i state\n",
				       i, paca[i].hw_cpu_id, wait_state);
				notified = i;
			}
		}
	}
	mb();
}

static void kexec_prepare_cpus(void)
{
	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	local_irq_disable();
	mb(); /* make sure IRQs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;

	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
	/* we are sure every CPU has IRQs off at this point */
	kexec_all_irq_disabled = 1;

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	/*
	 * Before removing MMU mappings make sure all CPUs have entered real
	 * mode:
	 */
	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);

	put_cpu();
}

#else /* ! SMP */

static void kexec_prepare_cpus(void)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 *
	 * We need to release the cpus if we are ever going from a
	 * UP to an SMP kernel.
	 */
	smp_release_cpus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);
	local_irq_disable();
}

#endif /* SMP */

/*
 * kexec thread structure and stack.
 *
 * We need to make sure that this is 16384-byte aligned due to the
 * way process stacks are handled. It also must be statically allocated
 * or allocated as part of the kimage, because everything else may be
 * overwritten when we copy the kexec image. We piggyback on the
 * "init_task" linker section here to statically allocate a stack.
 *
 * We could use a smaller stack if we don't care about anything using
 * current, but that audit has not been performed.
 */
static union thread_union kexec_stack __init_task_data =
	{ };

/*
 * For similar reasons to the stack above, the kexecing CPU needs to be on a
 * static PACA; we switch to kexec_paca.
 */
struct paca_struct kexec_paca;

/* Our assembly helper, in kexec_stub.S */
extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
					void *image, void *control,
					void (*clear_all)(void)) ATTRIB_NORET;

/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
	/* prepare control code if any */

	/*
	 * If the kexec boot is the normal one, we need to shut down the
	 * other CPUs into our wait loop and quiesce interrupts.
	 * Otherwise, in the crash case (crashing_cpu >= 0), stopping the
	 * other CPUs and collecting their pt_regs has already been done
	 * via the debugger IPI.
	 */

	if (crashing_cpu == -1)
		kexec_prepare_cpus();

	pr_debug("kexec: Starting switchover sequence.\n");

	/* switch to a statically allocated stack. Based on irq stack code.
	 * XXX: the task struct will likely be invalid once we do the copy!
	 */
	kexec_stack.thread_info.task = current_thread_info()->task;
	kexec_stack.thread_info.flags = 0;

	/* We need a static PACA, too; copy this CPU's PACA over and switch to
	 * it. Also poison per_cpu_offset to catch anyone using non-static
	 * data.
	 */
	memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct));
	kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL;
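	/*
	 * Rebase the global paca pointer so that paca[paca_index] now
	 * resolves to the static kexec_paca; RELOC_HIDE keeps the
	 * compiler from making assumptions about the out-of-object
	 * pointer arithmetic.
	 */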
	paca = (struct paca_struct *)RELOC_HIDE(&kexec_paca, 0) -
		kexec_paca.paca_index;
	setup_paca(&kexec_paca);

	/* XXX: If anyone does 'dynamic lppacas' this will also need to be
	 * switched to a static version!
	 */

	/* Some things are best done in assembly. Finding globals with
	 * a toc is easier in C, so pass in what we can.
	 */
	kexec_sequence(&kexec_stack, image->start, image,
			page_address(image->control_code_page),
			ppc_md.hpte_clear_all);
	/* NOTREACHED */
}

/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;

static struct property htab_base_prop = {
	.name = "linux,htab-base",
	.length = sizeof(unsigned long),
	.value = &htab_base,
};

static struct property htab_size_prop = {
	.name = "linux,htab-size",
	.length = sizeof(unsigned long),
	.value = &htab_size_bytes,
};

static int __init export_htab_values(void)
{
	struct device_node *node;
	struct property *prop;

	/* On machines with no htab, htab_address is NULL */
	if (!htab_address)
		return -ENODEV;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return -ENODEV;

	/* remove any stale properties so ours can be found */
	prop = of_find_property(node, htab_base_prop.name, NULL);
	if (prop)
		prom_remove_property(node, prop);
	prop = of_find_property(node, htab_size_prop.name, NULL);
	if (prop)
		prom_remove_property(node, prop);

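	/*
	 * Publish the hash table's physical base and size under /chosen
	 * so the second kernel can find it.
	 */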
	htab_base = __pa(htab_address);
	prom_add_property(node, &htab_base_prop);
	prom_add_property(node, &htab_size_prop);

	of_node_put(node);
	return 0;
}
late_initcall(export_htab_values);