#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
/* Without CPU hotplug the CPU count can never grow, so patch once at boot. */
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

/* Wrapped in do { } while (0) so the macro is safe in un-braced if/else. */
#define DPRINTK(fmt, args...)					\
do {								\
	if (debug_alternative)					\
		printk(KERN_DEBUG fmt, args);			\
} while (0)

#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8
    "\t.previous");
extern const unsigned char intelnops[];
static const unsigned char *const __initconst_or_module
intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8
    "\t.previous");
extern const unsigned char k8nops[];
static const unsigned char *const __initconst_or_module
k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8
    "\t.previous");
extern const unsigned char k7nops[];
static const unsigned char *const __initconst_or_module
k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef P6_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
	P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
	P6_NOP7 P6_NOP8
    "\t.previous");
extern const unsigned char p6nops[];
static const unsigned char *const __initconst_or_module
p6_nops[ASM_NOP_MAX+1] = {
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static const unsigned char *const *__init_or_module find_nop_table(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_has(X86_FEATURE_NOPL))
		return p6_nops;
	else
		return k8_nops;
}

#else /* CONFIG_X86_64 */

static const unsigned char *const *__init_or_module find_nop_table(void)
{
	if (boot_cpu_has(X86_FEATURE_K8))
		return k8_nops;
	else if (boot_cpu_has(X86_FEATURE_K7))
		return k7_nops;
	else if (boot_cpu_has(X86_FEATURE_NOPL))
		return p6_nops;
	else
		return intel_nops;
}

#endif /* CONFIG_X86_64 */
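
/*
 * For reference (byte patterns from <asm/nops.h>): GENERIC_NOP1 is the
 * classic one-byte 0x90, while the NOPL-capable tables use the
 * multi-byte 0F 1F opcode; a 5-byte P6 nop, for instance, is
 * 0f 1f 44 00 00. One long nop decodes faster than a run of 0x90s.
 */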

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	const unsigned char *const *noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
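
/*
 * Example (hypothetical buffer): padding 11 bytes emits one
 * ASM_NOP_MAX-byte (8-byte) nop followed by a 3-byte nop from the
 * same table:
 *
 *	u8 buf[11];
 *	add_nops(buf, sizeof(buf));
 */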

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
static void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
	for (a = start; a < end; a++) {
		u8 *instr = a->instr;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		if (!boot_cpu_has(a->cpuid))
			continue;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
			instr = __va(instr - (u8 *)VSYSCALL_START +
				     (u8 *)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__func__, a->instr, instr);
		}
#endif
		memcpy(insnbuf, a->replacement, a->replacementlen);
		/* 0xe8 is a relative "call"; fix up its target offset */
		if (*insnbuf == 0xe8 && a->replacementlen == 5)
			*(s32 *)(insnbuf + 1) += a->replacement - a->instr;
		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);
		text_poke_early(instr, insnbuf, a->instrlen);
	}
}
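
/*
 * A sketch of how entries get into the table (simplified; the real
 * macros live in <asm/alternative.h>): alternative() emits the old
 * instruction inline plus a struct alt_instr record in the
 * .altinstructions section. For example, 32-bit mb() is defined as
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
 *
 * and apply_alternatives() rewrites the inline copy at boot on CPUs
 * that have X86_FEATURE_XMM2.
 */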

#ifdef CONFIG_SMP

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	mutex_lock(&text_mutex);
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text || *ptr > text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		text_poke(*ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	if (noreplace_smp)
		return;

	mutex_lock(&text_mutex);
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text || *ptr > text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		text_poke(*ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}
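
/*
 * Where the pointers in .smp_locks come from (sketch; see LOCK_PREFIX
 * in <asm/alternative.h>): each locked instruction is emitted with the
 * address of its prefix byte recorded in a separate section, roughly:
 *
 *	.section .smp_locks, "a"
 *	.align 4
 *	.long 661f			# address of the prefix below
 *	.previous
 *	661:	lock; cmpxchg %edx, (%ecx)
 *
 * which is what lets the two functions above flip single bytes between
 * 0xf0 (lock) and 0x3e (DS override, harmless here) in bulk.
 */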

struct smp_alt_module {
	/* module that owns this entry (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__func__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	mutex_lock(&smp_alt);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	mutex_unlock(&smp_alt);
}
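
/*
 * Typical caller (sketch, from the arch module loader's
 * module_finalize(); the section names are real, the variable names
 * here are illustrative):
 *
 *	alternatives_smp_module_add(me, me->name,
 *				    locks->sh_addr,
 *				    locks->sh_addr + locks->sh_size,
 *				    text->sh_addr,
 *				    text->sh_addr + text->sh_size);
 *
 * where "locks" and "text" are the module's .smp_locks and .text
 * section headers.
 */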

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	if (smp_alt_once || noreplace_smp)
		return;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		mutex_unlock(&smp_alt);
		DPRINTK("%s: %s\n", __func__, item->name);
		kfree(item);
		return;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * Older binutils section handling bug prevented
	 * alternatives-replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk("lockdep: fixing up alternatives.\n");
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	mutex_lock(&smp_alt);

	/*
	 * Avoid unnecessary switches because it forces JIT based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	mutex_unlock(&smp_alt);
}
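
/*
 * For reference: the CPU bring-up path calls alternatives_smp_switch(1)
 * before a second CPU comes online, and the hotplug-offline path calls
 * alternatives_smp_switch(0) once only one CPU remains, so lock
 * prefixes exist exactly when they are needed.
 */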

#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
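
/*
 * Background (simplified): each paravirt patch site records which
 * pv_ops slot it calls, the registers it may clobber and the length of
 * the original call sequence; pv_init_ops.patch() may overwrite the
 * indirect call with a direct call or with inline native code (e.g.
 * "cli" for irq_disable), and whatever is left over is padded with
 * nops by apply_paravirt() above.
 */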

void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local interruptions
	   that might execute the to be patched code.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
static void *__init_or_module text_poke_early(void *addr, const void *opcode,
					      size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
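
/*
 * Example (hypothetical address "addr"; this is the pattern the
 * kprobes code uses to plant its breakpoints):
 *
 *	unsigned char int3 = 0xcc;
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(addr, &int3, 1);
 *	mutex_unlock(&text_mutex);
 */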