#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;
EXPORT_SYMBOL_GPL(alternatives_patched);
#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
 * making up that nop. To get from one nop to the next, add to the array
 * base an offset equal to the sum of the sizes of all the nops preceding
 * the one we are after.
 *
 * Note: GENERIC_NOP5_ATOMIC is at the end, as it breaks the nice
 * symmetry of sizes of the previous nops.
 */
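/*
 * Concrete example of that index math: intel_nops[5] below points at the
 * 5-byte nop, which starts 1 + 2 + 3 + 4 = 10 bytes into intelnops[],
 * right after the 1-, 2-, 3- and 4-byte nops. Index 0 is unused (there
 * is no 0-byte nop) and index ASM_NOP_MAX + 1 holds the 5-byte atomic nop.
 */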
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
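
/*
 * The nop byte sequences themselves live in <asm/nops.h>: roughly,
 * GENERIC_* and K7_* pad with mov/lea-style filler, K8_* stack 0x66
 * operand-size prefixes on 0x90, and P6_* use the multi-byte 0x0f 0x1f
 * NOPL encodings, which is why p6_nops is only picked when the CPU has
 * X86_FEATURE_NOPL (see arch_init_ideal_nops() below).
 */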

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
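
/*
 * For instance, asking add_nops() for an 11-byte pad emits one
 * ASM_NOP_MAX-byte nop followed by a 3-byte nop (ASM_NOP_MAX is 8 on
 * x86), rather than eleven single-byte 0x90s that would each have to be
 * decoded separately.
 */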

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1- or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
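
/*
 * A worked example with made-up numbers: suppose the replacement JMP
 * resolves to tgt_rip = orig_insn + 0x20, so n_dspl = 0x20. That is
 * positive and fits in a signed byte once the 2-byte instruction size is
 * subtracted (0x20 - 2 = 0x1e <= 127), so the 5-byte 0xe9 JMP gets
 * rewritten as the short form "eb 1e" plus a 3-byte nop pad. At run time,
 * RIP after the short JMP is orig_insn + 2, and orig_insn + 2 + 0x1e
 * lands exactly on tgt_rip.
 */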

static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;

	if (instr[0] != 0x90)
		return;

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	sync_core();
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/* 0xe8 is a relative CALL; fix the offset. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
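
/*
 * For orientation, a sketch of what feeds this function: a use site such
 * as the 32-bit mb() barrier,
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * assembles the old instruction (plus any needed nop padding) into .text,
 * the "mfence" into .altinstr_replacement, and a struct alt_instr entry
 * referencing both into .altinstructions. The loop above then copies the
 * replacement over the original at boot on CPUs that have SSE2.
 */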

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3e}), 1);
	}
	mutex_unlock(&text_mutex);
}
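
/*
 * The bytes flipped above come from the LOCK_PREFIX macro: a "lock"
 * (0xf0) prefix on an atomic instruction, with the prefix byte's address
 * recorded in the .smp_locks section. On a uniprocessor system the
 * prefix is rewritten to the functionally inert DS segment override
 * (0x3e), so e.g. "f0 ff 00" (lock incl (%eax)) becomes "3e ff 00" (incl
 * with a redundant DS override). The instruction length is unchanged,
 * which is what makes the single-byte text_poke() safe.
 */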

struct smp_alt_module {
	/* the module owning these lock prefixes (NULL for the core kernel) */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	const s32 *locks;
	const s32 *locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&smp_alt);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&smp_alt);
}
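
/*
 * The core kernel registers its own lock-prefix table through the
 * function above from alternative_instructions() below, with mod ==
 * NULL; modules are registered by the module loader so that their
 * prefixes can be switched back to "lock" if more CPUs come up later.
 */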

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&smp_alt);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for SMP-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
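
/*
 * A rough sketch of what happens per site: the paravirt asm macros record
 * each patchable operation (e.g. a call through pv_mmu_ops) in
 * .parainstructions; pv_init_ops.patch() may then overwrite the site in
 * insnbuf with either the native instruction(s) or a direct call to the
 * hypervisor's implementation, returning how many bytes it wrote.
 */
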
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code being patched.
	 * Other CPUs are not running yet.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during
	 * code patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus are not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/*
	 * Could also do a CLFLUSH here to speed up CPU recovery; but
	 * that causes hangs on some VIA CPUs.
	 */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/*
	 * Could also do a CLFLUSH here to speed up CPU recovery; but
	 * that causes hangs on some VIA CPUs.
	 */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
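
/*
 * Design note: kernel text is normally mapped read-only, so text_poke()
 * never writes through @addr itself. Instead it maps the backing page
 * (or two pages, when the patch straddles a page boundary -- hence
 * pages[1] and FIX_TEXT_POKE1) writable at a fixed virtual address,
 * writes through that alias, then tears the mapping down and verifies
 * the bytes actually landed.
 */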

static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

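/*
 * Entered very early from the int3 trap handler. Returning 1 tells the
 * trap code that the breakpoint was ours and has been handled (regs->ip
 * was redirected to bp_int3_handler); returning 0 hands the trap over to
 * the normal int3 processing (kprobes, kgdb, ...).
 */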
int poke_int3_handler(struct pt_regs *regs)
{
	/* Pairs with the smp_wmb() in text_poke_bp() */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long)bp_int3_handler;

	return 1;
}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr: address to patch
 * @opcode: opcode of new instruction
 * @len: length to copy
 * @handler: address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoints.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of the
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in the int3 notifier, to make sure
	 * the in_progress flag is correctly ordered wrt. patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *)opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	bp_patching_in_progress = false;
	smp_wmb();

	return addr;
}
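
/*
 * A sketch of typical use (jump-label-style patching, one caller of this
 * machinery): flipping a 5-byte nop into a 5-byte near JMP at runtime
 * would look roughly like
 *
 *	unsigned char jmp[5] = { 0xe9, 0, 0, 0, 0 };
 *
 *	*(s32 *)&jmp[1] = target - (addr + 5);	// rel32 displacement
 *	text_poke_bp(addr, jmp, 5, addr + 5);	// resume after the insn
 *
 * where "addr" and "target" are hypothetical. Any CPU that hits the int3
 * mid-patch is redirected by poke_int3_handler() to the handler address,
 * here the instruction following the patch site, which behaves exactly
 * like the old nop.
 */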