x86_64: remove unused variable maxcpus
[linux-2.6-block.git] / arch / i386 / kernel / alternative.c

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

static int noreplace_smp = 0;
static int smp_alt_once = 0;
static int debug_alternative = 0;

/* "smp-alt-boot": apply SMP alternatives only once, at boot */
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);

/* "debug-alternative": log every patch site via DPRINTK */
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

/* "noreplace-smp": never touch the SMP lock prefixes */
static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int noreplace_paravirt = 0;

/* "noreplace-paravirt": skip patching of paravirt call sites */
static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)

#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

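/*
 * Each table above is indexed by NOP length: nops[n] points at an
 * n-byte NOP sequence. The sequences are emitted back to back into
 * .data, which is why each entry's offset accumulates 1 + 2 + ... up
 * to (n - 1).
 */
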
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */

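/*
 * Pad a range with the longest NOPs available, in chunks of up to
 * ASM_NOP_MAX bytes; e.g. 11 bytes become an 8-byte NOP followed by
 * a 3-byte NOP.
 */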
static void nop_out(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not
   handled. Tough. Make sure you disable such features by hand. */

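/*
 * An alt_instr entry records the original instruction (instr,
 * instrlen), its replacement (replacement, replacementlen) and the
 * CPU feature bit (cpuid) that selects the replacement. A replacement
 * may be shorter than the original, never longer; the difference is
 * padded with NOPs below.
 */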
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}

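/*
 * A typical producer of these entries is the alternative() macro from
 * <asm/alternative.h>. As a sketch (modeled on the i386 memory
 * barrier definitions of this era):
 *
 *	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *			       X86_FEATURE_XMM2)
 *
 * The old instruction is emitted inline and an alt_instr entry is
 * recorded in .altinstructions, so CPUs with SSE2 get "mfence"
 * patched in at boot.
 */
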
#ifdef CONFIG_SMP

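/*
 * LOCK prefix handling: __smp_locks (and each module's equivalent
 * list) holds the address of every patchable LOCK prefix byte. On a
 * uniprocessor system the prefix is replaced with a one-byte NOP;
 * if a second CPU comes online it is restored to 0xf0.
 */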
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = 0xf0; /* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	if (noreplace_smp)
		return;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		nop_out(*ptr, 1);
	}
}

struct smp_alt_module {
	/* the module owning these lock prefixes (NULL for the core kernel) */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	u8 **locks;
	u8 **locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);

void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}

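/*
 * A minimal sketch of a registration call as the module loader might
 * issue it (the section bound variables here are illustrative, not
 * the actual loader code):
 *
 *	alternatives_smp_module_add(mod, mod->name,
 *				    smp_locks_start, smp_locks_end,
 *				    core_text_start, core_text_end);
 */
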
void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once || noreplace_smp)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

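/*
 * Expected to be called from the CPU bring-up/down path: smp == 1
 * when a second CPU is coming online (restore LOCK prefixes), smp == 0
 * to switch back to UP code while at most one CPU remains online.
 */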
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not yet fixed binutils section handling bug prevents
	 * alternatives-replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif /* CONFIG_SMP */

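/*
 * For each paravirt patch site, paravirt_ops.patch() reports how many
 * bytes it actually rewrote; that can never exceed the site length
 * (the BUG_ON below enforces this) and whatever remains is padded
 * with NOPs.
 */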
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following
	 * instructions */
	sync_core();
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	unsigned long flags;

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);
	local_irq_restore(flags);
}