x86: merge winchip-2 and winchip-2a cpu choices
[linux-2.6-block.git] / include / asm-x86 / smp.h
... / ...
CommitLineData
1#ifndef ASM_X86__SMP_H
2#define ASM_X86__SMP_H
3#ifndef __ASSEMBLY__
4#include <linux/cpumask.h>
5#include <linux/init.h>
6#include <asm/percpu.h>
7
8/*
9 * We need the APIC definitions automatically as part of 'smp.h'
10 */
11#ifdef CONFIG_X86_LOCAL_APIC
12# include <asm/mpspec.h>
13# include <asm/apic.h>
14# ifdef CONFIG_X86_IO_APIC
15# include <asm/io_apic.h>
16# endif
17#endif
18#include <asm/pda.h>
19#include <asm/thread_info.h>
20
21extern cpumask_t cpu_callout_map;
22extern cpumask_t cpu_initialized;
23extern cpumask_t cpu_callin_map;
24
25extern void (*mtrr_hook)(void);
26extern void zap_low_mappings(void);
27
28extern int __cpuinit get_local_pda(int cpu);
29
30extern int smp_num_siblings;
31extern unsigned int num_processors;
32extern cpumask_t cpu_initialized;
33
34DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
35DECLARE_PER_CPU(cpumask_t, cpu_core_map);
36DECLARE_PER_CPU(u16, cpu_llc_id);
37#ifdef CONFIG_X86_32
38DECLARE_PER_CPU(int, cpu_number);
39#endif
40
41DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
42DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
43
/* Static state in head.S used to set up a CPU */
extern struct {
	void *sp;		/* stack pointer the starting CPU will load */
	unsigned short ss;	/* matching stack segment */
} stack_start;
49
/*
 * Backend operations for SMP control.  The inline wrappers below dispatch
 * through a single global instance so a paravirt backend can override the
 * native implementations.
 */
struct smp_ops {
	/* boot-time CPU bring-up */
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	/* inter-processor signalling */
	void (*smp_send_stop)(void);
	void (*smp_send_reschedule)(int cpu);

	/* CPU hotplug */
	int (*cpu_up)(unsigned cpu);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	/* cross-CPU function-call IPIs */
	void (*send_call_func_ipi)(cpumask_t mask);
	void (*send_call_func_single_ipi)(int cpu);
};
66
67/* Globals due to paravirt */
68extern void set_cpu_sibling_map(int cpu);
69
70#ifdef CONFIG_SMP
71#ifndef CONFIG_PARAVIRT
72#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
73#endif
74extern struct smp_ops smp_ops;
75
/* Stop the other CPUs; dispatches through smp_ops (paravirt-overridable). */
static inline void smp_send_stop(void)
{
	smp_ops.smp_send_stop();
}
80
/* Prepare the boot CPU for SMP operation via the active backend. */
static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}
85
/* Prepare up to @max_cpus secondary CPUs via the active backend. */
static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}
90
/* Backend hook invoked once SMP bring-up has finished. */
static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}
95
/* Bring @cpu online; returns 0 on success or a negative errno. */
static inline int __cpu_up(unsigned int cpu)
{
	return smp_ops.cpu_up(cpu);
}
100
/* Disable the calling CPU (hotplug path); returns 0 or a negative errno. */
static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}
105
/* Tear down @cpu after it has been taken offline. */
static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}
110
/* Park the calling CPU after it has gone offline; does not return. */
static inline void play_dead(void)
{
	smp_ops.play_dead();
}
115
/* Send a reschedule IPI to @cpu. */
static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}
120
/* Send a "call function" IPI to a single CPU. */
static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}
125
/* Send a "call function" IPI to every CPU in @mask. */
static inline void arch_send_call_function_ipi(cpumask_t mask)
{
	smp_ops.send_call_func_ipi(mask);
}
130
/* Native (bare-metal) implementations backing the smp_ops hooks. */
void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
int native_cpu_up(unsigned int cpunum);
int native_cpu_disable(void);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
void play_dead_common(void);

void native_send_call_func_ipi(cpumask_t mask);
void native_send_call_func_single_ipi(int cpu);

void smp_store_cpu_info(int id);
/* Physical APIC id of @cpu, from the per-cpu map. */
#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
146
/* We don't mark CPUs online until __cpu_up(), so we need another measure */
static inline int num_booting_cpus(void)
{
	/* number of CPUs currently set in cpu_callout_map */
	return cpus_weight(cpu_callout_map);
}
152#endif /* CONFIG_SMP */
153
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_CPU)
extern void prefill_possible_map(void);
#else
/* Without CPU hotplug there is nothing to prefill. */
static inline void prefill_possible_map(void)
{
}
#endif
161
162extern unsigned disabled_cpus __cpuinitdata;
163
#ifdef CONFIG_X86_32_SMP
/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
/* 32-bit: the CPU number lives in a per-cpu variable. */
#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
extern int safe_smp_processor_id(void);

#elif defined(CONFIG_X86_64_SMP)
/* 64-bit: the CPU number lives in the PDA. */
#define raw_smp_processor_id() read_pda(cpunumber)

/*
 * Derive the CPU number from the stack pointer: mask %rsp down to the
 * start of the current thread_info and read its ->cpu field.
 */
#define stack_smp_processor_id()					\
({									\
	struct thread_info *ti;						\
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
	ti->cpu;							\
})
#define safe_smp_processor_id()	smp_processor_id()

#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
/* UP build: there is only the boot CPU. */
#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
#define safe_smp_processor_id()		0
#define stack_smp_processor_id() 	0
#endif
189
190#ifdef CONFIG_X86_LOCAL_APIC
191
192#ifndef CONFIG_X86_64
/* Logical APIC id of the current CPU, read from the memory-mapped LDR. */
static inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}
198
199#include <mach_apicdef.h>
200static inline unsigned int read_apic_id(void)
201{
202 unsigned int reg;
203
204 reg = *(u32 *)(APIC_BASE + APIC_ID);
205
206 return GET_APIC_ID(reg);
207}
208#endif
209
210
211# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
212extern int hard_smp_processor_id(void);
213# else
214#include <mach_apicdef.h>
/* Physical APIC id of the executing CPU. */
static inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return read_apic_id();
}
220# endif /* APIC_DEFINITION */
221
222#else /* CONFIG_X86_LOCAL_APIC */
223
224# ifndef CONFIG_SMP
225# define hard_smp_processor_id() 0
226# endif
227
228#endif /* CONFIG_X86_LOCAL_APIC */
229
230#endif /* __ASSEMBLY__ */
231#endif /* ASM_X86__SMP_H */