Commit | Line | Data |
---|---|---|
1965aae3 PA |
1 | #ifndef _ASM_X86_SMP_H |
2 | #define _ASM_X86_SMP_H | |
c27cfeff | 3 | #ifndef __ASSEMBLY__ |
53ebef49 | 4 | #include <linux/cpumask.h> |
93b016f8 | 5 | #include <linux/init.h> |
7e1efc0c | 6 | #include <asm/percpu.h> |
53ebef49 | 7 | |
b23dab08 GC |
8 | /* |
9 | * We need the APIC definitions automatically as part of 'smp.h' | |
10 | */ | |
11 | #ifdef CONFIG_X86_LOCAL_APIC | |
12 | # include <asm/mpspec.h> | |
13 | # include <asm/apic.h> | |
14 | # ifdef CONFIG_X86_IO_APIC | |
15 | # include <asm/io_apic.h> | |
16 | # endif | |
17 | #endif | |
b23dab08 | 18 | #include <asm/thread_info.h> |
fb8fd077 | 19 | #include <asm/cpumask.h> |
b23dab08 | 20 | |
53ebef49 GC |
21 | extern int smp_num_siblings; |
22 | extern unsigned int num_processors; | |
c27cfeff | 23 | |
7e1efc0c GOC |
24 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); |
25 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | |
26 | DECLARE_PER_CPU(u16, cpu_llc_id); | |
fb26132b | 27 | DECLARE_PER_CPU(int, cpu_number); |
23ca4bba | 28 | |
c2d1cec1 MT |
/*
 * Return the per-CPU sibling map for @cpu — presumably the set of
 * hardware threads sharing @cpu's core (cf. smp_num_siblings); verify
 * against the writer in the SMP boot code.
 */
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
	return &per_cpu(cpu_sibling_map, cpu);
}
33 | ||
34 | static inline struct cpumask *cpu_core_mask(int cpu) | |
35 | { | |
36 | return &per_cpu(cpu_core_map, cpu); | |
37 | } | |
38 | ||
23ca4bba MT |
39 | DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); |
40 | DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); | |
7e1efc0c | 41 | |
9d97d0da GOC |
/*
 * Static state in head.S used to set up a CPU: the initial stack the
 * freshly-booted CPU switches to.  Layout must match the assembly side
 * in head.S exactly — do not reorder or resize these fields.
 */
extern struct {
	void *sp;		/* initial stack pointer */
	unsigned short ss;	/* initial stack segment */
} stack_start;
47 | ||
16694024 GC |
/*
 * Table of SMP primitives, overridable so a paravirt backend can
 * substitute its own implementations (see CONFIG_PARAVIRT use below);
 * native_* implementations are declared later in this header.  The
 * inline wrappers below dispatch through the global 'smp_ops'.
 */
struct smp_ops {
	/* boot-time bringup, in call order */
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	/* cross-CPU signalling */
	void (*smp_send_stop)(void);
	void (*smp_send_reschedule)(int cpu);

	/* CPU hotplug: bring a CPU up, or take this/another one down */
	int (*cpu_up)(unsigned cpu);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	/* IPIs for the generic smp_call_function machinery */
	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};
64 | ||
14522076 GC |
65 | /* Globals due to paravirt */ |
66 | extern void set_cpu_sibling_map(int cpu); | |
67 | ||
c76cb368 | 68 | #ifdef CONFIG_SMP |
d0173aea GOC |
69 | #ifndef CONFIG_PARAVIRT |
70 | #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) | |
71 | #endif | |
c76cb368 | 72 | extern struct smp_ops smp_ops; |
8678969e | 73 | |
377d6984 GC |
/* Dispatch to the active backend's smp_send_stop implementation. */
static inline void smp_send_stop(void)
{
	smp_ops.smp_send_stop();
}
78 | ||
1e3fac83 GC |
/* Backend hook: prepare the boot CPU before secondary bringup. */
static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}
83 | ||
7557da67 GC |
/* Backend hook: prepare up to @max_cpus CPUs for bringup. */
static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}
88 | ||
c5597649 GC |
/* Backend hook: called once all CPUs (up to @max_cpus) are booted. */
static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}
93 | ||
71d19549 GC |
/*
 * Bring @cpu online via the backend.  Returns the backend's status
 * code (native_cpu_up below is the native implementation).
 */
static inline int __cpu_up(unsigned int cpu)
{
	return smp_ops.cpu_up(cpu);
}
98 | ||
93be71b6 AN |
/* Hotplug: disable the current CPU via the backend; returns status. */
static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}
103 | ||
/* Hotplug: tear down (dead) @cpu via the backend. */
static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}
108 | ||
/* Hotplug: park the current (offlined) CPU via the backend. */
static inline void play_dead(void)
{
	smp_ops.play_dead();
}
113 | ||
8678969e GC |
/* Send a reschedule IPI to @cpu through the backend. */
static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}
64b1a21e | 118 | |
3b16cf87 JA |
/* Generic smp_call_function_single() hook: IPI one CPU via the backend. */
static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}
123 | ||
/*
 * Generic smp_call_function() hook: IPI every CPU in @mask.
 *
 * NOTE(review): @mask is passed by value, so a full cpumask_t
 * (NR_CPUS bits) is copied onto the stack on every call, even though
 * the backend op only needs a const pointer.  A pointer-taking variant
 * would avoid the copy on large-NR_CPUS configs — interface change, so
 * flagged rather than fixed here.
 */
static inline void arch_send_call_function_ipi(cpumask_t mask)
{
	smp_ops.send_call_func_ipi(&mask);
}
71d19549 | 128 | |
8227dce7 | 129 | void cpu_disable_common(void); |
1e3fac83 | 130 | void native_smp_prepare_boot_cpu(void); |
7557da67 | 131 | void native_smp_prepare_cpus(unsigned int max_cpus); |
c5597649 | 132 | void native_smp_cpus_done(unsigned int max_cpus); |
71d19549 | 133 | int native_cpu_up(unsigned int cpunum); |
93be71b6 AN |
134 | int native_cpu_disable(void); |
135 | void native_cpu_die(unsigned int cpu); | |
136 | void native_play_dead(void); | |
a21f5d88 | 137 | void play_dead_common(void); |
93be71b6 | 138 | |
bcda016e | 139 | void native_send_call_func_ipi(const struct cpumask *mask); |
3b16cf87 | 140 | void native_send_call_func_single_ipi(int cpu); |
93b016f8 | 141 | |
1d89a7f0 | 142 | void smp_store_cpu_info(int id); |
c70dcb74 | 143 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) |
a9c057c1 GC |
144 | |
/* We don't mark CPUs online until __cpu_up(), so we need another measure */
/*
 * Count CPUs that have been called out for boot (cpu_callout_mask),
 * including those not yet marked online.
 */
static inline int num_booting_cpus(void)
{
	return cpumask_weight(cpu_callout_mask);
}
14adf855 | 150 | #endif /* CONFIG_SMP */ |
a9c057c1 | 151 | |
2fe60147 AS |
152 | extern unsigned disabled_cpus __cpuinitdata; |
153 | ||
a9c057c1 GC |
154 | #ifdef CONFIG_X86_32_SMP |
155 | /* | |
156 | * This function is needed by all SMP systems. It must _always_ be valid | |
157 | * from the initial startup. We map APIC_BASE very early in page_setup(), | |
158 | * so this is correct in the x86 case. | |
159 | */ | |
6dbde353 | 160 | #define raw_smp_processor_id() (percpu_read(cpu_number)) |
a9c057c1 GC |
161 | extern int safe_smp_processor_id(void); |
162 | ||
163 | #elif defined(CONFIG_X86_64_SMP) | |
ea927906 | 164 | #define raw_smp_processor_id() (percpu_read(cpu_number)) |
a9c057c1 GC |
165 | |
166 | #define stack_smp_processor_id() \ | |
167 | ({ \ | |
168 | struct thread_info *ti; \ | |
169 | __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ | |
170 | ti->cpu; \ | |
171 | }) | |
172 | #define safe_smp_processor_id() smp_processor_id() | |
173 | ||
c76cb368 | 174 | #endif |
16694024 | 175 | |
1b000843 GC |
176 | #ifdef CONFIG_X86_LOCAL_APIC |
177 | ||
1b374e4d | 178 | #ifndef CONFIG_X86_64 |
1b000843 GC |
/*
 * Read this CPU's logical APIC ID straight from the memory-mapped
 * local APIC LDR register (32-bit only; APIC_BASE is mapped early —
 * see the comment above raw_smp_processor_id() for X86_32_SMP).
 */
static inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}
184 | ||
ac23d4ee JS |
185 | #endif |
186 | ||
1b000843 | 187 | extern int hard_smp_processor_id(void); |
1b000843 GC |
188 | |
189 | #else /* CONFIG_X86_LOCAL_APIC */ | |
190 | ||
191 | # ifndef CONFIG_SMP | |
192 | # define hard_smp_processor_id() 0 | |
193 | # endif | |
194 | ||
195 | #endif /* CONFIG_X86_LOCAL_APIC */ | |
196 | ||
c27cfeff | 197 | #endif /* __ASSEMBLY__ */ |
1965aae3 | 198 | #endif /* _ASM_X86_SMP_H */ |