Commit | Line | Data |
---|---|---|
82023503 GC |
1 | #include <linux/cpumask.h> |
2 | #include <linux/interrupt.h> | |
82023503 GC |
3 | |
4 | #include <linux/mm.h> | |
5 | #include <linux/delay.h> | |
6 | #include <linux/spinlock.h> | |
7 | #include <linux/kernel_stat.h> | |
8 | #include <linux/mc146818rtc.h> | |
9 | #include <linux/cache.h> | |
82023503 | 10 | #include <linux/cpu.h> |
82023503 GC |
11 | |
12 | #include <asm/smp.h> | |
13 | #include <asm/mtrr.h> | |
14 | #include <asm/tlbflush.h> | |
15 | #include <asm/mmu_context.h> | |
7b6aa335 | 16 | #include <asm/apic.h> |
82023503 | 17 | #include <asm/proto.h> |
43f39890 | 18 | #include <asm/ipi.h> |
82023503 | 19 | |
1a8aa8ac DV |
20 | void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) |
21 | { | |
22 | /* | |
23 | * Subtle. In the case of the 'never do double writes' workaround | |
24 | * we have to lock out interrupts to be safe. As we don't care | |
25 | * of the value read we use an atomic rmw access to avoid costly | |
26 | * cli/sti. Otherwise we use an even cheaper single atomic write | |
27 | * to the APIC. | |
28 | */ | |
29 | unsigned int cfg; | |
30 | ||
31 | /* | |
32 | * Wait for idle. | |
33 | */ | |
34 | __xapic_wait_icr_idle(); | |
35 | ||
36 | /* | |
37 | * No need to touch the target chip field | |
38 | */ | |
39 | cfg = __prepare_ICR(shortcut, vector, dest); | |
40 | ||
41 | /* | |
42 | * Send the IPI. The write to APIC_ICR fires this off. | |
43 | */ | |
44 | native_apic_mem_write(APIC_ICR, cfg); | |
45 | } | |
46 | ||
47 | /* | |
48 | * This is used to send an IPI with no shorthand notation (the destination is | |
49 | * specified in bits 56 to 63 of the ICR). | |
50 | */ | |
51 | void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest) | |
52 | { | |
53 | unsigned long cfg; | |
54 | ||
55 | /* | |
56 | * Wait for idle. | |
57 | */ | |
58 | if (unlikely(vector == NMI_VECTOR)) | |
59 | safe_apic_wait_icr_idle(); | |
60 | else | |
61 | __xapic_wait_icr_idle(); | |
62 | ||
63 | /* | |
64 | * prepare target chip field | |
65 | */ | |
66 | cfg = __prepare_ICR2(mask); | |
67 | native_apic_mem_write(APIC_ICR2, cfg); | |
68 | ||
69 | /* | |
70 | * program the ICR | |
71 | */ | |
72 | cfg = __prepare_ICR(0, vector, dest); | |
73 | ||
74 | /* | |
75 | * Send the IPI. The write to APIC_ICR fires this off. | |
76 | */ | |
77 | native_apic_mem_write(APIC_ICR, cfg); | |
78 | } | |
79 | ||
53be0fac TG |
80 | void default_send_IPI_single_phys(int cpu, int vector) |
81 | { | |
82 | unsigned long flags; | |
83 | ||
84 | local_irq_save(flags); | |
85 | __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu), | |
86 | vector, APIC_DEST_PHYSICAL); | |
87 | local_irq_restore(flags); | |
88 | } | |
89 | ||
c5e95482 YL |
90 | void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) |
91 | { | |
92 | unsigned long query_cpu; | |
93 | unsigned long flags; | |
94 | ||
95 | /* | |
96 | * Hack. The clustered APIC addressing mode doesn't allow us to send | |
97 | * to an arbitrary mask, so I do a unicast to each CPU instead. | |
98 | * - mbligh | |
99 | */ | |
100 | local_irq_save(flags); | |
101 | for_each_cpu(query_cpu, mask) { | |
102 | __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, | |
103 | query_cpu), vector, APIC_DEST_PHYSICAL); | |
104 | } | |
105 | local_irq_restore(flags); | |
106 | } | |
107 | ||
108 | void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, | |
109 | int vector) | |
110 | { | |
111 | unsigned int this_cpu = smp_processor_id(); | |
112 | unsigned int query_cpu; | |
113 | unsigned long flags; | |
114 | ||
115 | /* See Hack comment above */ | |
116 | ||
117 | local_irq_save(flags); | |
118 | for_each_cpu(query_cpu, mask) { | |
119 | if (query_cpu == this_cpu) | |
120 | continue; | |
121 | __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, | |
122 | query_cpu), vector, APIC_DEST_PHYSICAL); | |
123 | } | |
124 | local_irq_restore(flags); | |
125 | } | |
126 | ||
7e29393b TG |
127 | /* |
128 | * Helper function for APICs which insist on cpumasks | |
129 | */ | |
130 | void default_send_IPI_single(int cpu, int vector) | |
131 | { | |
132 | apic->send_IPI_mask(cpumask_of(cpu), vector); | |
133 | } | |
134 | ||
1245e166 TH |
135 | #ifdef CONFIG_X86_32 |
136 | ||
c5e95482 YL |
137 | void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, |
138 | int vector) | |
139 | { | |
140 | unsigned long flags; | |
141 | unsigned int query_cpu; | |
142 | ||
143 | /* | |
144 | * Hack. The clustered APIC addressing mode doesn't allow us to send | |
145 | * to an arbitrary mask, so I do a unicasts to each CPU instead. This | |
146 | * should be modified to do 1 message per cluster ID - mbligh | |
147 | */ | |
148 | ||
149 | local_irq_save(flags); | |
150 | for_each_cpu(query_cpu, mask) | |
151 | __default_send_IPI_dest_field( | |
6f802c4b TH |
152 | early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), |
153 | vector, apic->dest_logical); | |
c5e95482 YL |
154 | local_irq_restore(flags); |
155 | } | |
156 | ||
157 | void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, | |
158 | int vector) | |
159 | { | |
160 | unsigned long flags; | |
161 | unsigned int query_cpu; | |
162 | unsigned int this_cpu = smp_processor_id(); | |
163 | ||
164 | /* See Hack comment above */ | |
165 | ||
166 | local_irq_save(flags); | |
167 | for_each_cpu(query_cpu, mask) { | |
168 | if (query_cpu == this_cpu) | |
169 | continue; | |
170 | __default_send_IPI_dest_field( | |
6f802c4b TH |
171 | early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), |
172 | vector, apic->dest_logical); | |
c5e95482 YL |
173 | } |
174 | local_irq_restore(flags); | |
175 | } | |
176 | ||
c5e95482 YL |
177 | /* |
178 | * This is only used on smaller machines. | |
179 | */ | |
180 | void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) | |
181 | { | |
182 | unsigned long mask = cpumask_bits(cpumask)[0]; | |
183 | unsigned long flags; | |
184 | ||
e3f0f36d | 185 | if (!mask) |
83d349f3 LT |
186 | return; |
187 | ||
c5e95482 YL |
188 | local_irq_save(flags); |
189 | WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); | |
190 | __default_send_IPI_dest_field(mask, vector, apic->dest_logical); | |
191 | local_irq_restore(flags); | |
192 | } | |
193 | ||
void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, thus avoid sending IPIs in this case.
	 */
	if (num_online_cpus() <= 1)
		return;

	__default_local_send_IPI_allbutself(vector);
}
205 | ||
/* Broadcast @vector to every CPU, including the sender. */
void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}
210 | ||
dac5f412 | 211 | void default_send_IPI_self(int vector) |
82023503 | 212 | { |
43f39890 | 213 | __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical); |
82023503 GC |
214 | } |
215 | ||
216 | /* must come after the send_IPI functions above for inlining */ | |
82023503 GC |
217 | static int convert_apicid_to_cpu(int apic_id) |
218 | { | |
219 | int i; | |
220 | ||
221 | for_each_possible_cpu(i) { | |
222 | if (per_cpu(x86_cpu_to_apicid, i) == apic_id) | |
223 | return i; | |
224 | } | |
225 | return -1; | |
226 | } | |
227 | ||
228 | int safe_smp_processor_id(void) | |
229 | { | |
230 | int apicid, cpuid; | |
231 | ||
93984fbd | 232 | if (!boot_cpu_has(X86_FEATURE_APIC)) |
82023503 GC |
233 | return 0; |
234 | ||
235 | apicid = hard_smp_processor_id(); | |
236 | if (apicid == BAD_APICID) | |
237 | return 0; | |
238 | ||
239 | cpuid = convert_apicid_to_cpu(apicid); | |
240 | ||
241 | return cpuid >= 0 ? cpuid : 0; | |
242 | } | |
243 | #endif |