/*
 * x86, apic: untangle the send_IPI_*() jungle
 * arch/x86/kernel/genapic_flat_64.c
 */
1 /*
2  * Copyright 2004 James Cleverdon, IBM.
3  * Subject to the GNU Public License, v.2
4  *
5  * Flat APIC subarch code.
6  *
7  * Hacked for x86-64 by James Cleverdon from i386 architecture code by
8  * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
9  * James Cleverdon.
10  */
11 #include <linux/errno.h>
12 #include <linux/threads.h>
13 #include <linux/cpumask.h>
14 #include <linux/string.h>
15 #include <linux/kernel.h>
16 #include <linux/ctype.h>
17 #include <linux/init.h>
18 #include <linux/hardirq.h>
19 #include <asm/smp.h>
20 #include <asm/ipi.h>
21 #include <asm/genapic.h>
22 #include <mach_apicdef.h>
23
24 #ifdef CONFIG_ACPI
25 #include <acpi/acpi_bus.h>
26 #endif
27
/*
 * MADT OEM check for logical flat mode.  Flat is the default 64-bit
 * genapic driver, so match unconditionally regardless of OEM ids.
 */
static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        return 1;
}
32
/* Default IRQ routing target: every currently online CPU. */
static const struct cpumask *flat_target_cpus(void)
{
        return cpu_online_mask;
}
37
/* Build the set of CPUs a vector allocated on @cpu may target. */
static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        /* Careful. Some cpus do not strictly honor the set of cpus
         * specified in the interrupt destination when using lowest
         * priority interrupt delivery mode.
         *
         * In particular there was a hyperthreading cpu observed to
         * deliver interrupts to the wrong hyperthread when only one
         * hyperthread was specified in the interrupt destination.
         */
        cpumask_clear(retmask);
        /* Allow delivery to any of the (up to 8) flat-addressable CPUs. */
        cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}
51
52 /*
53  * Set up the logical destination ID.
54  *
55  * Intel recommends to set DFR, LDR and TPR before enabling
56  * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
57  * document number 292116).  So here it goes...
58  */
59 static void flat_init_apic_ldr(void)
60 {
61         unsigned long val;
62         unsigned long num, id;
63
64         num = smp_processor_id();
65         id = 1UL << num;
66         apic_write(APIC_DFR, APIC_DFR_FLAT);
67         val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
68         val |= SET_APIC_LOGICAL_ID(id);
69         apic_write(APIC_LDR, val);
70 }
71
/*
 * Low-level helper: send @vector to the CPUs whose bits are set in the
 * logical destination word @mask.  Runs with local interrupts disabled
 * around the write — NOTE(review): presumably so the ICR programming in
 * __default_send_IPI_dest_field cannot be interleaved with another IPI
 * from an interrupt handler; confirm against that helper's definition.
 */
static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
{
        unsigned long flags;

        local_irq_save(flags);
        __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
        local_irq_restore(flags);
}
80
81 static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
82 {
83         unsigned long mask = cpumask_bits(cpumask)[0];
84
85         _flat_send_IPI_mask(mask, vector);
86 }
87
88 static void
89  flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
90 {
91         unsigned long mask = cpumask_bits(cpumask)[0];
92         int cpu = smp_processor_id();
93
94         if (cpu < BITS_PER_LONG)
95                 clear_bit(cpu, &mask);
96
97         _flat_send_IPI_mask(mask, vector);
98 }
99
/*
 * Send @vector to every online CPU except the current one.
 *
 * With CPU hotplug enabled, or for NMI_VECTOR, the hardware ALLBUT
 * shortcut is avoided and an explicit mask (online CPUs minus self) is
 * used — NOTE(review): presumably because the shortcut could also reach
 * CPUs in the middle of going offline; confirm against the IPI
 * shortcut semantics before relying on this.
 */
static void flat_send_IPI_allbutself(int vector)
{
        int cpu = smp_processor_id();
#ifdef  CONFIG_HOTPLUG_CPU
        int hotplug = 1;
#else
        int hotplug = 0;
#endif
        if (hotplug || vector == NMI_VECTOR) {
                /* Nothing to do if we are the only online CPU. */
                if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
                        unsigned long mask = cpumask_bits(cpu_online_mask)[0];

                        if (cpu < BITS_PER_LONG)
                                clear_bit(cpu, &mask);

                        _flat_send_IPI_mask(mask, vector);
                }
        } else if (num_online_cpus() > 1) {
                /* Fast path: hardware broadcast to all-but-self. */
                __default_send_IPI_shortcut(APIC_DEST_ALLBUT,
                                            vector, apic->dest_logical);
        }
}
122
123 static void flat_send_IPI_all(int vector)
124 {
125         if (vector == NMI_VECTOR) {
126                 flat_send_IPI_mask(cpu_online_mask, vector);
127         } else {
128                 __default_send_IPI_shortcut(APIC_DEST_ALLINC,
129                                             vector, apic->dest_logical);
130         }
131 }
132
/* Extract the 8-bit APIC ID from an APIC_ID register value (bits 31:24). */
static unsigned int flat_get_apic_id(unsigned long x)
{
        return (x >> 24) & 0xFFu;
}
141
/* Pack an 8-bit APIC ID into its APIC_ID register position (bits 31:24). */
static unsigned long set_apic_id(unsigned int id)
{
        return (unsigned long)((id & 0xFFu) << 24);
}
149
150 static unsigned int read_xapic_id(void)
151 {
152         unsigned int id;
153
154         id = flat_get_apic_id(apic_read(APIC_ID));
155         return id;
156 }
157
/* True if this CPU's APIC ID is present in the physical CPU map. */
static int flat_apic_id_registered(void)
{
        return physid_isset(read_xapic_id(), phys_cpu_present_map);
}
162
/*
 * Convert a cpumask to a logical-flat APIC destination: the low word
 * of the mask, limited to the flat-addressable CPUs (APIC_ALL_CPUS).
 */
static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
}
167
168 static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
169                                                 const struct cpumask *andmask)
170 {
171         unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
172         unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;
173
174         return mask1 & mask2;
175 }
176
/*
 * Physical package ID: the current CPU's hardware APIC ID shifted down
 * by @index_msb.  Note that @initial_apic_id is ignored here; the id is
 * re-read via hard_smp_processor_id() instead.
 */
static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
{
        return hard_smp_processor_id() >> index_msb;
}
181
/*
 * Logical "flat" genapic driver: up to 8 CPUs, addressed with one bit
 * each in the APIC logical destination register, using lowest-priority
 * delivery.  NULL members are callbacks this driver does not need.
 */
struct genapic apic_flat =  {
        .name                           = "flat",
        .probe                          = NULL,
        .acpi_madt_oem_check            = flat_acpi_madt_oem_check,
        .apic_id_registered             = flat_apic_id_registered,

        .irq_delivery_mode              = dest_LowestPrio,
        .irq_dest_mode                  = 1, /* logical */

        .target_cpus                    = flat_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = flat_vector_allocation_domain,
        .init_apic_ldr                  = flat_init_apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .apicid_to_node                 = NULL,
        .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = flat_phys_pkg_id,
        .mps_oem_check                  = NULL,

        /* APIC ID lives in bits 31:24 of the APIC_ID register. */
        .get_apic_id                    = flat_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFu << 24,

        .cpu_mask_to_apicid             = flat_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = flat_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = flat_send_IPI_mask,
        .send_IPI_mask_allbutself       = flat_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = flat_send_IPI_allbutself,
        .send_IPI_all                   = flat_send_IPI_all,
        .send_IPI_self                  = apic_send_IPI_self,

        /* CPU wakeup/trampoline hooks: defaults suffice for flat mode. */
        .wakeup_cpu                     = NULL,
        .trampoline_phys_low            = 0,
        .trampoline_phys_high           = 0,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .store_NMI_vector               = NULL,
        .restore_NMI_vector             = NULL,
        .inquire_remote_apic            = NULL,
};
235
/*
 * Physflat mode is used when there are more than 8 CPUs on a AMD system.
 * We cannot use logical delivery in this case because the mask
 * overflows, so use physical mode.
 */

/*
 * MADT OEM check for physical flat mode.
 *
 * Returns 1 when the FADT mandates physical APIC destinations
 * (ACPI_FADT_APIC_PHYSICAL set on a FADT newer than revision 2),
 * 0 otherwise.
 */
static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
#ifdef CONFIG_ACPI
        /*
         * Quirk: some x86_64 machines can only use physical APIC mode
         * regardless of how many processors are present (x86_64 ES7000
         * is an example).
         */
        if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
                (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
                /* Fix: the message was missing its terminating newline. */
                printk(KERN_DEBUG "system APIC only can use physical flat\n");
                return 1;
        }
#endif

        return 0;
}
258
/* Default IRQ routing target: every currently online CPU. */
static const struct cpumask *physflat_target_cpus(void)
{
        return cpu_online_mask;
}
263
/* Physical (fixed) delivery: each vector targets exactly one CPU. */
static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_copy(retmask, cpumask_of(cpu));
}
269
/* Send @vector to each CPU in @cpumask, one physical-mode IPI at a time. */
static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
        default_send_IPI_mask_sequence(cpumask, vector);
}
274
/* Send @vector to each CPU in @cpumask except the current one. */
static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
                                              int vector)
{
        default_send_IPI_mask_allbutself(cpumask, vector);
}
280
/* Send @vector to every online CPU except the current one. */
static void physflat_send_IPI_allbutself(int vector)
{
        default_send_IPI_mask_allbutself(cpu_online_mask, vector);
}
285
/* Send @vector to all online CPUs, including the current one. */
static void physflat_send_IPI_all(int vector)
{
        physflat_send_IPI_mask(cpu_online_mask, vector);
}
290
291 static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
292 {
293         int cpu;
294
295         /*
296          * We're using fixed IRQ delivery, can only return one phys APIC ID.
297          * May as well be the first.
298          */
299         cpu = cpumask_first(cpumask);
300         if ((unsigned)cpu < nr_cpu_ids)
301                 return per_cpu(x86_cpu_to_apicid, cpu);
302         else
303                 return BAD_APICID;
304 }
305
/*
 * Pick one physical APIC ID from the online CPUs in the intersection
 * of @cpumask and @andmask, or BAD_APICID if there is none.
 */
static unsigned int
physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                                const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }
        /* If the loop ran to completion, cpu is >= nr_cpu_ids. */
        if (cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);

        return BAD_APICID;
}
325
/*
 * Physical "flat" genapic driver: used when logical flat mode cannot
 * address all CPUs (more than 8, or firmware mandates physical mode).
 * IPIs are delivered as fixed interrupts to physical APIC IDs.
 */
struct genapic apic_physflat =  {

        .name                           = "physical flat",
        .probe                          = NULL,
        .acpi_madt_oem_check            = physflat_acpi_madt_oem_check,
        .apic_id_registered             = flat_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 0, /* physical */

        .target_cpus                    = physflat_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = 0,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = physflat_vector_allocation_domain,
        /* not needed, but shouldn't hurt: */
        .init_apic_ldr                  = flat_init_apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .apicid_to_node                 = NULL,
        .cpu_to_logical_apicid          = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = flat_phys_pkg_id,
        .mps_oem_check                  = NULL,

        /* APIC ID lives in bits 31:24 of the APIC_ID register. */
        .get_apic_id                    = flat_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFu << 24,

        .cpu_mask_to_apicid             = physflat_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = physflat_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = physflat_send_IPI_mask,
        .send_IPI_mask_allbutself       = physflat_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = physflat_send_IPI_allbutself,
        .send_IPI_all                   = physflat_send_IPI_all,
        .send_IPI_self                  = apic_send_IPI_self,

        /* CPU wakeup/trampoline hooks: defaults suffice here. */
        .wakeup_cpu                     = NULL,
        .trampoline_phys_low            = 0,
        .trampoline_phys_high           = 0,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .store_NMI_vector               = NULL,
        .restore_NMI_vector             = NULL,
        .inquire_remote_apic            = NULL,
};