arch/x86/kernel/apic/apic_flat_64.c
/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Flat APIC subarch code.
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>

#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif

static struct apic apic_physflat;
static struct apic apic_flat;

struct apic __read_mostly *apic = &apic_flat;
EXPORT_SYMBOL_GPL(apic);

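/*
 * Logical flat mode is the boot-time default (see the 'apic' pointer
 * above), so accept any MADT unconditionally here.  physflat, which is
 * checked first (see apic_drivers() at the bottom of this file), can
 * still override this choice.
 */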
static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        return 1;
}

static const struct cpumask *flat_target_cpus(void)
{
        return cpu_online_mask;
}

static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        /* Careful. Some cpus do not strictly honor the set of cpus
         * specified in the interrupt destination when using lowest
         * priority interrupt delivery mode.
         *
         * In particular there was a hyperthreading cpu observed to
         * deliver interrupts to the wrong hyperthread when only one
         * hyperthread was specified in the interrupt destination.
         */
        cpumask_clear(retmask);
        cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends setting DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void flat_init_apic_ldr(void)
{
        unsigned long val;
        unsigned long num, id;

        num = smp_processor_id();
        id = 1UL << num;
        apic_write(APIC_DFR, APIC_DFR_FLAT);
        val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
        val |= SET_APIC_LOGICAL_ID(id);
        apic_write(APIC_LDR, val);
}

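/*
 * Send @vector to the CPUs encoded in @mask, a logical-destination
 * bitmask with one bit per CPU.  Interrupts are disabled around the
 * ICR programming so that an interrupt handler cannot slip another IPI
 * in between the destination and command writes.
 */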
static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
{
        unsigned long flags;

        local_irq_save(flags);
        __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
        local_irq_restore(flags);
}

static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
        unsigned long mask = cpumask_bits(cpumask)[0];

        _flat_send_IPI_mask(mask, vector);
}

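/*
 * Like flat_send_IPI_mask(), but with the sending CPU cleared from the
 * destination mask.
 */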
static void
flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
{
        unsigned long mask = cpumask_bits(cpumask)[0];
        int cpu = smp_processor_id();

        if (cpu < BITS_PER_LONG)
                clear_bit(cpu, &mask);

        _flat_send_IPI_mask(mask, vector);
}

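/*
 * With CPU hotplug enabled, or when sending an NMI, avoid the ALLBUT
 * shortcut and target an explicit mask of the online CPUs minus
 * ourselves; with hotplug the shortcut could otherwise reach CPUs that
 * are offline or in the middle of going offline.
 */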
static void flat_send_IPI_allbutself(int vector)
{
        int cpu = smp_processor_id();
#ifdef  CONFIG_HOTPLUG_CPU
        int hotplug = 1;
#else
        int hotplug = 0;
#endif
        if (hotplug || vector == NMI_VECTOR) {
                if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
                        unsigned long mask = cpumask_bits(cpu_online_mask)[0];

                        if (cpu < BITS_PER_LONG)
                                clear_bit(cpu, &mask);

                        _flat_send_IPI_mask(mask, vector);
                }
        } else if (num_online_cpus() > 1) {
                __default_send_IPI_shortcut(APIC_DEST_ALLBUT,
                                            vector, apic->dest_logical);
        }
}

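/*
 * NMIs go out via the explicit online-CPU mask; everything else can use
 * the ALLINC (all CPUs including self) shortcut.
 */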
static void flat_send_IPI_all(int vector)
{
        if (vector == NMI_VECTOR) {
                flat_send_IPI_mask(cpu_online_mask, vector);
        } else {
                __default_send_IPI_shortcut(APIC_DEST_ALLINC,
                                            vector, apic->dest_logical);
        }
}

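/* In xAPIC mode the APIC ID lives in bits 31:24 of the APIC_ID register. */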
static unsigned int flat_get_apic_id(unsigned long x)
{
        unsigned int id;

        id = (((x)>>24) & 0xFFu);

        return id;
}

static unsigned long set_apic_id(unsigned int id)
{
        unsigned long x;

        x = ((id & 0xFFu)<<24);
        return x;
}

static unsigned int read_xapic_id(void)
{
        unsigned int id;

        id = flat_get_apic_id(apic_read(APIC_ID));
        return id;
}

static int flat_apic_id_registered(void)
{
        return physid_isset(read_xapic_id(), phys_cpu_present_map);
}

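/*
 * The package ID is the initial APIC ID with the SMT/core bits shifted
 * out; @index_msb is the number of those low bits.
 */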
static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
{
        return initial_apic_id >> index_msb;
}

static struct apic apic_flat =  {
        .name                           = "flat",
        .probe                          = NULL,
        .acpi_madt_oem_check            = flat_acpi_madt_oem_check,
        .apic_id_registered             = flat_apic_id_registered,

        .irq_delivery_mode              = dest_LowestPrio,
        .irq_dest_mode                  = 1, /* logical */

        .target_cpus                    = flat_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = flat_vector_allocation_domain,
        .init_apic_ldr                  = flat_init_apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = flat_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = flat_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFu << 24,

        .cpu_mask_to_apicid             = default_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = flat_send_IPI_mask,
        .send_IPI_mask_allbutself       = flat_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = flat_send_IPI_allbutself,
        .send_IPI_all                   = flat_send_IPI_all,
        .send_IPI_self                  = apic_send_IPI_self,

        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,

        .read                           = native_apic_mem_read,
        .write                          = native_apic_mem_write,
        .icr_read                       = native_apic_icr_read,
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
};

/*
 * Physflat mode is used when there are more than 8 CPUs on a system.
 * We cannot use logical delivery in this case because the mask
 * overflows, so use physical mode.
 */
static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
#ifdef CONFIG_ACPI
        /*
         * Quirk: some x86_64 machines can only use physical APIC mode
         * regardless of how many processors are present (x86_64 ES7000
         * is an example).
         */
        if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
                (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
                printk(KERN_DEBUG "system APIC only can use physical flat");
                return 1;
        }

        if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
                printk(KERN_DEBUG "IBM Summit detected, will use apic physical");
                return 1;
        }
#endif

        return 0;
}

static const struct cpumask *physflat_target_cpus(void)
{
        return cpu_online_mask;
}

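/*
 * Physical delivery always targets a single CPU, so the allocation
 * domain for a vector is just the CPU it was requested for.
 */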
static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

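/*
 * In physical mode there is no multi-CPU destination encoding, so IPIs
 * are sent one CPU at a time via the generic physical-mode helpers.
 */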
static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
        default_send_IPI_mask_sequence_phys(cpumask, vector);
}

static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
                                              int vector)
{
        default_send_IPI_mask_allbutself_phys(cpumask, vector);
}

static void physflat_send_IPI_allbutself(int vector)
{
        default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void physflat_send_IPI_all(int vector)
{
        physflat_send_IPI_mask(cpu_online_mask, vector);
}

static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        cpu = cpumask_first(cpumask);
        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        else
                return BAD_APICID;
}

static unsigned int
physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                                const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }
        return per_cpu(x86_cpu_to_apicid, cpu);
}

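/*
 * Pick physflat if it has already been selected (e.g. by the MADT quirk
 * check above) or if there are more than 8 possible CPUs, which cannot
 * be addressed with the 8-bit logical flat destination mask.
 */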
static int physflat_probe(void)
{
        if (apic == &apic_physflat || num_possible_cpus() > 8)
                return 1;

        return 0;
}

static struct apic apic_physflat =  {

        .name                           = "physical flat",
        .probe                          = physflat_probe,
        .acpi_madt_oem_check            = physflat_acpi_madt_oem_check,
        .apic_id_registered             = flat_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 0, /* physical */

        .target_cpus                    = physflat_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = 0,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = physflat_vector_allocation_domain,
        /* not needed, but shouldn't hurt: */
        .init_apic_ldr                  = flat_init_apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = flat_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = flat_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFu << 24,

        .cpu_mask_to_apicid             = physflat_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = physflat_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = physflat_send_IPI_mask,
        .send_IPI_mask_allbutself       = physflat_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = physflat_send_IPI_allbutself,
        .send_IPI_all                   = physflat_send_IPI_all,
        .send_IPI_self                  = apic_send_IPI_self,

        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,

        .read                           = native_apic_mem_read,
        .write                          = native_apic_mem_write,
        .icr_read                       = native_apic_icr_read,
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
};

/*
 * We need to check for physflat first, so this order is important.
 */
apic_drivers(apic_physflat, apic_flat);