Commit | Line | Data |
---|---|---|
12a67cf6 SS |
1 | #include <linux/threads.h> |
2 | #include <linux/cpumask.h> | |
3 | #include <linux/string.h> | |
4 | #include <linux/kernel.h> | |
5 | #include <linux/ctype.h> | |
6 | #include <linux/init.h> | |
1b9b89e7 YL |
7 | #include <linux/dmar.h> |
8 | ||
12a67cf6 SS |
9 | #include <asm/smp.h> |
10 | #include <asm/ipi.h> | |
11 | #include <asm/genapic.h> | |
12 | ||
/* Per-CPU cache of this CPU's logical APIC ID (value of APIC_LDR),
 * filled in by init_x2apic_ldr() and used when addressing IPIs. */
DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
14 | ||
2caa3715 | 15 | static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
1b9b89e7 | 16 | { |
d25ae38b | 17 | if (cpu_has_x2apic) |
1b9b89e7 YL |
18 | return 1; |
19 | ||
20 | return 0; | |
21 | } | |
22 | ||
12a67cf6 SS |
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
static const struct cpumask *x2apic_target_cpus(void)
{
	/* Everything routes to CPU 0 until irqbalance redistributes. */
	return cpumask_of(0);
}
29 | ||
/*
 * for now each logical cpu is in its own vector allocation domain.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/* Single-CPU domain: @retmask ends up containing exactly @cpu. */
	cpumask_copy(retmask, cpumask_of(cpu));
}
38 | ||
/*
 * Compose an ICR value for @vector/@dest and write it, addressed to
 * @apicid.  In x2APIC mode the destination is part of the single MSR
 * write, so no separate ICR2 setup is needed.
 */
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
				   unsigned int dest)
{
	x2apic_icr_write(__prepare_ICR(0, vector, dest), apicid);
}
51 | ||
52 | /* | |
53 | * for now, we send the IPI's one by one in the cpumask. | |
54 | * TBD: Based on the cpu mask, we can send the IPI's to the cluster group | |
55 | * at once. We have 16 cpu's in a cluster. This will minimize IPI register | |
56 | * writes. | |
57 | */ | |
bcda016e | 58 | static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) |
12a67cf6 SS |
59 | { |
60 | unsigned long flags; | |
61 | unsigned long query_cpu; | |
62 | ||
63 | local_irq_save(flags); | |
bcda016e | 64 | for_each_cpu(query_cpu, mask) |
e7986739 MT |
65 | __x2apic_send_IPI_dest( |
66 | per_cpu(x86_cpu_to_logical_apicid, query_cpu), | |
67 | vector, APIC_DEST_LOGICAL); | |
12a67cf6 SS |
68 | local_irq_restore(flags); |
69 | } | |
70 | ||
bcda016e MT |
71 | static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, |
72 | int vector) | |
12a67cf6 | 73 | { |
e7986739 MT |
74 | unsigned long flags; |
75 | unsigned long query_cpu; | |
76 | unsigned long this_cpu = smp_processor_id(); | |
12a67cf6 | 77 | |
e7986739 | 78 | local_irq_save(flags); |
bcda016e | 79 | for_each_cpu(query_cpu, mask) |
e7986739 MT |
80 | if (query_cpu != this_cpu) |
81 | __x2apic_send_IPI_dest( | |
82 | per_cpu(x86_cpu_to_logical_apicid, query_cpu), | |
83 | vector, APIC_DEST_LOGICAL); | |
84 | local_irq_restore(flags); | |
85 | } | |
12a67cf6 | 86 | |
e7986739 MT |
87 | static void x2apic_send_IPI_allbutself(int vector) |
88 | { | |
89 | unsigned long flags; | |
90 | unsigned long query_cpu; | |
91 | unsigned long this_cpu = smp_processor_id(); | |
92 | ||
93 | local_irq_save(flags); | |
94 | for_each_online_cpu(query_cpu) | |
95 | if (query_cpu != this_cpu) | |
96 | __x2apic_send_IPI_dest( | |
97 | per_cpu(x86_cpu_to_logical_apicid, query_cpu), | |
98 | vector, APIC_DEST_LOGICAL); | |
99 | local_irq_restore(flags); | |
12a67cf6 SS |
100 | } |
101 | ||
102 | static void x2apic_send_IPI_all(int vector) | |
103 | { | |
bcda016e | 104 | x2apic_send_IPI_mask(cpu_online_mask, vector); |
12a67cf6 SS |
105 | } |
106 | ||
/* In x2APIC mode the APIC ID is always considered registered. */
static int x2apic_apic_id_registered(void)
{
	return 1;
}
111 | ||
bcda016e | 112 | static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) |
12a67cf6 SS |
113 | { |
114 | int cpu; | |
115 | ||
116 | /* | |
7d87d536 | 117 | * We're using fixed IRQ delivery, can only return one logical APIC ID. |
12a67cf6 SS |
118 | * May as well be the first. |
119 | */ | |
bcda016e | 120 | cpu = cpumask_first(cpumask); |
e7986739 | 121 | if ((unsigned)cpu < nr_cpu_ids) |
12a67cf6 SS |
122 | return per_cpu(x86_cpu_to_logical_apicid, cpu); |
123 | else | |
124 | return BAD_APICID; | |
125 | } | |
126 | ||
6eeb7c5a MT |
127 | static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, |
128 | const struct cpumask *andmask) | |
95d313cf MT |
129 | { |
130 | int cpu; | |
131 | ||
132 | /* | |
7d87d536 | 133 | * We're using fixed IRQ delivery, can only return one logical APIC ID. |
95d313cf MT |
134 | * May as well be the first. |
135 | */ | |
a775a38b MT |
136 | for_each_cpu_and(cpu, cpumask, andmask) |
137 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | |
138 | break; | |
6eeb7c5a | 139 | if (cpu < nr_cpu_ids) |
7d87d536 | 140 | return per_cpu(x86_cpu_to_logical_apicid, cpu); |
95d313cf MT |
141 | return BAD_APICID; |
142 | } | |
143 | ||
/* x2APIC IDs occupy the full 32-bit register; just narrow the value. */
static unsigned int get_apic_id(unsigned long x)
{
	return (unsigned int)x;
}
151 | ||
/* Inverse of get_apic_id(): the 32-bit ID maps 1:1 into the register value. */
static unsigned long set_apic_id(unsigned int id)
{
	return id;
}
159 | ||
12a67cf6 SS |
160 | static unsigned int phys_pkg_id(int index_msb) |
161 | { | |
e17941b0 | 162 | return current_cpu_data.initial_apicid >> index_msb; |
12a67cf6 SS |
163 | } |
164 | ||
165 | static void x2apic_send_IPI_self(int vector) | |
166 | { | |
167 | apic_write(APIC_SELF_IPI, vector); | |
168 | } | |
169 | ||
170 | static void init_x2apic_ldr(void) | |
171 | { | |
172 | int cpu = smp_processor_id(); | |
173 | ||
174 | per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR); | |
175 | return; | |
176 | } | |
177 | ||
178 | struct genapic apic_x2apic_cluster = { | |
504a3c3a IM |
179 | |
180 | .name = "cluster x2apic", | |
181 | .probe = NULL, | |
182 | .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, | |
183 | .apic_id_registered = x2apic_apic_id_registered, | |
184 | ||
f8987a10 IM |
185 | .irq_delivery_mode = dest_LowestPrio, |
186 | .irq_dest_mode = (APIC_DEST_LOGICAL != 0), | |
504a3c3a IM |
187 | |
188 | .target_cpus = x2apic_target_cpus, | |
189 | .ESR_DISABLE = 0, | |
190 | .apic_destination_logical = 0, | |
191 | .check_apicid_used = NULL, | |
192 | .check_apicid_present = NULL, | |
193 | ||
194 | .no_balance_irq = 0, | |
195 | .no_ioapic_check = 0, | |
196 | ||
197 | .vector_allocation_domain = x2apic_vector_allocation_domain, | |
198 | .init_apic_ldr = init_x2apic_ldr, | |
199 | ||
200 | .ioapic_phys_id_map = NULL, | |
201 | .setup_apic_routing = NULL, | |
202 | .multi_timer_check = NULL, | |
203 | .apicid_to_node = NULL, | |
204 | .cpu_to_logical_apicid = NULL, | |
205 | .cpu_present_to_apicid = NULL, | |
206 | .apicid_to_cpu_present = NULL, | |
207 | .setup_portio_remap = NULL, | |
208 | .check_phys_apicid_present = NULL, | |
209 | .enable_apic_mode = NULL, | |
210 | .phys_pkg_id = phys_pkg_id, | |
211 | .mps_oem_check = NULL, | |
212 | ||
213 | .get_apic_id = get_apic_id, | |
214 | .set_apic_id = set_apic_id, | |
215 | .apic_id_mask = 0xFFFFFFFFu, | |
216 | ||
217 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, | |
218 | .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, | |
219 | ||
220 | .send_IPI_mask = x2apic_send_IPI_mask, | |
221 | .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, | |
222 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, | |
223 | .send_IPI_all = x2apic_send_IPI_all, | |
224 | .send_IPI_self = x2apic_send_IPI_self, | |
225 | ||
226 | .wakeup_cpu = NULL, | |
227 | .trampoline_phys_low = 0, | |
228 | .trampoline_phys_high = 0, | |
229 | .wait_for_init_deassert = NULL, | |
230 | .smp_callin_clear_local_apic = NULL, | |
231 | .store_NMI_vector = NULL, | |
232 | .restore_NMI_vector = NULL, | |
233 | .inquire_remote_apic = NULL, | |
12a67cf6 | 234 | }; |