Commit | Line | Data |
---|---|---|
dbd70fb4 | 1 | /* |
dbd70fb4 HC |
2 | * Copyright IBM Corp. 2007 |
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | |
4 | */ | |
5 | ||
6 | #include <linux/kernel.h> | |
7 | #include <linux/mm.h> | |
8 | #include <linux/init.h> | |
9 | #include <linux/device.h> | |
10 | #include <linux/bootmem.h> | |
11 | #include <linux/sched.h> | |
12 | #include <linux/workqueue.h> | |
13 | #include <linux/cpu.h> | |
14 | #include <linux/smp.h> | |
15 | #include <asm/delay.h> | |
16 | #include <asm/s390_ext.h> | |
17 | #include <asm/sysinfo.h> | |
18 | ||
#define CPU_BITS 64		/* number of CPU bits in a tl_cpu mask */

#define NR_MAG 6		/* number of magnitude levels in SYSIB 15.1.2 */

/* PTF (Perform Topology Function) function codes */
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)
dbd70fb4 HC |
25 | |
/*
 * CPU entry of the topology list (TLE with nesting level 0).
 * Hardware-defined layout - do not reorder or repack fields.
 */
struct tl_cpu {
	unsigned char reserved0[4];
	unsigned char :6;
	unsigned char pp:2;	/* polarization; copied to smp_cpu_polarization */
	unsigned char reserved1;
	unsigned short origin;	/* CPU address of the first bit in mask */
	unsigned long mask[CPU_BITS / BITS_PER_LONG];	/* CPU address bitmap */
};
34 | ||
/*
 * Container entry of the topology list (TLE with nesting level > 0).
 * Only its size matters here; the contents are not interpreted.
 */
struct tl_container {
	unsigned char reserved[8];
};
38 | ||
/*
 * One topology-list entry. The first byte (nl, the nesting level)
 * selects the interpretation: nl == 0 is a CPU entry, nl > 0 a
 * container entry (see next_tle() and tl_to_cores()).
 */
union tl_entry {
	unsigned char nl;
	struct tl_cpu cpu;
	struct tl_container container;
};
44 | ||
dbd70fb4 HC |
45 | struct tl_info { |
46 | unsigned char reserved0[2]; | |
47 | unsigned short length; | |
48 | unsigned char mag[NR_MAG]; | |
49 | unsigned char reserved1; | |
50 | unsigned char mnest; | |
51 | unsigned char reserved2[4]; | |
52 | union tl_entry tle[0]; | |
53 | }; | |
54 | ||
/*
 * Node of the singly linked core list; holds the cpumask of the
 * logical CPUs belonging to one physical core.
 */
struct core_info {
	struct core_info *next;
	cpumask_t mask;
};
59 | ||
static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;		/* one-page buffer for SYSIB 15.1.2 */
static struct core_info core_info;	/* head of the core list */
static int machine_has_topology;	/* topology facility installed */
static int machine_has_topology_irq;	/* topology-change irq available */
static struct timer_list topology_timer; /* polling fallback (no irq) */
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

cpumask_t cpu_core_map[NR_CPUS];	/* exported per-cpu core sibling map */
72 | ||
dbd70fb4 HC |
73 | cpumask_t cpu_coregroup_map(unsigned int cpu) |
74 | { | |
75 | struct core_info *core = &core_info; | |
74af2831 | 76 | unsigned long flags; |
dbd70fb4 HC |
77 | cpumask_t mask; |
78 | ||
79 | cpus_clear(mask); | |
80 | if (!machine_has_topology) | |
81 | return cpu_present_map; | |
74af2831 | 82 | spin_lock_irqsave(&topology_lock, flags); |
dbd70fb4 HC |
83 | while (core) { |
84 | if (cpu_isset(cpu, core->mask)) { | |
85 | mask = core->mask; | |
86 | break; | |
87 | } | |
88 | core = core->next; | |
89 | } | |
74af2831 | 90 | spin_unlock_irqrestore(&topology_lock, flags); |
dbd70fb4 HC |
91 | if (cpus_empty(mask)) |
92 | mask = cpumask_of_cpu(cpu); | |
93 | return mask; | |
94 | } | |
95 | ||
/*
 * Add every logical cpu described by one topology CPU entry to the
 * given core's mask and record the entry's polarization for it.
 */
static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
	     cpu < CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
	{
		unsigned int rcpu, lcpu;

		/*
		 * The hardware mask apparently numbers bits from the left,
		 * while find_*_bit() numbers from the right: mirror the bit
		 * number and add the entry's origin to get the real
		 * (physical) cpu address. NOTE(review): confirm bit
		 * ordering against the architecture documentation.
		 */
		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			/* map physical to logical cpu number */
			if (__cpu_logical_map[lcpu] == rcpu) {
				cpu_set(lcpu, core->mask);
				smp_cpu_polarization[lcpu] = tl_cpu->pp;
			}
		}
	}
}
115 | ||
116 | static void clear_cores(void) | |
117 | { | |
118 | struct core_info *core = &core_info; | |
119 | ||
120 | while (core) { | |
121 | cpus_clear(core->mask); | |
122 | core = core->next; | |
123 | } | |
124 | } | |
125 | ||
126 | static union tl_entry *next_tle(union tl_entry *tle) | |
127 | { | |
128 | if (tle->nl) | |
129 | return (union tl_entry *)((struct tl_container *)tle + 1); | |
130 | else | |
131 | return (union tl_entry *)((struct tl_cpu *)tle + 1); | |
132 | } | |
133 | ||
134 | static void tl_to_cores(struct tl_info *info) | |
135 | { | |
136 | union tl_entry *tle, *end; | |
137 | struct core_info *core = &core_info; | |
138 | ||
74af2831 | 139 | spin_lock_irq(&topology_lock); |
dbd70fb4 | 140 | clear_cores(); |
c10fde0d | 141 | tle = info->tle; |
dbd70fb4 HC |
142 | end = (union tl_entry *)((unsigned long)info + info->length); |
143 | while (tle < end) { | |
144 | switch (tle->nl) { | |
145 | case 5: | |
146 | case 4: | |
147 | case 3: | |
148 | case 2: | |
149 | break; | |
150 | case 1: | |
151 | core = core->next; | |
152 | break; | |
153 | case 0: | |
154 | add_cpus_to_core(&tle->cpu, core); | |
155 | break; | |
156 | default: | |
157 | clear_cores(); | |
158 | machine_has_topology = 0; | |
159 | return; | |
160 | } | |
161 | tle = next_tle(tle); | |
162 | } | |
74af2831 | 163 | spin_unlock_irq(&topology_lock); |
dbd70fb4 HC |
164 | } |
165 | ||
c10fde0d HC |
166 | static void topology_update_polarization_simple(void) |
167 | { | |
168 | int cpu; | |
169 | ||
170 | mutex_lock(&smp_cpu_state_mutex); | |
171 | for_each_present_cpu(cpu) | |
172 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; | |
173 | mutex_unlock(&smp_cpu_state_mutex); | |
174 | } | |
175 | ||
/*
 * Issue the PTF (Perform Topology Function) instruction, opcode
 * 0xb9a2, with function code @fc (one of the PTF_* values) and return
 * the resulting condition code (0 on success) extracted from the PSW
 * via ipm/srl.
 */
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		" .insn rre,0xb9a20000,%1,%1\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}
188 | ||
/*
 * Switch cpu management mode: @fc != 0 requests vertical polarization,
 * @fc == 0 horizontal. The resulting polarization of each cpu is
 * unknown until the next topology update, so mark them all UNKNWN.
 *
 * Returns 0 on success, -EOPNOTSUPP without topology support,
 * -EBUSY if the PTF instruction failed.
 *
 * NOTE(review): smp_cpu_polarization[] is written here without taking
 * smp_cpu_state_mutex, unlike topology_update_polarization_simple();
 * presumably the caller already holds it - confirm at the call site.
 */
int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!machine_has_topology)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_present_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}
206 | ||
d00aa4e7 HC |
207 | static void update_cpu_core_map(void) |
208 | { | |
209 | int cpu; | |
210 | ||
211 | for_each_present_cpu(cpu) | |
212 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); | |
213 | } | |
214 | ||
dbd70fb4 HC |
/*
 * Re-read the machine topology, rebuild the core list and the
 * per-cpu core maps, and send a change uevent for every online cpu
 * so user space notices. Without topology support fall back to the
 * trivial maps and horizontal polarization.
 */
void arch_update_cpu_topology(void)
{
	struct tl_info *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!machine_has_topology) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return;
	}
	stsi(info, 15, 1, 2);	/* refresh SYSIB 15.1.2 into tl_info */
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
}
234 | ||
fd781fa2 HC |
/*
 * Deferred topology update, run from the workqueue: rebuilding the
 * scheduler domains calls back into arch_update_cpu_topology().
 */
static void topology_work_fn(struct work_struct *work)
{
	arch_reinit_sched_domains();
}
239 | ||
c10fde0d HC |
/* Queue an asynchronous topology update (runs topology_work_fn). */
void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}
244 | ||
dbd70fb4 HC |
/*
 * Polling fallback: check for a pending topology change via
 * PTF_CHECK, schedule an update if one is reported, and rearm
 * the timer.
 */
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}
251 | ||
/* (Re)arm the topology polling timer to fire in 60 seconds. */
static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}
259 | ||
260 | static void topology_interrupt(__u16 code) | |
261 | { | |
262 | schedule_work(&topology_work); | |
263 | } | |
264 | ||
/*
 * Late init: arrange for topology updates, either via the 0x2005
 * external interrupt when the machine supports it, or via a 60s
 * deferrable polling timer, and build the initial core maps.
 *
 * Returns 0 on success or the error from registering the interrupt
 * handler; the core maps are (re)built on every exit path.
 */
static int __init init_topology_update(void)
{
	int rc;

	rc = 0;
	if (!machine_has_topology) {
		topology_update_polarization_simple();
		goto out;
	}
	init_timer_deferrable(&topology_timer);
	if (machine_has_topology_irq) {
		rc = register_external_interrupt(0x2005, topology_interrupt);
		if (rc)
			goto out;
		/* NOTE(review): presumably enables the topology-change
		 * external interrupt subclass in CR0 bit 8 - confirm. */
		ctl_set_bit(0, 8);
	}
	else
		set_topology_timer();
out:
	update_cpu_core_map();
	return rc;
}
__initcall(init_topology_update);
288 | ||
/*
 * Early boot setup: probe the facility list for topology support,
 * read the initial SYSIB 15.1.2 and pre-allocate one core_info node
 * per core so later updates never need to allocate. On failure,
 * topology support is switched off again.
 */
void __init s390_init_cpu_topology(void)
{
	unsigned long long facility_bits;
	struct tl_info *info;
	struct core_info *core;
	int nr_cores;
	int i;

	if (stfle(&facility_bits, 1) <= 0)
		return;
	/* NOTE(review): facility bits appear to be numbered from the
	 * left (1ULL << 52 == facility bit 11, 1ULL << 61 == bit 2,
	 * 1ULL << 51 == bit 12) - confirm against the PoP. */
	if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
		return;
	machine_has_topology = 1;

	if (facility_bits & (1ULL << 51))
		machine_has_topology_irq = 1;

	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	stsi(info, 15, 1, 2);

	/* total cores = product of the magnitudes above the CPU level */
	nr_cores = info->mag[NR_MAG - 2];
	for (i = 0; i < info->mnest - 2; i++)
		nr_cores *= info->mag[NR_MAG - 3 - i];

	printk(KERN_INFO "CPU topology:");
	for (i = 0; i < NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);

	/* pre-allocate the core list; alloc_bootmem() normally panics
	 * on failure, so the error path is belt-and-braces */
	core = &core_info;
	for (i = 0; i < nr_cores; i++) {
		core->next = alloc_bootmem(sizeof(struct core_info));
		core = core->next;
		if (!core)
			goto error;
	}
	return;
error:
	machine_has_topology = 0;
	machine_has_topology_irq = 0;
}