Commit | Line | Data |
---|---|---|
dbd70fb4 | 1 | /* |
a53c8fab | 2 | * Copyright IBM Corp. 2007, 2011 |
dbd70fb4 HC |
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> |
4 | */ | |
5 | ||
395d31d4 MS |
6 | #define KMSG_COMPONENT "cpu" |
7 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | |
8 | ||
83a24e32 | 9 | #include <linux/workqueue.h> |
dbd70fb4 | 10 | #include <linux/bootmem.h> |
83a24e32 HC |
11 | #include <linux/cpuset.h> |
12 | #include <linux/device.h> | |
13 | #include <linux/kernel.h> | |
dbd70fb4 | 14 | #include <linux/sched.h> |
83a24e32 HC |
15 | #include <linux/init.h> |
16 | #include <linux/delay.h> | |
dbd70fb4 HC |
17 | #include <linux/cpu.h> |
18 | #include <linux/smp.h> | |
83a24e32 | 19 | #include <linux/mm.h> |
78609132 | 20 | #include <asm/sysinfo.h> |
dbd70fb4 | 21 | |
/* Function codes for the Perform Topology Function (PTF) instruction. */
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

/*
 * One node of a singly linked list of CPU masks. Each topology level
 * (socket, book) keeps one mask_info per container reported by STSI;
 * the lists are allocated up front by alloc_masks().
 */
struct mask_info {
	struct mask_info *next;	/* next container on the same level */
	unsigned char id;	/* container id from the topology list */
	cpumask_t mask;		/* CPUs belonging to this container */
};

static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
/* Page-sized bootmem buffer holding the last STSI 15.1.x response. */
static struct sysinfo_15_1_x *tl_info;

/* Cleared by the "topology=off" early parameter. */
static int topology_enabled = 1;
static DECLARE_WORK(topology_work, topology_work_fn);

/* topology_lock protects the socket and book linked lists */
static DEFINE_SPINLOCK(topology_lock);
static struct mask_info socket_info;
static struct mask_info book_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
83a24e32 | 45 | |
4cb14bc8 | 46 | static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) |
dbd70fb4 | 47 | { |
dbd70fb4 HC |
48 | cpumask_t mask; |
49 | ||
d1e57508 HC |
50 | cpumask_copy(&mask, cpumask_of(cpu)); |
51 | if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) | |
0b52783d | 52 | return mask; |
d1e57508 HC |
53 | for (; info; info = info->next) { |
54 | if (cpumask_test_cpu(cpu, &info->mask)) | |
55 | return info->mask; | |
0b52783d | 56 | } |
dbd70fb4 HC |
57 | return mask; |
58 | } | |
59 | ||
f6bf1a8a HC |
60 | static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, |
61 | struct mask_info *book, | |
d1e57508 HC |
62 | struct mask_info *socket, |
63 | int one_socket_per_cpu) | |
dbd70fb4 HC |
64 | { |
65 | unsigned int cpu; | |
66 | ||
0327dab0 | 67 | for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) { |
8b646bd7 MS |
68 | unsigned int rcpu; |
69 | int lcpu; | |
dbd70fb4 | 70 | |
c30f91b6 | 71 | rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; |
8b646bd7 | 72 | lcpu = smp_find_processor_id(rcpu); |
d1e57508 HC |
73 | if (lcpu < 0) |
74 | continue; | |
75 | cpumask_set_cpu(lcpu, &book->mask); | |
76 | cpu_topology[lcpu].book_id = book->id; | |
77 | cpumask_set_cpu(lcpu, &socket->mask); | |
78 | cpu_topology[lcpu].core_id = rcpu; | |
79 | if (one_socket_per_cpu) { | |
80 | cpu_topology[lcpu].socket_id = rcpu; | |
81 | socket = socket->next; | |
82 | } else { | |
83 | cpu_topology[lcpu].socket_id = socket->id; | |
dbd70fb4 | 84 | } |
d1e57508 | 85 | smp_cpu_set_polarization(lcpu, tl_cpu->pp); |
dbd70fb4 | 86 | } |
d1e57508 | 87 | return socket; |
dbd70fb4 HC |
88 | } |
89 | ||
4cb14bc8 | 90 | static void clear_masks(void) |
dbd70fb4 | 91 | { |
4cb14bc8 | 92 | struct mask_info *info; |
dbd70fb4 | 93 | |
d1e57508 | 94 | info = &socket_info; |
4cb14bc8 | 95 | while (info) { |
0f1959f5 | 96 | cpumask_clear(&info->mask); |
4cb14bc8 HC |
97 | info = info->next; |
98 | } | |
4cb14bc8 HC |
99 | info = &book_info; |
100 | while (info) { | |
0f1959f5 | 101 | cpumask_clear(&info->mask); |
4cb14bc8 | 102 | info = info->next; |
dbd70fb4 HC |
103 | } |
104 | } | |
105 | ||
c30f91b6 | 106 | static union topology_entry *next_tle(union topology_entry *tle) |
dbd70fb4 | 107 | { |
c30f91b6 HC |
108 | if (!tle->nl) |
109 | return (union topology_entry *)((struct topology_cpu *)tle + 1); | |
110 | return (union topology_entry *)((struct topology_container *)tle + 1); | |
dbd70fb4 HC |
111 | } |
112 | ||
d1e57508 | 113 | static void __tl_to_masks_generic(struct sysinfo_15_1_x *info) |
dbd70fb4 | 114 | { |
d1e57508 | 115 | struct mask_info *socket = &socket_info; |
83a24e32 | 116 | struct mask_info *book = &book_info; |
c30f91b6 | 117 | union topology_entry *tle, *end; |
4cb14bc8 | 118 | |
c10fde0d | 119 | tle = info->tle; |
c30f91b6 | 120 | end = (union topology_entry *)((unsigned long)info + info->length); |
dbd70fb4 HC |
121 | while (tle < end) { |
122 | switch (tle->nl) { | |
dbd70fb4 | 123 | case 2: |
4cb14bc8 HC |
124 | book = book->next; |
125 | book->id = tle->container.id; | |
dbd70fb4 HC |
126 | break; |
127 | case 1: | |
d1e57508 HC |
128 | socket = socket->next; |
129 | socket->id = tle->container.id; | |
dbd70fb4 HC |
130 | break; |
131 | case 0: | |
d1e57508 | 132 | add_cpus_to_mask(&tle->cpu, book, socket, 0); |
dbd70fb4 HC |
133 | break; |
134 | default: | |
4cb14bc8 | 135 | clear_masks(); |
4baeb964 | 136 | return; |
dbd70fb4 HC |
137 | } |
138 | tle = next_tle(tle); | |
139 | } | |
4baeb964 HC |
140 | } |
141 | ||
d1e57508 | 142 | static void __tl_to_masks_z10(struct sysinfo_15_1_x *info) |
4baeb964 | 143 | { |
d1e57508 | 144 | struct mask_info *socket = &socket_info; |
4baeb964 HC |
145 | struct mask_info *book = &book_info; |
146 | union topology_entry *tle, *end; | |
147 | ||
148 | tle = info->tle; | |
149 | end = (union topology_entry *)((unsigned long)info + info->length); | |
150 | while (tle < end) { | |
151 | switch (tle->nl) { | |
152 | case 1: | |
153 | book = book->next; | |
154 | book->id = tle->container.id; | |
155 | break; | |
156 | case 0: | |
d1e57508 | 157 | socket = add_cpus_to_mask(&tle->cpu, book, socket, 1); |
4baeb964 HC |
158 | break; |
159 | default: | |
160 | clear_masks(); | |
161 | return; | |
162 | } | |
163 | tle = next_tle(tle); | |
164 | } | |
165 | } | |
166 | ||
d1e57508 | 167 | static void tl_to_masks(struct sysinfo_15_1_x *info) |
4baeb964 HC |
168 | { |
169 | struct cpuid cpu_id; | |
170 | ||
4baeb964 | 171 | spin_lock_irq(&topology_lock); |
d1e57508 | 172 | get_cpu_id(&cpu_id); |
4baeb964 HC |
173 | clear_masks(); |
174 | switch (cpu_id.machine) { | |
175 | case 0x2097: | |
176 | case 0x2098: | |
d1e57508 | 177 | __tl_to_masks_z10(info); |
4baeb964 HC |
178 | break; |
179 | default: | |
d1e57508 | 180 | __tl_to_masks_generic(info); |
4baeb964 | 181 | } |
74af2831 | 182 | spin_unlock_irq(&topology_lock); |
dbd70fb4 HC |
183 | } |
184 | ||
/*
 * Without hardware topology support every CPU is simply reported as
 * horizontally polarized.
 */
static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
	mutex_unlock(&smp_cpu_state_mutex);
}
194 | ||
/*
 * ptf - execute the Perform Topology Function instruction.
 * @fc: function code (PTF_HORIZONTAL, PTF_VERTICAL or PTF_CHECK)
 *
 * Issues PTF (opcode 0xb9a2) with the function code in a register and
 * returns the resulting condition code (0-3), extracted from the
 * program mask via ipm/srl.
 */
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}
207 | ||
208 | int topology_set_cpu_management(int fc) | |
209 | { | |
83a24e32 | 210 | int cpu, rc; |
c10fde0d | 211 | |
9186d7a9 | 212 | if (!MACHINE_HAS_TOPOLOGY) |
c10fde0d HC |
213 | return -EOPNOTSUPP; |
214 | if (fc) | |
215 | rc = ptf(PTF_VERTICAL); | |
216 | else | |
217 | rc = ptf(PTF_HORIZONTAL); | |
218 | if (rc) | |
219 | return -EBUSY; | |
5439050f | 220 | for_each_possible_cpu(cpu) |
50ab9a9a | 221 | smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); |
dbd70fb4 HC |
222 | return rc; |
223 | } | |
224 | ||
d1e57508 | 225 | static void update_cpu_masks(void) |
d00aa4e7 | 226 | { |
4cb14bc8 | 227 | unsigned long flags; |
d00aa4e7 HC |
228 | int cpu; |
229 | ||
4cb14bc8 HC |
230 | spin_lock_irqsave(&topology_lock, flags); |
231 | for_each_possible_cpu(cpu) { | |
d1e57508 HC |
232 | cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu); |
233 | cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu); | |
234 | if (!MACHINE_HAS_TOPOLOGY) { | |
235 | cpu_topology[cpu].core_id = cpu; | |
236 | cpu_topology[cpu].socket_id = cpu; | |
237 | cpu_topology[cpu].book_id = cpu; | |
238 | } | |
4cb14bc8 HC |
239 | } |
240 | spin_unlock_irqrestore(&topology_lock, flags); | |
241 | } | |
242 | ||
96f4a70d | 243 | void store_topology(struct sysinfo_15_1_x *info) |
4cb14bc8 | 244 | { |
fade4dc4 HC |
245 | if (topology_max_mnest >= 3) |
246 | stsi(info, 15, 1, 3); | |
247 | else | |
248 | stsi(info, 15, 1, 2); | |
d00aa4e7 HC |
249 | } |
250 | ||
ee79d1bd | 251 | int arch_update_cpu_topology(void) |
dbd70fb4 | 252 | { |
c30f91b6 | 253 | struct sysinfo_15_1_x *info = tl_info; |
8a25a2fd | 254 | struct device *dev; |
dbd70fb4 HC |
255 | int cpu; |
256 | ||
9186d7a9 | 257 | if (!MACHINE_HAS_TOPOLOGY) { |
d1e57508 | 258 | update_cpu_masks(); |
c10fde0d | 259 | topology_update_polarization_simple(); |
ee79d1bd | 260 | return 0; |
c10fde0d | 261 | } |
4cb14bc8 | 262 | store_topology(info); |
d1e57508 HC |
263 | tl_to_masks(info); |
264 | update_cpu_masks(); | |
dbd70fb4 | 265 | for_each_online_cpu(cpu) { |
8a25a2fd KS |
266 | dev = get_cpu_device(cpu); |
267 | kobject_uevent(&dev->kobj, KOBJ_CHANGE); | |
dbd70fb4 | 268 | } |
ee79d1bd | 269 | return 1; |
dbd70fb4 HC |
270 | } |
271 | ||
/* Deferred work: rebuild the scheduler domains after a topology change. */
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
276 | ||
/* Schedule a topology update via the system workqueue. */
void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}
281 | ||
/*
 * Timer callback: poll with PTF_CHECK whether the topology changed and
 * schedule an update if so. The timer always rearms itself.
 */
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}
288 | ||
/* Deferrable polling timer for topology change detection. */
static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

/* While > 0 the timer polls at the fast rate; see set_topology_timer(). */
static atomic_t topology_poll = ATOMIC_INIT(0);
dbd70fb4 HC |
294 | static void set_topology_timer(void) |
295 | { | |
d68bddb7 HC |
296 | if (atomic_add_unless(&topology_poll, -1, 0)) |
297 | mod_timer(&topology_timer, jiffies + HZ / 10); | |
298 | else | |
299 | mod_timer(&topology_timer, jiffies + HZ * 60); | |
300 | } | |
301 | ||
/*
 * Hint that a topology change is likely soon: bump topology_poll so
 * the timer polls at the fast rate for roughly the next 60 expiries.
 */
void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}
314 | ||
2b1a61f0 | 315 | static int __init early_parse_topology(char *p) |
dbd70fb4 | 316 | { |
c9af3fa9 | 317 | if (strncmp(p, "off", 3)) |
2b1a61f0 | 318 | return 0; |
c9af3fa9 | 319 | topology_enabled = 0; |
2b1a61f0 | 320 | return 0; |
dbd70fb4 | 321 | } |
2b1a61f0 | 322 | early_param("topology", early_parse_topology); |
dbd70fb4 | 323 | |
/*
 * alloc_masks - allocate the mask_info list for one topology level.
 * @info:   STSI 15.1.x information block
 * @mask:   head of the list to extend (socket_info or book_info)
 * @offset: nesting level offset; 1 for sockets, 2 for books
 *
 * Computes an upper bound for the number of containers on this level
 * (product of the magnitude values of all deeper levels) and allocates
 * one mask_info node per container from bootmem.
 */
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	/* Magnitude of this level, i.e. containers per parent container. */
	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	/* Multiply by the magnitudes of all levels above it. */
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	/* Always allocate at least one node. */
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem(sizeof(struct mask_info));
		mask = mask->next;
	}
}
338 | ||
dbd70fb4 HC |
339 | void __init s390_init_cpu_topology(void) |
340 | { | |
c30f91b6 | 341 | struct sysinfo_15_1_x *info; |
dbd70fb4 HC |
342 | int i; |
343 | ||
9186d7a9 | 344 | if (!MACHINE_HAS_TOPOLOGY) |
dbd70fb4 | 345 | return; |
dbd70fb4 | 346 | tl_info = alloc_bootmem_pages(PAGE_SIZE); |
dbd70fb4 | 347 | info = tl_info; |
4cb14bc8 | 348 | store_topology(info); |
395d31d4 | 349 | pr_info("The CPU configuration topology of the machine is:"); |
c30f91b6 | 350 | for (i = 0; i < TOPOLOGY_NR_MAG; i++) |
83a24e32 HC |
351 | printk(KERN_CONT " %d", info->mag[i]); |
352 | printk(KERN_CONT " / %d\n", info->mnest); | |
d1e57508 | 353 | alloc_masks(info, &socket_info, 1); |
f6bf1a8a | 354 | alloc_masks(info, &book_info, 2); |
dbd70fb4 | 355 | } |
83a24e32 HC |
356 | |
357 | static int cpu_management; | |
358 | ||
72f31889 LT |
359 | static ssize_t dispatching_show(struct device *dev, |
360 | struct device_attribute *attr, | |
83a24e32 HC |
361 | char *buf) |
362 | { | |
363 | ssize_t count; | |
364 | ||
365 | mutex_lock(&smp_cpu_state_mutex); | |
366 | count = sprintf(buf, "%d\n", cpu_management); | |
367 | mutex_unlock(&smp_cpu_state_mutex); | |
368 | return count; | |
369 | } | |
370 | ||
72f31889 LT |
371 | static ssize_t dispatching_store(struct device *dev, |
372 | struct device_attribute *attr, | |
83a24e32 HC |
373 | const char *buf, |
374 | size_t count) | |
375 | { | |
376 | int val, rc; | |
377 | char delim; | |
378 | ||
379 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | |
380 | return -EINVAL; | |
381 | if (val != 0 && val != 1) | |
382 | return -EINVAL; | |
383 | rc = 0; | |
384 | get_online_cpus(); | |
385 | mutex_lock(&smp_cpu_state_mutex); | |
386 | if (cpu_management == val) | |
387 | goto out; | |
388 | rc = topology_set_cpu_management(val); | |
d68bddb7 HC |
389 | if (rc) |
390 | goto out; | |
391 | cpu_management = val; | |
392 | topology_expect_change(); | |
83a24e32 HC |
393 | out: |
394 | mutex_unlock(&smp_cpu_state_mutex); | |
395 | put_online_cpus(); | |
396 | return rc ? rc : count; | |
397 | } | |
72f31889 | 398 | static DEVICE_ATTR(dispatching, 0644, dispatching_show, |
83a24e32 HC |
399 | dispatching_store); |
400 | ||
72f31889 LT |
401 | static ssize_t cpu_polarization_show(struct device *dev, |
402 | struct device_attribute *attr, char *buf) | |
83a24e32 HC |
403 | { |
404 | int cpu = dev->id; | |
405 | ssize_t count; | |
406 | ||
407 | mutex_lock(&smp_cpu_state_mutex); | |
50ab9a9a | 408 | switch (smp_cpu_get_polarization(cpu)) { |
83a24e32 HC |
409 | case POLARIZATION_HRZ: |
410 | count = sprintf(buf, "horizontal\n"); | |
411 | break; | |
412 | case POLARIZATION_VL: | |
413 | count = sprintf(buf, "vertical:low\n"); | |
414 | break; | |
415 | case POLARIZATION_VM: | |
416 | count = sprintf(buf, "vertical:medium\n"); | |
417 | break; | |
418 | case POLARIZATION_VH: | |
419 | count = sprintf(buf, "vertical:high\n"); | |
420 | break; | |
421 | default: | |
422 | count = sprintf(buf, "unknown\n"); | |
423 | break; | |
424 | } | |
425 | mutex_unlock(&smp_cpu_state_mutex); | |
426 | return count; | |
427 | } | |
72f31889 | 428 | static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL); |
83a24e32 HC |
429 | |
/* Per-CPU sysfs attributes; currently only "polarization". */
static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};
438 | ||
/* Register the topology attribute group on a CPU device's kobject. */
int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}
443 | ||
444 | static int __init topology_init(void) | |
445 | { | |
446 | if (!MACHINE_HAS_TOPOLOGY) { | |
447 | topology_update_polarization_simple(); | |
448 | goto out; | |
449 | } | |
83a24e32 HC |
450 | set_topology_timer(); |
451 | out: | |
d1e57508 | 452 | update_cpu_masks(); |
72f31889 | 453 | return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); |
83a24e32 HC |
454 | } |
455 | device_initcall(topology_init); |