Commit | Line | Data |
---|---|---|
dbd70fb4 | 1 | /* |
a53c8fab | 2 | * Copyright IBM Corp. 2007, 2011 |
dbd70fb4 HC |
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> |
4 | */ | |
5 | ||
395d31d4 MS |
6 | #define KMSG_COMPONENT "cpu" |
7 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | |
8 | ||
83a24e32 | 9 | #include <linux/workqueue.h> |
8c910580 | 10 | #include <linux/bootmem.h> |
51dce386 HC |
11 | #include <linux/uaccess.h> |
12 | #include <linux/sysctl.h> | |
83a24e32 HC |
13 | #include <linux/cpuset.h> |
14 | #include <linux/device.h> | |
80020fbd | 15 | #include <linux/export.h> |
83a24e32 | 16 | #include <linux/kernel.h> |
dbd70fb4 | 17 | #include <linux/sched.h> |
105ab3d8 | 18 | #include <linux/sched/topology.h> |
83a24e32 | 19 | #include <linux/delay.h> |
d05d15da HC |
20 | #include <linux/init.h> |
21 | #include <linux/slab.h> | |
dbd70fb4 HC |
22 | #include <linux/cpu.h> |
23 | #include <linux/smp.h> | |
83a24e32 | 24 | #include <linux/mm.h> |
3a368f74 PH |
25 | #include <linux/nodemask.h> |
26 | #include <linux/node.h> | |
78609132 | 27 | #include <asm/sysinfo.h> |
3a368f74 | 28 | #include <asm/numa.h> |
dbd70fb4 | 29 | |
/* Function codes passed to ptf() below (perform-topology-function). */
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)
/* Topology operating modes; see cpu_group_map() for their effect. */
enum {
	TOPOLOGY_MODE_HW,		/* use hardware-provided topology info */
	TOPOLOGY_MODE_SINGLE,		/* every CPU is a group of its own */
	TOPOLOGY_MODE_PACKAGE,		/* all present CPUs form one group */
	TOPOLOGY_MODE_UNINITIALIZED	/* mode not yet chosen (see topology_init_early) */
};
/*
 * One node of a per-level linked list of CPU masks. Each topology level
 * (socket/book/drawer) is a chain of these, one node per container.
 */
struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
/* Buffer holding the SYSINFO 15.1.x topology block read from the machine. */
static struct sysinfo_15_1_x *tl_info;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

/* CPUs that actually appeared in the hardware topology information. */
cpumask_t cpus_with_topology;
/*
 * Return the cpumask of the topology group @cpu belongs to, depending on
 * the current topology mode:
 *  - HW:      mask of the list node in @info that contains @cpu
 *  - PACKAGE: all present CPUs
 *  - SINGLE (and any unknown mode): just @cpu itself
 */
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
				mask = info->mask;
				break;
			}
			info = info->next;
		}
		/* Fall back to a singleton mask if @cpu is in no list node. */
		if (cpumask_empty(&mask))
			cpumask_copy(&mask, cpumask_of(cpu));
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);
		break;
	default:
		/* fallthrough */
	case TOPOLOGY_MODE_SINGLE:
		cpumask_copy(&mask, cpumask_of(cpu));
		break;
	}
	return mask;
}
95 | ||
10ad34bc MS |
96 | static cpumask_t cpu_thread_map(unsigned int cpu) |
97 | { | |
98 | cpumask_t mask; | |
99 | int i; | |
100 | ||
101 | cpumask_copy(&mask, cpumask_of(cpu)); | |
1b25fda0 | 102 | if (topology_mode != TOPOLOGY_MODE_HW) |
10ad34bc MS |
103 | return mask; |
104 | cpu -= cpu % (smp_cpu_mtid + 1); | |
105 | for (i = 0; i <= smp_cpu_mtid; i++) | |
106 | if (cpu_present(cpu + i)) | |
107 | cpumask_set_cpu(cpu + i, &mask); | |
108 | return mask; | |
109 | } | |
110 | ||
/* Width of the core bit mask in a topology CPU entry. */
#define TOPOLOGY_CORE_BITS	64

/*
 * Add all CPUs of one topology CPU entry (@tl_core) to the current
 * drawer/book/socket masks, and record each logical CPU's topology IDs,
 * dedication flag and hardware-reported polarization.
 */
static void add_cpus_to_mask(struct topology_core *tl_core,
			     struct mask_info *drawer,
			     struct mask_info *book,
			     struct mask_info *socket)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		/* Bit numbers count from the MSB; add the entry's origin. */
		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		/* Map the hardware core number to a logical CPU number. */
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;	/* core not configured, skip it */
		/* Cover all hardware threads of the core. */
		for (i = 0; i <= smp_cpu_mtid; i++) {
			topo = &cpu_topology[lcpu + i];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			topo->dedicated = tl_core->d;
			cpumask_set_cpu(lcpu + i, &drawer->mask);
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			cpumask_set_cpu(lcpu + i, &cpus_with_topology);
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
	}
}
145 | ||
4cb14bc8 | 146 | static void clear_masks(void) |
dbd70fb4 | 147 | { |
4cb14bc8 | 148 | struct mask_info *info; |
dbd70fb4 | 149 | |
d1e57508 | 150 | info = &socket_info; |
4cb14bc8 | 151 | while (info) { |
0f1959f5 | 152 | cpumask_clear(&info->mask); |
4cb14bc8 HC |
153 | info = info->next; |
154 | } | |
4cb14bc8 HC |
155 | info = &book_info; |
156 | while (info) { | |
0f1959f5 | 157 | cpumask_clear(&info->mask); |
4cb14bc8 | 158 | info = info->next; |
dbd70fb4 | 159 | } |
adac0f1e HC |
160 | info = &drawer_info; |
161 | while (info) { | |
162 | cpumask_clear(&info->mask); | |
163 | info = info->next; | |
164 | } | |
dbd70fb4 HC |
165 | } |
166 | ||
/*
 * Advance to the next topology entry: CPU entries (nl == 0) and
 * container entries have different sizes, so step by the right one.
 */
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}
173 | ||
/*
 * Walk the SYSINFO 15.1.x topology block and rebuild the drawer/book/
 * socket mask lists from its nested container and CPU entries.
 */
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:		/* drawer container */
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:		/* book container */
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:		/* socket container */
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:		/* CPU entry */
			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
			break;
		default:	/* unknown nesting level: discard everything */
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}
208 | ||
/*
 * Without hardware topology support all CPUs are reported as
 * horizontally polarized.
 */
static void topology_update_polarization_simple(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}
216 | ||
/*
 * Execute the PTF instruction (opcode 0xb9a2) with function code @fc
 * (one of PTF_HORIZONTAL/PTF_VERTICAL/PTF_CHECK) and return its
 * condition code.
 */
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		" .insn rre,0xb9a20000,%1,%1\n"
		" ipm %0\n"		/* extract condition code ... */
		" srl %0,28\n"		/* ... into the low bits of rc */
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}
229 | ||
230 | int topology_set_cpu_management(int fc) | |
231 | { | |
83a24e32 | 232 | int cpu, rc; |
c10fde0d | 233 | |
9186d7a9 | 234 | if (!MACHINE_HAS_TOPOLOGY) |
c10fde0d HC |
235 | return -EOPNOTSUPP; |
236 | if (fc) | |
237 | rc = ptf(PTF_VERTICAL); | |
238 | else | |
239 | rc = ptf(PTF_HORIZONTAL); | |
240 | if (rc) | |
241 | return -EBUSY; | |
5439050f | 242 | for_each_possible_cpu(cpu) |
50ab9a9a | 243 | smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); |
dbd70fb4 HC |
244 | return rc; |
245 | } | |
246 | ||
/*
 * Recompute the per-CPU thread/core/book/drawer masks from the current
 * mask lists. Outside of hardware topology mode the IDs are synthesized:
 * package mode puts everything into container 0, single mode gives each
 * CPU its own IDs.
 */
static void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo;
	int cpu, id;

	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
		topo->thread_mask = cpu_thread_map(cpu);
		topo->core_mask = cpu_group_map(&socket_info, cpu);
		topo->book_mask = cpu_group_map(&book_info, cpu);
		topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = id;
			topo->book_id = id;
			topo->drawer_id = id;
			/* No HW info: every present CPU counts as covered. */
			if (cpu_present(cpu))
				cpumask_set_cpu(cpu, &cpus_with_topology);
		}
	}
	numa_update_cpu_topology();
}
271 | ||
/*
 * Read the machine topology information (SYSINFO 15.1.x) into *info,
 * limited to the supported nesting level.
 */
void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}
276 | ||
1887aa07 MS |
277 | static void __arch_update_dedicated_flag(void *arg) |
278 | { | |
279 | if (topology_cpu_dedicated(smp_processor_id())) | |
280 | set_cpu_flag(CIF_DEDICATED_CPU); | |
281 | else | |
282 | clear_cpu_flag(CIF_DEDICATED_CPU); | |
283 | } | |
284 | ||
/*
 * Re-read the hardware topology (if available) and rebuild all masks
 * under smp_cpu_state_mutex. Returns 1 if hardware topology information
 * was (re)read, 0 otherwise.
 */
static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	int rc = 0;

	mutex_lock(&smp_cpu_state_mutex);
	cpumask_clear(&cpus_with_topology);
	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc;
}
303 | ||
/*
 * Scheduler hook: update the topology, refresh every CPU's dedicated
 * flag, and notify userspace via a change uevent per online CPU.
 */
int arch_update_cpu_topology(void)
{
	struct device *dev;
	int cpu, rc;

	rc = __arch_update_cpu_topology();
	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}
317 | ||
/* Workqueue callback: rebuild the scheduler domains after a topology change. */
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

/* Schedule an asynchronous topology update. */
void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

/* Wait until a previously scheduled topology update has finished. */
static void topology_flush_work(void)
{
	flush_work(&topology_work);
}
332 | ||
/*
 * Timer callback: ask the hardware (PTF_CHECK) whether a topology change
 * is pending; if so, schedule an update. Always re-arm the timer.
 */
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

/* Deferred timer so idle CPUs are not woken just for the topology poll. */
static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

/* Remaining number of fast polls; see topology_expect_change(). */
static atomic_t topology_poll = ATOMIC_INIT(0);
344 | ||
/*
 * Re-arm the topology check timer: poll every 100ms while topology_poll
 * counts down (a change is expected), otherwise once a minute.
 */
static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

/*
 * Called when a topology change is likely; raise the polling frequency
 * for the next ~60 timer intervals.
 */
void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}
365 | ||
/* Current dispatching mode: 0 = horizontal, 1 = vertical (see _store). */
static int cpu_management;

/* sysfs show for the "dispatching" attribute: print cpu_management. */
static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
379 | ||
/*
 * sysfs store for the "dispatching" attribute: accept "0" (horizontal)
 * or "1" (vertical) and switch the CPU dispatching mode accordingly.
 */
static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	/* Exactly one integer, no trailing garbage (delim must not match). */
	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;	/* nothing to do */
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);
409 | ||
/* Per-CPU sysfs "polarization" attribute: print the current polarization. */
static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

/* Attributes created for every CPU (see topology_cpu_init). */
static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};
447 | ||
/* Per-CPU sysfs "dedicated" attribute: 1 if the CPU is dedicated. */
static ssize_t cpu_dedicated_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);

/* Extra attributes, only created when hardware topology is available. */
static struct attribute *topology_extra_cpu_attrs[] = {
	&dev_attr_dedicated.attr,
	NULL,
};

static struct attribute_group topology_extra_cpu_attr_group = {
	.attrs = topology_extra_cpu_attrs,
};
469 | ||
/*
 * Create the per-CPU topology sysfs attributes; the extra group only
 * with hardware topology support. On failure of the extra group the
 * base group is removed again. Returns 0 or a negative errno.
 */
int topology_cpu_init(struct cpu *cpu)
{
	int rc;

	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	if (rc || !MACHINE_HAS_TOPOLOGY)
		return rc;
	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
	if (rc)
		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	return rc;
}
482 | ||
/* Accessors returning the precomputed masks for the scheduler levels. */
static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}


const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &cpu_topology[cpu].drawer_mask;
}

/* Scheduler domain hierarchy: SMT -> MC -> BOOK -> DRAWER -> DIE. */
static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
512 | ||
/*
 * Allocate the linked list of mask_info nodes for one topology level.
 * @offset selects the level (1 = socket, 2 = book, 3 = drawer); the
 * number of nodes is the product of the container counts (info->mag[])
 * from that level upward, at least 1.
 */
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
		mask = mask->next;
	}
}
527 | ||
/*
 * Early boot setup: register the scheduler topology, pick the topology
 * mode (unless already set via the "topology=" parameter), read the
 * initial hardware topology and allocate the per-level mask lists.
 */
void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
		if (MACHINE_HAS_TOPOLOGY)
			topology_mode = TOPOLOGY_MODE_HW;
		else
			topology_mode = TOPOLOGY_MODE_SINGLE;
	}
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	__arch_update_cpu_topology();
	__arch_update_dedicated_flag(NULL);
}
d05d15da | 554 | |
1b25fda0 HC |
555 | static inline int topology_get_mode(int enabled) |
556 | { | |
557 | if (!enabled) | |
558 | return TOPOLOGY_MODE_SINGLE; | |
559 | return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE; | |
560 | } | |
561 | ||
51dce386 HC |
562 | static inline int topology_is_enabled(void) |
563 | { | |
564 | return topology_mode != TOPOLOGY_MODE_SINGLE; | |
565 | } | |
566 | ||
1b25fda0 HC |
567 | static int __init topology_setup(char *str) |
568 | { | |
569 | bool enabled; | |
570 | int rc; | |
571 | ||
572 | rc = kstrtobool(str, &enabled); | |
573 | if (rc) | |
574 | return rc; | |
575 | topology_mode = topology_get_mode(enabled); | |
576 | return 0; | |
577 | } | |
578 | early_param("topology", topology_setup); | |
579 | ||
/*
 * sysctl handler for s390.topology: reads return "0\n" or "1\n",
 * writes of '0'/'1' switch the topology mode and synchronously wait
 * for the resulting update to complete.
 */
static int topology_ctl_handler(struct ctl_table *ctl, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int len;
	int new_mode;
	char buf[2];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		/* buf holds '0'/'1' + '\n'; no NUL, hence strnlen below. */
		strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
			ARRAY_SIZE(buf));
		len = strnlen(buf, ARRAY_SIZE(buf));
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	/* Consume the whole write, but only look at the first byte(s). */
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	if (buf[0] != '0' && buf[0] != '1')
		return -EINVAL;
	mutex_lock(&smp_cpu_state_mutex);
	new_mode = topology_get_mode(buf[0] == '1');
	if (topology_mode != new_mode) {
		topology_mode = new_mode;
		topology_schedule_update();
	}
	mutex_unlock(&smp_cpu_state_mutex);
	topology_flush_work();
out:
	*lenp = len;
	*ppos += len;
	return 0;
}
619 | ||
/* sysctl table: exposes the handler above as "s390/topology". */
static struct ctl_table topology_ctl_table[] = {
	{
		.procname	= "topology",
		.mode		= 0644,
		.proc_handler	= topology_ctl_handler,
	},
	{ },
};

static struct ctl_table topology_dir_table[] = {
	{
		.procname	= "s390",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= topology_ctl_table,
	},
	{ },
};
638 | ||
/*
 * Late init: start the topology poll timer (with hardware support) or
 * fall back to simple horizontal polarization, then register the sysctl
 * table and the "dispatching" sysfs attribute.
 */
static int __init topology_init(void)
{
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	register_sysctl_table(topology_dir_table);
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);