Commit | Line | Data |
---|---|---|
dbd70fb4 | 1 | /* |
a53c8fab | 2 | * Copyright IBM Corp. 2007, 2011 |
dbd70fb4 HC |
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> |
4 | */ | |
5 | ||
395d31d4 MS |
6 | #define KMSG_COMPONENT "cpu" |
7 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | |
8 | ||
83a24e32 | 9 | #include <linux/workqueue.h> |
8c910580 | 10 | #include <linux/bootmem.h> |
51dce386 HC |
11 | #include <linux/uaccess.h> |
12 | #include <linux/sysctl.h> | |
83a24e32 HC |
13 | #include <linux/cpuset.h> |
14 | #include <linux/device.h> | |
80020fbd | 15 | #include <linux/export.h> |
83a24e32 | 16 | #include <linux/kernel.h> |
dbd70fb4 | 17 | #include <linux/sched.h> |
105ab3d8 | 18 | #include <linux/sched/topology.h> |
83a24e32 | 19 | #include <linux/delay.h> |
d05d15da HC |
20 | #include <linux/init.h> |
21 | #include <linux/slab.h> | |
dbd70fb4 HC |
22 | #include <linux/cpu.h> |
23 | #include <linux/smp.h> | |
83a24e32 | 24 | #include <linux/mm.h> |
3a368f74 PH |
25 | #include <linux/nodemask.h> |
26 | #include <linux/node.h> | |
78609132 | 27 | #include <asm/sysinfo.h> |
3a368f74 | 28 | #include <asm/numa.h> |
dbd70fb4 | 29 | |
c10fde0d HC |
30 | #define PTF_HORIZONTAL (0UL) |
31 | #define PTF_VERTICAL (1UL) | |
32 | #define PTF_CHECK (2UL) | |
dbd70fb4 | 33 | |
/*
 * Topology modes:
 * HW      - build masks from the machine provided topology info (SYSIB 15.1.x)
 * SINGLE  - each CPU is its own group (topology disabled)
 * PACKAGE - all present CPUs form a single package
 */
enum {
	TOPOLOGY_MODE_HW,
	TOPOLOGY_MODE_SINGLE,
	TOPOLOGY_MODE_PACKAGE,
	TOPOLOGY_MODE_UNINITIALIZED
};
40 | ||
/*
 * Singly linked list node describing one topology container:
 * the container id and the mask of CPUs it contains.
 */
struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};
46 | ||
/* Current topology mode; chosen in topology_init_early()/topology_setup(). */
static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
/* Page-sized SYSIB 15.1.x buffer, allocated in topology_init_early(). */
static struct sysinfo_15_1_x *tl_info;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

/* CPUs for which real (machine provided) topology information exists. */
cpumask_t cpus_with_topology;
66 | ||
/*
 * Return the mask of CPUs sharing a container with @cpu according to the
 * mask_info list @info. Depending on the topology mode the result is the
 * matching container mask (HW), all present CPUs (PACKAGE), or just @cpu
 * itself (SINGLE / unknown mode).
 */
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		/* Walk the list until the container holding @cpu is found. */
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
				mask = info->mask;
				break;
			}
			info = info->next;
		}
		/* Safety net: never return an empty mask. */
		if (cpumask_empty(&mask))
			cpumask_copy(&mask, cpumask_of(cpu));
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);
		break;
	default:
		/* fallthrough */
	case TOPOLOGY_MODE_SINGLE:
		cpumask_copy(&mask, cpumask_of(cpu));
		break;
	}
	return mask;
}
95 | ||
/*
 * Return the mask of hardware threads belonging to the same core as @cpu.
 * Outside of TOPOLOGY_MODE_HW every CPU is its own thread group.
 */
static cpumask_t cpu_thread_map(unsigned int cpu)
{
	cpumask_t mask;
	int i;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (topology_mode != TOPOLOGY_MODE_HW)
		return mask;
	/* Round down to the first thread of the core. */
	cpu -= cpu % (smp_cpu_mtid + 1);
	/* Add all present sibling threads of that core. */
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	return mask;
}
110 | ||
251ea0ca HC |
#define TOPOLOGY_CORE_BITS 64

/*
 * For every core set in @tl_core->mask, add all of its hardware threads to
 * the given drawer/book/socket masks and fill in the per-cpu topology ids
 * and polarization. Cores without a matching logical CPU are skipped.
 */
static void add_cpus_to_mask(struct topology_core *tl_core,
			     struct mask_info *drawer,
			     struct mask_info *book,
			     struct mask_info *socket)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		/*
		 * Bit positions count from the highest core number, so
		 * invert the bit index and add the entry's origin.
		 */
		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;
		/* Record topology info for each thread of the core. */
		for (i = 0; i <= smp_cpu_mtid; i++) {
			topo = &cpu_topology[lcpu + i];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			cpumask_set_cpu(lcpu + i, &drawer->mask);
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			cpumask_set_cpu(lcpu + i, &cpus_with_topology);
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
	}
}
144 | ||
4cb14bc8 | 145 | static void clear_masks(void) |
dbd70fb4 | 146 | { |
4cb14bc8 | 147 | struct mask_info *info; |
dbd70fb4 | 148 | |
d1e57508 | 149 | info = &socket_info; |
4cb14bc8 | 150 | while (info) { |
0f1959f5 | 151 | cpumask_clear(&info->mask); |
4cb14bc8 HC |
152 | info = info->next; |
153 | } | |
4cb14bc8 HC |
154 | info = &book_info; |
155 | while (info) { | |
0f1959f5 | 156 | cpumask_clear(&info->mask); |
4cb14bc8 | 157 | info = info->next; |
dbd70fb4 | 158 | } |
adac0f1e HC |
159 | info = &drawer_info; |
160 | while (info) { | |
161 | cpumask_clear(&info->mask); | |
162 | info = info->next; | |
163 | } | |
dbd70fb4 HC |
164 | } |
165 | ||
c30f91b6 | 166 | static union topology_entry *next_tle(union topology_entry *tle) |
dbd70fb4 | 167 | { |
c30f91b6 | 168 | if (!tle->nl) |
10ad34bc | 169 | return (union topology_entry *)((struct topology_core *)tle + 1); |
c30f91b6 | 170 | return (union topology_entry *)((struct topology_container *)tle + 1); |
dbd70fb4 HC |
171 | } |
172 | ||
/*
 * Walk the SYSIB 15.1.x topology entry list and rebuild the drawer, book
 * and socket mask lists. Container entries (nl 1..3) advance the current
 * list position and record the container id; core entries (nl 0) add
 * their CPUs to the current containers. Unknown nesting levels abort the
 * walk with cleared masks.
 */
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
			break;
		default:
			/* Unexpected nesting level: discard everything. */
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}
207 | ||
/* Without machine topology all CPUs are horizontally polarized. */
static void topology_update_polarization_simple(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}
215 | ||
/*
 * Issue the Perform Topology Function instruction (opcode 0xb9a2) with
 * function code @fc and return its condition code.
 */
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		" .insn rre,0xb9a20000,%1,%1\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}
228 | ||
229 | int topology_set_cpu_management(int fc) | |
230 | { | |
83a24e32 | 231 | int cpu, rc; |
c10fde0d | 232 | |
9186d7a9 | 233 | if (!MACHINE_HAS_TOPOLOGY) |
c10fde0d HC |
234 | return -EOPNOTSUPP; |
235 | if (fc) | |
236 | rc = ptf(PTF_VERTICAL); | |
237 | else | |
238 | rc = ptf(PTF_HORIZONTAL); | |
239 | if (rc) | |
240 | return -EBUSY; | |
5439050f | 241 | for_each_possible_cpu(cpu) |
50ab9a9a | 242 | smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); |
dbd70fb4 HC |
243 | return rc; |
244 | } | |
245 | ||
/*
 * Recompute the thread/core/book/drawer masks for all possible CPUs from
 * the mask_info lists. In non-HW modes the container ids are synthesized:
 * PACKAGE puts everything into id 0, SINGLE gives each CPU its own id.
 */
static void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo;
	int cpu, id;

	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
		topo->thread_mask = cpu_thread_map(cpu);
		topo->core_mask = cpu_group_map(&socket_info, cpu);
		topo->book_mask = cpu_group_map(&book_info, cpu);
		topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = id;
			topo->book_id = id;
			topo->drawer_id = id;
			if (cpu_present(cpu))
				cpumask_set_cpu(cpu, &cpus_with_topology);
		}
	}
	numa_update_cpu_topology();
}
270 | ||
/* Read SYSIB 15.1.x topology information at the maximum supported nesting. */
void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}
275 | ||
/*
 * Re-read the machine topology and rebuild all masks under
 * smp_cpu_state_mutex. Returns 1 when machine topology information was
 * read, 0 otherwise (simple horizontal polarization is applied instead).
 */
static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	int rc = 0;

	mutex_lock(&smp_cpu_state_mutex);
	cpumask_clear(&cpus_with_topology);
	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc;
}
294 | ||
/*
 * Scheduler callback: update the topology and send a KOBJ_CHANGE uevent
 * for every online cpu device so user space notices the change.
 */
int arch_update_cpu_topology(void)
{
	struct device *dev;
	int cpu, rc;

	rc = __arch_update_cpu_topology();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}
307 | ||
/* Deferred work: rebuild the scheduler domains after a topology change. */
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
312 | ||
/* Queue an asynchronous topology update. */
void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}
317 | ||
/* Wait until a pending topology update has completed. */
static void topology_flush_work(void)
{
	flush_work(&topology_work);
}
322 | ||
/*
 * Timer callback: check for a topology change via PTF and schedule an
 * update if one is reported, then re-arm the timer.
 */
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}
329 | ||
static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

/* Number of remaining fast (100ms) poll intervals; see set_topology_timer(). */
static atomic_t topology_poll = ATOMIC_INIT(0);
dbd70fb4 HC |
/*
 * Re-arm the topology check timer: poll every 100ms while a change is
 * expected (topology_poll > 0), otherwise once per minute.
 */
static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}
342 | ||
/* Switch to fast topology polling for roughly the next 60 intervals. */
void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}
355 | ||
/* Current dispatching mode: 0 = horizontal, 1 = vertical polarization. */
static int cpu_management;
357 | ||
/* sysfs: show the current cpu dispatching mode ("0" or "1"). */
static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
369 | ||
/*
 * sysfs: set the cpu dispatching mode. Accepts "0" (horizontal) or "1"
 * (vertical); any other input yields -EINVAL. On a successful switch,
 * fast topology polling is enabled via topology_expect_change().
 */
static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	/* Reject anything but a single integer token. */
	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);
399 | ||
/* sysfs: show the polarization of one cpu as a human readable string. */
static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);
83a24e32 HC |
428 | |
/* Per-cpu sysfs attributes; currently only "polarization". */
static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};
437 | ||
/* Create the topology attribute group for a cpu device. */
int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}
442 | ||
/* Scheduler accessor: hardware threads sharing a core with @cpu. */
static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}
447 | ||
448 | ||
/* Scheduler accessor: CPUs in the same socket (core group) as @cpu. */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}
453 | ||
/* Scheduler accessor: CPUs in the same book as @cpu. */
static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}
458 | ||
/* Scheduler accessor: CPUs in the same drawer as @cpu. */
static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &cpu_topology[cpu].drawer_mask;
}
463 | ||
/* Scheduler domain hierarchy: SMT -> MC -> BOOK -> DRAWER -> DIE. */
static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
472 | ||
d05d15da HC |
473 | static void __init alloc_masks(struct sysinfo_15_1_x *info, |
474 | struct mask_info *mask, int offset) | |
475 | { | |
476 | int i, nr_masks; | |
477 | ||
478 | nr_masks = info->mag[TOPOLOGY_NR_MAG - offset]; | |
479 | for (i = 0; i < info->mnest - offset; i++) | |
480 | nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; | |
481 | nr_masks = max(nr_masks, 1); | |
482 | for (i = 0; i < nr_masks; i++) { | |
8c910580 | 483 | mask->next = memblock_virt_alloc(sizeof(*mask->next), 8); |
d05d15da HC |
484 | mask = mask->next; |
485 | } | |
486 | } | |
487 | ||
/*
 * Early boot setup: register the scheduler domain table, pick the
 * topology mode (unless already set via the "topology=" parameter),
 * read the machine topology, allocate the per-level mask lists and
 * perform an initial topology update.
 */
void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
		if (MACHINE_HAS_TOPOLOGY)
			topology_mode = TOPOLOGY_MODE_HW;
		else
			topology_mode = TOPOLOGY_MODE_SINGLE;
	}
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	__arch_update_cpu_topology();
}
d05d15da | 513 | |
1b25fda0 HC |
514 | static inline int topology_get_mode(int enabled) |
515 | { | |
516 | if (!enabled) | |
517 | return TOPOLOGY_MODE_SINGLE; | |
518 | return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE; | |
519 | } | |
520 | ||
/* Topology counts as enabled in every mode except SINGLE. */
static inline int topology_is_enabled(void)
{
	return topology_mode != TOPOLOGY_MODE_SINGLE;
}
525 | ||
1b25fda0 HC |
526 | static int __init topology_setup(char *str) |
527 | { | |
528 | bool enabled; | |
529 | int rc; | |
530 | ||
531 | rc = kstrtobool(str, &enabled); | |
532 | if (rc) | |
533 | return rc; | |
534 | topology_mode = topology_get_mode(enabled); | |
535 | return 0; | |
536 | } | |
537 | early_param("topology", topology_setup); | |
538 | ||
/*
 * sysctl handler for /proc/sys/s390/topology. Reads return "0" or "1";
 * writing '0' or '1' switches the topology mode, schedules an update and
 * waits for it to finish. Only the first byte of the write is evaluated.
 */
static int topology_ctl_handler(struct ctl_table *ctl, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int len;
	int new_mode;
	char buf[2];

	/* Nothing to do on empty requests or non-zero offsets. */
	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		/* buf holds at most "1\n" without terminating NUL. */
		strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
			ARRAY_SIZE(buf));
		len = strnlen(buf, ARRAY_SIZE(buf));
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	if (buf[0] != '0' && buf[0] != '1')
		return -EINVAL;
	mutex_lock(&smp_cpu_state_mutex);
	new_mode = topology_get_mode(buf[0] == '1');
	if (topology_mode != new_mode) {
		topology_mode = new_mode;
		topology_schedule_update();
	}
	mutex_unlock(&smp_cpu_state_mutex);
	topology_flush_work();
out:
	*lenp = len;
	*ppos += len;
	return 0;
}
578 | ||
/* Leaf sysctl entry: s390/topology. */
static struct ctl_table topology_ctl_table[] = {
	{
		.procname = "topology",
		.mode = 0644,
		.proc_handler = topology_ctl_handler,
	},
	{ },
};
587 | ||
/* Directory entry: /proc/sys/s390, parent of the topology sysctl. */
static struct ctl_table topology_dir_table[] = {
	{
		.procname = "s390",
		.maxlen = 0,
		.mode = 0555,
		.child = topology_ctl_table,
	},
	{ },
};
597 | ||
/*
 * Late init: start topology change polling (or fall back to simple
 * horizontal polarization), register the sysctl table and create the
 * cpu subsystem "dispatching" attribute.
 */
static int __init topology_init(void)
{
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	register_sysctl_table(topology_dir_table);
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);