// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/topology.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or
 * polarization member of a pcpu data structure within the pcpu_devices
 * array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

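/*
 * Like __pcpu_sigp_relax(), but addresses the cpu via its pcpu and
 * inserts a small udelay between retries once the order has been
 * rejected as busy a few times.
 */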
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

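/*
 * Signal an ec_xxx event to a cpu. The event is recorded in the pcpu's
 * ec_mask; if the bit was already set a signal is still pending and no
 * new sigp is needed. A running cpu is signalled with an external
 * call, a cpu that is not currently running with an emergency signal.
 */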
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

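/*
 * Allocate the lowcore, the nodat stack and the async stack for a cpu.
 * The first 512 bytes of the new lowcore are copied from the boot
 * cpu's lowcore, the remainder is cleared before the individual fields
 * are set up.
 */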
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack;
	struct lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
		if (!pcpu->lowcore || !nodat_stack)
			goto out;
	} else {
		nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	}
	async_stack = stack_alloc();
	if (!async_stack)
		goto out;
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	if (nmi_alloc_per_cpu(lc))
		goto out_async;
	if (vdso_alloc_per_cpu(lc))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out_mcesa:
	nmi_free_per_cpu(lc);
out_async:
	stack_free(async_stack);
out:
	if (pcpu != &pcpu_devices[0]) {
		free_pages(nodat_stack, THREAD_SIZE_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long async_stack, nodat_stack, lowcore;

	nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
	lowcore = (unsigned long) pcpu->lowcore;

	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	nmi_free_per_cpu(pcpu->lowcore);
	stack_free(async_stack);
	if (pcpu == &pcpu_devices[0])
		return;
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages(lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */

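/*
 * Initialize the lowcore fields of a secondary cpu that depend on its
 * logical cpu number and copy the control registers and facility lists
 * from the boot cpu.
 */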
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	arch_spin_lock_setup(cpu);
}

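/*
 * Attach a task to a (stopped) cpu: point the lowcore at the task's
 * kernel stack and seed the current task pointer and the accumulated
 * cputime fields from the task.
 */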
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->nodat_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(void (*func)(void*), void *data)
{
	func(data);	/* should not return */
}

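/*
 * If the target cpu is the current cpu, func is called directly on the
 * given stack. Otherwise the target cpu is stopped, its restart
 * parameters are written into absolute lowcore and a sigp restart is
 * sent to it; the current cpu stops itself afterwards.
 */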
static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
						void (*func)(void *),
						void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	if (pcpu->address == source_cpu)
		CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	__bpon();
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->lowcore->nodat_stack);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

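/*
 * A vcpu is considered preempted only if it is neither in enabled
 * wait (waiting voluntarily) nor currently backed by a running
 * physical cpu.
 */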
bool arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

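/*
 * Yield the current cpu in favor of the target cpu (diag 0x9c,
 * directed yield) if the machine supports it, otherwise simply give
 * up the time slice (diag 0x44).
 */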
void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C) {
		diag_stat_inc_norecursion(DIAG_STAT_X09C);
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	} else if (MACHINE_HAS_DIAG44) {
		diag_stat_inc_norecursion(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	cpumask_t cpumask;
	u64 end;
	int cpu;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

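	/* Allow up to one second; 1000000 us in TOD clock format. */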
	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

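/*
 * Store the status of the addressed cpu into its lowcore save areas:
 * first the floating point and general register save area, then, if
 * the machine has the vector or guarded storage facility, the
 * additional status via the machine check extended save area.
 */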
int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

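/*
 * Query the cpu/core information from the sclp. If that fails (or has
 * failed before), fall back to probing all possible cpu addresses
 * with sigp sense.
 */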
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_present_cpu(int cpu);

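/*
 * Map detected cores to logical cpu numbers. A core with core_id n
 * covers the physical addresses n << smp_cpu_mt_shift up to
 * (n << smp_cpu_mt_shift) + smp_cpu_mtid, one per hardware thread.
 */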
static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (sclp.has_core_type && info->core[i].type != boot_core_type)
			continue;
		address = info->core[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
}

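/*
 * Early setup for a freshly started secondary cpu: set up the clock
 * and cpu timers, enable pfault, mark the cpu online and enter the
 * idle loop.
 */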
static void smp_init_secondary(void)
{
	int cpu = smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	restore_access_regs(S390_lowcore.access_regs_save_area);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Activate a secondary processor.
 */
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	__bpon();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

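/*
 * The possible mask is the smaller of the possible_cpus= kernel
 * parameter (if given) and the limit derived from the sclp values for
 * cores and threads, both capped by nr_cpu_ids.
 */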
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

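/*
 * Writing 0 to the configure attribute deconfigures the cpu via the
 * sclp, writing 1 configures it. The change always applies to a whole
 * core including all of its threads; online cpus and cpu 0 are
 * rejected.
 */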
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}
static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

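/*
 * Rescan for new cpus: get a fresh core list from the sclp and add
 * any cores that have shown up since boot as present cpus.
 */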
int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);