// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
        IPI_WAKEUP,
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_IRQ_WORK,
        IPI_COMPLETION,
        NR_IPI,
        /*
         * CPU_BACKTRACE is special and not included in NR_IPI
         * or traceable with trace_ipi_*
         */
        IPI_CPU_BACKTRACE = NR_IPI,
        /*
         * SGI8-15 can be reserved by secure firmware, and thus may
         * not be usable by the kernel. Please keep the above limited
         * to at most 8 entries.
         */
        MAX_IPI
};

static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;

static void ipi_setup(int cpu);

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

void __init smp_set_ops(const struct smp_operations *ops)
{
        if (ops)
                smp_ops = *ops;
}
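
/*
 * Illustrative sketch only (the names below are hypothetical, not taken
 * from this file): a platform port typically fills in a struct
 * smp_operations and installs it during early setup, either directly or
 * via its machine descriptor:
 *
 *      static const struct smp_operations foo_smp_ops __initconst = {
 *              .smp_prepare_cpus       = foo_smp_prepare_cpus,
 *              .smp_boot_secondary     = foo_boot_secondary,
 *      };
 *
 *      smp_set_ops(&foo_smp_ops);
 */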
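/*
 * The secondary needs a physical pointer to the kernel page tables.  With
 * LPAE the tables may live above the 32-bit physical range, so hand over a
 * page frame number (which still fits in an unsigned long) rather than a
 * possibly truncated physical address.
 */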
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
        return __phys_to_pfn(virt_to_phys(pgd));
#else
        return virt_to_phys(pgd);
#endif
}

#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
static int secondary_biglittle_prepare(unsigned int cpu)
{
        if (!cpu_vtable[cpu])
                cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);

        return cpu_vtable[cpu] ? 0 : -ENOMEM;
}

static void secondary_biglittle_init(void)
{
        init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
        return 0;
}

static void secondary_biglittle_init(void)
{
}
#endif

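/*
 * Bring a secondary CPU into the system.  Called from the generic CPU
 * hotplug code once the idle thread for that CPU has been created.
 */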
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        if (!smp_ops.smp_boot_secondary)
                return -ENOSYS;

        ret = secondary_biglittle_prepare(cpu);
        if (ret)
                return ret;

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
        secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
        secondary_data.pgdir = virt_to_phys(idmap_pgd);
        secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
        secondary_data.task = idle;
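        /*
         * The incoming CPU runs with the MMU and caches off until it has
         * switched to the page tables above, so write secondary_data back
         * out to RAM where it can see it.
         */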
        sync_cache_w(&secondary_data);

        /*
         * Now bring the CPU into our world.
         */
        ret = smp_ops.smp_boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        memset(&secondary_data, 0, sizeof(secondary_data));
        return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
        if (smp_ops.smp_init_cpus)
                smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
        return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        if (smp_ops.cpu_kill)
                return 1;
#endif

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
        if (smp_ops.cpu_kill)
                return smp_ops.cpu_kill(cpu);
        return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
        if (smp_ops.cpu_disable)
                return smp_ops.cpu_disable(cpu);

        return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
        /* cpu_die must be specified to support hotplug */
        if (!smp_ops.cpu_die)
                return 0;

        if (smp_ops.cpu_can_disable)
                return smp_ops.cpu_can_disable(cpu);

        /*
         * By default, allow disabling all CPUs except the first one,
         * since this is special on a lot of platforms, e.g. because
         * of clock tick interrupts.
         */
        return cpu != 0;
}

static void ipi_teardown(int cpu)
{
        int i;

        if (WARN_ON_ONCE(!ipi_irq_base))
                return;

        for (i = 0; i < nr_ipi; i++)
                disable_percpu_irq(ipi_irq_base + i);
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
        remove_cpu_topology(cpu);
#endif

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);
        ipi_teardown(cpu);

        /*
         * OK - migrate IRQs away from this CPU
         */
        irq_migrate_all_off_this_cpu();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         *
         * Caches are flushed to the Level of Unification Inner Shareable
         * to write-back dirty lines to unified caches shared by all CPUs.
         */
        flush_cache_louis();
        local_flush_tlb_all();

        return 0;
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or the wait times out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!cpu_wait_death(cpu, 5)) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        pr_debug("CPU%u: shutdown\n", cpu);

        clear_tasks_mm_cpumask(cpu);
        /*
         * platform_cpu_kill() is generally expected to do the powering off
         * and/or cutting of clocks to the dying CPU.  Optionally, this may
         * be done by the CPU which is dying in preference to supporting
         * this call, but that means there is _no_ synchronisation between
         * the requesting CPU and the dying CPU actually losing power.
         */
        if (!platform_cpu_kill(cpu))
                pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __noreturn arch_cpu_idle_dead(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();

        /*
         * Flush the data out of the L1 cache for this CPU.  This must be
         * before the completion to ensure that data is safely written out
         * before platform_cpu_kill() gets called - which may disable
         * *this* CPU and power down its cache.
         */
        flush_cache_louis();

        /*
         * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
         * this returns, power and/or clocks can be removed at any point
         * from this CPU and its cache by platform_cpu_kill().
         */
        (void)cpu_report_death();

        /*
         * Ensure that the cache lines associated with that completion are
         * written out.  This covers the case where _this_ CPU is doing the
         * powering down, to ensure that the completion is visible to the
         * CPU waiting for this one.
         */
        flush_cache_louis();

        /*
         * The actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.  This may remove power, or it may simply spin.
         *
         * Platforms are generally expected *NOT* to return from this call,
         * although there are some which do because they have no way to
         * power down the CPU.  These platforms are the _only_ reason we
         * have a return path which uses the fragment of assembly below.
         *
         * The return path should not be used for platforms which can
         * power off the CPU.
         */
        if (smp_ops.cpu_die)
                smp_ops.cpu_die(cpu);

        pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
                cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
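        /*
         * The first operand resets the stack pointer to THREAD_START_SP
         * (THREAD_SIZE - 8) of the idle thread's stack; fp is cleared and
         * the second operand places 'current' in r0, which is the task
         * argument secondary_start_kernel() expects.
         */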
        __asm__("mov    sp, %0\n"
        "       mov     fp, #0\n"
        "       mov     r0, %1\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8),
                  "r" (current)
                : "r0");

        unreachable();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
        cpu_info->cpuid = read_cpuid_id();

        store_cpu_topology(cpuid);
        check_cpu_icache_size(cpuid);
}

static void set_current(struct task_struct *cur)
{
        /* Set TPIDRURO */
        asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(struct task_struct *task)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;

        set_current(task);

        secondary_biglittle_init();

        /*
         * The identity mapping is uncached (strongly ordered), so
         * switch away from it before attempting any exclusive accesses.
         */
        cpu_switch_mm(mm->pgd, mm);
        local_flush_bp_all();
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        cpu = smp_processor_id();
        mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        cpu_init();

#ifndef CONFIG_MMU
        setup_vectors_base();
#endif
        pr_debug("CPU%u: Booted secondary processor\n", cpu);

        trace_hardirqs_off();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        if (smp_ops.smp_secondary_init)
                smp_ops.smp_secondary_init(cpu);

        notify_cpu_starting(cpu);

        ipi_setup(cpu);

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue - which happens after __cpu_up returns.
         */
        set_cpu_online(cpu, true);

        check_other_bugs();

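        /* Wake the CPU that called __cpu_up() and is waiting on cpu_running. */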
        complete(&cpu_running);

        local_irq_enable();
        local_fiq_enable();
        local_abt_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

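        /*
         * BogoMIPS = loops_per_jiffy * HZ / 500000; the second expression
         * below supplies the two decimal places.
         */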
        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);

        hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = num_possible_cpus();

        init_cpu_topology();

        smp_store_cpu_info(smp_processor_id());

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;
        if (ncores > 1 && max_cpus) {
                /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time. A platform should
                 * re-initialize the map in the platform's smp_prepare_cpus()
                 * if present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(cpu_possible_mask);

                /*
                 * Initialise the SCU if there is more than one CPU
                 * and let them know where to start.
                 */
                if (smp_ops.smp_prepare_cpus)
                        smp_ops.smp_prepare_cpus(max_cpus);
        }
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
        [IPI_WAKEUP]            = "CPU wakeup interrupts",
        [IPI_TIMER]             = "Timer broadcast interrupts",
        [IPI_RESCHEDULE]        = "Rescheduling interrupts",
        [IPI_CALL_FUNC]         = "Function call interrupts",
        [IPI_CPU_STOP]          = "CPU stop interrupts",
        [IPI_IRQ_WORK]          = "IRQ work interrupts",
        [IPI_COMPLETION]        = "completion interrupts",
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                if (!ipi_desc[i])
                        continue;

                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));

                seq_printf(p, " %s\n", ipi_types[i]);
        }
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        if (arch_irq_work_has_interrupt())
                smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        local_fiq_disable();

        if (system_state <= SYSTEM_RUNNING) {
                raw_spin_lock(&stop_lock);
                pr_crit("CPU%u: stopping\n", cpu);
                dump_stack();
                raw_spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        while (1) {
                cpu_relax();
                wfe();
        }
}

static DEFINE_PER_CPU(struct completion *, cpu_completion);

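/*
 * IPI_COMPLETION provides a simple cross-CPU handshake: a completion is
 * registered for a given CPU, and when that CPU later receives the IPI,
 * ipi_complete() completes it and wakes whoever is waiting on it.
 */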
int register_ipi_completion(struct completion *completion, int cpu)
{
        per_cpu(cpu_completion, cpu) = completion;
        return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
        complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
static void do_handle_IPI(int ipinr)
{
        unsigned int cpu = smp_processor_id();

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_entry(ipi_types[ipinr]);

        switch (ipinr) {
        case IPI_WAKEUP:
                break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                tick_receive_broadcast();
                break;
#endif

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                generic_smp_call_function_interrupt();
                break;

        case IPI_CPU_STOP:
                ipi_cpu_stop(cpu);
                break;

#ifdef CONFIG_IRQ_WORK
        case IPI_IRQ_WORK:
                irq_work_run();
                break;
#endif

        case IPI_COMPLETION:
                ipi_complete(cpu);
                break;

        case IPI_CPU_BACKTRACE:
                printk_deferred_enter();
                nmi_cpu_backtrace(get_irq_regs());
                printk_deferred_exit();
                break;

        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n",
                        cpu, ipinr);
                break;
        }

        if ((unsigned)ipinr < NR_IPI)
                trace_ipi_exit(ipi_types[ipinr]);
}

/* Legacy version, should go away once all irqchips have been converted */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();
        do_handle_IPI(ipinr);
        irq_exit();

        set_irq_regs(old_regs);
}

static irqreturn_t ipi_handler(int irq, void *data)
{
        do_handle_IPI(irq - ipi_irq_base);
        return IRQ_HANDLED;
}

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        trace_ipi_raise(target, ipi_types[ipinr]);
        __ipi_send_mask(ipi_desc[ipinr], target);
}

static void ipi_setup(int cpu)
{
        int i;

        if (WARN_ON_ONCE(!ipi_irq_base))
                return;

        for (i = 0; i < nr_ipi; i++)
                enable_percpu_irq(ipi_irq_base + i, 0);
}

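/*
 * Called by the interrupt controller driver (e.g. the GIC drivers) once it
 * has allocated a block of per-CPU IRQs to back the IPIs.
 */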
void __init set_smp_ipi_range(int ipi_base, int n)
{
        int i;

        WARN_ON(n < MAX_IPI);
        nr_ipi = min(n, MAX_IPI);

        for (i = 0; i < nr_ipi; i++) {
                int err;

                err = request_percpu_irq(ipi_base + i, ipi_handler,
                                         "IPI", &irq_stat);
                WARN_ON(err);

                ipi_desc[i] = irq_to_desc(ipi_base + i);
                irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
        }

        ipi_irq_base = ipi_base;

        /* Setup the boot CPU immediately */
        ipi_setup(smp_processor_id());
}

void arch_smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
        unsigned long timeout;
        struct cpumask mask;

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        if (!cpumask_empty(&mask))
                smp_cross_call(&mask, IPI_CPU_STOP);

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * If panic() is called on two CPUs at the same time, and the first calls
 * panic_smp_self_stop() before the second gets as far as
 * crash_smp_send_stop(), the first CPU can no longer receive the stop IPI
 * and so stays marked online, which makes kdump fail.  Hence this
 * architecture-specific panic_smp_self_stop(), which also marks the CPU
 * offline with set_cpu_online(smp_processor_id(), false).
 */
void __noreturn panic_smp_self_stop(void)
{
        pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
                 smp_processor_id());
        set_cpu_online(smp_processor_id(), false);
        while (1)
                cpu_relax();
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

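/*
 * loops_per_jiffy is only valid for the frequency it was calibrated at.
 * Unless the delay loop is frequency-invariant (CPUFREQ_CONST_LOOPS),
 * rescale the global and per-CPU values around frequency transitions so
 * that udelay() stays roughly calibrated.
 */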
static int cpufreq_callback(struct notifier_block *nb,
                            unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpumask *cpus = freq->policy->cpus;
        int cpu, first = cpumask_first(cpus);
        unsigned int lpj;

        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;

        if (!per_cpu(l_p_j_ref, first)) {
                for_each_cpu(cpu, cpus) {
                        per_cpu(l_p_j_ref, cpu) =
                                per_cpu(cpu_data, cpu).loops_per_jiffy;
                        per_cpu(l_p_j_ref_freq, cpu) = freq->old;
                }

                if (!global_l_p_j_ref) {
                        global_l_p_j_ref = loops_per_jiffy;
                        global_l_p_j_ref_freq = freq->old;
                }
        }

        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
                                                global_l_p_j_ref_freq,
                                                freq->new);

                lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
                                    per_cpu(l_p_j_ref_freq, first), freq->new);
                for_each_cpu(cpu, cpus)
                        per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
        }
        return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
        .notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
        return cpufreq_register_notifier(&cpufreq_notifier,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

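/*
 * The backtrace IPI is raised directly on its irq_desc rather than through
 * smp_cross_call(), so it is neither traced nor counted by show_ipi_list()
 * (see the note next to IPI_CPU_BACKTRACE above).
 */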
static void raise_nmi(cpumask_t *mask)
{
        __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}