/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2008 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

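/*
 * Record this CPU's cpuinfo: start from a copy of the boot CPU's data,
 * then save the loops_per_jiffy value this CPU just calibrated.
 */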
static inline void __init smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	plat_prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}

void __devinit smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);
}

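/*
 * First C code run by a secondary CPU. It is entered from the slave
 * startup path in head.S once __cpu_up() has filled in stack_start,
 * and borrows init_mm until a real address space is switched in.
 */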
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu;
	struct mm_struct *mm = &init_mm;

	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(mm, current);

	per_cpu_trap_init();
	preempt_disable();
	notify_cpu_starting(smp_processor_id());
	local_irq_enable();

	cpu = smp_processor_id();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);
	cpu_set(cpu, cpu_online_map);

	cpu_idle();
}

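/*
 * Per-boot parameter block shared with the slave entry code; the field
 * layout here is assumed to match the one consumed in head.S.
 */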
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	unsigned long cpu;
	unsigned long thread_info;
} stack_start;

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = fork_idle(cpu);
	if (IS_ERR(tsk)) {
		printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
		return PTR_ERR(tsk);
	}

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	/* Make the parameter block visible before kicking the CPU */
	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	plat_start_cpu(cpu, (unsigned long)_stext);

	/* Give the secondary up to a second to mark itself online */
	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;
		udelay(10);
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

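/*
 * Summarize the calibration results: loops_per_jiffy * HZ is loops per
 * second, and one BogoMIPS is 500000 loops/sec, so bogosum / (500000/HZ)
 * yields the integer part and (bogosum / (5000/HZ)) % 100 the two
 * fractional digits printed below.
 */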
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

/* Park the calling CPU: mark it offline and spin with interrupts off. */
static void stop_this_cpu(void *unused)
{
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();

	for (;;)
		cpu_relax();
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

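/*
 * Arch hooks for the generic smp_call_function() machinery: the payload
 * is queued by generic code, we only kick the target CPUs with an IPI.
 */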
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		plat_send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

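/*
 * Intended as the clockevents timer broadcast hook: each CPU in @mask is
 * kicked with SMP_MSG_TIMER and answers in ipi_timer() below.
 */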
void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		plat_send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

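/*
 * Demultiplex an incoming IPI: the platform interrupt code decodes the
 * message type and hands it to us here.
 */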
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 */
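/*
 * Zeroing cpu_context() for another cpu invalidates that cpu's context
 * for this mm, so switch_mm() there is forced to allocate a fresh one;
 * this is what lets the single-threaded case skip the IPI entirely.
 */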
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

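/*
 * Argument block handed to the flush IPI handlers; addr1/addr2 carry a
 * start/end range, or an ASID/vaddr pair in the flush_tlb_one() case.
 */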
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

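/*
 * Flush a single (ASID, vaddr) translation everywhere. Unlike the
 * mm-based flushes above, this always broadcasts: the caller supplies
 * the ASID directly, so no per-mm context bookkeeping applies.
 */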
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}