/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2008 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

static inline void __init smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	plat_prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}

void __devinit smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);
}

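/*
 * Entry point for a secondary CPU, reached from the startup code in
 * head.S via stack_start.start_kernel_fn. The secondary borrows
 * init_mm as its active mm, sets up its traps and local timer,
 * calibrates its delay loop, marks itself online and then enters the
 * idle loop.
 */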
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu;
	struct mm_struct *mm = &init_mm;

	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(mm, current);

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(smp_processor_id());

	local_irq_enable();

	cpu = smp_processor_id();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	cpu_set(cpu, cpu_online_map);

	cpu_idle();
}

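/*
 * Boot parameter block shared with the secondary startup code in
 * head.S; __cpu_up() below fills it in before kicking the target CPU
 * through plat_start_cpu().
 */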
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

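/*
 * Bring one secondary CPU online: fork its idle task, publish the boot
 * parameters for head.S, start the CPU through the platform hook, then
 * wait up to one second (HZ jiffies) for it to mark itself online.
 */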
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = fork_idle(cpu);
	if (IS_ERR(tsk)) {
		printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
		return PTR_ERR(tsk);
	}

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	plat_start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

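/*
 * smp_send_stop() below relies on a stop_this_cpu() helper that this
 * snapshot does not define. A minimal sketch of the conventional
 * helper (an assumption here; other ports of this era park the CPU
 * the same way) would be:
 */
static void stop_this_cpu(void *unused)
{
	/* Take ourselves out of the online map and spin with IRQs off. */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();

	for (;;)
		cpu_relax();
}
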
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		plat_send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		plat_send_ipi(cpu, SMP_MSG_TIMER);
}

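/*
 * A local timer tick delivered by an SMP_MSG_TIMER IPI (sent from
 * smp_timer_broadcast() above); run it in hard-irq context.
 */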
static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

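/*
 * Demultiplex an incoming IPI raised via plat_send_ipi().
 * SMP_MSG_RESCHEDULE intentionally has no handler: taking the
 * interrupt is enough, as the reschedule happens on the
 * return-from-interrupt path.
 */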
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down, or PTE attributes are changing. For single-threaded
 * address spaces, a new context is obtained on the current CPU, and the
 * TLB context on other CPUs is invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other
 * CPUs. For multithreaded address spaces, inter-CPU interrupts have to
 * be sent. Another case where inter-CPU interrupts are required is when
 * the target mm might be active on another CPU (e.g. debuggers doing
 * the flushes on behalf of debuggees, kswapd stealing pages from
 * another process, etc.).
 * Kanoj 07/00.
 */

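/*
 * For the single-threaded case below, clearing cpu_context(i, mm) on
 * every other CPU invalidates the mm's ASID there, so switch_mm() must
 * allocate a fresh context before the mm can run on that CPU again;
 * this avoids sending a cross-CPU IPI entirely.
 */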
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}