Commit | Line | Data |
---|---|---|
538380da | 1 | #include <linux/types.h> |
51533b61 | 2 | #include <asm/delay.h> |
538380da JN |
3 | #include <irq.h> |
4 | #include <hwregs/intr_vect.h> | |
5 | #include <hwregs/intr_vect_defs.h> | |
51533b61 MS |
6 | #include <asm/tlbflush.h> |
7 | #include <asm/mmu_context.h> | |
538380da JN |
8 | #include <hwregs/asm/mmu_defs_asm.h> |
9 | #include <hwregs/supp_reg.h> | |
60063497 | 10 | #include <linux/atomic.h> |
51533b61 MS |
11 | |
12 | #include <linux/err.h> | |
13 | #include <linux/init.h> | |
14 | #include <linux/timex.h> | |
15 | #include <linux/sched.h> | |
16 | #include <linux/kernel.h> | |
17 | #include <linux/cpumask.h> | |
18 | #include <linux/interrupt.h> | |
c8923c6b | 19 | #include <linux/module.h> |
51533b61 MS |
20 | |
/* IPI vector bits — OR-able flags posted to a CPU's rw_ipi register
 * and decoded in crisv32_ipi_interrupt(). */
#define IPI_SCHEDULE 1		/* force a reschedule on the target CPU */
#define IPI_CALL 2		/* run call_data->func on the target CPU */
#define IPI_FLUSH_TLB 4		/* perform the flush described by flush_* globals */
#define IPI_BOOT 8		/* kick a secondary CPU out of its boot spin */

/* Sentinel meaning "flush everything" for flush_tlb_common()'s
 * mm/vma arguments (never a valid kernel pointer). */
#define FLUSH_ALL (void*)0xffffffff
27 | ||
/* Vector of locks used for various atomic operations */
/* LOCK_COUNT slots, all starting unlocked; which lock protects which
 * operation is decided by the arch's atomic helpers elsewhere. */
spinlock_t cris_atomic_locks[] = {
	[0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
};
51533b61 MS |
32 | |
/* CPU masks */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;	/* CPUs physically present */
EXPORT_SYMBOL(phys_cpu_present_map);

/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;	/* id of the CPU currently being brought up */
/* Idle thread handed to the booting CPU; read by secondary start code,
 * cleared once the CPU shows up online. */
volatile struct thread_info *smp_init_current_idle_thread;

/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);	/* serializes smp_call_function() callers */
static DEFINE_SPINLOCK(tlbstate_lock);	/* protects flush_mm/flush_vma/flush_addr */

/* Descriptor of a pending cross-CPU function call. */
struct call_data_struct {
	void (*func) (void *info);	/* function to run on remote CPUs */
	void *info;			/* opaque argument passed to func */
	int wait;			/* nonzero: sender waits for completion */
};

/* Published under call_lock by smp_call_function(); read in the IPI handler. */
static struct call_data_struct * call_data;

/* Parameters of the TLB flush currently being broadcast
 * (valid while tlbstate_lock is held by flush_tlb_common()). */
static struct mm_struct* flush_mm;
static struct vm_area_struct* flush_vma;
static unsigned long flush_addr;
56 | ||
/* Mode registers */
/* Per-CPU interrupt-controller register banks, indexed by CPU id.
 * NOTE(review): only two banks are listed, so this presumably assumes
 * at most 2 CPUs even though the array is NR_CPUS long — confirm. */
static unsigned long irq_regs[NR_CPUS] = {
	regi_irq,
	regi_irq2
};
62 | ||
static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);

/* irqaction for the IPI vector; registered in smp_prepare_cpus(). */
static struct irqaction irq_ipi = {
	.handler = crisv32_ipi_interrupt,
	.flags = IRQF_DISABLED,
	.name = "ipi",
};
51533b61 MS |
70 | |
/* Arch-local init hooks run by each secondary CPU in smp_callin(). */
extern void cris_mmu_init(void);
extern void cris_timer_init(void);
73 | ||
/* SMP initialization */
/* Register the IPI interrupt handler and mark CPUs 0..max_cpus-1 as
 * physically present. Called once on the boot CPU. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/* From now on we can expect IPIs so set them up */
	setup_irq(IPI_INTR_VECT, &irq_ipi);

	/* Mark all possible CPUs as present */
	for (i = 0; i < max_cpus; i++)
		cpumask_set_cpu(i, &phys_cpu_present_map);
}
86 | ||
/* Re-point the boot CPU's MMU at its per-CPU PGD and mark CPU 0
 * online/present/possible. */
void smp_prepare_boot_cpu(void)
{
	/* PGD pointer has moved after per_cpu initialization so
	 * update the MMU.
	 */
	pgd_t **pgd;
	pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());

	/* Write the PGD location into both MMU register banks
	 * (presumably instruction and data sides — confirm against
	 * hwregs/supp_reg.h). */
	SUPP_BANK_SEL(1);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	SUPP_BANK_SEL(2);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);

	set_cpu_online(0, true);
	cpumask_set_cpu(0, &phys_cpu_present_map);
	set_cpu_possible(0, true);
}
104 | ||
/* All secondaries are up; nothing further to finalize on this arch. */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
108 | ||
109 | /* Bring one cpu online.*/ | |
110 | static int __init | |
a4cfc31d | 111 | smp_boot_one_cpu(int cpuid, struct task_struct idle) |
51533b61 MS |
112 | { |
113 | unsigned timeout; | |
8aebe21e | 114 | cpumask_t cpu_mask; |
51533b61 | 115 | |
8aebe21e | 116 | cpumask_clear(&cpu_mask); |
718d6114 | 117 | task_thread_info(idle)->cpu = cpuid; |
51533b61 MS |
118 | |
119 | /* Information to the CPU that is about to boot */ | |
718d6114 | 120 | smp_init_current_idle_thread = task_thread_info(idle); |
51533b61 MS |
121 | cpu_now_booting = cpuid; |
122 | ||
538380da | 123 | /* Kick it */ |
8aebe21e KM |
124 | set_cpu_online(cpuid, true); |
125 | cpumask_set_cpu(cpuid, &cpu_mask); | |
538380da | 126 | send_ipi(IPI_BOOT, 0, cpu_mask); |
8aebe21e | 127 | set_cpu_online(cpuid, false); |
538380da | 128 | |
51533b61 MS |
129 | /* Wait for CPU to come online */ |
130 | for (timeout = 0; timeout < 10000; timeout++) { | |
131 | if(cpu_online(cpuid)) { | |
132 | cpu_now_booting = 0; | |
133 | smp_init_current_idle_thread = NULL; | |
134 | return 0; /* CPU online */ | |
135 | } | |
136 | udelay(100); | |
137 | barrier(); | |
138 | } | |
139 | ||
51533b61 MS |
140 | printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid); |
141 | return -1; | |
142 | } | |
143 | ||
/* Secondary CPUs starts using C here. Here we need to setup CPU
 * specific stuff such as the local timer and the MMU. */
void __init smp_callin(void)
{
	int cpu = cpu_now_booting;	/* id assigned by smp_boot_one_cpu() */
	reg_intr_vect_rw_mask vect_mask = {0};

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Set up MMU */
	cris_mmu_init();
	__flush_tlb_all();

	/* Setup local timer. */
	cris_timer_init();

	/* Enable IRQ and idle */
	/* Mask everything first, then unmask only the IPI and timer vectors. */
	REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
	crisv32_unmask_irq(IPI_INTR_VECT);
	crisv32_unmask_irq(TIMER0_INTR_VECT);
	preempt_disable();
	notify_cpu_starting(cpu);
	local_irq_enable();

	/* Publish ourselves as online; smp_boot_one_cpu() polls for this. */
	set_cpu_online(cpu, true);
	cpu_startup_entry(CPUHP_ONLINE);
}
173 | ||
/* Stop execution on this CPU.*/
/* Disables interrupts and halts permanently; IPI callback used by
 * smp_send_stop(). 'dummy' is the unused call_data info argument. */
void stop_this_cpu(void* dummy)
{
	local_irq_disable();
	asm volatile("halt");
}
180 | ||
/* Other calls */
/* Halt all other CPUs (e.g. on shutdown/panic); does not wait. */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
186 | ||
/* Changing the profiling timer rate is not supported on this arch. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
191 | ||
192 | ||
/* cache_decay_ticks is used by the scheduler to decide if a process
 * is "hot" on one CPU. A higher value means a higher penalty to move
 * a process to another CPU. Our cache is rather small so we report
 * 1 tick.
 */
unsigned long cache_decay_ticks = 1;
199 | ||
/* Generic hotplug entry point: boot one secondary CPU with its idle
 * task. Returns 0 if it came online, -ENOSYS otherwise. */
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	smp_boot_one_cpu(cpu, tidle);
	return cpu_online(cpu) ? 0 : -ENOSYS;
}
205 | ||
/* Ask 'cpu' to run the scheduler; fire-and-forget (wait=0). */
void smp_send_reschedule(int cpu)
{
	cpumask_t cpu_mask;
	cpumask_clear(&cpu_mask);
	cpumask_set_cpu(cpu, &cpu_mask);
	send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}
213 | ||
/* TLB flushing
 *
 * Flush needs to be done on the local CPU and on any other CPU that
 * may have the same mapping. The mm->cpu_vm_mask is used to keep track
 * of which CPUs that a specific process has been executed on.
 */
/* Broadcast a TLB flush to every other CPU that may hold the mapping.
 * The caller has already flushed locally. mm/vma may be FLUSH_ALL to
 * widen the flush (see crisv32_ipi_interrupt()). */
void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned long addr)
{
	unsigned long flags;
	cpumask_t cpu_mask;

	/* tlbstate_lock keeps flush_mm/flush_vma/flush_addr stable until
	 * every target has consumed them (send_ipi with wait=1). */
	spin_lock_irqsave(&tlbstate_lock, flags);
	cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
	/* This CPU was flushed by the caller already. */
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	flush_mm = mm;
	flush_vma = vma;
	flush_addr = addr;
	send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
	spin_unlock_irqrestore(&tlbstate_lock, flags);
}
234 | ||
/* Flush the whole TLB locally, then on all other CPUs. */
void flush_tlb_all(void)
{
	__flush_tlb_all();
	flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}
240 | ||
/* Flush all of mm's mappings locally and on CPUs in its cpu_vm_mask,
 * then shrink that mask to just this CPU. */
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
	flush_tlb_common(mm, FLUSH_ALL, 0);
	/* No more mappings in other CPUs */
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
249 | ||
/* Flush a single page mapping locally and on the mm's other CPUs. */
void flush_tlb_page(struct vm_area_struct *vma,
		    unsigned long addr)
{
	__flush_tlb_page(vma, addr);
	flush_tlb_common(vma->vm_mm, vma, addr);
}
256 | ||
/* Inter processor interrupts
 *
 * The IPIs are used for:
 * * Force a schedule on a CPU
 * * FLush TLB on other CPUs
 * * Call a function on other CPUs
 */

/* Post 'vector' into the rw_ipi register of every online CPU in
 * cpu_mask. If 'wait', poll each target (up to ~100 ms) until it
 * clears its vector. Returns 0, or -ETIMEDOUT if a target is stuck. */
int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
	int i = 0;
	/* NOTE(review): the initial read uses irq_regs[0] (i == 0)
	 * regardless of the target, and the same value is reused for
	 * every write below — presumably all banks read back the same
	 * pending vector here; confirm against the intr_vect docs. */
	reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
	int ret = 0;

	/* Calculate CPUs to send to. */
	cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask);

	/* Send the IPI. */
	for_each_cpu(i, &cpu_mask)
	{
		ipi.vector |= vector;
		REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
	}

	/* Wait for IPI to finish on other CPUS */
	if (wait) {
		for_each_cpu(i, &cpu_mask) {
			int j;
			/* The handler clears the vector when done. */
			for (j = 0 ; j < 1000; j++) {
				ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
				if (!ipi.vector)
					break;
				udelay(100);
			}

			/* Timeout? */
			if (ipi.vector) {
				printk("SMP call timeout from %d to %d\n", smp_processor_id(), i);
				ret = -ETIMEDOUT;
				dump_stack();
			}
		}
	}
	return ret;
}
302 | ||
/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
/* Run func(info) on every other online CPU. If 'wait', block until
 * all targets have acknowledged the IPI. Returns send_ipi()'s result
 * (0 or -ETIMEDOUT). */
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	cpumask_t cpu_mask;
	struct call_data_struct data;
	int ret;

	/* Everyone but ourselves. */
	cpumask_setall(&cpu_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	/* call_lock keeps 'data' (stack-allocated!) published via
	 * call_data for exactly one caller at a time. */
	spin_lock(&call_lock);
	call_data = &data;
	ret = send_ipi(IPI_CALL, wait, cpu_mask);
	spin_unlock(&call_lock);

	return ret;
}
329 | ||
538380da | 330 | irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id) |
51533b61 MS |
331 | { |
332 | void (*func) (void *info) = call_data->func; | |
333 | void *info = call_data->info; | |
334 | reg_intr_vect_rw_ipi ipi; | |
335 | ||
336 | ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi); | |
337 | ||
184748cc PZ |
338 | if (ipi.vector & IPI_SCHEDULE) { |
339 | scheduler_ipi(); | |
340 | } | |
51533b61 | 341 | if (ipi.vector & IPI_CALL) { |
184748cc | 342 | func(info); |
51533b61 MS |
343 | } |
344 | if (ipi.vector & IPI_FLUSH_TLB) { | |
184748cc PZ |
345 | if (flush_mm == FLUSH_ALL) |
346 | __flush_tlb_all(); | |
347 | else if (flush_vma == FLUSH_ALL) | |
51533b61 | 348 | __flush_tlb_mm(flush_mm); |
184748cc | 349 | else |
51533b61 MS |
350 | __flush_tlb_page(flush_vma, flush_addr); |
351 | } | |
352 | ||
353 | ipi.vector = 0; | |
354 | REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi); | |
355 | ||
356 | return IRQ_HANDLED; | |
357 | } | |
358 |