Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/vfp/vfpmodule.c | |
3 | * | |
4 | * Copyright (C) 2004 ARM Limited. | |
5 | * Written by Deep Blue Solutions Limited. | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | */ | |
11 | #include <linux/module.h> | |
1da177e4 LT |
12 | #include <linux/types.h> |
13 | #include <linux/kernel.h> | |
14 | #include <linux/signal.h> | |
15 | #include <linux/sched.h> | |
16 | #include <linux/init.h> | |
d6551e88 RK |
17 | |
18 | #include <asm/thread_notify.h> | |
1da177e4 LT |
19 | #include <asm/vfp.h> |
20 | ||
21 | #include "vfpinstr.h" | |
22 | #include "vfp.h" | |
23 | ||
24 | /* | |
25 | * Our undef handlers (in entry.S) | |
26 | */ | |
27 | void vfp_testing_entry(void); | |
28 | void vfp_support_entry(void); | |
5d4cae5f | 29 | void vfp_null_entry(void); |
1da177e4 | 30 | |
5d4cae5f | 31 | void (*vfp_vector)(void) = vfp_null_entry; |
c6428464 | 32 | union vfp_state *last_VFP_context[NR_CPUS]; |
1da177e4 LT |
33 | |
34 | /* | |
35 | * Dual-use variable. | |
36 | * Used during startup: set to non-zero if the VFP checks fail. |
37 | * After startup: holds the VFP architecture version. |
38 | */ | |
39 | unsigned int VFP_arch; | |
40 | ||
0d782dc4 RK |
41 | /* |
42 | * Per-thread VFP initialization. | |
43 | */ | |
44 | static void vfp_thread_flush(struct thread_info *thread) | |
45 | { | |
46 | union vfp_state *vfp = &thread->vfpstate; | |
47 | unsigned int cpu; | |
48 | ||
49 | memset(vfp, 0, sizeof(union vfp_state)); | |
50 | ||
51 | vfp->hard.fpexc = FPEXC_EN; | |
52 | vfp->hard.fpscr = FPSCR_ROUND_NEAREST; | |
53 | ||
54 | /* | |
55 | * Disable VFP to ensure we initialize it first. We must ensure | |
56 | * that the modification of last_VFP_context[] and hardware disable | |
57 | * are done for the same CPU and without preemption. | |
58 | */ | |
59 | cpu = get_cpu(); | |
60 | if (last_VFP_context[cpu] == vfp) | |
61 | last_VFP_context[cpu] = NULL; | |
62 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | |
63 | put_cpu(); | |
64 | } | |
65 | ||
797245f5 | 66 | static void vfp_thread_exit(struct thread_info *thread) |
0d782dc4 RK |
67 | { |
68 | /* Release case: per-thread VFP cleanup. */ |
69 | union vfp_state *vfp = &thread->vfpstate; | |
797245f5 | 70 | unsigned int cpu = get_cpu(); |
0d782dc4 RK |
71 | |
72 | if (last_VFP_context[cpu] == vfp) | |
73 | last_VFP_context[cpu] = NULL; | |
797245f5 | 74 | put_cpu(); |
0d782dc4 RK |
75 | } |
76 | ||
77 | /* | |
78 | * For each 'cmd' this function can be called with, the following |
79 | * holds while it runs: |
80 | * THREAD_NOTIFY_SWITCH: |
81 | * - the previously running thread will not be scheduled onto another CPU. | |
82 | * - the next thread to be run (v) will not be running on another CPU. | |
83 | * - thread->cpu is the local CPU number | |
84 | * - not preemptible as we're called in the middle of a thread switch | |
85 | * THREAD_NOTIFY_FLUSH: | |
86 | * - the thread (v) will be running on the local CPU, so | |
87 | * v === current_thread_info() | |
88 | * - thread->cpu is the local CPU number at the time it is accessed, | |
89 | * but may change at any time. | |
90 | * - we could be preempted if TREE_PREEMPT_RCU is enabled, so |
91 | * it is unsafe to use thread->cpu. | |
797245f5 RK |
92 | * THREAD_NOTIFY_EXIT |
93 | * - the thread (v) will be running on the local CPU, so | |
94 | * v === current_thread_info() | |
95 | * - thread->cpu is the local CPU number at the time it is accessed, | |
96 | * but may change at any time. | |
97 | * - we could be preempted if TREE_PREEMPT_RCU is enabled, so |
98 | * it is unsafe to use thread->cpu. | |
0d782dc4 | 99 | */ |
d6551e88 | 100 | static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) |
1da177e4 | 101 | { |
d6551e88 | 102 | struct thread_info *thread = v; |
1da177e4 | 103 | |
681a4991 | 104 | if (likely(cmd == THREAD_NOTIFY_SWITCH)) { |
c6428464 CM |
105 | u32 fpexc = fmrx(FPEXC); |
106 | ||
107 | #ifdef CONFIG_SMP | |
0d782dc4 RK |
108 | unsigned int cpu = thread->cpu; |
109 | ||
c6428464 CM |
110 | /* |
111 | * On SMP, if VFP is enabled, save the old state in | |
112 | * case the thread migrates to a different CPU. The | |
113 | * restoring is done lazily. | |
114 | */ | |
228adef1 | 115 | if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) { |
c6428464 CM |
116 | vfp_save_state(last_VFP_context[cpu], fpexc); |
117 | last_VFP_context[cpu]->hard.cpu = cpu; | |
118 | } | |
119 | /* | |
120 | * On thread migration, just force a reload of the |
121 | * state on the new CPU in case the VFP registers | |
122 | * contain stale data. | |
123 | */ | |
124 | if (thread->vfpstate.hard.cpu != cpu) | |
125 | last_VFP_context[cpu] = NULL; | |
126 | #endif | |
127 | ||
681a4991 RK |
128 | /* |
129 | * Always disable VFP so we can lazily save/restore the | |
130 | * old state. | |
131 | */ | |
228adef1 | 132 | fmxr(FPEXC, fpexc & ~FPEXC_EN); |
681a4991 RK |
133 | return NOTIFY_DONE; |
134 | } | |
135 | ||
0d782dc4 RK |
136 | if (cmd == THREAD_NOTIFY_FLUSH) |
137 | vfp_thread_flush(thread); | |
138 | else | |
797245f5 | 139 | vfp_thread_exit(thread); |
681a4991 | 140 | |
d6551e88 | 141 | return NOTIFY_DONE; |
1da177e4 LT |
142 | } |
143 | ||
d6551e88 RK |
144 | static struct notifier_block vfp_notifier_block = { |
145 | .notifier_call = vfp_notifier, | |
146 | }; | |
147 | ||
1da177e4 LT |
148 | /* |
149 | * Raise a SIGFPE for the current process. | |
150 | * sicode describes the signal being raised. | |
151 | */ | |
152 | void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) | |
153 | { | |
154 | siginfo_t info; | |
155 | ||
156 | memset(&info, 0, sizeof(info)); | |
157 | ||
158 | info.si_signo = SIGFPE; | |
159 | info.si_code = sicode; | |
35d59fc5 | 160 | info.si_addr = (void __user *)(instruction_pointer(regs) - 4); |
1da177e4 LT |
161 | |
162 | /* | |
163 | * This matches NWFPE's behaviour, since it is not clear what |
164 | * these fields are used for. |
165 | */ | |
166 | current->thread.error_code = 0; | |
167 | current->thread.trap_no = 6; | |
168 | ||
da41119a | 169 | send_sig_info(SIGFPE, &info, current); |
1da177e4 LT |
170 | } |
171 | ||
c98929c0 | 172 | static void vfp_panic(char *reason, u32 inst) |
1da177e4 LT |
173 | { |
174 | int i; | |
175 | ||
176 | printk(KERN_ERR "VFP: Error: %s\n", reason); | |
177 | printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n", | |
c98929c0 | 178 | fmrx(FPEXC), fmrx(FPSCR), inst); |
1da177e4 LT |
179 | for (i = 0; i < 32; i += 2) |
180 | printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n", | |
181 | i, vfp_get_float(i), i+1, vfp_get_float(i+1)); | |
182 | } | |
183 | ||
184 | /* | |
185 | * Process bitmask of exception conditions. | |
186 | */ | |
187 | static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs) | |
188 | { | |
189 | int si_code = 0; | |
190 | ||
191 | pr_debug("VFP: raising exceptions %08x\n", exceptions); | |
192 | ||
7c6f2514 | 193 | if (exceptions == VFP_EXCEPTION_ERROR) { |
c98929c0 | 194 | vfp_panic("unhandled bounce", inst); |
1da177e4 LT |
195 | vfp_raise_sigfpe(0, regs); |
196 | return; | |
197 | } | |
198 | ||
199 | /* | |
c98929c0 | 200 | * Update the FPSCR with the additional exception flags. |
1da177e4 LT |
201 | * Comparison instructions always return at least one of |
202 | * these flags set. | |
203 | */ | |
1da177e4 LT |
204 | fpscr |= exceptions; |
205 | ||
206 | fmxr(FPSCR, fpscr); | |
207 | ||
208 | #define RAISE(stat,en,sig) \ | |
209 | if (exceptions & stat && fpscr & en) \ | |
210 | si_code = sig; | |
211 | ||
212 | /* | |
213 | * These are arranged in priority order, lowest to highest. |
214 | */ | |
e0f205d9 | 215 | RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV); |
1da177e4 LT |
216 | RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES); |
217 | RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND); | |
218 | RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF); | |
219 | RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV); | |
220 | ||
221 | if (si_code) | |
222 | vfp_raise_sigfpe(si_code, regs); | |
223 | } | |
224 | ||
225 | /* | |
226 | * Emulate a VFP instruction. | |
227 | */ | |
228 | static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) | |
229 | { | |
7c6f2514 | 230 | u32 exceptions = VFP_EXCEPTION_ERROR; |
1da177e4 LT |
231 | |
232 | pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr); | |
233 | ||
234 | if (INST_CPRTDO(inst)) { | |
235 | if (!INST_CPRT(inst)) { | |
236 | /* | |
237 | * CPDO | |
238 | */ | |
239 | if (vfp_single(inst)) { | |
240 | exceptions = vfp_single_cpdo(inst, fpscr); | |
241 | } else { | |
242 | exceptions = vfp_double_cpdo(inst, fpscr); | |
243 | } | |
244 | } else { | |
245 | /* | |
246 | * A CPRT instruction cannot appear in FPINST2, nor |
247 | * can it cause an exception. Therefore, we do not | |
248 | * have to emulate it. | |
249 | */ | |
250 | } | |
251 | } else { | |
252 | /* | |
253 | * A CPDT instruction cannot appear in FPINST2, nor can |
254 | * it cause an exception. Therefore, we do not have to | |
255 | * emulate it. | |
256 | */ | |
257 | } | |
928bd1b4 | 258 | return exceptions & ~VFP_NAN_FLAG; |
1da177e4 LT |
259 | } |
260 | ||
261 | /* | |
262 | * Package up a bounce condition. | |
263 | */ | |
c98929c0 | 264 | void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) |
1da177e4 | 265 | { |
c98929c0 | 266 | u32 fpscr, orig_fpscr, fpsid, exceptions; |
1da177e4 LT |
267 | |
268 | pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc); | |
269 | ||
270 | /* | |
c98929c0 CM |
271 | * At this point, FPEXC can have the following configuration: |
272 | * | |
273 | * EX DEX IXE | |
274 | * 0 1 x - synchronous exception | |
275 | * 1 x 0 - asynchronous exception | |
276 | * 1 x 1 - synchronous on VFP subarch 1 and asynchronous on later |
277 | * 0 0 1 - synchronous on VFP9 (non-standard subarch 1 | |
278 | * implementation), undefined otherwise | |
279 | * | |
280 | * Clear various bits and enable access to the VFP so we can | |
281 | * handle the bounce. | |
1da177e4 | 282 | */ |
c98929c0 | 283 | fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK)); |
1da177e4 | 284 | |
c98929c0 | 285 | fpsid = fmrx(FPSID); |
1da177e4 LT |
286 | orig_fpscr = fpscr = fmrx(FPSCR); |
287 | ||
288 | /* | |
c98929c0 | 289 | * Check for the special VFP subarch 1 and FPSCR.IXE bit case |
1da177e4 | 290 | */ |
c98929c0 CM |
291 | if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT) |
292 | && (fpscr & FPSCR_IXE)) { | |
293 | /* | |
294 | * Synchronous exception, emulate the trigger instruction | |
295 | */ | |
1da177e4 LT |
296 | goto emulate; |
297 | } | |
298 | ||
c98929c0 | 299 | if (fpexc & FPEXC_EX) { |
85d6943a | 300 | #ifndef CONFIG_CPU_FEROCEON |
c98929c0 CM |
301 | /* |
302 | * Asynchronous exception. The instruction is read from FPINST | |
303 | * and the interrupted instruction has to be restarted. | |
304 | */ | |
305 | trigger = fmrx(FPINST); | |
306 | regs->ARM_pc -= 4; | |
85d6943a | 307 | #endif |
c98929c0 CM |
308 | } else if (!(fpexc & FPEXC_DEX)) { |
309 | /* | |
310 | * Illegal combination of bits. It can be caused by an | |
311 | * unallocated VFP instruction but with FPSCR.IXE set and not | |
312 | * on VFP subarch 1. | |
313 | */ | |
314 | vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs); | |
f2255be8 | 315 | goto exit; |
c98929c0 | 316 | } |
1da177e4 LT |
317 | |
318 | /* | |
c98929c0 CM |
319 | * Modify fpscr to indicate the number of iterations remaining. |
320 | * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates | |
321 | * whether FPEXC.VECITR or FPSCR.LEN is used. | |
1da177e4 | 322 | */ |
c98929c0 | 323 | if (fpexc & (FPEXC_EX | FPEXC_VV)) { |
1da177e4 LT |
324 | u32 len; |
325 | ||
326 | len = fpexc + (1 << FPEXC_LENGTH_BIT); | |
327 | ||
328 | fpscr &= ~FPSCR_LENGTH_MASK; | |
329 | fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT); | |
330 | } | |
331 | ||
332 | /* | |
333 | * Handle the first FP instruction. We used to take note of the | |
334 | * FPEXC bounce reason, but this appears to be unreliable. | |
335 | * Emulate the bounced instruction instead. | |
336 | */ | |
c98929c0 | 337 | exceptions = vfp_emulate_instruction(trigger, fpscr, regs); |
1da177e4 | 338 | if (exceptions) |
c98929c0 | 339 | vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); |
1da177e4 LT |
340 | |
341 | /* | |
c98929c0 CM |
342 | * If there isn't a second FP instruction, exit now. Note that |
343 | * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. | |
1da177e4 | 344 | */ |
c98929c0 | 345 | if (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) |
f2255be8 | 346 | goto exit; |
1da177e4 LT |
347 | |
348 | /* | |
349 | * The barrier() here prevents fpinst2 being read | |
350 | * before the condition above. | |
351 | */ | |
352 | barrier(); | |
353 | trigger = fmrx(FPINST2); | |
1da177e4 LT |
354 | |
355 | emulate: | |
c98929c0 | 356 | exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs); |
1da177e4 LT |
357 | if (exceptions) |
358 | vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); | |
f2255be8 GD |
359 | exit: |
360 | preempt_enable(); | |
1da177e4 | 361 | } |
efe90d27 | 362 | |
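The EX/DEX/IXE table in the comment near the top of `VFP_bounce()` can be read as code. The sketch below is a hypothetical classifier, not part of this file; IXE refers to FPSCR.IXE, and the constants are the `FPEXC_*`/`FPSCR_*` macros used above:

```c
/* Hypothetical helper mapping the FPEXC table in VFP_bounce() to text. */
static const char *vfp_bounce_kind(u32 fpexc, u32 fpscr)
{
	if (!(fpexc & FPEXC_EX) && (fpexc & FPEXC_DEX))
		return "synchronous exception";
	if ((fpexc & FPEXC_EX) && !(fpscr & FPSCR_IXE))
		return "asynchronous exception";
	if (fpexc & FPEXC_EX)			/* EX=1, IXE=1 */
		return "synchronous on VFP subarch 1, asynchronous later";
	if (fpscr & FPSCR_IXE)			/* EX=0, DEX=0, IXE=1 */
		return "synchronous on VFP9, otherwise undefined";
	return "no bounce condition";
}
```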
8e140362 RK |
363 | static void vfp_enable(void *unused) |
364 | { | |
365 | u32 access = get_copro_access(); | |
366 | ||
367 | /* | |
368 | * Enable full access to VFP (cp10 and cp11) | |
369 | */ | |
370 | set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11)); | |
371 | } | |
372 | ||
fc0b7a20 BD |
373 | #ifdef CONFIG_PM |
374 | #include <linux/sysdev.h> | |
375 | ||
376 | static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) | |
377 | { | |
378 | struct thread_info *ti = current_thread_info(); | |
379 | u32 fpexc = fmrx(FPEXC); | |
380 | ||
381 | /* if vfp is on, then save state for resumption */ | |
382 | if (fpexc & FPEXC_EN) { | |
383 | printk(KERN_DEBUG "%s: saving vfp state\n", __func__); | |
384 | vfp_save_state(&ti->vfpstate, fpexc); | |
385 | ||
386 | /* disable, just in case */ | |
387 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | |
388 | } | |
389 | ||
390 | /* clear any information we had about last context state */ | |
391 | memset(last_VFP_context, 0, sizeof(last_VFP_context)); | |
392 | ||
393 | return 0; | |
394 | } | |
395 | ||
396 | static int vfp_pm_resume(struct sys_device *dev) | |
397 | { | |
398 | /* ensure we have access to the vfp */ | |
399 | vfp_enable(NULL); | |
400 | ||
401 | /* and disable it to ensure the next usage restores the state */ | |
402 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | |
403 | ||
404 | return 0; | |
405 | } | |
406 | ||
407 | static struct sysdev_class vfp_pm_sysclass = { | |
408 | .name = "vfp", | |
409 | .suspend = vfp_pm_suspend, | |
410 | .resume = vfp_pm_resume, | |
411 | }; | |
412 | ||
413 | static struct sys_device vfp_pm_sysdev = { | |
414 | .cls = &vfp_pm_sysclass, | |
415 | }; | |
416 | ||
417 | static void vfp_pm_init(void) | |
418 | { | |
419 | sysdev_class_register(&vfp_pm_sysclass); | |
420 | sysdev_register(&vfp_pm_sysdev); | |
421 | } | |
422 | ||
423 | ||
424 | #else | |
425 | static inline void vfp_pm_init(void) { } | |
426 | #endif /* CONFIG_PM */ | |
427 | ||
3d1228ea CM |
428 | /* |
429 | * Synchronise the hardware VFP state of a thread other than current with the | |
430 | * saved one. This function is used by the ptrace mechanism. | |
431 | */ | |
432 | #ifdef CONFIG_SMP | |
433 | void vfp_sync_state(struct thread_info *thread) | |
434 | { | |
435 | /* | |
436 | * On SMP systems, the VFP state is automatically saved at every | |
437 | * context switch. We mark the thread VFP state as belonging to a | |
438 | * non-existent CPU so that the saved one will be reloaded when | |
439 | * needed. | |
440 | */ | |
441 | thread->vfpstate.hard.cpu = NR_CPUS; | |
442 | } | |
443 | #else | |
444 | void vfp_sync_state(struct thread_info *thread) | |
445 | { | |
446 | unsigned int cpu = get_cpu(); | |
447 | u32 fpexc = fmrx(FPEXC); | |
448 | ||
449 | /* | |
450 | * If VFP is enabled, the previous state was already saved and | |
451 | * last_VFP_context updated. | |
452 | */ | |
453 | if (fpexc & FPEXC_EN) | |
454 | goto out; | |
455 | ||
456 | if (!last_VFP_context[cpu]) | |
457 | goto out; | |
458 | ||
459 | /* | |
460 | * Save the last VFP state on this CPU. | |
461 | */ | |
462 | fmxr(FPEXC, fpexc | FPEXC_EN); | |
463 | vfp_save_state(last_VFP_context[cpu], fpexc); | |
464 | fmxr(FPEXC, fpexc); | |
465 | ||
466 | /* | |
467 | * Set the context to NULL to force a reload the next time the thread | |
468 | * uses the VFP. | |
469 | */ | |
470 | last_VFP_context[cpu] = NULL; | |
471 | ||
472 | out: | |
473 | put_cpu(); | |
474 | } | |
475 | #endif | |
476 | ||
8e140362 RK |
477 | #include <linux/smp.h> |
478 | ||
1da177e4 LT |
479 | /* |
480 | * VFP support code initialisation. | |
481 | */ | |
482 | static int __init vfp_init(void) | |
483 | { | |
484 | unsigned int vfpsid; | |
efe90d27 | 485 | unsigned int cpu_arch = cpu_architecture(); |
efe90d27 | 486 | |
c98929c0 CM |
487 | if (cpu_arch >= CPU_ARCH_ARMv6) |
488 | vfp_enable(NULL); | |
1da177e4 LT |
489 | |
490 | /* | |
491 | * First check that there is a VFP that we can use. | |
492 | * The handler is already set up to just log calls, so |
493 | * we just need to read the VFPSID register. | |
494 | */ | |
5d4cae5f | 495 | vfp_vector = vfp_testing_entry; |
b9338a78 | 496 | barrier(); |
1da177e4 | 497 | vfpsid = fmrx(FPSID); |
8e140362 | 498 | barrier(); |
5d4cae5f | 499 | vfp_vector = vfp_null_entry; |
1da177e4 LT |
500 | |
501 | printk(KERN_INFO "VFP support v0.3: "); | |
c98929c0 | 502 | if (VFP_arch) |
1da177e4 | 503 | printk("not present\n"); |
c98929c0 | 504 | else if (vfpsid & FPSID_NODOUBLE) { |
1da177e4 LT |
505 | printk("no double precision support\n"); |
506 | } else { | |
8691e5a8 | 507 | smp_call_function(vfp_enable, NULL, 1); |
8e140362 | 508 | |
1da177e4 LT |
509 | VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ |
510 | printk("implementor %02x architecture %d part %02x variant %x rev %x\n", | |
511 | (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, | |
512 | (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT, | |
513 | (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, | |
514 | (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, | |
515 | (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); | |
efe90d27 | 516 | |
1da177e4 | 517 | vfp_vector = vfp_support_entry; |
d6551e88 RK |
518 | |
519 | thread_register_notifier(&vfp_notifier_block); | |
fc0b7a20 | 520 | vfp_pm_init(); |
efe90d27 RK |
521 | |
522 | /* | |
523 | * We detected VFP, and the support code is | |
524 | * in place; report VFP support to userspace. | |
525 | */ | |
526 | elf_hwcap |= HWCAP_VFP; | |
7279dc3e CM |
527 | #ifdef CONFIG_VFPv3 |
528 | if (VFP_arch >= 3) { | |
529 | elf_hwcap |= HWCAP_VFPv3; | |
530 | ||
531 | /* | |
532 | * Check for VFPv3 D16. CPUs in this configuration | |
534 | * only have 16 x 64-bit registers. |
534 | */ | |
535 | if ((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) == 1) |
536 | elf_hwcap |= HWCAP_VFPv3D16; | |
537 | } | |
538 | #endif | |
2bedbdf4 CM |
539 | #ifdef CONFIG_NEON |
540 | /* | |
541 | * Check for the presence of the Advanced SIMD | |
542 | * load/store instructions, integer and single | |
543 | * precision floating point operations. | |
544 | */ | |
545 | if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) | |
546 | elf_hwcap |= HWCAP_NEON; | |
547 | #endif | |
1da177e4 LT |
548 | } |
549 | return 0; | |
550 | } | |
551 | ||
552 | late_initcall(vfp_init); |
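The `HWCAP_*` bits set above reach userspace through the ELF auxiliary vector. A minimal hedged sketch of reading them, assuming glibc 2.16+ for `getauxval()` (older systems must walk the auxv by hand):

```c
#include <sys/auxv.h>
#include <asm/hwcap.h>		/* HWCAP_VFP, HWCAP_VFPv3, HWCAP_NEON */
#include <stdio.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("VFP:   %s\n", (hwcap & HWCAP_VFP)   ? "yes" : "no");
	printf("VFPv3: %s\n", (hwcap & HWCAP_VFPv3) ? "yes" : "no");
	printf("NEON:  %s\n", (hwcap & HWCAP_NEON)  ? "yes" : "no");
	return 0;
}
```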