/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"

/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);

void (*vfp_vector)(void) = vfp_null_entry;
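
/*
 * For each CPU, last_VFP_context[] (below) points at the vfp_state of the
 * thread whose VFP registers are currently loaded on that CPU. The hardware
 * state is saved and reloaded lazily (see vfp_notifier() and
 * vfp_sync_state()) rather than on every context switch.
 */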
union vfp_state *last_VFP_context[NR_CPUS];

/*
 * Dual-use variable.
 * Used in startup: set to non-zero if VFP checks fail
 * After startup, holds VFP architecture
 */
unsigned int VFP_arch;

/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu = get_cpu();

	memset(vfp, 0, sizeof(union vfp_state));

	vfp->hard.fpexc = FPEXC_EN;
	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

	/*
	 * Disable VFP to ensure we initialize it first.  We must ensure
	 * that the modification of last_VFP_context[] and hardware disable
	 * are done for the same CPU and without preemption.
	 */
	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	put_cpu();
}
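
/*
 * Because vfp_thread_flush() leaves FPEXC.EN clear, the flushed thread's
 * next VFP instruction takes the undefined-instruction trap into
 * vfp_support_entry (entry.S), which reloads the freshly initialized state
 * from thread->vfpstate.
 */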

static void vfp_thread_exit(struct thread_info *thread)
{
	/* release case: Per-thread VFP cleanup. */
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu = get_cpu();

	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;
	put_cpu();
}

/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 * THREAD_NOTIFY_SWITCH:
 *  - the previously running thread will not be scheduled onto another CPU.
 *  - the next thread to be run (v) will not be running on another CPU.
 *  - thread->cpu is the local CPU number
 *  - not preemptible as we're called in the middle of a thread switch
 * THREAD_NOTIFY_FLUSH:
 *  - the thread (v) will be running on the local CPU, so
 *    v === current_thread_info()
 *  - thread->cpu is the local CPU number at the time it is accessed,
 *    but may change at any time.
 *  - we could be preempted if tree preempt rcu is enabled, so
 *    it is unsafe to use thread->cpu.
 * THREAD_NOTIFY_EXIT:
 *  - the thread (v) will be running on the local CPU, so
 *    v === current_thread_info()
 *  - thread->cpu is the local CPU number at the time it is accessed,
 *    but may change at any time.
 *  - we could be preempted if tree preempt rcu is enabled, so
 *    it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;

	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
		u32 fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		unsigned int cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
			vfp_save_state(last_VFP_context[cpu], fpexc);
			last_VFP_context[cpu]->hard.cpu = cpu;
		}
		/*
		 * Thread migration, just force the reloading of the
		 * state on the new CPU in case the VFP registers
		 * contain stale data.
		 */
		if (thread->vfpstate.hard.cpu != cpu)
			last_VFP_context[cpu] = NULL;
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		return NOTIFY_DONE;
	}

	if (cmd == THREAD_NOTIFY_FLUSH)
		vfp_thread_flush(thread);
	else
		vfp_thread_exit(thread);

	return NOTIFY_DONE;
}

static struct notifier_block vfp_notifier_block = {
	.notifier_call	= vfp_notifier,
};
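
/*
 * This notifier block is registered on the thread_notify chain in
 * vfp_init() below, so once a VFP is detected, vfp_notifier() runs for
 * every thread switch, flush and exit.
 */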

/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));

	info.si_signo = SIGFPE;
	info.si_code = sicode;
	info.si_addr = (void __user *)(instruction_pointer(regs) - 4);
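
	/*
	 * instruction_pointer(regs) points past the trapped instruction,
	 * so in ARM state the faulting VFP instruction is the word 4 bytes
	 * back; that is the address reported in si_addr above.
	 */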

	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this should be.
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_info(SIGFPE, &info, current);
}

static void vfp_panic(char *reason, u32 inst)
{
	int i;

	printk(KERN_ERR "VFP: Error: %s\n", reason);
	printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), inst);
	for (i = 0; i < 32; i += 2)
		printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}

/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce", inst);
		vfp_raise_sigfpe(0, regs);
		return;
	}

	/*
	 * Update the FPSCR with the additional exception flags.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

#define RAISE(stat,en,sig)				\
	if (exceptions & stat && fpscr & en)		\
		si_code = sig;

	/*
	 * These are arranged in priority order, least to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
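
	/*
	 * Each matching RAISE overwrites si_code, so when several enabled
	 * exception flags are set at once the last match, i.e. the highest
	 * priority one (invalid operation), decides which si_code is
	 * delivered below.
	 */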
	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction can not appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not
			 * have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction can not appear in FPINST2, nor can
		 * it cause an exception.  Therefore, we do not have to
		 * emulate it.
		 */
	}
	return exceptions & ~VFP_NAN_FLAG;
}

/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
#endif
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		return;
	}

	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}
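
	/*
	 * A note on the arithmetic above: adding 1 << FPEXC_LENGTH_BIT
	 * increments the vector-length field embedded in fpexc by one,
	 * which appears intended to convert the remaining-iteration count
	 * in FPEXC into the length-minus-one encoding that FPSCR.LEN uses,
	 * before the field is masked and shifted into the FPSCR position.
	 */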

	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
		return;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
}

static void vfp_enable(void *unused)
{
	u32 access = get_copro_access();

	/*
	 * Enable full access to VFP (cp10 and cp11)
	 */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
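
/*
 * vfp_enable() only affects the CPU it runs on, so vfp_init() below also
 * invokes it on each secondary CPU via smp_call_function().
 */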

#ifdef CONFIG_PM
#include <linux/sysdev.h>

static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);

	/* if vfp is on, then save state for resumption */
	if (fpexc & FPEXC_EN) {
		printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
		vfp_save_state(&ti->vfpstate, fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	}

	/* clear any information we had about last context state */
	memset(last_VFP_context, 0, sizeof(last_VFP_context));

	return 0;
}

static int vfp_pm_resume(struct sys_device *dev)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);

	return 0;
}

static struct sysdev_class vfp_pm_sysclass = {
	.name		= "vfp",
	.suspend	= vfp_pm_suspend,
	.resume		= vfp_pm_resume,
};

static struct sys_device vfp_pm_sysdev = {
	.cls	= &vfp_pm_sysclass,
};

static void vfp_pm_init(void)
{
	sysdev_class_register(&vfp_pm_sysclass);
	sysdev_register(&vfp_pm_sysdev);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */

#ifdef CONFIG_SMP
/*
 * Synchronise the hardware VFP state of a thread other than current with the
 * saved one. This function is used by the ptrace mechanism.
 */
void vfp_sync_state(struct thread_info *thread)
{
	/*
	 * On SMP systems, the VFP state is automatically saved at every
	 * context switch. We mark the thread VFP state as belonging to a
	 * non-existent CPU so that the saved one will be reloaded when
	 * needed.
	 */
	thread->vfpstate.hard.cpu = NR_CPUS;
}
#else
void vfp_sync_state(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();
	u32 fpexc = fmrx(FPEXC);

	/*
	 * If VFP is enabled, the previous state was already saved and
	 * last_VFP_context updated.
	 */
	if (fpexc & FPEXC_EN)
		goto out;

	if (!last_VFP_context[cpu])
		goto out;

	/*
	 * Save the last VFP state on this CPU.
	 */
	fmxr(FPEXC, fpexc | FPEXC_EN);
	vfp_save_state(last_VFP_context[cpu], fpexc);
	fmxr(FPEXC, fpexc);

	/*
	 * Set the context to NULL to force a reload the next time the thread
	 * uses the VFP.
	 */
	last_VFP_context[cpu] = NULL;

out:
	put_cpu();
}
#endif

#include <linux/smp.h>

/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		vfp_enable(NULL);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		smp_call_function(vfp_enable, NULL, 1);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
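
		/*
		 * The HWCAP_* bits collected in elf_hwcap are exported to
		 * userspace through the ELF auxiliary vector (AT_HWCAP) and
		 * /proc/cpuinfo, so programs can test for VFP/NEON support
		 * before using it.
		 */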
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16. CPUs in this configuration
			 * only have 16 x 64bit registers.
			 */
			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16;
		}
#endif
#ifdef CONFIG_NEON
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations.
		 */
		if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
			elf_hwcap |= HWCAP_NEON;
#endif
	}
	return 0;
}

late_initcall(vfp_init);
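
/*
 * Example (illustration only, not part of the original file): userspace
 * linked against glibc 2.16 or later can test the capability bits set
 * above through the ELF auxiliary vector:
 *
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap.h>
 *
 *	if (getauxval(AT_HWCAP) & HWCAP_VFP)
 *		... safe to use VFP code paths ...
 */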