// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 */
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/user.h>
#include <linux/export.h>
#include <linux/perf_event.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/traps.h>
#include <asm/vfp.h>
#include <asm/neon.h>

#include "vfpinstr.h"
#include "vfp.h"

static bool have_vfp __ro_after_init;

/*
 * Dual-use variable.
 * Used during startup: set to non-zero if the VFP checks fail.
 * After startup, holds the VFP architecture version.
 */
static unsigned int VFP_arch;

#ifdef CONFIG_CPU_FEROCEON
extern unsigned int VFP_arch_feroceon __alias(VFP_arch);
#endif
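/*
 * Note: __alias() makes VFP_arch_feroceon share VFP_arch's storage, giving
 * the same variable a second linkage name -- presumably so the
 * Feroceon-specific code elsewhere in the tree can reference it without
 * depending on this module's internal naming.
 */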

/*
 * The pointer to the vfpstate structure of the thread which currently
 * owns the context held in the VFP hardware, or NULL if the hardware
 * context is invalid.
 *
 * For UP, this is sufficient to tell which thread owns the VFP context.
 * However, for SMP, we also need to check the CPU number stored in the
 * saved state to catch migrations.
 */
union vfp_state *vfp_current_hw_state[NR_CPUS];

/*
 * Is the thread's most up-to-date state stored in this CPU's hardware?
 * Must be called from non-preemptible context.
 */
static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
{
#ifdef CONFIG_SMP
	if (thread->vfpstate.hard.cpu != cpu)
		return false;
#endif
	return vfp_current_hw_state[cpu] == &thread->vfpstate;
}

/*
 * Force a reload of the VFP context from the thread structure. We do
 * this by ensuring that access to the VFP hardware is disabled and by
 * clearing vfp_current_hw_state. Must be called from non-preemptible context.
 */
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
{
	if (vfp_state_in_hw(cpu, thread)) {
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
		vfp_current_hw_state[cpu] = NULL;
	}
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}
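/*
 * NR_CPUS serves above as an "impossible" CPU number: once hard.cpu is set
 * to it, vfp_state_in_hw() fails on every CPU, which is what forces the
 * thread's next VFP use to reload the context from memory.
 */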

/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu;

	/*
	 * Disable VFP to ensure we initialize it first.  We must ensure
	 * that the modification of vfp_current_hw_state[] and hardware
	 * disable are done for the same CPU and without preemption.
	 *
	 * Do this first to ensure that preemption won't overwrite our
	 * state saving should access to the VFP be enabled at this point.
	 */
	cpu = get_cpu();
	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	put_cpu();

	memset(vfp, 0, sizeof(union vfp_state));

	vfp->hard.fpexc = FPEXC_EN;
	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
#ifdef CONFIG_SMP
	vfp->hard.cpu = NR_CPUS;
#endif
}

/*
 * Per-thread VFP cleanup (the release case, called when a thread exits).
 */
static void vfp_thread_exit(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu = get_cpu();

	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	put_cpu();
}

static void vfp_thread_copy(struct thread_info *thread)
{
	struct thread_info *parent = current_thread_info();

	vfp_sync_hwstate(parent);
	thread->vfpstate = parent->vfpstate;
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}
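/*
 * The vfp_sync_hwstate() call above matters: the parent's most recent
 * state may still live only in the hardware registers (a running thread's
 * state is only written back lazily), so it must be flushed to
 * parent->vfpstate before the structure is copied into the child.
 * Resetting hard.cpu then marks the child's copy as not resident on any CPU.
 */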

/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	u32 fpexc;
#ifdef CONFIG_SMP
	unsigned int cpu;
#endif

	switch (cmd) {
	case THREAD_NOTIFY_SWITCH:
		fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		break;

	case THREAD_NOTIFY_FLUSH:
		vfp_thread_flush(thread);
		break;

	case THREAD_NOTIFY_EXIT:
		vfp_thread_exit(thread);
		break;

	case THREAD_NOTIFY_COPY:
		vfp_thread_copy(thread);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block vfp_notifier_block = {
	.notifier_call = vfp_notifier,
};

/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this is used for.
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_fault(SIGFPE, sicode,
		       (void __user *)(instruction_pointer(regs) - 4),
		       current);
}

static void vfp_panic(char *reason, u32 inst)
{
	int i;

	pr_err("VFP: Error: %s\n", reason);
	pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), inst);
	for (i = 0; i < 32; i += 2)
		pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}

/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce", inst);
		vfp_raise_sigfpe(FPE_FLTINV, regs);
		return;
	}

	/*
	 * If any of the status flags are set, update the FPSCR.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
		fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

#define RAISE(stat,en,sig) \
	if (exceptions & stat && fpscr & en) \
		si_code = sig;
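	/*
	 * RAISE() latches a signal code only when both the cumulative
	 * exception bit (stat) and the corresponding trap-enable bit (en)
	 * are set in FPSCR; since later invocations overwrite si_code, the
	 * last match, i.e. the highest-priority enabled exception, wins.
	 */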

	/*
	 * These are arranged in priority order, lowest to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction cannot appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not
			 * have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction cannot appear in FPINST2, nor can
		 * it cause an exception.  Therefore, we do not have to
		 * emulate it.
		 */
	}
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
	return exceptions & ~VFP_NAN_FLAG;
}
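/*
 * For reference, in ARM coprocessor terms: CPDO is a coprocessor data
 * operation (arithmetic), CPRT a coprocessor register transfer to/from the
 * ARM core, and CPDT a coprocessor data transfer (load/store).  Per the
 * comments above, only CPDO encodings can land in FPINST/FPINST2, which is
 * why they are the only class that is actually emulated here.
 */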

/*
 * Package up a bounce condition.
 */
static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		return;
	}

	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}
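	/*
	 * The arithmetic above transplants the remaining-iteration count:
	 * adding 1 << FPEXC_LENGTH_BIT increments the VECITR field of the
	 * copied FPEXC value in place (compensating for the differing bias
	 * of the VECITR and FPSCR.LEN encodings), and the masked field is
	 * then shifted into FPSCR.LEN so the emulation replays only the
	 * vector iterations that are still outstanding.
	 */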

	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
		return;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
}

static void vfp_enable(void *unused)
{
	u32 access;

	BUG_ON(preemptible());
	access = get_copro_access();

	/*
	 * Enable full access to VFP (cp10 and cp11)
	 */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
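/*
 * The BUG_ON(preemptible()) above is load-bearing: get_copro_access() and
 * set_copro_access() operate on the current CPU's coprocessor access
 * register (CPACR), so being migrated between the read and the write would
 * apply the update to the wrong CPU.
 */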

/*
 * Called by platforms on which we want to disable VFP because it may not be
 * present on all CPUs within an SMP complex.  Needs to be called prior to
 * vfp_init().
 */
void __init vfp_disable(void)
{
	if (VFP_arch) {
		pr_debug("%s: should be called prior to vfp_init\n", __func__);
		return;
	}
	VFP_arch = 1;
}

#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);

	/* if vfp is on, then save state for resumption */
	if (fpexc & FPEXC_EN) {
		pr_debug("%s: saving vfp state\n", __func__);
		vfp_save_state(&ti->vfpstate, fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	} else if (vfp_current_hw_state[ti->cpu]) {
#ifndef CONFIG_SMP
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
		fmxr(FPEXC, fpexc);
#endif
	}

	/* clear any information we had about last context state */
	vfp_current_hw_state[ti->cpu] = NULL;

	return 0;
}
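/*
 * The #ifndef CONFIG_SMP branch above covers the UP-only lazy case: the
 * hardware may still hold the state of a thread other than current with
 * the unit disabled, so the unit is briefly re-enabled to save that state.
 * On SMP that state was already written back at the thread switch, so only
 * the vfp_current_hw_state pointer needs clearing.
 */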

static void vfp_pm_resume(void)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}

static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		vfp_pm_suspend();
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		vfp_pm_resume();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vfp_cpu_pm_notifier_block = {
	.notifier_call = vfp_cpu_pm_notifier,
};

static void vfp_pm_init(void)
{
	cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

/*
 * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
 * with the hardware state.
 */
void vfp_sync_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	local_bh_disable();

	if (vfp_state_in_hw(cpu, thread)) {
		u32 fpexc = fmrx(FPEXC);

		/*
		 * Save the last VFP state on this CPU.
		 */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
		fmxr(FPEXC, fpexc);
	}

	local_bh_enable();
	put_cpu();
}

/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	vfp_force_reload(cpu, thread);

	put_cpu();
}

/*
 * Save the current VFP state into the provided structures and prepare
 * for entry into a new function (signal handler).
 */
int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
				    struct user_vfp_exc *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;

	/* Ensure that the saved hwstate is up-to-date. */
	vfp_sync_hwstate(thread);

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));

	/*
	 * Copy the status and control register.
	 */
	ufp->fpscr = hwstate->fpscr;

	/*
	 * Copy the exception registers.
	 */
	ufp_exc->fpexc = hwstate->fpexc;
	ufp_exc->fpinst = hwstate->fpinst;
	ufp_exc->fpinst2 = hwstate->fpinst2;

	/* Ensure that VFP is disabled. */
	vfp_flush_hwstate(thread);

	/*
	 * As per the PCS, clear the length and stride bits for function
	 * entry.
	 */
	hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
	return 0;
}
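/*
 * A sketch of the intended call site, the signal delivery path (the
 * surrounding code is illustrative, not taken from this file):
 *
 *	struct user_vfp ufp;
 *	struct user_vfp_exc ufp_exc;
 *
 *	if (vfp_preserve_user_clear_hwstate(&ufp, &ufp_exc))
 *		return 1;	// failed to build the signal frame
 *	// ... copy ufp/ufp_exc out into the user-mode signal frame ...
 */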

/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
	unsigned long fpexc;

	/* Disable VFP to avoid corrupting the new thread state. */
	vfp_flush_hwstate(thread);

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
	/*
	 * Copy the status and control register.
	 */
	hwstate->fpscr = ufp->fpscr;

	/*
	 * Sanitise and restore the exception registers.
	 */
	fpexc = ufp_exc->fpexc;

	/* Ensure the VFP is enabled. */
	fpexc |= FPEXC_EN;

	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
	hwstate->fpexc = fpexc;

	hwstate->fpinst = ufp_exc->fpinst;
	hwstate->fpinst2 = ufp_exc->fpinst2;

	return 0;
}
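/*
 * Clearing FPEXC_EX/FPEXC_FP2V above is the "sanitise" part: a forged or
 * stale EX bit coming back from userspace would otherwise make the next
 * exception entry treat whatever sits in FPINST/FPINST2 as a pending
 * instruction to emulate (see VFP_bounce(), which reads FPINST whenever
 * FPEXC_EX is set).
 */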

/*
 * VFP hardware can lose all context when a CPU goes offline.
 * As we will be running in SMP mode with CPU hotplug, we will save the
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware. The callbacks below are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_dying_cpu(unsigned int cpu)
{
	vfp_current_hw_state[cpu] = NULL;
	return 0;
}

static int vfp_starting_cpu(unsigned int unused)
{
	vfp_enable(NULL);
	return 0;
}

static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
{
	/*
	 * If we reach this point, a floating point exception has been raised
	 * while running in kernel mode. If the NEON/VFP unit was enabled at the
	 * time, it means a VFP instruction has been issued that requires
	 * software assistance to complete, something which is not currently
	 * supported in kernel mode.
	 * If the NEON/VFP unit was disabled, and the location pointed to below
	 * is properly preceded by a call to kernel_neon_begin(), something has
	 * caused the task to be scheduled out and back in again. In this case,
	 * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
	 * be helpful in localizing the problem.
	 */
	if (fmrx(FPEXC) & FPEXC_EN)
		pr_crit("BUG: unsupported FP instruction in kernel mode\n");
	else
		pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
	pr_crit("FPEXC == 0x%08x\n", fmrx(FPEXC));
	return 1;
}

/*
 * vfp_support_entry - Handle VFP exception
 *
 * @regs:	pt_regs structure holding the register state at exception entry
 * @trigger:	The opcode of the instruction that triggered the exception
 *
 * Returns 0 if the exception was handled, or an error code otherwise.
 */
static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc;

	if (unlikely(!have_vfp))
		return -ENODEV;

	if (!user_mode(regs))
		return vfp_kmode_exception(regs, trigger);

	local_bh_disable();
	fpexc = fmrx(FPEXC);

	/*
	 * If the VFP unit was not enabled yet, we have to check whether the
	 * VFP state in the CPU's registers is the most recent VFP state
	 * associated with the process. On UP systems, we don't save the VFP
	 * state eagerly on a context switch, so we may need to save the
	 * VFP state to memory first, as it may belong to another process.
	 */
	if (!(fpexc & FPEXC_EN)) {
		/*
		 * Enable the VFP unit but mask the FP exception flag for the
		 * time being, so we can access all the registers.
		 */
		fpexc |= FPEXC_EN;
		fmxr(FPEXC, fpexc & ~FPEXC_EX);

		/*
		 * Check whether or not the VFP state in the CPU's registers is
		 * the most recent VFP state associated with this task. On SMP,
		 * migration may result in multiple CPUs holding VFP states
		 * that belong to the same task, but only the most recent one
		 * is valid.
		 */
		if (!vfp_state_in_hw(ti->cpu, ti)) {
			if (!IS_ENABLED(CONFIG_SMP) &&
			    vfp_current_hw_state[ti->cpu] != NULL) {
				/*
				 * This CPU is currently holding the most
				 * recent VFP state associated with another
				 * task, and we must save that to memory first.
				 */
				vfp_save_state(vfp_current_hw_state[ti->cpu],
					       fpexc);
			}

			/*
			 * We can now proceed with loading the task's VFP state
			 * from memory into the CPU registers.
			 */
			fpexc = vfp_load_state(&ti->vfpstate);
			vfp_current_hw_state[ti->cpu] = &ti->vfpstate;
#ifdef CONFIG_SMP
			/*
			 * Record that this CPU is now the one holding the most
			 * recent VFP state of the task.
			 */
			ti->vfpstate.hard.cpu = ti->cpu;
#endif
		}

		if (fpexc & FPEXC_EX)
			/*
			 * There is a pending exception: handle it via the
			 * bounce path below, branching out before we write
			 * an FPEXC value that would stop us from reading
			 * the relevant registers.
			 */
			goto bounce;

		/*
		 * No FP exception is pending: just enable the VFP and
		 * replay the instruction that trapped.
		 */
		fmxr(FPEXC, fpexc);
	} else {
		/* Check for synchronous or asynchronous exceptions */
		if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
			u32 fpscr = fmrx(FPSCR);

			/*
			 * On some implementations of the VFP subarch 1,
			 * setting FPSCR.IXE causes all the CDP instructions to
			 * be bounced synchronously without setting the
			 * FPEXC.EX bit
			 */
			if (!(fpscr & FPSCR_IXE)) {
				if (!(fpscr & FPSCR_LENGTH_MASK)) {
					pr_debug("not VFP\n");
					local_bh_enable();
					return -ENOEXEC;
				}
				fpexc |= FPEXC_DEX;
			}
		}
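		/*
		 * Reached either by falling through from the checks just
		 * above or via the 'goto bounce' in the not-yet-enabled
		 * path: skip past the trapped instruction and hand it to
		 * the emulation code.
		 */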
bounce:		regs->ARM_pc += 4;
		VFP_bounce(trigger, fpexc, regs);
	}

	local_bh_enable();
	return 0;
}

static struct undef_hook neon_support_hook[] = {{
	.instr_mask = 0xfe000000,
	.instr_val = 0xf2000000,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = 0,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xff100000,
	.instr_val = 0xf4000000,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = 0,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xef000000,
	.instr_val = 0xef000000,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = PSR_T_BIT,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xff100000,
	.instr_val = 0xf9000000,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = PSR_T_BIT,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xff000800,
	.instr_val = 0xfc000800,
	.cpsr_mask = 0,
	.cpsr_val = 0,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xff000800,
	.instr_val = 0xfd000800,
	.cpsr_mask = 0,
	.cpsr_val = 0,
	.fn = vfp_support_entry,
}, {
	.instr_mask = 0xff000800,
	.instr_val = 0xfe000800,
	.cpsr_mask = 0,
	.cpsr_val = 0,
	.fn = vfp_support_entry,
}};
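/*
 * Reading the table above: the first two entries match what appear to be
 * the ARM-state Advanced SIMD data-processing and element/structure
 * load/store encodings (cpsr_val = 0, i.e. only with the Thumb bit clear),
 * the next two their Thumb-state counterparts (cpsr_val = PSR_T_BIT), and
 * the last three unconditional encodings that look the same in either
 * state, hence cpsr_mask = 0.
 */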

static struct undef_hook vfp_support_hook = {
	.instr_mask = 0x0c000e00,
	.instr_val = 0x0c000a00,
	.fn = vfp_support_entry,
};

#ifdef CONFIG_KERNEL_MODE_NEON

/*
 * Kernel-side NEON support functions
 */
void kernel_neon_begin(void)
{
	struct thread_info *thread = current_thread_info();
	unsigned int cpu;
	u32 fpexc;

	local_bh_disable();

	/*
	 * Kernel mode NEON is only allowed outside of hardirq context with
	 * preemption and softirq processing disabled. This will make sure that
	 * the kernel mode NEON register contents never need to be preserved.
	 */
	BUG_ON(in_hardirq());
	cpu = __smp_processor_id();

	fpexc = fmrx(FPEXC) | FPEXC_EN;
	fmxr(FPEXC, fpexc);

	/*
	 * Save the userland NEON/VFP state. Under UP,
	 * the owner could be a task other than 'current'
	 */
	if (vfp_state_in_hw(cpu, thread))
		vfp_save_state(&thread->vfpstate, fpexc);
#ifndef CONFIG_SMP
	else if (vfp_current_hw_state[cpu] != NULL)
		vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif
	vfp_current_hw_state[cpu] = NULL;
}
EXPORT_SYMBOL(kernel_neon_begin);

void kernel_neon_end(void)
{
	/* Disable the NEON/VFP unit. */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	local_bh_enable();
}
EXPORT_SYMBOL(kernel_neon_end);
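
/*
 * Usage sketch for the pair above, as seen from a typical consumer such as
 * a crypto driver (may_use_simd() is the conventional guard from
 * <asm/simd.h>; the surrounding driver code is illustrative only):
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		// ... NEON-accelerated inner loop ...
 *		kernel_neon_end();
 *	} else {
 *		// ... scalar fallback ...
 *	}
 */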

#endif /* CONFIG_KERNEL_MODE_NEON */

static int __init vfp_detect(struct pt_regs *regs, unsigned int instr)
{
	VFP_arch = UINT_MAX;	/* mark as not present */
	regs->ARM_pc += 4;
	return 0;
}

static struct undef_hook vfp_detect_hook __initdata = {
	.instr_mask = 0x0c000e00,
	.instr_val = 0x0c000a00,
	.cpsr_mask = MODE_MASK,
	.cpsr_val = SVC_MODE,
	.fn = vfp_detect,
};
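/*
 * How detection works: vfp_init() below registers this hook and then reads
 * FPSID.  On a CPU without VFP, that fmrx() traps as an undefined
 * instruction while in SVC mode; vfp_detect() marks VFP as absent via
 * VFP_arch = UINT_MAX and steps the PC past the faulting instruction, so
 * execution resumes as if the read had succeeded.
 */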

/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();
	unsigned int isar6;

	/*
	 * Enable the access to the VFP on all online CPUs so the
	 * following test on FPSID will succeed.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	/*
	 * First check that there is a VFP that we can use.
	 * The vfp_detect_hook registered below handles the undefined
	 * instruction fault on CPUs without VFP, so we just need to
	 * read the VFPSID register.
	 */
	register_undef_hook(&vfp_detect_hook);
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	unregister_undef_hook(&vfp_detect_hook);

	pr_info("VFP support v0.3: ");
	if (VFP_arch) {
		pr_cont("not present\n");
		return 0;
	/* Extract the architecture on the CPUID scheme */
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		VFP_arch = vfpsid & FPSID_CPUID_ARCH_MASK;
		VFP_arch >>= FPSID_ARCH_BIT;
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if (IS_ENABLED(CONFIG_NEON) &&
		    (fmrx(MVFR1) & 0x000fff00) == 0x00011100) {
			elf_hwcap |= HWCAP_NEON;
			for (int i = 0; i < ARRAY_SIZE(neon_support_hook); i++)
				register_undef_hook(&neon_support_hook[i]);
		}

		if (IS_ENABLED(CONFIG_VFPv3)) {
			u32 mvfr0 = fmrx(MVFR0);
			if (((mvfr0 & MVFR0_DP_MASK) >> MVFR0_DP_BIT) == 0x2 ||
			    ((mvfr0 & MVFR0_SP_MASK) >> MVFR0_SP_BIT) == 0x2) {
				elf_hwcap |= HWCAP_VFPv3;
				/*
				 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
				 * this configuration only have 16 x 64bit
				 * registers.
				 */
				if ((mvfr0 & MVFR0_A_SIMD_MASK) == 1)
					/* also v4-D16 */
					elf_hwcap |= HWCAP_VFPv3D16;
				else
					elf_hwcap |= HWCAP_VFPD32;
			}

			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
				elf_hwcap |= HWCAP_VFPv4;
			if (((fmrx(MVFR1) & MVFR1_ASIMDHP_MASK) >> MVFR1_ASIMDHP_BIT) == 0x2)
				elf_hwcap |= HWCAP_ASIMDHP;
			if (((fmrx(MVFR1) & MVFR1_FPHP_MASK) >> MVFR1_FPHP_BIT) == 0x3)
				elf_hwcap |= HWCAP_FPHP;
		}

		/*
		 * Check for the presence of Advanced SIMD Dot Product
		 * instructions.
		 */
		isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
		if (cpuid_feature_extract_field(isar6, 4) == 0x1)
			elf_hwcap |= HWCAP_ASIMDDP;
		/*
		 * Check for the presence of Advanced SIMD floating point
		 * half-precision multiplication instructions.
		 */
		if (cpuid_feature_extract_field(isar6, 8) == 0x1)
			elf_hwcap |= HWCAP_ASIMDFHM;
		/*
		 * Check for the presence of Advanced SIMD Bfloat16
		 * floating point instructions.
		 */
		if (cpuid_feature_extract_field(isar6, 20) == 0x1)
			elf_hwcap |= HWCAP_ASIMDBF16;
		/*
		 * Check for the presence of Advanced SIMD and floating point
		 * Int8 matrix multiplication instructions.
		 */
		if (cpuid_feature_extract_field(isar6, 24) == 0x1)
			elf_hwcap |= HWCAP_I8MM;

	/* Extract the architecture version on the pre-CPUID scheme */
	} else {
		if (vfpsid & FPSID_NODOUBLE) {
			pr_cont("no double precision support\n");
			return 0;
		}

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
	}

	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
				  "arm/vfp:starting", vfp_starting_cpu,
				  vfp_dying_cpu);

	have_vfp = true;

	register_undef_hook(&vfp_support_hook);
	thread_register_notifier(&vfp_notifier_block);
	vfp_pm_init();

	/*
	 * We detected VFP, and the support code is
	 * in place; report VFP support to userspace.
	 */
	elf_hwcap |= HWCAP_VFP;

	pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
		(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
		VFP_arch,
		(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
		(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
		(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

	return 0;
}

core_initcall(vfp_init);