/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <asm/cputype.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"
/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);

void (*vfp_vector)(void) = vfp_null_entry;
/*
 * The pointer to the vfpstate structure of the thread which currently
 * owns the context held in the VFP hardware, or NULL if the hardware
 * context is invalid.
 */
union vfp_state *vfp_current_hw_state[NR_CPUS];
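
/*
 * A minimal sketch (vfp_state_owned_by() is illustrative, not part of this
 * file) of the ownership test that vfp_current_hw_state[] implies.  On SMP
 * the CPU recorded in the saved state must match too, to catch a thread
 * that migrated away and back while another thread used the VFP here.
 */
static inline bool vfp_state_owned_by(unsigned int cpu,
				      struct thread_info *thread)
{
#ifdef CONFIG_SMP
	/* hard.cpu only exists on SMP builds; see vfp_notifier() below */
	if (thread->vfpstate.hard.cpu != cpu)
		return false;
#endif
	return vfp_current_hw_state[cpu] == &thread->vfpstate;
}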
/*
 * Used in startup: set to non-zero if VFP checks fail.
 * After startup, holds the VFP architecture version.
 */
unsigned int VFP_arch;
/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu;

	memset(vfp, 0, sizeof(union vfp_state));

	vfp->hard.fpexc = FPEXC_EN;
	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

	/*
	 * Disable VFP to ensure we initialize it first.  We must ensure
	 * that the modification of vfp_current_hw_state[] and hardware disable
	 * are done for the same CPU and without preemption.
	 */
	cpu = get_cpu();
	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	put_cpu();
}
static void vfp_thread_exit(struct thread_info *thread)
{
	/* release case: Per-thread VFP cleanup. */
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu = get_cpu();

	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	put_cpu();
}
static void vfp_thread_copy(struct thread_info *thread)
{
	struct thread_info *parent = current_thread_info();

	vfp_sync_hwstate(parent);
	thread->vfpstate = parent->vfpstate;
}
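
/*
 * THREAD_NOTIFY_COPY is delivered from the arch's copy_thread() on
 * fork/clone; syncing the parent first ensures the child inherits the
 * parent's live register values rather than a stale in-memory snapshot.
 */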
/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *     v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *     but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *     it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - the thread (v) will be running on the local CPU, so
 *     v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *     but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *     it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	u32 fpexc;
#ifdef CONFIG_SMP
	unsigned int cpu;
#endif

	switch (cmd) {
	case THREAD_NOTIFY_SWITCH:
		fpexc = fmrx(FPEXC);
#ifdef CONFIG_SMP
		cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
			vfp_current_hw_state[cpu]->hard.cpu = cpu;
		}
		/*
		 * Thread migration, just force the reloading of the
		 * state on the new CPU in case the VFP registers
		 * contain stale data.
		 */
		if (thread->vfpstate.hard.cpu != cpu)
			vfp_current_hw_state[cpu] = NULL;
#endif
		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		break;

	case THREAD_NOTIFY_FLUSH:
		vfp_thread_flush(thread);
		break;

	case THREAD_NOTIFY_EXIT:
		vfp_thread_exit(thread);
		break;

	case THREAD_NOTIFY_COPY:
		vfp_thread_copy(thread);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block vfp_notifier_block = {
	.notifier_call	= vfp_notifier,
};
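
/*
 * vfp_notifier_block is registered via thread_register_notifier() in
 * vfp_init() below, hooking vfp_notifier() into every thread switch,
 * flush, exit and copy event.
 */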
/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));

	info.si_signo = SIGFPE;
	info.si_code = sicode;
	info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this is used for
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_info(SIGFPE, &info, current);
}
static void vfp_panic(char *reason, u32 inst)
{
	int i;

	printk(KERN_ERR "VFP: Error: %s\n", reason);
	printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), inst);
	for (i = 0; i < 32; i += 2)
		printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}
/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce", inst);
		vfp_raise_sigfpe(0, regs);
		return;
	}

	/*
	 * If any of the status flags are set, update the FPSCR.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
		fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

#define RAISE(stat,en,sig)				\
	if (exceptions & stat && fpscr & en)		\
		si_code = sig;

	/*
	 * These are arranged in priority order, lowest to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}
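
/*
 * For reference, RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV) above expands to
 *
 *	if (exceptions & FPSCR_DZC && fpscr & FPSCR_DZE)
 *		si_code = FPE_FLTDIV;
 *
 * and since the tests run from lowest to highest priority, each hit
 * overwriting si_code, the highest-priority pending and enabled exception
 * determines the final signal code.
 */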
/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction cannot appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not
			 * have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction cannot appear in FPINST2, nor can
		 * it cause an exception.  Therefore, we do not have to
		 * emulate it.
		 */
	}
	return exceptions & ~VFP_NAN_FLAG;
}
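
/*
 * Worked example (illustrative): emulating a single-precision divide of a
 * finite, non-zero number by zero returns FPSCR_DZC in the exception mask;
 * vfp_raise_exceptions() then folds it into the FPSCR and, if FPSCR_DZE is
 * set, raises SIGFPE with si_code FPE_FLTDIV.
 */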
/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));
	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}
	if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
		/*
		 * Asynchronous exception.  The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
#endif
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits.  It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		goto exit;
	}
	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}
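	/*
	 * Worked example for the block above, assuming the usual field
	 * layout from <asm/vfp.h> (FPEXC_LENGTH_BIT == 8,
	 * FPSCR_LENGTH_BIT == 16): a remaining-iteration value of 2 in
	 * FPEXC[10:8] becomes 3 after the increment, and the shift by
	 * (16 - 8) places it in FPSCR.LEN at bits [18:16].
	 */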
	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now.  Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
		goto exit;
	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
	preempt_enable();
}
static void vfp_enable(void *unused)
{
	u32 access;

	BUG_ON(preemptible());
	access = get_copro_access();

	/*
	 * Enable full access to VFP (cp10 and cp11)
	 */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
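
/*
 * For reference: CPACC_FULL(n) is (3 << (n * 2)), so the call above sets
 * the two access-control bits for cp10 and cp11 in CPACR to 0b11, i.e.
 * full access from both privileged and user mode.
 */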
#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);

	/* if vfp is on, then save state for resumption */
	if (fpexc & FPEXC_EN) {
		printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
		vfp_save_state(&ti->vfpstate, fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	} else if (vfp_current_hw_state[ti->cpu]) {
#ifndef CONFIG_SMP
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
		fmxr(FPEXC, fpexc);
#endif
	}

	/* clear any information we had about last context state */
	vfp_current_hw_state[ti->cpu] = NULL;

	return 0;
}
static void vfp_pm_resume(void)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
	void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		vfp_pm_suspend();
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		vfp_pm_resume();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vfp_cpu_pm_notifier_block = {
	.notifier_call = vfp_cpu_pm_notifier,
};
static void vfp_pm_init(void)
{
	cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
void vfp_sync_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	/*
	 * If the thread we're interested in is the current owner of the
	 * hardware VFP state, then we need to save its state.
	 */
	if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
		u32 fpexc = fmrx(FPEXC);

		/*
		 * Save the last VFP state on this CPU.
		 */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
		fmxr(FPEXC, fpexc);
	}

	put_cpu();
}
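
/*
 * Usage sketch (illustrative; the real callers live in the ptrace and
 * signal paths): a caller wanting a coherent view of a thread's VFP
 * registers syncs the hardware copy back before reading vfpstate, e.g.
 *
 *	vfp_sync_hwstate(thread);
 *	// thread->vfpstate.hard now holds the thread's live register values
 */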
void vfp_flush_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	/*
	 * If the thread we're interested in is the current owner of the
	 * hardware VFP state, invalidate that state so it is reloaded.
	 */
	if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
		u32 fpexc = fmrx(FPEXC);

		fmxr(FPEXC, fpexc & ~FPEXC_EN);

		/*
		 * Set the context to NULL to force a reload the next time
		 * the thread uses the VFP.
		 */
		vfp_current_hw_state[cpu] = NULL;
	}

#ifdef CONFIG_SMP
	/*
	 * For SMP we still have to take care of the case where the thread
	 * migrates to another CPU and then back to the original CPU on which
	 * the last VFP user is still the same thread.  Mark the thread VFP
	 * state as belonging to a non-existent CPU so that the saved one will
	 * be reloaded in the above case.
	 */
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
	put_cpu();
}
/*
 * VFP hardware can lose all context when a CPU goes offline.
 * As we will be running in SMP mode with CPU hotplug, we will save the
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware.
 *
 * Both CPU_DYING and CPU_STARTING are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
	void *hcpu)
{
	if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
		unsigned int cpu = (long)hcpu;
		vfp_current_hw_state[cpu] = NULL;
	} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		vfp_enable(NULL);
	return NOTIFY_OK;
}
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already set up to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);
		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
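		/*
		 * Decode example (hypothetical value): an FPSID of 0x410120b5
		 * would print as "implementor 41 architecture 1 part 20
		 * variant b rev 5", i.e. an ARM-designed VFP11-class unit.
		 */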
		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
			 * this configuration only have 16 x 64bit
			 * registers.
			 */
			if ((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) == 1)
				elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
			else
				elf_hwcap |= HWCAP_VFPD32;
		}
#endif
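		/*
		 * In the VFPv3 block above, MVFR0.A_SIMD (bits [3:0]) encodes
		 * the size of the register bank: 1 means 16 double-precision
		 * registers, 2 means 32; hence the "== 1" test selects the
		 * D16 hwcap.
		 */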
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations.  Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
		}
	}
	return 0;
}

late_initcall(vfp_init);