[ARM] Handle HWCAP_VFP in VFP support code
author    Russell King <rmk@dyn-67.arm.linux.org.uk>
          Fri, 8 Dec 2006 15:22:20 +0000 (15:22 +0000)
committer Russell King <rmk+kernel@arm.linux.org.uk>
          Fri, 8 Dec 2006 16:05:26 +0000 (16:05 +0000)
Don't set HWCAP_VFP in the processor support files; not only does it
depend on the processor features, but it also depends on the support
code being present.  Therefore, only set it if the support code
detects that we have a VFP coprocessor attached.

Also, move the VFP handling of the coprocessor access register into
the VFP support code.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/kernel/setup.c
arch/arm/mm/proc-arm926.S
arch/arm/mm/proc-v6.S
arch/arm/vfp/vfpmodule.c
include/asm-arm/system.h

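For reference, the probe sequence that the vfpmodule.c hunks below add up to
looks roughly like this.  This is a condensed editorial sketch, not part of
the patch: the fmrx(FPSID) read is assumed from the existing vfp_init() code
(it is not visible in the hunks), and the FPSID decoding printk, the
no-double-precision branch and the vfp_vector/notifier setup are elided.

static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();
	u32 access = 0;

	if (cpu_arch >= CPU_ARCH_ARMv6) {
		/* grant full access to cp10/cp11 before touching any VFP register */
		access = get_copro_access();
		set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
	}

	vfpsid = fmrx(FPSID);	/* assumed: the existing FPSID probe */

	if (VFP_arch) {
		/* the support code flagged the probe as failed: no usable VFP,
		 * so put the coprocessor access register back as we found it */
		if (cpu_arch >= CPU_ARCH_ARMv6)
			set_copro_access(access);
	} else {
		/* VFP present and the support code is built in: advertise it */
		elf_hwcap |= HWCAP_VFP;
	}
	return 0;
}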
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 238dd9b6db847474d536af3555a5ea9760fba806..cf2bd42428035079943592d45a65374f3081dd57 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -354,9 +354,6 @@ static void __init setup_processor(void)
 #ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
 #endif
-#ifndef CONFIG_VFP
-       elf_hwcap &= ~HWCAP_VFP;
-#endif
 
        cpu_proc_init();
 }
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 8628ed29a955d9f168f6b785ab2960acb72a891c..080efac9d9ff857259ad9ea688befe16977f8e21 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -480,7 +480,7 @@ __arm926_proc_info:
        b       __arm926_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
-       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
+       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
        .long   cpu_arm926_name
        .long   arm926_processor_functions
        .long   v4wbi_tlb_fns
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index b440c8a1d3458ed00d957b8b45bc9b776ed691bf..c40baf8a47f028f48426f0616f982beb10bc9c40 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -207,11 +207,6 @@ __v6_setup:
 #endif
        mcr     p15, 0, r4, c2, c0, 1           @ load TTB1
 #endif /* CONFIG_MMU */
-#ifdef CONFIG_VFP
-       mrc     p15, 0, r0, c1, c0, 2
-       orr     r0, r0, #(0xf << 20)
-       mcr     p15, 0, r0, c1, c0, 2           @ Enable full access to VFP
-#endif
        adr     r5, v6_crval
        ldmia   r5, {r5, r6}
        mrc     p15, 0, r0, c1, c0, 0           @ read control register
@@ -273,7 +268,7 @@ __v6_proc_info:
        b       __v6_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
-       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
+       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
        .long   cpu_v6_name
        .long   v6_processor_functions
        .long   v6wbi_tlb_fns
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f08eafbddcc1ba7660e2d5ed68849b7595c36b49..e26cc1f599489f24274bbd0625b49bddc8bd31f4 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -263,13 +263,24 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
        if (exceptions)
                vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 }
+
 /*
  * VFP support code initialisation.
  */
 static int __init vfp_init(void)
 {
        unsigned int vfpsid;
+       unsigned int cpu_arch = cpu_architecture();
+       u32 access = 0;
+
+       if (cpu_arch >= CPU_ARCH_ARMv6) {
+               access = get_copro_access();
+
+               /*
+                * Enable full access to VFP (cp10 and cp11)
+                */
+               set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
+       }
 
        /*
         * First check that there is a VFP that we can use.
@@ -281,6 +292,12 @@ static int __init vfp_init(void)
        printk(KERN_INFO "VFP support v0.3: ");
        if (VFP_arch) {
                printk("not present\n");
+
+               /*
+                * Restore the copro access register.
+                */
+               if (cpu_arch >= CPU_ARCH_ARMv6)
+                       set_copro_access(access);
        } else if (vfpsid & FPSID_NODOUBLE) {
                printk("no double precision support\n");
        } else {
@@ -291,9 +308,16 @@ static int __init vfp_init(void)
                        (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
                        (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
                        (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
+
                vfp_vector = vfp_support_entry;
 
                thread_register_notifier(&vfp_notifier_block);
+
+               /*
+                * We detected VFP, and the support code is
+                * in place; report VFP support to userspace.
+                */
+               elf_hwcap |= HWCAP_VFP;
        }
        return 0;
 }
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index f05fbe31576cbeabb2b6d717698d7cebfdc63b4e..f60faccf01fa5766951d444978a4014552d75b02 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -139,19 +139,36 @@ static inline int cpu_is_xsc3(void)
 #define        cpu_is_xscale() 1
 #endif
 
-#define set_cr(x)                                      \
-       __asm__ __volatile__(                           \
-       "mcr    p15, 0, %0, c1, c0, 0   @ set CR"       \
-       : : "r" (x) : "cc")
-
-#define get_cr()                                       \
-       ({                                              \
-       unsigned int __val;                             \
-       __asm__ __volatile__(                           \
-       "mrc    p15, 0, %0, c1, c0, 0   @ get CR"       \
-       : "=r" (__val) : : "cc");                       \
-       __val;                                          \
-       })
+static inline unsigned int get_cr(void)
+{
+       unsigned int val;
+       asm("mrc p15, 0, %0, c1, c0, 0  @ get CR" : "=r" (val) : : "cc");
+       return val;
+}
+
+static inline void set_cr(unsigned int val)
+{
+       asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
+         : : "r" (val) : "cc");
+}
+
+#define CPACC_FULL(n)          (3 << (n * 2))
+#define CPACC_SVC(n)           (1 << (n * 2))
+#define CPACC_DISABLE(n)       (0 << (n * 2))
+
+static inline unsigned int get_copro_access(void)
+{
+       unsigned int val;
+       asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
+         : "=r" (val) : : "cc");
+       return val;
+}
+
+static inline void set_copro_access(unsigned int val)
+{
+       asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
+         : : "r" (val) : "cc");
+}
 
 extern unsigned long cr_no_alignment;  /* defined in entry-armv.S */
 extern unsigned long cr_alignment;     /* defined in entry-armv.S */
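
As a cross-check against the assembly removed from proc-v6.S above, the new
helpers encode exactly the CPACR value the old code wrote by hand.  A small
standalone illustration (editorial, not part of the patch; the macro is
copied from the asm-arm/system.h addition above):

#include <stdio.h>

/*
 * Each coprocessor n owns two bits of the coprocessor access register,
 * so "full access" for cp10 and cp11 covers bits 23:20.
 */
#define CPACC_FULL(n)	(3 << (n * 2))

int main(void)
{
	unsigned int mask = CPACC_FULL(10) | CPACC_FULL(11);

	/*
	 * Prints 00f00000, i.e. the same 0xf << 20 that the removed
	 * "orr r0, r0, #(0xf << 20)" in __v6_setup used to set.
	 */
	printf("%08x\n", mask);
	return 0;
}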