; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - \
; RUN: | FileCheck %s -check-prefix=VFP2

; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - \
; RUN: | FileCheck %s -check-prefix=NEON

; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \
; RUN: | FileCheck %s -check-prefix=A8

; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 -regalloc=basic %s -o - \
; RUN: | FileCheck %s -check-prefix=A8

; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \
; RUN: | FileCheck %s -check-prefix=A8U

; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \
; RUN: | FileCheck %s -check-prefix=A8U
; t1: float -(a*b) - acc, with the negation written as (fsub -0.0, x).
; On Cortex-A8 the multiply+negate should select VNMUL followed by VSUB
; (NEON d-register form under unsafe-fp-math / Darwin, VFP s-register otherwise).
define float @t1(float %acc, float %a, float %b) nounwind {
; A8U: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
; A8: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
  %0 = fmul float %a, %b
  %1 = fsub float -0.0, %0
  %2 = fsub float %1, %acc
  ret float %2
}
; t2: same computation as t1 but with the negation written as a multiply
; by -1.0; codegen should canonicalize to the same VNMUL + VSUB sequence.
define float @t2(float %acc, float %a, float %b) nounwind {
; A8U: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}}
; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
; A8: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}}
; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
  %0 = fmul float %a, %b
  %1 = fmul float -1.0, %0
  %2 = fsub float %1, %acc
  ret float %2
}
; t3: double-precision variant of t1: -(a*b) - acc with (fsub -0.0, x) negation.
; NOTE(review): the original CHECK lines for this function were not visible in
; this chunk and have not been reconstructed.
define double @t3(double %acc, double %a, double %b) nounwind {
  %0 = fmul double %a, %b
  %1 = fsub double -0.0, %0
  %2 = fsub double %1, %acc
  ret double %2
}
; t4: double-precision variant of t2: -(a*b) - acc with the multiply-by -1.0
; form of the negation.
; NOTE(review): the original CHECK lines for this function were not visible in
; this chunk and have not been reconstructed.
define double @t4(double %acc, double %a, double %b) nounwind {
  %0 = fmul double %a, %b
  %1 = fmul double -1.0, %0
  %2 = fsub double %1, %acc
  ret double %2
}