; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 -regalloc=basic | FileCheck %s -check-prefix=A8
; RUN: llc < %s -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math | FileCheck %s -check-prefix=A8U
; RUN: llc < %s -mtriple=arm-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8U
; t1: computes -(a*b) - acc via fsub from -0.0; on Cortex-A8 the negated
; multiply must stay a vnmul followed by a vsub (no fusing into vnmla).
define float @t1(float %acc, float %a, float %b) nounwind {
; A8U: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
; A8: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
	%0 = fmul float %a, %b
	%1 = fsub float -0.0, %0
	%2 = fsub float %1, %acc
	ret float %2
}
; t2: same computation as t1 but the negation is spelled as a multiply
; by -1.0 instead of an fsub from -0.0; expects the same vnmul + vsub.
define float @t2(float %acc, float %a, float %b) nounwind {
; A8U: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}}
; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
; A8: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}}
; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
	%0 = fmul float %a, %b
	%1 = fmul float -1.0, %0
	%2 = fsub float %1, %acc
	ret float %2
}
; t3: double-precision variant of t1 — computes -(a*b) - acc with the
; negation spelled as an fsub from -0.0.
define double @t3(double %acc, double %a, double %b) nounwind {
	%0 = fmul double %a, %b
	%1 = fsub double -0.0, %0
	%2 = fsub double %1, %acc
	ret double %2
}
; t4: double-precision variant of t2 — computes -(a*b) - acc with the
; negation spelled as a multiply by -1.0.
define double @t4(double %acc, double %a, double %b) nounwind {
	%0 = fmul double %a, %b
	%1 = fmul double -1.0, %0
	%2 = fsub double %1, %acc
	ret double %2
}