1 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
2 ; RUN: grep fabs | count 2
3 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
4 ; RUN: grep fmscs | count 1
5 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
6 ; RUN: grep fcvt | count 2
7 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
8 ; RUN: grep fuito | count 2
9 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
10 ; RUN: grep fto.i | count 4
11 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
12 ; RUN: grep bmi | count 1
13 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
14 ; RUN: grep bgt | count 1
15 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
16 ; RUN: grep fcmpezs | count 1
; Stores a float through %P and a double through %D; the stored values
; should exercise basic VFP single/double register moves.
; NOTE(review): extraction dropped interior lines here — the instructions
; defining %A and %B, plus 'ret void' and the closing '}', are not visible.
; Do not edit this fragment without the full original file.
18 void %test(float *%P, double* %D) {
21 store float %A, float* %P
22 store double %B, double* %D
; External absolute-value routines; calls to these are expected to be
; lowered to the VFP 'fabs' instruction (see the 'grep fabs | count 2'
; RUN line at the top of the file).
26 declare float %fabsf(float)
27 declare double %fabs(double)
; Calls fabsf on a float and fabs on a double, storing both results;
; together these should produce the two 'fabs' instructions the RUN
; lines grep for.
; NOTE(review): the loads defining %a and %A, 'ret void', and the
; closing '}' were lost in extraction — assumed present in the original.
29 void %test_abs(float *%P, double* %D) {
31 %b = call float %fabsf(float %a)
32 store float %b, float* %P
35 %B = call double %fabs(double %A)
36 store double %B, double* %D
; Adds a value to itself in both float and double precision and stores
; the results (basic VFP add codegen).
; NOTE(review): the float add defining %b, the loads of %A, 'ret void',
; and the closing '}' are missing from this extracted view.
40 void %test_add(float *%P, double* %D) {
43 store float %b, float* %P
46 %B = add double %A, %A
47 store double %B, double* %D
; Extends float->double and truncates double->float via the legacy
; 'cast' instruction; expected to codegen to the two VFP 'fcvt'
; conversions the RUN line counts ('grep fcvt | count 2').
; NOTE(review): loads of %a/%A, 'ret void', and '}' not visible here.
51 void %test_ext_round(float *%P, double* %D) {
53 %b = cast float %a to double
56 %B = cast double %A to float
58 store double %b, double* %D
59 store float %B, float* %P
; mul followed by sub on floats — expected to fold into a single VFP
; multiply-subtract ('grep fmscs | count 1' in the RUN lines).
; NOTE(review): loads of %a1/%a2/%a3, 'ret void', and '}' were dropped
; by extraction.
63 void %test_fma(float *%P1, float* %P2, float *%P3) {
68 %X = mul float %a1, %a2
69 %Y = sub float %X, %a3
71 store float %Y, float* %P1
; float -> signed int conversion; one of the four float/double-to-int
; casts covered by 'grep fto.i | count 4'.
; NOTE(review): the load defining %a1, the 'ret', and '}' are missing
; from this view.
75 int %test_ftoi(float *%P1) {
77 %b1 = cast float %a1 to int
; float -> unsigned int conversion (second of the four fto.i cases).
; NOTE(review): load of %a1, 'ret', and '}' missing from this view.
81 uint %test_ftou(float *%P1) {
83 %b1 = cast float %a1 to uint
; double -> signed int conversion (third fto.i case).
; NOTE(review): the 'ret' and closing '}' are missing from this view.
87 int %test_dtoi(double *%P1) {
88 %a1 = load double* %P1
89 %b1 = cast double %a1 to int
; double -> unsigned int conversion (fourth fto.i case).
; NOTE(review): the 'ret' and closing '}' are missing from this view.
93 uint %test_dtou(double *%P1) {
94 %a1 = load double* %P1
95 %b1 = cast double %a1 to uint
; unsigned int -> double conversion; one of the two 'fuito'
; instructions the RUN lines count.
; NOTE(review): 'ret void' and '}' are missing from this view.
99 void %test_utod(double *%P1, uint %X) {
100 %b1 = cast uint %X to double
101 store double %b1, double* %P1
; ubyte -> double conversion (zero-extend then unsigned-to-double);
; the second expected 'fuito'.
; NOTE(review): 'ret void' and '}' are missing from this view.
105 void %test_utod2(double *%P1, ubyte %X) {
106 %b1 = cast ubyte %X to double
107 store double %b1, double* %P1
; Float equality compare combined with an unordered (NaN) check, then a
; branch — exercises VFP compare + conditional-branch codegen (the RUN
; lines expect one 'bmi' and one 'bgt' across this file).
; NOTE(review): '%tmp' is defined twice (line 113 load, line 116 seteq);
; this is pre-llvm-upgrade syntax the upgrader renames — do not "fix" it.
; NOTE(review): the entry label, the branch/ret terminators of the
; cond_true/cond_false blocks, and the closing '}' were dropped by
; extraction.
111 void %test_cmp(float* %glob, int %X) {
113 %tmp = load float* %glob ; <float> [#uses=2]
114 %tmp3 = getelementptr float* %glob, int 2 ; <float*> [#uses=1]
115 %tmp4 = load float* %tmp3 ; <float> [#uses=2]
116 %tmp = seteq float %tmp, %tmp4 ; <bool> [#uses=1]
117 %tmp5 = tail call bool %llvm.isunordered.f32( float %tmp, float %tmp4 ) ; <bool> [#uses=1]
118 %tmp6 = or bool %tmp, %tmp5 ; <bool> [#uses=1]
119 br bool %tmp6, label %cond_true, label %cond_false
121 cond_true: ; preds = %entry
122 %tmp = tail call int (...)* %bar( ) ; <int> [#uses=0]
125 cond_false: ; preds = %entry
126 %tmp7 = tail call int (...)* %baz( ) ; <int> [#uses=0]
; Legacy unordered-compare intrinsic (true if either operand is NaN),
; plus the external call targets used by the branch tests above.
130 declare bool %llvm.isunordered.f32(float, float)
132 declare int %bar(...)
134 declare int %baz(...)
; Float compare against the constant 0.0 — expected to use the VFP
; compare-with-zero instruction ('grep fcmpezs | count 1').
; NOTE(review): '%tmp' is redefined (load then setgt) — legacy
; pre-llvm-upgrade style; the upgrader renames it. Leave as-is.
; NOTE(review): the branch/ret terminators of cond_true/cond_false and
; the closing '}' are missing from this extracted view.
136 void %test_cmpfp0(float* %glob, int %X) {
138 %tmp = load float* %glob ; <float> [#uses=1]
139 %tmp = setgt float %tmp, 0.000000e+00 ; <bool> [#uses=1]
140 br bool %tmp, label %cond_true, label %cond_false
142 cond_true: ; preds = %entry
143 %tmp = tail call int (...)* %bar( ) ; <int> [#uses=0]
146 cond_false: ; preds = %entry
147 %tmp1 = tail call int (...)* %baz( ) ; <int> [#uses=0]