1 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 &&
2 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | grep fabs | wc -l | grep 2 &&
3 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | grep fmscs | wc -l | grep 1 &&
4 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | grep fcvt | wc -l | grep 2 &&
5 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | grep fuito | wc -l | grep 2 &&
6 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | grep fto.i | wc -l | grep 4 &&
7 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | grep bmi | wc -l | grep 1 &&
8 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | grep bgt | wc -l | grep 1 &&
9 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | grep fcmpezs | wc -l | grep 1
; Baseline codegen test: store a float and a double through pointer args.
; NOTE(review): this chunk is missing interior lines (the embedded line
; numbering jumps 11 -> 14) -- the definitions of %A and %B and the
; function terminator/closing brace are not visible here; do not assume
; what computes them.
11 void %test(float *%P, double* %D) {
14 store float %A, float* %P              ; %A defined on a line not visible in this view
15 store double %B, double* %D            ; %B likewise defined on a missing line
; libm absolute-value routines, called by test_abs below.  The RUN line
; grepping for exactly two `fabs` occurrences expects these calls to be
; selected as the VFP fabs instruction rather than emitted as real calls.
19 declare float %fabsf(float)
20 declare double %fabs(double)
; Checks that fabsf/fabs calls lower to the VFP fabs instruction
; (single- and double-precision) -- see the `grep fabs | wc -l | grep 2`
; RUN line.  Interior lines are missing from this view (numbering skips
; 22 -> 24 and 25 -> 28): the loads producing %a and %A, plus the
; terminator, are not shown.
22 void %test_abs(float *%P, double* %D) {
24 %b = call float %fabsf(float %a)       ; %a presumably loaded from %P -- defining line not visible
25 store float %b, float* %P
28 %B = call double %fabs(double %A)      ; %A presumably loaded from %D -- defining line not visible
29 store double %B, double* %D
; Simple FP addition codegen (fadds/faddd selection is the presumed
; intent -- TODO confirm, no RUN grep pins it).  Missing interior lines
; (numbering skips 33 -> 36 and 36 -> 39): the load/add producing %b and
; the load producing %A are not visible here.
33 void %test_add(float *%P, double* %D) {
36 store float %b, float* %P              ; %b defined on a line not visible in this view
39 %B = add double %A, %A                 ; double self-add; %A's defining load is missing
40 store double %B, double* %D
; float<->double conversions: the `grep fcvt | wc -l | grep 2` RUN line
; expects one extend (float->double) and one round (double->float) VFP
; convert instruction.  Loads of %a/%A and the terminator are on lines
; missing from this view.
44 void %test_ext_round(float *%P, double* %D) {
46 %b = cast float %a to double           ; fpext in modern IR; %a's load not visible
49 %B = cast double %A to float           ; fptrunc in modern IR; %A's load not visible
51 store double %b, double* %D
52 store float %B, float* %P
; mul-then-sub pattern: the `grep fmscs | wc -l | grep 1` RUN line
; expects this to fuse into a single VFP multiply-subtract (fmscs).
; The three loads producing %a1/%a2/%a3 and the terminator are on lines
; missing from this view (numbering skips 56 -> 61).
56 void %test_fma(float *%P1, float* %P2, float *%P3) {
61 %X = mul float %a1, %a2                ; operands loaded from %P1/%P2 -- presumably; defining lines not visible
62 %Y = sub float %X, %a3                 ; %Y = %a1*%a2 - %a3, the fmscs shape
64 store float %Y, float* %P1
; float -> signed int conversion; one of the four fp-to-int conversions
; counted by the `grep fto.i | wc -l | grep 4` RUN line (ftosis here --
; TODO confirm mnemonic).  The load of %a1 and the ret are on missing lines.
68 int %test_ftoi(float *%P1) {
70 %b1 = cast float %a1 to int            ; fptosi in modern IR; %a1's load not visible
; float -> unsigned int conversion (ftouis -- TODO confirm), also counted
; by the fto.i RUN grep.  The load of %a1 and the ret are on missing lines.
74 uint %test_ftou(float *%P1) {
76 %b1 = cast float %a1 to uint           ; fptoui in modern IR; %a1's load not visible
; double -> signed int conversion (ftosid -- TODO confirm), counted by
; the fto.i RUN grep.  The ret/closing brace are on lines missing from
; this view.
80 int %test_dtoi(double *%P1) {
81 %a1 = load double* %P1
82 %b1 = cast double %a1 to int           ; fptosi in modern IR
; double -> unsigned int conversion (ftouid -- TODO confirm), the fourth
; conversion counted by the fto.i RUN grep.  Ret/closing brace missing
; from this view.
86 uint %test_dtou(double *%P1) {
87 %a1 = load double* %P1
88 %b1 = cast double %a1 to uint          ; fptoui in modern IR
; unsigned int -> double: one of the two uint-to-fp conversions counted
; by the `grep fuito | wc -l | grep 2` RUN line.  Ret/closing brace are
; on lines missing from this view.
92 void %test_utod(double *%P1, uint %X) {
93 %b1 = cast uint %X to double           ; uitofp in modern IR -> VFP fuitod
94 store double %b1, double* %P1
; ubyte (u8) -> double: the second fuito conversion; checks the narrow
; unsigned source is zero-extended then converted.  Ret/closing brace are
; on lines missing from this view.
98 void %test_utod2(double *%P1, ubyte %X) {
99 %b1 = cast ubyte %X to double          ; uitofp in modern IR
100 store double %b1, double* %P1
; FP equality-or-unordered compare and branch; presumably the source of
; the single `bmi` the RUN line greps for -- TODO confirm against actual
; codegen.  Several interior lines are missing from this view (numbering
; skips 104 -> 106, 112 -> 114, 115 -> 118): the entry label, the rets of
; both branch targets, and the closing brace are not visible.
; NOTE(review): %tmp appears to be defined three times (lines 106, 109,
; 115); llvm-upgrade renamed duplicates in this era -- verify against the
; original test before editing.
104 void %test_cmp(float* %glob, int %X) {
106 %tmp = load float* %glob ; <float> [#uses=2]
107 %tmp3 = getelementptr float* %glob, int 2 ; <float*> [#uses=1]
108 %tmp4 = load float* %tmp3 ; <float> [#uses=2]
109 %tmp = seteq float %tmp, %tmp4 ; <bool> [#uses=1]
110 %tmp5 = tail call bool %llvm.isunordered.f32( float %tmp, float %tmp4 ) ; <bool> [#uses=1]
111 %tmp6 = or bool %tmp, %tmp5 ; <bool> [#uses=1]
112 br bool %tmp6, label %cond_true, label %cond_false
114 cond_true: ; preds = %entry
115 %tmp = tail call int (...)* %bar( ) ; <int> [#uses=0]
118 cond_false: ; preds = %entry
119 %tmp7 = tail call int (...)* %baz( ) ; <int> [#uses=0]
; External helpers used by the two compare-and-branch tests above:
; llvm.isunordered.f32 is the pre-2.0 intrinsic for NaN-aware comparison
; (subsumed by fcmp uno in modern IR); bar/baz are opaque varargs callees
; used only to give each branch target a side effect.
123 declare bool %llvm.isunordered.f32(float, float)
125 declare int %bar(...)
127 declare int %baz(...)
; Compare against FP zero: the `grep fcmpezs | wc -l | grep 1` RUN line
; expects the VFP compare-with-zero form (fcmpezs) instead of loading a
; 0.0 constant, and presumably the single `bgt` comes from the setgt
; branch -- TODO confirm.  Interior lines are missing from this view
; (numbering skips 129 -> 131, 133 -> 135, 136 -> 139): the entry label,
; both rets, and the closing brace are not visible.
; NOTE(review): %tmp is apparently redefined (lines 131, 132, 136) --
; an llvm-upgrade-era artifact; verify before editing.
129 void %test_cmpfp0(float* %glob, int %X) {
131 %tmp = load float* %glob ; <float> [#uses=1]
132 %tmp = setgt float %tmp, 0.000000e+00 ; <bool> [#uses=1]
133 br bool %tmp, label %cond_true, label %cond_false
135 cond_true: ; preds = %entry
136 %tmp = tail call int (...)* %bar( ) ; <int> [#uses=0]
139 cond_false: ; preds = %entry
140 %tmp1 = tail call int (...)* %baz( ) ; <int> [#uses=0]