-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
-; RUN: grep fabs | count 2
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
-; RUN: grep fmscs | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
-; RUN: grep fcvt | count 2
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
-; RUN: grep fuito | count 2
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
-; RUN: grep fto.i | count 4
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
-; RUN: grep bmi | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
-; RUN: grep bgt | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
-; RUN: grep fcmpezs | count 1
-
-void %test(float *%P, double* %D) {
- %A = load float* %P
- %B = load double* %D
- store float %A, float* %P
- store double %B, double* %D
- ret void
+; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
+
+define void @test(float* %P, double* %D) {
+ %A = load float* %P ; <float> [#uses=1]
+ %B = load double* %D ; <double> [#uses=1]
+ store float %A, float* %P
+ store double %B, double* %D
+ ret void
}
-declare float %fabsf(float)
-declare double %fabs(double)
-
-void %test_abs(float *%P, double* %D) {
- %a = load float* %P
- %b = call float %fabsf(float %a)
- store float %b, float* %P
-
- %A = load double* %D
- %B = call double %fabs(double %A)
- store double %B, double* %D
- ret void
+declare float @fabsf(float)
+
+declare double @fabs(double)
+
+define void @test_abs(float* %P, double* %D) {
+;CHECK: test_abs:
+ %a = load float* %P ; <float> [#uses=1]
+;CHECK: vabs.f32
+ %b = call float @fabsf( float %a ) ; <float> [#uses=1]
+ store float %b, float* %P
+ %A = load double* %D ; <double> [#uses=1]
+;CHECK: vabs.f64
+ %B = call double @fabs( double %A ) ; <double> [#uses=1]
+ store double %B, double* %D
+ ret void
}
-void %test_add(float *%P, double* %D) {
- %a = load float* %P
- %b = add float %a, %a
- store float %b, float* %P
-
- %A = load double* %D
- %B = add double %A, %A
- store double %B, double* %D
- ret void
+define void @test_add(float* %P, double* %D) {
+;CHECK: test_add:
+ %a = load float* %P ; <float> [#uses=2]
+ %b = fadd float %a, %a ; <float> [#uses=1]
+ store float %b, float* %P
+ %A = load double* %D ; <double> [#uses=2]
+ %B = fadd double %A, %A ; <double> [#uses=1]
+ store double %B, double* %D
+ ret void
}
-void %test_ext_round(float *%P, double* %D) {
- %a = load float* %P
- %b = cast float %a to double
-
- %A = load double* %D
- %B = cast double %A to float
-
- store double %b, double* %D
- store float %B, float* %P
- ret void
+define void @test_ext_round(float* %P, double* %D) {
+;CHECK: test_ext_round:
+ %a = load float* %P ; <float> [#uses=1]
+;CHECK: vcvt.f64.f32
+ %b = fpext float %a to double ; <double> [#uses=1]
+ %A = load double* %D ; <double> [#uses=1]
+;CHECK: vcvt.f32.f64
+ %B = fptrunc double %A to float ; <float> [#uses=1]
+ store double %b, double* %D
+ store float %B, float* %P
+ ret void
}
-void %test_fma(float *%P1, float* %P2, float *%P3) {
- %a1 = load float* %P1
- %a2 = load float* %P2
- %a3 = load float* %P3
-
- %X = mul float %a1, %a2
- %Y = sub float %X, %a3
-
- store float %Y, float* %P1
- ret void
+define void @test_fma(float* %P1, float* %P2, float* %P3) {
+;CHECK: test_fma:
+ %a1 = load float* %P1 ; <float> [#uses=1]
+ %a2 = load float* %P2 ; <float> [#uses=1]
+ %a3 = load float* %P3 ; <float> [#uses=1]
+;CHECK: vnmls.f32
+ %X = fmul float %a1, %a2 ; <float> [#uses=1]
+ %Y = fsub float %X, %a3 ; <float> [#uses=1]
+ store float %Y, float* %P1
+ ret void
}
-int %test_ftoi(float *%P1) {
- %a1 = load float* %P1
- %b1 = cast float %a1 to int
- ret int %b1
+define i32 @test_ftoi(float* %P1) {
+;CHECK: test_ftoi:
+ %a1 = load float* %P1 ; <float> [#uses=1]
+;CHECK: vcvt.s32.f32
+ %b1 = fptosi float %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
}
-uint %test_ftou(float *%P1) {
- %a1 = load float* %P1
- %b1 = cast float %a1 to uint
- ret uint %b1
+define i32 @test_ftou(float* %P1) {
+;CHECK: test_ftou:
+ %a1 = load float* %P1 ; <float> [#uses=1]
+;CHECK: vcvt.u32.f32
+ %b1 = fptoui float %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
}
-int %test_dtoi(double *%P1) {
- %a1 = load double* %P1
- %b1 = cast double %a1 to int
- ret int %b1
+define i32 @test_dtoi(double* %P1) {
+;CHECK: test_dtoi:
+ %a1 = load double* %P1 ; <double> [#uses=1]
+;CHECK: vcvt.s32.f64
+ %b1 = fptosi double %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
}
-uint %test_dtou(double *%P1) {
- %a1 = load double* %P1
- %b1 = cast double %a1 to uint
- ret uint %b1
+define i32 @test_dtou(double* %P1) {
+;CHECK: test_dtou:
+ %a1 = load double* %P1 ; <double> [#uses=1]
+;CHECK: vcvt.u32.f64
+ %b1 = fptoui double %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
}
-void %test_utod(double *%P1, uint %X) {
- %b1 = cast uint %X to double
- store double %b1, double* %P1
- ret void
+define void @test_utod(double* %P1, i32 %X) {
+;CHECK: test_utod:
+;CHECK: vcvt.f64.u32
+ %b1 = uitofp i32 %X to double ; <double> [#uses=1]
+ store double %b1, double* %P1
+ ret void
}
-void %test_utod2(double *%P1, ubyte %X) {
- %b1 = cast ubyte %X to double
- store double %b1, double* %P1
- ret void
+define void @test_utod2(double* %P1, i8 %X) {
+;CHECK: test_utod2:
+;CHECK: vcvt.f64.u32
+ %b1 = uitofp i8 %X to double ; <double> [#uses=1]
+ store double %b1, double* %P1
+ ret void
}
-void %test_cmp(float* %glob, int %X) {
+define void @test_cmp(float* %glob, i32 %X) {
+;CHECK: test_cmp:
entry:
- %tmp = load float* %glob ; <float> [#uses=2]
- %tmp3 = getelementptr float* %glob, int 2 ; <float*> [#uses=1]
- %tmp4 = load float* %tmp3 ; <float> [#uses=2]
- %tmp = seteq float %tmp, %tmp4 ; <bool> [#uses=1]
- %tmp5 = tail call bool %llvm.isunordered.f32( float %tmp, float %tmp4 ) ; <bool> [#uses=1]
- %tmp6 = or bool %tmp, %tmp5 ; <bool> [#uses=1]
- br bool %tmp6, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- %tmp = tail call int (...)* %bar( ) ; <int> [#uses=0]
- ret void
-
-cond_false: ; preds = %entry
- %tmp7 = tail call int (...)* %baz( ) ; <int> [#uses=0]
- ret void
+ %tmp = load float* %glob ; <float> [#uses=2]
+ %tmp3 = getelementptr float* %glob, i32 2 ; <float*> [#uses=1]
+ %tmp4 = load float* %tmp3 ; <float> [#uses=2]
+ %tmp.upgrd.1 = fcmp oeq float %tmp, %tmp4 ; <i1> [#uses=1]
+ %tmp5 = fcmp uno float %tmp, %tmp4 ; <i1> [#uses=1]
+ %tmp6 = or i1 %tmp.upgrd.1, %tmp5 ; <i1> [#uses=1]
+;CHECK: bmi
+;CHECK-NEXT: bgt
+ br i1 %tmp6, label %cond_true, label %cond_false
+
+cond_true: ; preds = %entry
+ %tmp.upgrd.2 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ ret void
+
+cond_false: ; preds = %entry
+ %tmp7 = tail call i32 (...)* @baz( ) ; <i32> [#uses=0]
+ ret void
}
-declare bool %llvm.isunordered.f32(float, float)
+declare i1 @llvm.isunordered.f32(float, float) ; dead declaration: the only call was upgraded to "fcmp uno" above
-declare int %bar(...)
+declare i32 @bar(...)
-declare int %baz(...)
+declare i32 @baz(...)
-void %test_cmpfp0(float* %glob, int %X) {
+define void @test_cmpfp0(float* %glob, i32 %X) {
+;CHECK: test_cmpfp0:
entry:
- %tmp = load float* %glob ; <float> [#uses=1]
- %tmp = setgt float %tmp, 0.000000e+00 ; <bool> [#uses=1]
- br bool %tmp, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- %tmp = tail call int (...)* %bar( ) ; <int> [#uses=0]
- ret void
-
-cond_false: ; preds = %entry
- %tmp1 = tail call int (...)* %baz( ) ; <int> [#uses=0]
- ret void
+ %tmp = load float* %glob ; <float> [#uses=1]
+;CHECK: vcmpe.f32
+ %tmp.upgrd.3 = fcmp ogt float %tmp, 0.000000e+00 ; <i1> [#uses=1]
+ br i1 %tmp.upgrd.3, label %cond_true, label %cond_false
+
+cond_true: ; preds = %entry
+ %tmp.upgrd.4 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ ret void
+
+cond_false: ; preds = %entry
+ %tmp1 = tail call i32 (...)* @baz( ) ; <i32> [#uses=0]
+ ret void
}
-