From: Matthias Braun
Date: Thu, 10 Oct 2013 22:37:47 +0000 (+0000)
Subject: Tests: Use CHECK-LABEL where possible
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=82eb6198c8b6280aaba06dd33b367f4305364c00;p=oota-llvm.git

Tests: Use CHECK-LABEL where possible

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@192403 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll b/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
index 8bc8cb1d890..2bebcf48185 100644
--- a/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
+++ b/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
@@ -82,7 +82,7 @@ KBBlockZero.exit:                                 ; preds = %bb2.i

 ;
 ; Hard-coded registers comes from the ABI.
-; CHECK: wrapDistance:
+; CHECK-LABEL: wrapDistance:
 ; CHECK: cmp r1, #59
 ; CHECK-NEXT: itt le
 ; CHECK-NEXT: suble r0, r2, #1
diff --git a/test/CodeGen/ARM/long_shift.ll b/test/CodeGen/ARM/long_shift.ll
index a99a7ec86c1..3e986d802d8 100644
--- a/test/CodeGen/ARM/long_shift.ll
+++ b/test/CodeGen/ARM/long_shift.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=arm | FileCheck %s

 define i64 @f0(i64 %A, i64 %B) {
-; CHECK: f0
+; CHECK-LABEL: f0:
 ; CHECK: lsrs r3, r3, #1
 ; CHECK-NEXT: rrx r2, r2
 ; CHECK-NEXT: subs r0, r0, r2
@@ -13,7 +13,7 @@ define i64 @f0(i64 %A, i64 %B) {
 }

 define i32 @f1(i64 %x, i64 %y) {
-; CHECK: f1
+; CHECK-LABEL: f1:
 ; CHECK: lsl{{.*}}r2
   %a = shl i64 %x, %y
   %b = trunc i64 %a to i32
@@ -21,7 +21,7 @@ define i32 @f1(i64 %x, i64 %y) {
 }

 define i32 @f2(i64 %x, i64 %y) {
-; CHECK: f2
+; CHECK-LABEL: f2:
 ; CHECK: lsr{{.*}}r2
 ; CHECK-NEXT: rsb r3, r2, #32
 ; CHECK-NEXT: sub r2, r2, #32
@@ -34,7 +34,7 @@ define i32 @f2(i64 %x, i64 %y) {
 }

 define i32 @f3(i64 %x, i64 %y) {
-; CHECK: f3
+; CHECK-LABEL: f3:
 ; CHECK: lsr{{.*}}r2
 ; CHECK-NEXT: rsb r3, r2, #32
 ; CHECK-NEXT: sub r2, r2, #32
diff --git a/test/CodeGen/ARM/select.ll b/test/CodeGen/ARM/select.ll
index e98ac7daffd..ed006d643f8 100644
--- a/test/CodeGen/ARM/select.ll
+++ b/test/CodeGen/ARM/select.ll
@@ -75,7 +75,7 @@ define double @f7(double %a, double %b) {
 ; into the constant pool based on the value of the "icmp". If we have one "it"
 ; block generated, odds are good that we have close to the ideal code for this:
 ;
-; CHECK-NEON: _f8:
+; CHECK-NEON-LABEL: f8:
 ; CHECK-NEON: movw [[R3:r[0-9]+]], #1123
 ; CHECK-NEON: adr [[R2:r[0-9]+]], LCPI7_0
 ; CHECK-NEON-NEXT: cmp r0, [[R3]]
@@ -113,7 +113,7 @@ entry:
   ret void
 }

-; CHECK: f10
+; CHECK-LABEL: f10:
 define float @f10(i32 %a, i32 %b) nounwind uwtable readnone ssp {
 ; CHECK-NOT: floatsisf
   %1 = icmp eq i32 %a, %b
@@ -122,7 +122,7 @@ define float @f10(i32 %a, i32 %b) nounwind uwtable readnone ssp {
   ret float %3
 }

-; CHECK: f11
+; CHECK-LABEL: f11:
 define float @f11(i32 %a, i32 %b) nounwind uwtable readnone ssp {
 ; CHECK-NOT: floatsisf
   %1 = icmp eq i32 %a, %b
@@ -130,7 +130,7 @@ define float @f11(i32 %a, i32 %b) nounwind uwtable readnone ssp {
   ret float %2
 }

-; CHECK: f12
+; CHECK-LABEL: f12:
 define float @f12(i32 %a, i32 %b) nounwind uwtable readnone ssp {
 ; CHECK-NOT: floatunsisf
   %1 = icmp eq i32 %a, %b
diff --git a/test/CodeGen/ARM/vector-DAGCombine.ll b/test/CodeGen/ARM/vector-DAGCombine.ll
index 793934e746b..759da2235e4 100644
--- a/test/CodeGen/ARM/vector-DAGCombine.ll
+++ b/test/CodeGen/ARM/vector-DAGCombine.ll
@@ -29,7 +29,7 @@ entry:

 ; Radar 8407927: Make sure that VMOVRRD gets optimized away when the result is
 ; converted back to be used as a vector type.
-; CHECK: test_vmovrrd_combine
+; CHECK-LABEL: test_vmovrrd_combine:
 define <4 x i32> @test_vmovrrd_combine() nounwind {
 entry:
   br i1 undef, label %bb1, label %bb2
@@ -136,7 +136,7 @@ define i16 @foldBuildVectors() {

 ; Test that we are generating vrev and vext for reverse shuffles of v8i16
 ; shuffles.
-; CHECK: reverse_v8i16
+; CHECK-LABEL: reverse_v8i16:
 define void @reverse_v8i16(<8 x i16>* %loadaddr, <8 x i16>* %storeaddr) {
   %v0 = load <8 x i16>* %loadaddr
 ; CHECK: vrev64.16
@@ -149,7 +149,7 @@ define void @reverse_v8i16(<8 x i16>* %loadaddr, <8 x i16>* %storeaddr) {

 ; Test that we are generating vrev and vext for reverse shuffles of v16i8
 ; shuffles.
-; CHECK: reverse_v16i8
+; CHECK-LABEL: reverse_v16i8:
 define void @reverse_v16i8(<16 x i8>* %loadaddr, <16 x i8>* %storeaddr) {
   %v0 = load <16 x i8>* %loadaddr
 ; CHECK: vrev64.8
@@ -165,7 +165,7 @@ define void @reverse_v16i8(<16 x i8>* %loadaddr, <16 x i8>* %storeaddr) {
 ; vldr cannot handle unaligned loads.
 ; Fall back to vld1.32, which can, instead of using the general purpose loads
 ; followed by a costly sequence of instructions to build the vector register.
-; CHECK: t3
+; CHECK-LABEL: t3:
 ; CHECK: vld1.32 {[[REG:d[0-9]+]][0]}
 ; CHECK: vld1.32 {[[REG]][1]}
 ; CHECK: vmull.u8 q{{[0-9]+}}, [[REG]], [[REG]]
@@ -188,7 +188,7 @@ declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>)
 ; Check that (insert_vector_elt (load)) => (vector_load).
 ; Thus, check that scalar_to_vector do not interfer with that.
 define <8 x i16> @t4(i8* nocapture %sp0) {
-; CHECK: t4
+; CHECK-LABEL: t4:
 ; CHECK: vld1.32 {{{d[0-9]+}}[0]}, [r0]
 entry:
   %pix_sp0.0.cast = bitcast i8* %sp0 to i32*
@@ -202,7 +202,7 @@ entry:
 ; Make sure vector load is used for all three loads.
 ; Lowering to build vector was breaking the single use property of the load of
 ; %pix_sp0.0.copyload.
-; CHECK: t5
+; CHECK-LABEL: t5:
 ; CHECK: vld1.32 {[[REG1:d[0-9]+]][1]}, [r0]
 ; CHECK: vorr [[REG2:d[0-9]+]], [[REG1]], [[REG1]]
 ; CHECK: vld1.32 {[[REG1]][0]}, [r1]
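
For readers skimming this change: FileCheck's CHECK-LABEL directive divides the checked output into blocks at each line that matches a labeled directive, and the CHECK/CHECK-NEXT lines between two labels may only match inside their own block. A bare "; CHECK: f0" can instead be satisfied by the substring "f0" anywhere later in the output, which is also why the trailing colons ("f0:") are added at the same time: they pin the match to the actual label line. Below is a minimal sketch of the pattern this commit converts to, using a hypothetical function @add32 that is not part of the commit:

    ; RUN: llc < %s -march=arm | FileCheck %s

    ; CHECK-LABEL: add32:
    ; CHECK: add r0, r0, r1
    define i32 @add32(i32 %a, i32 %b) {
      %sum = add i32 %a, %b     ; AAPCS passes %a, %b in r0, r1
      ret i32 %sum              ; result is returned in r0
    }

If the "add" check failed here, FileCheck would report the failure against @add32's block of assembly alone, rather than letting the pattern accidentally match an add emitted for some later function.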