X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=test%2FTransforms%2FInstCombine%2Fmul.ll;h=16213b8628ca401ed17463950454e51e5ad85fe3;hb=20680b045aea83b5f476a1d253b9262ff4d3f71b;hp=d7ad546f0e55bd076a5993bcd3f6bbed79199ab1;hpb=53dce0c43f25c64935ea5f26b19b04b0e533ef83;p=oota-llvm.git

diff --git a/test/Transforms/InstCombine/mul.ll b/test/Transforms/InstCombine/mul.ll
index d7ad546f0e5..16213b8628c 100644
--- a/test/Transforms/InstCombine/mul.ll
+++ b/test/Transforms/InstCombine/mul.ll
@@ -1,52 +1,183 @@
 ; This test makes sure that mul instructions are properly eliminated.
-;
-; RUN: as < %s | opt -instcombine | dis | not grep mul
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @test1(i32 %A) {
+; CHECK: @test1
+  %B = mul i32 %A, 1 ; <i32> [#uses=1]
+  ret i32 %B
+; CHECK: ret i32 %A
+}
+
+define i32 @test2(i32 %A) {
+; CHECK: @test2
+  ; Should convert to an add instruction
+  %B = mul i32 %A, 2 ; <i32> [#uses=1]
+  ret i32 %B
+; CHECK: shl i32 %A, 1
+}
+
+define i32 @test3(i32 %A) {
+; CHECK: @test3
+  ; This should disappear entirely
+  %B = mul i32 %A, 0 ; <i32> [#uses=1]
+  ret i32 %B
+; CHECK: ret i32 0
+}
+
+define double @test4(double %A) {
+; CHECK: @test4
+  ; This is safe for FP
+  %B = fmul double 1.000000e+00, %A ; <double> [#uses=1]
+  ret double %B
+; CHECK: ret double %A
+}
+
+define i32 @test5(i32 %A) {
+; CHECK: @test5
+  %B = mul i32 %A, 8 ; <i32> [#uses=1]
+  ret i32 %B
+; CHECK: shl i32 %A, 3
+}
+
+define i8 @test6(i8 %A) {
+; CHECK: @test6
+  %B = mul i8 %A, 8 ; <i8> [#uses=1]
+  %C = mul i8 %B, 8 ; <i8> [#uses=1]
+  ret i8 %C
+; CHECK: shl i8 %A, 6
+}
 
-implementation
+define i32 @test7(i32 %i) {
+; CHECK: @test7
+  %tmp = mul i32 %i, -1 ; <i32> [#uses=1]
+  ret i32 %tmp
+; CHECK: sub i32 0, %i
+}
+
+define i64 @test8(i64 %i) {
+; CHECK: @test8
+  %j = mul i64 %i, -1 ; <i64> [#uses=1]
+  ret i64 %j
+; CHECK: sub i64 0, %i
+}
 
-int %test1(int %A) {
-  %B = mul int %A, 1
-  ret int %B
+define i32 @test9(i32 %i) {
+; CHECK: @test9
+  %j = mul i32 %i, -1 ; <i32> [#uses=1]
+  ret i32 %j
+; CHECK: sub i32 0, %i
 }
 
-int %test2(int %A) {
-  %B = mul int %A, 2 ; Should convert to an add instruction
-  ret int %B
+define i32 @test10(i32 %a, i32 %b) {
+; CHECK: @test10
+  %c = icmp slt i32 %a, 0 ; <i1> [#uses=1]
+  %d = zext i1 %c to i32 ; <i32> [#uses=1]
+  ; e = b & (a >> 31)
+  %e = mul i32 %d, %b ; <i32> [#uses=1]
+  ret i32 %e
+; CHECK: [[TEST10:%.*]] = ashr i32 %a, 31
+; CHECK-NEXT: %e = and i32 [[TEST10]], %b
+; CHECK-NEXT: ret i32 %e
 }
 
-int %test3(int %A) {
-  %B = mul int %A, 0 ; This should disappear entirely
-  ret int %B
+define i32 @test11(i32 %a, i32 %b) {
+; CHECK: @test11
+  %c = icmp sle i32 %a, -1 ; <i1> [#uses=1]
+  %d = zext i1 %c to i32 ; <i32> [#uses=1]
+  ; e = b & (a >> 31)
+  %e = mul i32 %d, %b ; <i32> [#uses=1]
+  ret i32 %e
+; CHECK: [[TEST11:%.*]] = ashr i32 %a, 31
+; CHECK-NEXT: %e = and i32 [[TEST11]], %b
+; CHECK-NEXT: ret i32 %e
+}
+
+define i32 @test12(i32 %a, i32 %b) {
+; CHECK: @test12
+  %c = icmp ugt i32 %a, 2147483647 ; <i1> [#uses=1]
+  %d = zext i1 %c to i32 ; <i32> [#uses=1]
+  %e = mul i32 %d, %b ; <i32> [#uses=1]
+  ret i32 %e
+; CHECK: [[TEST12:%.*]] = ashr i32 %a, 31
+; CHECK-NEXT: %e = and i32 [[TEST12]], %b
+; CHECK-NEXT: ret i32 %e
+
 }
 
-double %test4(double %A) {
-  %B = mul double 1.0, %A ; This is safe for FP
-  ret double %B
+; PR2642
+define internal void @test13(<4 x float>*) {
+; CHECK: @test13
+  load <4 x float>* %0, align 1
+  fmul <4 x float> %2, < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >
+  store <4 x float> %3, <4 x float>* %0, align 1
+  ret void
+; CHECK-NEXT: ret void
 }
 
-int %test5(int %A) {
-  %B = mul int %A, 8
-  ret int %B
+define <16 x i8> @test14(<16 x i8> %a) {
+; CHECK: @test14
+  %b = mul <16 x i8> %a, zeroinitializer
+  ret <16 x i8> %b
+; CHECK-NEXT: ret <16 x i8> zeroinitializer
 }
 
-ubyte %test6(ubyte %A) {
-  %B = mul ubyte %A, 8
-  %C = mul ubyte %B, 8
-  ret ubyte %C
+; rdar://7293527
+define i32 @test15(i32 %A, i32 %B) {
+; CHECK: @test15
+entry:
+  %shl = shl i32 1, %B
+  %m = mul i32 %shl, %A
+  ret i32 %m
+; CHECK: shl i32 %A, %B
 }
 
-int %test7(int %i) {
-  %tmp = mul int %i, -1 ; %tmp = sub 0, %i
-  ret int %tmp
+; X * Y (when Y is 0 or 1) --> x & (0-Y)
+define i32 @test16(i32 %b, i1 %c) {
+; CHECK: @test16
+  %d = zext i1 %c to i32 ; <i32> [#uses=1]
+  ; e = b & (a >> 31)
+  %e = mul i32 %d, %b ; <i32> [#uses=1]
+  ret i32 %e
+; CHECK: [[TEST16:%.*]] = select i1 %c, i32 %b, i32 0
+; CHECK-NEXT: ret i32 [[TEST16]]
 }
 
-ulong %test8(ulong %i) {
-  %j = mul ulong %i, 18446744073709551615 ; tmp = sub 0, %i
-  ret ulong %j
+; X * Y (when Y is 0 or 1) --> x & (0-Y)
+define i32 @test17(i32 %a, i32 %b) {
+; CHECK: @test17
+  %a.lobit = lshr i32 %a, 31
+  %e = mul i32 %a.lobit, %b
+  ret i32 %e
+; CHECK: [[TEST17:%.*]] = ashr i32 %a, 31
+; CHECK-NEXT: %e = and i32 [[TEST17]], %b
+; CHECK-NEXT: ret i32 %e
 }
 
-uint %test9(uint %i) {
-  %j = mul uint %i, 4294967295 ; %j = sub 0, %i
-  ret uint %j
+define i32 @test18(i32 %A, i32 %B) {
+; CHECK: @test18
+  %C = and i32 %A, 1
+  %D = and i32 %B, 1
+
+  %E = mul i32 %C, %D
+  %F = and i32 %E, 16
+  ret i32 %F
+; CHECK-NEXT: ret i32 0
+}
+
+declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
+declare void @use(i1)
+
+define i32 @test19(i32 %A, i32 %B) {
+; CHECK: @test19
+  %C = and i32 %A, 1
+  %D = and i32 %B, 1
+
+; It would be nice if we also started proving that this doesn't overflow.
+  %E = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %C, i32 %D)
+  %F = extractvalue {i32, i1} %E, 0
+  %G = extractvalue {i32, i1} %E, 1
+  call void @use(i1 %G)
+  %H = and i32 %F, 16
+  ret i32 %H
+; CHECK: ret i32 0
 }
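
Editorial aside, not part of the diff: the CHECK patterns for @test10, @test11, @test12, and @test17 all describe the same canonical form, in which the compare/zext/mul sequence collapses into a sign-splat followed by a mask. A minimal sketch of the output implied by @test10's CHECK lines, assembled only from those patterns (the %sext temporary name is illustrative, not taken from actual instcombine output):

define i32 @test10(i32 %a, i32 %b) {
  %sext = ashr i32 %a, 31  ; splat the sign bit: all-ones if %a is negative, zero otherwise
  %e = and i32 %sext, %b   ; b & (a >> 31): yields %b when %a < 0, and 0 otherwise
  ret i32 %e
}

This is exactly the "e = b & (a >> 31)" equivalence noted in the comments carried inside the test bodies above.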