From: Juergen Ributzka
Date: Tue, 2 Sep 2014 22:33:57 +0000 (+0000)
Subject: [FastISel][AArch64] Use the target-dependent selection code for shifts first.
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=79ec2ed4171b5496f58f28998b85fd46827d22f8;p=oota-llvm.git

[FastISel][AArch64] Use the target-dependent selection code for shifts first.

This uses the target-dependent selection code for shifts first, which allows
us to create better code for shifts with immediates and for sign-/zero-extend
folding. Vector types are not handled yet; for those the code falls back to
target-independent instruction selection.

This fixes rdar://problem/17907920.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@216985 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index 280d11af470..75163807565 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -3475,16 +3475,16 @@ bool AArch64FastISel::TargetSelectInstruction(const Instruction *I) {
   case Instruction::FRem:
     return SelectBinaryOp(I, ISD::FREM);
   case Instruction::Shl:
-    if (!SelectBinaryOp(I, ISD::SHL))
-      return SelectShift(I);
+    if (!SelectShift(I))
+      return SelectBinaryOp(I, ISD::SHL);
     return true;
   case Instruction::LShr:
-    if (!SelectBinaryOp(I, ISD::SRL))
-      return SelectShift(I);
+    if (!SelectShift(I))
+      return SelectBinaryOp(I, ISD::SRL);
     return true;
   case Instruction::AShr:
-    if (!SelectBinaryOp(I, ISD::SRA))
-      return SelectShift(I);
+    if (!SelectShift(I))
+      return SelectBinaryOp(I, ISD::SRA);
     return true;
   case Instruction::And:
     return SelectBinaryOp(I, ISD::AND);
diff --git a/test/CodeGen/AArch64/fast-isel-addressing-modes.ll b/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
index 86755d90c63..e2444fb27ef 100644
--- a/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
+++ b/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
@@ -365,7 +365,7 @@ define i32 @load_breg_shift_offreg_3(i64 %a, i64 %b) {
 ; SDAG: lsl [[REG:x[0-9]+]], x0, #2
 ; SDAG-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]], x1, lsl #2{{\]}}
 ; FAST-LABEL: load_breg_shift_offreg_3
-; FAST: lsl [[REG:x[0-9]+]], x1, {{x[0-9]+}}
+; FAST: lsl [[REG:x[0-9]+]], x1, #2
 ; FAST-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]], x0, lsl #2{{\]}}
   %1 = shl i64 %a, 2
   %2 = shl i64 %b, 2
@@ -380,7 +380,7 @@ define i32 @load_breg_shift_offreg_4(i64 %a, i64 %b) {
 ; SDAG: lsl [[REG:x[0-9]+]], x1, #2
 ; SDAG-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]], x0, lsl #2{{\]}}
 ; FAST-LABEL: load_breg_shift_offreg_4
-; FAST: lsl [[REG:x[0-9]+]], x0, {{x[0-9]+}}
+; FAST: lsl [[REG:x[0-9]+]], x0, #2
 ; FAST-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]], x1, lsl #2{{\]}}
   %1 = shl i64 %a, 2
   %2 = shl i64 %b, 2
@@ -395,7 +395,7 @@ define i32 @load_breg_shift_offreg_5(i64 %a, i64 %b) {
 ; SDAG: lsl [[REG:x[0-9]+]], x1, #3
 ; SDAG-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]], x0, lsl #2{{\]}}
 ; FAST-LABEL: load_breg_shift_offreg_5
-; FAST: lsl [[REG:x[0-9]+]], x1, {{x[0-9]+}}
+; FAST: lsl [[REG:x[0-9]+]], x1, #3
 ; FAST-NEXT: ldr {{w[0-9]+}}, {{\[}}[[REG]], x0, lsl #2{{\]}}
   %1 = shl i64 %a, 2
   %2 = shl i64 %b, 3
diff --git a/test/CodeGen/AArch64/fast-isel-shift.ll b/test/CodeGen/AArch64/fast-isel-shift.ll
index 2a4fffcf0f9..da8469c0ecd 100644
--- a/test/CodeGen/AArch64/fast-isel-shift.ll
+++ b/test/CodeGen/AArch64/fast-isel-shift.ll
@@ -48,18 +48,16 @@ define i32 @lsl_sext_i8_i32(i8 %b) {
   ret i32 %2
 }

-; FIXME: Cannot test this yet, because the target-independent instruction
-; selector handles this.
 ; CHECK-LABEL: lsl_zext_i8_i64
+; CHECK: ubfiz {{x[0-9]*}}, {{x[0-9]*}}, #4, #8
 define i64 @lsl_zext_i8_i64(i8 %b) {
   %1 = zext i8 %b to i64
   %2 = shl i64 %1, 4
   ret i64 %2
 }

-; FIXME: Cannot test this yet, because the target-independent instruction
-; selector handles this.
 ; CHECK-LABEL: lsl_sext_i8_i64
+; CHECK: sbfiz {{x[0-9]*}}, {{x[0-9]*}}, #4, #8
 define i64 @lsl_sext_i8_i64(i8 %b) {
   %1 = sext i8 %b to i64
   %2 = shl i64 %1, 4
   ret i64 %2
 }
@@ -98,18 +96,16 @@ define i32 @lsl_sext_i16_i32(i16 %b) {
   ret i32 %2
 }

-; FIXME: Cannot test this yet, because the target-independent instruction
-; selector handles this.
 ; CHECK-LABEL: lsl_zext_i16_i64
+; CHECK: ubfiz {{x[0-9]*}}, {{x[0-9]*}}, #8, #16
 define i64 @lsl_zext_i16_i64(i16 %b) {
   %1 = zext i16 %b to i64
   %2 = shl i64 %1, 8
   ret i64 %2
 }

-; FIXME: Cannot test this yet, because the target-independent instruction
-; selector handles this.
 ; CHECK-LABEL: lsl_sext_i16_i64
+; CHECK: sbfiz {{x[0-9]*}}, {{x[0-9]*}}, #8, #16
 define i64 @lsl_sext_i16_i64(i16 %b) {
   %1 = sext i16 %b to i64
   %2 = shl i64 %1, 8
@@ -130,26 +126,22 @@ define zeroext i32 @lsl_i32(i32 %a) {
   ret i32 %1
 }

-; FIXME: Cannot test this yet, because the target-independent instruction
-; selector handles this.
 ; CHECK-LABEL: lsl_zext_i32_i64
+; CHECK: ubfiz {{x[0-9]+}}, {{x[0-9]+}}, #16, #32
 define i64 @lsl_zext_i32_i64(i32 %b) {
   %1 = zext i32 %b to i64
   %2 = shl i64 %1, 16
   ret i64 %2
 }

-; FIXME: Cannot test this yet, because the target-independent instruction
-; selector handles this.
 ; CHECK-LABEL: lsl_sext_i32_i64
+; CHECK: sbfiz {{x[0-9]+}}, {{x[0-9]+}}, #16, #32
 define i64 @lsl_sext_i32_i64(i32 %b) {
   %1 = sext i32 %b to i64
   %2 = shl i64 %1, 16
   ret i64 %2
 }

-; FIXME: Cannot test this yet, because the target-independent instruction
-; selector handles this.
 ; CHECK-LABEL: lslv_i64
 ; CHECK: lsl {{x[0-9]*}}, x0, x1
 define i64 @lslv_i64(i64 %a, i64 %b) {
@@ -157,9 +149,8 @@ define i64 @lslv_i64(i64 %a, i64 %b) {
   ret i64 %1
 }

-; FIXME: This shouldn't use the variable shift version.
 ; CHECK-LABEL: lsl_i64
-; CHECK: lsl {{x[0-9]*}}, {{x[0-9]*}}, {{x[0-9]*}}
+; CHECK: lsl {{x[0-9]*}}, {{x[0-9]*}}, #32
 define i64 @lsl_i64(i64 %a) {
   %1 = shl i64 %a, 32
   ret i64 %1
@@ -254,9 +245,8 @@ define i64 @lsrv_i64(i64 %a, i64 %b) {
   ret i64 %1
 }

-; FIXME: This shouldn't use the variable shift version.
 ; CHECK-LABEL: lsr_i64
-; CHECK: lsr {{x[0-9]*}}, {{x[0-9]*}}, {{x[0-9]*}}
+; CHECK: lsr {{x[0-9]*}}, {{x[0-9]*}}, #32
 define i64 @lsr_i64(i64 %a) {
   %1 = lshr i64 %a, 32
   ret i64 %1
@@ -349,9 +339,8 @@ define i64 @asrv_i64(i64 %a, i64 %b) {
   ret i64 %1
 }

-; FIXME: This shouldn't use the variable shift version.
 ; CHECK-LABEL: asr_i64
-; CHECK: asr {{x[0-9]*}}, {{x[0-9]*}}, {{x[0-9]*}}
+; CHECK: asr {{x[0-9]*}}, {{x[0-9]*}}, #32
 define i64 @asr_i64(i64 %a) {
   %1 = ashr i64 %a, 32
   ret i64 %1
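
Illustrative sketch (not part of the patch): with SelectShift tried first, IR such as the hypothetical function below (modeled on the lsl_zext_i8_i64 test above) is selected by the target-dependent path, which folds the zero-extend and the immediate shift amount into a single ubfiz rather than falling back to the target-independent selector or a variable-shift sequence. Exact register numbers depend on allocation.

    define i64 @zext_shl_example(i8 %b) {   ; hypothetical, mirrors lsl_zext_i8_i64
      %1 = zext i8 %b to i64
      %2 = shl i64 %1, 4                    ; FastISel now emits: ubfiz x?, x?, #4, #8
      ret i64 %2
    }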