From d3027738856d57ee22930deca0c7977fdc13e633 Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Sun, 13 Feb 2011 09:02:52 +0000
Subject: [PATCH] fix visitShift to properly zero extend the shift amount if
 the provided operand is narrower than the shift register. Doing an anyext
 provides undefined bits in the top part of the register.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@125457 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../SelectionDAG/SelectionDAGBuilder.cpp | 39 +++++++++----------
 test/CodeGen/CellSPU/shift_ops.ll        |  2 +-
 test/CodeGen/MBlaze/shift.ll             |  2 -
 3 files changed, 20 insertions(+), 23 deletions(-)

diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 7c49c223582..d9faea12476 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2424,31 +2424,30 @@ void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
   SDValue Op1 = getValue(I.getOperand(0));
   SDValue Op2 = getValue(I.getOperand(1));
-  if (!I.getType()->isVectorTy() &&
-      Op2.getValueType() != TLI.getShiftAmountTy()) {
+
+  MVT ShiftTy = TLI.getShiftAmountTy();
+  unsigned ShiftSize = ShiftTy.getSizeInBits();
+  unsigned Op2Size = Op2.getValueType().getSizeInBits();
+
+  // Coerce the shift amount to the right type if we can.
+  if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
+    DebugLoc DL = getCurDebugLoc();
+
     // If the operand is smaller than the shift count type, promote it.
-    EVT PTy = TLI.getPointerTy();
-    EVT STy = TLI.getShiftAmountTy();
-    if (STy.bitsGT(Op2.getValueType()))
-      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
-                        TLI.getShiftAmountTy(), Op2);
+    MVT PtrTy = TLI.getPointerTy();
+    if (ShiftSize > Op2Size)
+      Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
+
     // If the operand is larger than the shift count type but the shift
     // count type has enough bits to represent any shift value, truncate
     // it now. This is a common case and it exposes the truncate to
     // optimization early.
-    else if (STy.getSizeInBits() >=
-             Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
-      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
-                        TLI.getShiftAmountTy(), Op2);
-    // Otherwise we'll need to temporarily settle for some other
-    // convenient type; type legalization will make adjustments as
-    // needed.
-    else if (PTy.bitsLT(Op2.getValueType()))
-      Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
-                        TLI.getPointerTy(), Op2);
-    else if (PTy.bitsGT(Op2.getValueType()))
-      Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
-                        TLI.getPointerTy(), Op2);
+    else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
+      Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
+    // Otherwise we'll need to temporarily settle for some other convenient
+    // type. Type legalization will make adjustments as needed.
+    else
+      Op2 = DAG.getZExtOrTrunc(Op2, DL, PtrTy);
   }

   setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
diff --git a/test/CodeGen/CellSPU/shift_ops.ll b/test/CodeGen/CellSPU/shift_ops.ll
index 9dffb98a234..92390abf946 100644
--- a/test/CodeGen/CellSPU/shift_ops.ll
+++ b/test/CodeGen/CellSPU/shift_ops.ll
@@ -4,7 +4,7 @@
 ; RUN: grep {shl } %t1.s | count 9
 ; RUN: grep {shli } %t1.s | count 3
 ; RUN: grep {xshw } %t1.s | count 5
-; RUN: grep {and } %t1.s | count 5
+; RUN: grep {and } %t1.s | count 14
 ; RUN: grep {andi } %t1.s | count 2
 ; RUN: grep {rotmi } %t1.s | count 2
 ; RUN: grep {rotqmbyi } %t1.s | count 1
diff --git a/test/CodeGen/MBlaze/shift.ll b/test/CodeGen/MBlaze/shift.ll
index 7eac6411dec..99f0519c020 100644
--- a/test/CodeGen/MBlaze/shift.ll
+++ b/test/CodeGen/MBlaze/shift.ll
@@ -13,7 +13,6 @@ define i8 @test_i8(i8 %a, i8 %b) {
 ; FUN: andi
 ; FUN: add
 ; FUN: bnei
-
 ; SHT-NOT: andi
 ; SHT-NOT: bnei
 ret i8 %tmp.1
@@ -50,7 +49,6 @@ define i16 @test_i16(i16 %a, i16 %b) {
 ; FUN: andi
 ; FUN: add
 ; FUN: bnei
-
 ; SHT-NOT: andi
 ; SHT-NOT: bnei
 ret i16 %tmp.1
-- 
2.34.1
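Note (not part of the patch): the sketch below is a minimal, hypothetical C++ model of why the ANY_EXTEND -> ZERO_EXTEND change matters. The helpers target_shl, any_extend_i8, and zero_extend_i8 are invented for illustration and are not LLVM APIs; they show that an any-extended i8 shift amount can carry garbage in bits [8,31] of the widened shift-amount register, changing the effective count on a target that reads those bits, whereas a zero-extended amount always preserves the value.

#include <cstdint>
#include <cstdio>

// Illustrative model, not LLVM code: a target shifter that honors the full
// widened count register and yields 0 for counts >= the value width.
static uint32_t target_shl(uint32_t value, uint32_t count) {
  return count >= 32 ? 0u : value << count;
}

// Widening an i8 shift amount into a 32-bit shift-amount register.
// ANY_EXTEND leaves bits [8,31] unspecified (modeled here with undef_hi);
// ZERO_EXTEND guarantees they are zero, so the count is preserved exactly.
static uint32_t any_extend_i8(uint8_t amt, uint32_t undef_hi) {
  return (undef_hi << 8) | amt;
}
static uint32_t zero_extend_i8(uint8_t amt) {
  return amt;
}

int main() {
  uint32_t value = 0x80;
  uint8_t amt = 4;                    // intended result: 0x80 << 4 = 0x800

  uint32_t bad  = target_shl(value, any_extend_i8(amt, /*undef bits*/ 1));
  uint32_t good = target_shl(value, zero_extend_i8(amt));

  // anyext count is 0x104 (260), so the shift produces 0; zext gives 0x800.
  printf("anyext: 0x%x  zext: 0x%x\n", bad, good);
  return 0;
}

The CellSPU test update above (grep {and } count 5 -> 14) is consistent with this: making the extension a ZERO_EXTEND in the DAG surfaces as explicit masking in the generated code.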