From 8a7186dbc2df4879f511b2ae6f2bce25ad37d965 Mon Sep 17 00:00:00 2001 From: Evan Cheng Date: Thu, 6 Dec 2012 01:28:01 +0000 Subject: [PATCH] Let targets provide hooks that compute known zero and ones for any_extend and extload's. If they are implemented as zero-extend, or implicitly zero-extend, then this can enable more demanded bits optimizations. e.g. define void @foo(i16* %ptr, i32 %a) nounwind { entry: %tmp1 = icmp ult i32 %a, 100 br i1 %tmp1, label %bb1, label %bb2 bb1: %tmp2 = load i16* %ptr, align 2 br label %bb2 bb2: %tmp3 = phi i16 [ 0, %entry ], [ %tmp2, %bb1 ] %cmp = icmp ult i16 %tmp3, 24 br i1 %cmp, label %bb3, label %exit bb3: call void @bar() nounwind br label %exit exit: ret void } This compiles to the following before: push {lr} mov r2, #0 cmp r1, #99 bhi LBB0_2 @ BB#1: @ %bb1 ldrh r2, [r0] LBB0_2: @ %bb2 uxth r0, r2 cmp r0, #23 bhi LBB0_4 @ BB#3: @ %bb3 bl _bar LBB0_4: @ %exit pop {lr} bx lr The uxth is not needed since ldrh implicitly zero-extends the high bits. With this change it's eliminated. 
rdar://12771555 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@169459 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Target/TargetLowering.h | 10 +++++++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 10 ++----- lib/CodeGen/SelectionDAG/TargetLowering.cpp | 24 ++++++++++++++++ lib/Target/ARM/ARMISelLowering.cpp | 30 +++++++++++++++++++ lib/Target/ARM/ARMISelLowering.h | 5 ++++ lib/Target/X86/X86ISelLowering.cpp | 32 +++++++++++++++++++++ lib/Target/X86/X86ISelLowering.h | 6 ++++ test/CodeGen/ARM/extload-knownzero.ll | 27 +++++++++++++++++ 8 files changed, 137 insertions(+), 7 deletions(-) create mode 100644 test/CodeGen/ARM/extload-knownzero.ll diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index 687109a1b0d..2cdc05096b8 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -935,6 +935,16 @@ public: const SelectionDAG &DAG, unsigned Depth = 0) const; + /// computeMaskedBitsForAnyExtend - Since each target implement ANY_EXTEND + /// and ExtLoad nodes specifically, let the target determine which of the bits + /// specified in Mask are known to be either zero or one and return them in + /// the KnownZero/KnownOne bitsets. + virtual void computeMaskedBitsForAnyExtend(const SDValue Op, + APInt &KnownZero, + APInt &KnownOne, + const SelectionDAG &DAG, + unsigned Depth = 0) const; + /// ComputeNumSignBitsForTargetNode - This method can be implemented by /// targets that want to expose additional information about sign bits to the /// DAG Combiner. 
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 7dd57d54201..631449ca79a 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -1930,6 +1930,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero, KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); } else if (const MDNode *Ranges = LD->getRanges()) { computeMaskedBitsLoad(*Ranges, KnownZero); + } else if (ISD::isEXTLoad(Op.getNode())) { + TLI.computeMaskedBitsForAnyExtend(Op, KnownZero, KnownOne, *this, Depth); } return; } @@ -1972,13 +1974,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero, return; } case ISD::ANY_EXTEND: { - EVT InVT = Op.getOperand(0).getValueType(); - unsigned InBits = InVT.getScalarType().getSizeInBits(); - KnownZero = KnownZero.trunc(InBits); - KnownOne = KnownOne.trunc(InBits); - ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); - KnownZero = KnownZero.zext(BitWidth); - KnownOne = KnownOne.zext(BitWidth); + TLI.computeMaskedBitsForAnyExtend(Op, KnownZero, KnownOne, *this, Depth); return; } case ISD::TRUNCATE: { diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 931c569d422..b410988dbde 100644 --- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -1856,6 +1856,30 @@ void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); } +void TargetLowering::computeMaskedBitsForAnyExtend(const SDValue Op, + APInt &KnownZero, + APInt &KnownOne, + const SelectionDAG &DAG, + unsigned Depth) const { + unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits(); + if (Op.getOpcode() == ISD::ANY_EXTEND) { + EVT InVT = Op.getOperand(0).getValueType(); + unsigned InBits = InVT.getScalarType().getSizeInBits(); + KnownZero = KnownZero.trunc(InBits); + 
KnownOne = KnownOne.trunc(InBits); + DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); + KnownZero = KnownZero.zext(BitWidth); + KnownOne = KnownOne.zext(BitWidth); + return; + } else if (ISD::isEXTLoad(Op.getNode())) { + KnownZero = KnownOne = APInt(BitWidth, 0); + return; + } + + assert(0 && "Expecting an ANY_EXTEND or extload!"); +} + + /// ComputeNumSignBitsForTargetNode - This method can be implemented by /// targets that want to expose additional information about sign bits to the /// DAG Combiner. diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index c1b31b3c9f1..c0a785338d6 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -9878,6 +9878,36 @@ void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, } } +void ARMTargetLowering::computeMaskedBitsForAnyExtend(const SDValue Op, + APInt &KnownZero, + APInt &KnownOne, + const SelectionDAG &DAG, + unsigned Depth) const { + unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits(); + if (Op.getOpcode() == ISD::ANY_EXTEND) { + // Implemented as a zero_extend. + EVT InVT = Op.getOperand(0).getValueType(); + unsigned InBits = InVT.getScalarType().getSizeInBits(); + KnownZero = KnownZero.trunc(InBits); + KnownOne = KnownOne.trunc(InBits); + DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); + KnownZero = KnownZero.zext(BitWidth); + KnownOne = KnownOne.zext(BitWidth); + APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits); + KnownZero |= NewBits; + return; + } else if (ISD::isEXTLoad(Op.getNode())) { + // Implemented as zextloads. 
+ LoadSDNode *LD = cast<LoadSDNode>(Op); + EVT VT = LD->getMemoryVT(); + unsigned MemBits = VT.getScalarType().getSizeInBits(); + KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); + return; + } + + assert(0 && "Expecting an ANY_EXTEND or extload!"); +} + //===----------------------------------------------------------------------===// // ARM Inline Assembly Support //===----------------------------------------------------------------------===// diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index 487c925dd9b..bde2ad49245 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -333,6 +333,11 @@ namespace llvm { const SelectionDAG &DAG, unsigned Depth) const; + virtual void computeMaskedBitsForAnyExtend(const SDValue Op, + APInt &KnownZero, + APInt &KnownOne, + const SelectionDAG &DAG, + unsigned Depth) const; virtual bool ExpandInlineAsm(CallInst *CI) const; diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index b3ff4ee98e3..20fe41dc3bf 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -14112,6 +14112,38 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, } } +void X86TargetLowering::computeMaskedBitsForAnyExtend(const SDValue Op, + APInt &KnownZero, + APInt &KnownOne, + const SelectionDAG &DAG, + unsigned Depth) const { + unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits(); + if (Op.getOpcode() == ISD::ANY_EXTEND) { + // Implemented as a zero_extend except for i16 -> i32 + EVT InVT = Op.getOperand(0).getValueType(); + unsigned InBits = InVT.getScalarType().getSizeInBits(); + KnownZero = KnownZero.trunc(InBits); + KnownOne = KnownOne.trunc(InBits); + DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); + KnownZero = KnownZero.zext(BitWidth); + KnownOne = KnownOne.zext(BitWidth); + if (BitWidth != 32 || InBits != 16) { + APInt NewBits = 
APInt::getHighBitsSet(BitWidth, BitWidth - InBits); + KnownZero |= NewBits; + } + return; + } else if (ISD::isEXTLoad(Op.getNode())) { + // Implemented as zextloads or implicitly zero-extended (i32 -> i64) + LoadSDNode *LD = cast<LoadSDNode>(Op); + EVT VT = LD->getMemoryVT(); + unsigned MemBits = VT.getScalarType().getSizeInBits(); + KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); + return; + } + + assert(0 && "Expecting an ANY_EXTEND or extload!"); +} + unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, unsigned Depth) const { // SETCC_CARRY sets the dest to ~0 for true or 0 for false. diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index e830c5fef62..099f1d884e7 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -558,6 +558,12 @@ namespace llvm { const SelectionDAG &DAG, unsigned Depth = 0) const; + virtual void computeMaskedBitsForAnyExtend(const SDValue Op, + APInt &KnownZero, + APInt &KnownOne, + const SelectionDAG &DAG, + unsigned Depth) const; + // ComputeNumSignBitsForTargetNode - Determine the number of bits in the // operation that are sign bits. 
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, diff --git a/test/CodeGen/ARM/extload-knownzero.ll b/test/CodeGen/ARM/extload-knownzero.ll new file mode 100644 index 00000000000..348285399ad --- /dev/null +++ b/test/CodeGen/ARM/extload-knownzero.ll @@ -0,0 +1,27 @@ +; RUN: llc < %s -march=arm | FileCheck %s +; rdar://12771555 + +define void @foo(i16* %ptr, i32 %a) nounwind { +entry: +; CHECK: foo: + %tmp1 = icmp ult i32 %a, 100 + br i1 %tmp1, label %bb1, label %bb2 +bb1: +; CHECK: %bb1 +; CHECK: ldrh + %tmp2 = load i16* %ptr, align 2 + br label %bb2 +bb2: +; CHECK-NOT: uxth +; CHECK: cmp + %tmp3 = phi i16 [ 0, %entry ], [ %tmp2, %bb1 ] + %cmp = icmp ult i16 %tmp3, 24 + br i1 %cmp, label %bb3, label %exit +bb3: + call void @bar() nounwind + br label %exit +exit: + ret void +} + +declare void @bar () -- 2.34.1