#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
using namespace llvm;
+
+namespace {
+
+/// Diagnostic information for unimplemented or unsupported feature reporting.
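+///
+/// Reported through LLVMContext::diagnose(); for example (with a
+/// hypothetical description string):
+/// \code
+///   DiagnosticInfoUnsupported Diag(Fn, "64-bit integer division");
+///   Fn.getContext().diagnose(Diag);
+/// \endcode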
+class DiagnosticInfoUnsupported : public DiagnosticInfo {
+private:
+ const Twine &Description;
+ const Function &Fn;
+
+ static int KindID;
+
+ static int getKindID() {
+ if (KindID == 0)
+ KindID = llvm::getNextAvailablePluginDiagnosticKind();
+ return KindID;
+ }
+
+public:
+ DiagnosticInfoUnsupported(const Function &Fn, const Twine &Desc,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(getKindID(), Severity),
+ Description(Desc),
+ Fn(Fn) { }
+
+ const Function &getFunction() const { return Fn; }
+ const Twine &getDescription() const { return Description; }
+
+ void print(DiagnosticPrinter &DP) const override {
+ DP << "unsupported " << getDescription() << " in " << Fn.getName();
+ }
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == getKindID();
+ }
+};
+
+int DiagnosticInfoUnsupported::KindID = 0;
+} // End anonymous namespace
+
+
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
#include "AMDGPUGenCallingConv.inc"
+// Find a larger type to use for a load / store of a vector.
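+// For example, a v4i8 store (32 bits) maps to i32, and a v4f32 store
+// (128 bits) maps to v4i32.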
+EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
+ unsigned StoreSize = VT.getStoreSizeInBits();
+ if (StoreSize <= 32)
+ return EVT::getIntegerVT(Ctx, StoreSize);
+
+ assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
+ return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
+}
+
+// Type to use for the register a vector will be loaded into.
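+// Unlike the memory type above, loads narrower than 32 bits are widened to a
+// full i32 register; e.g. a v2i8 load still produces an i32 result.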
+EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
+ unsigned StoreSize = VT.getStoreSizeInBits();
+ if (StoreSize <= 32)
+ return EVT::getIntegerVT(Ctx, 32);
+
+ return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
+}
+
AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
TargetLowering(TM, new TargetLoweringObjectFileELF()) {
setOperationAction(ISD::FROUND, MVT::f32, Legal);
setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
- // The hardware supports ROTR, but not ROTL
- setOperationAction(ISD::ROTL, MVT::i32, Expand);
-
// Lower floating point store/load to integer store/load to reduce the number
// of patterns in tablegen.
setOperationAction(ISD::STORE, MVT::f32, Promote);
setOperationAction(ISD::STORE, MVT::f64, Promote);
AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);
+ setOperationAction(ISD::STORE, MVT::v2f64, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);
+
// Custom lowering of vector stores is required for local address space
// stores.
setOperationAction(ISD::STORE, MVT::v4i32, Custom);
// handle 64-bit stores.
setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i16, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i8, Expand);
setTruncStoreAction(MVT::i64, MVT::i1, Expand);
setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);
setOperationAction(ISD::LOAD, MVT::f64, Promote);
AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);
+ setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);
+
setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
setOperationAction(ISD::BR_CC, MVT::i1, Expand);
- setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
- setOperationAction(ISD::FNEG, MVT::v4f32, Expand);
+ if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
+ setOperationAction(ISD::FCEIL, MVT::f64, Custom);
+ setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
+ setOperationAction(ISD::FRINT, MVT::f64, Custom);
+ setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
+ }
- setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
+ if (!Subtarget->hasBFI()) {
+ // fcopysign can be done in a single instruction with BFI.
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+ }
- setOperationAction(ISD::MUL, MVT::i64, Expand);
+ const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
+ for (MVT VT : ScalarIntVTs) {
+ setOperationAction(ISD::SREM, VT, Expand);
+ setOperationAction(ISD::SDIV, VT, Custom);
+
+ // The GPU has no divrem instruction for either signed or unsigned operands.
+ setOperationAction(ISD::SDIVREM, VT, Expand);
+ setOperationAction(ISD::UDIVREM, VT, Custom);
+
+ // The GPU has no single-instruction [S|U]MUL_LOHI.
+ setOperationAction(ISD::SMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, VT, Expand);
+
+ setOperationAction(ISD::BSWAP, VT, Expand);
+ setOperationAction(ISD::CTTZ, VT, Expand);
+ setOperationAction(ISD::CTLZ, VT, Expand);
+ }
+
+ if (!Subtarget->hasBCNT(32))
+ setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+ if (!Subtarget->hasBCNT(64))
+ setOperationAction(ISD::CTPOP, MVT::i64, Expand);
+
+ // The hardware supports 32-bit ROTR, but not ROTL.
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::ROTL, MVT::i64, Expand);
+ setOperationAction(ISD::ROTR, MVT::i64, Expand);
+
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
+ setOperationAction(ISD::MUL, MVT::i64, Expand);
+ setOperationAction(ISD::MULHU, MVT::i64, Expand);
+ setOperationAction(ISD::MULHS, MVT::i64, Expand);
+ setOperationAction(ISD::SUB, MVT::i64, Expand);
setOperationAction(ISD::UDIV, MVT::i32, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
setOperationAction(ISD::UREM, MVT::i32, Expand);
- setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
- setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
- static const MVT::SimpleValueType IntTypes[] = {
+ static const MVT::SimpleValueType VectorIntTypes[] = {
MVT::v2i32, MVT::v4i32
};
- const size_t NumIntTypes = array_lengthof(IntTypes);
- for (unsigned int x = 0; x < NumIntTypes; ++x) {
- MVT::SimpleValueType VT = IntTypes[x];
- //Expand the following operations for the current type by default
+ for (MVT VT : VectorIntTypes) {
+ // Expand the following operations for the current type by default.
setOperationAction(ISD::ADD, VT, Expand);
setOperationAction(ISD::AND, VT, Expand);
setOperationAction(ISD::FP_TO_SINT, VT, Expand);
setOperationAction(ISD::MUL, VT, Expand);
setOperationAction(ISD::OR, VT, Expand);
setOperationAction(ISD::SHL, VT, Expand);
- setOperationAction(ISD::SINT_TO_FP, VT, Expand);
- setOperationAction(ISD::SRL, VT, Expand);
setOperationAction(ISD::SRA, VT, Expand);
+ setOperationAction(ISD::SRL, VT, Expand);
+ setOperationAction(ISD::ROTL, VT, Expand);
+ setOperationAction(ISD::ROTR, VT, Expand);
setOperationAction(ISD::SUB, VT, Expand);
- setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::SINT_TO_FP, VT, Expand);
setOperationAction(ISD::UINT_TO_FP, VT, Expand);
+ // TODO: Implement custom UREM / SREM routines.
+ setOperationAction(ISD::SDIV, VT, Custom);
+ setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::SREM, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::SDIVREM, VT, Expand);
+ setOperationAction(ISD::UDIVREM, VT, Custom);
setOperationAction(ISD::SELECT, VT, Expand);
setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::XOR, VT, Expand);
+ setOperationAction(ISD::BSWAP, VT, Expand);
+ setOperationAction(ISD::CTPOP, VT, Expand);
+ setOperationAction(ISD::CTTZ, VT, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
+ setOperationAction(ISD::CTLZ, VT, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
}
- static const MVT::SimpleValueType FloatTypes[] = {
+ static const MVT::SimpleValueType FloatVectorTypes[] = {
MVT::v2f32, MVT::v4f32
};
- const size_t NumFloatTypes = array_lengthof(FloatTypes);
- for (unsigned int x = 0; x < NumFloatTypes; ++x) {
- MVT::SimpleValueType VT = FloatTypes[x];
+ for (MVT VT : FloatVectorTypes) {
setOperationAction(ISD::FABS, VT, Expand);
setOperationAction(ISD::FADD, VT, Expand);
+ setOperationAction(ISD::FCEIL, VT, Expand);
+ setOperationAction(ISD::FCOS, VT, Expand);
setOperationAction(ISD::FDIV, VT, Expand);
setOperationAction(ISD::FPOW, VT, Expand);
setOperationAction(ISD::FFLOOR, VT, Expand);
setOperationAction(ISD::FTRUNC, VT, Expand);
setOperationAction(ISD::FMUL, VT, Expand);
setOperationAction(ISD::FRINT, VT, Expand);
+ setOperationAction(ISD::FNEARBYINT, VT, Expand);
setOperationAction(ISD::FSQRT, VT, Expand);
+ setOperationAction(ISD::FSIN, VT, Expand);
setOperationAction(ISD::FSUB, VT, Expand);
+ setOperationAction(ISD::FNEG, VT, Expand);
setOperationAction(ISD::SELECT, VT, Expand);
+ setOperationAction(ISD::VSELECT, VT, Expand);
+ setOperationAction(ISD::FCOPYSIGN, VT, Expand);
}
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
+ setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
+ setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Custom);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
+ setTargetDAGCombine(ISD::MUL);
+ setTargetDAGCombine(ISD::SELECT_CC);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Custom);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
+ setSchedulingPreference(Sched::RegPressure);
+ setJumpIsExpensive(true);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Custom);
+ // There are no integer divide instructions, and these expand to a pretty
+ // large sequence of instructions.
+ setIntDivIsCheap(false);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
+ // TODO: Investigate this when 64-bit divides are implemented.
+ addBypassSlowDiv(64, 32);
- setTargetDAGCombine(ISD::MUL);
+ // FIXME: Need to really handle these.
+ MaxStoresPerMemcpy = 4096;
+ MaxStoresPerMemmove = 4096;
+ MaxStoresPerMemset = 4096;
}
//===----------------------------------------------------------------------===//
return MVT::i32;
}
+// The backend supports 32 and 64 bit floating point immediates.
+// FIXME: Why are we reporting vectors of FP immediates as legal?
+bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
+ EVT ScalarVT = VT.getScalarType();
+ return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
+}
+
+// We don't want to shrink f64 / f32 constants.
+bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
+ EVT ScalarVT = VT.getScalarType();
+ return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
+}
+
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
EVT CastTy) const {
if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
// Target specific lowering
//===---------------------------------------------------------------------===//
-SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
- const {
+SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SDValue Callee = CLI.Callee;
+ SelectionDAG &DAG = CLI.DAG;
+
+ const Function &Fn = *DAG.getMachineFunction().getFunction();
+
+ StringRef FuncName("<unknown>");
+
+ if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
+ FuncName = G->getSymbol();
+ else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ FuncName = G->getGlobal()->getName();
+
+ DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
+ DAG.getContext()->diagnose(NoCalls);
+ return SDValue();
+}
+
+SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
+ SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default:
Op.getNode()->dump();
llvm_unreachable("Custom lowering code for this"
"instruction is not implemented yet!");
break;
- // AMDIL DAG lowering
- case ISD::SDIV: return LowerSDIV(Op, DAG);
- case ISD::SREM: return LowerSREM(Op, DAG);
+ // AMDGPU DAG lowering.
case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
- case ISD::BRCOND: return LowerBRCOND(Op, DAG);
- // AMDGPU DAG lowering
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::SDIV: return LowerSDIV(Op, DAG);
+ case ISD::SREM: return LowerSREM(Op, DAG);
case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
+ case ISD::FCEIL: return LowerFCEIL(Op, DAG);
+ case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
+ case ISD::FRINT: return LowerFRINT(Op, DAG);
+ case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
+ case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
+
+ // AMDIL DAG lowering.
+ case ISD::BRCOND: return LowerBRCOND(Op, DAG);
}
return Op;
}
// ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
// nothing here and let the illegal result integer be handled normally.
return;
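+ // Illegal (e.g. 64-bit) UDIV and UREM are funneled through the UDIVREM
+ // expansion below, so the quotient and remainder share one computation.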
+ case ISD::UDIV: {
+ SDValue Op = SDValue(N, 0);
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue UDIVREM = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT),
+ N->getOperand(0), N->getOperand(1));
+ Results.push_back(UDIVREM);
+ break;
+ }
+ case ISD::UREM: {
+ SDValue Op = SDValue(N, 0);
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue UDIVREM = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT),
+ N->getOperand(0), N->getOperand(1));
+ Results.push_back(UDIVREM.getValue(1));
+ break;
+ }
+ case ISD::UDIVREM: {
+ SDValue Op = SDValue(N, 0);
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
+
+ SDValue one = DAG.getConstant(1, HalfVT);
+ SDValue zero = DAG.getConstant(0, HalfVT);
+
+ // Hi / Lo split.
+ SDValue LHS = N->getOperand(0);
+ SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero);
+ SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one);
+
+ SDValue RHS = N->getOperand(1);
+ SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
+ SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);
+
+ // Get speculative values.
+ SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
+ SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
+
+ SDValue REM_Hi = zero;
+ SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);
+
+ SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
+ SDValue DIV_Lo = zero;
+
+ const unsigned halfBitWidth = HalfVT.getSizeInBits();
+
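+ // Restoring long division over the low half, one bit per iteration: shift
+ // the next dividend bit into REM; if REM >= RHS, subtract RHS and set the
+ // corresponding quotient bit. E.g. for 4-bit values, 13 / 3 shifts in the
+ // bits 1,1,0,1, reduces REM each time it reaches 3, and accumulates
+ // DIV = 0b0100 = 4 with REM = 1 left over.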
+ for (unsigned i = 0; i < halfBitWidth; ++i) {
+ SDValue POS = DAG.getConstant(halfBitWidth - i - 1, HalfVT);
+ // Get the value of the high bit.
+ SDValue HBit;
+ if (halfBitWidth == 32 && Subtarget->hasBFE()) {
+ HBit = DAG.getNode(AMDGPUISD::BFE_U32, DL, HalfVT, LHS_Lo, POS, one);
+ } else {
+ HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
+ HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
+ }
+
+ SDValue Carry = DAG.getNode(ISD::SRL, DL, HalfVT, REM_Lo,
+ DAG.getConstant(halfBitWidth - 1, HalfVT));
+ REM_Hi = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Hi, one);
+ REM_Hi = DAG.getNode(ISD::OR, DL, HalfVT, REM_Hi, Carry);
+
+ REM_Lo = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Lo, one);
+ REM_Lo = DAG.getNode(ISD::OR, DL, HalfVT, REM_Lo, HBit);
+
+ SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);
+
+ SDValue BIT = DAG.getConstant(1U << (halfBitWidth - i - 1), HalfVT);
+ // REM and RHS are unsigned, so the comparison must be unsigned too.
+ SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);
+
+ DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
+ // Update REM.
+ SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
+ REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
+ REM_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, zero);
+ REM_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, one);
+ }
+
+ SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);
+ SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, DIV_Lo, DIV_Hi);
+ Results.push_back(DIV);
+ Results.push_back(REM);
+ break;
+ }
default:
return;
}
}
+// FIXME: This implements accesses to initialized globals in the constant
+// address space by copying them to private and accessing that. It does not
+// properly handle illegal types or vectors. The private vector loads are not
+// scalarized, and the illegal scalars hit an assertion. This technique will not
+// work well with large initializers, and this should eventually be
+// removed. Initialized globals should be placed into a data section that the
+// runtime will load into a buffer before the kernel is executed. Uses of the
+// global need to be replaced with a pointer loaded from an implicit kernel
+// argument into this buffer holding the copy of the data, which will remove the
+// need for any of this.
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
const GlobalValue *GV,
const SDValue &InitPtr,
SelectionDAG &DAG) const {
const DataLayout *TD = getTargetMachine().getDataLayout();
SDLoc DL(InitPtr);
+ Type *InitTy = Init->getType();
+
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
- EVT VT = EVT::getEVT(CI->getType());
- PointerType *PtrTy = PointerType::get(CI->getType(), 0);
- return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
- MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
- TD->getPrefTypeAlignment(CI->getType()));
- } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
+ EVT VT = EVT::getEVT(InitTy);
+ PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
+ return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
+ MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
+ TD->getPrefTypeAlignment(InitTy));
+ }
+
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
EVT VT = EVT::getEVT(CFP->getType());
PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
TD->getPrefTypeAlignment(CFP->getType()));
- } else if (Init->getType()->isAggregateType()) {
+ }
+
+ if (StructType *ST = dyn_cast<StructType>(InitTy)) {
+ const StructLayout *SL = TD->getStructLayout(ST);
+
+ EVT PtrVT = InitPtr.getValueType();
+ SmallVector<SDValue, 8> Chains;
+
+ for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
+ SDValue Offset = DAG.getConstant(SL->getElementOffset(I), PtrVT);
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
+
+ Constant *Elt = Init->getAggregateElement(I);
+ Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
+ }
+
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
+ }
+
+ if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) {
EVT PtrVT = InitPtr.getValueType();
- unsigned NumElements = Init->getType()->getArrayNumElements();
+
+ unsigned NumElements;
+ if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy))
+ NumElements = AT->getNumElements();
+ else if (VectorType *VT = dyn_cast<VectorType>(SeqTy))
+ NumElements = VT->getNumElements();
+ else
+ llvm_unreachable("Unexpected type");
+
+ unsigned EltSize = TD->getTypeAllocSize(SeqTy->getElementType());
SmallVector<SDValue, 8> Chains;
for (unsigned i = 0; i < NumElements; ++i) {
- SDValue Offset = DAG.getConstant(i * TD->getTypeAllocSize(
- Init->getType()->getArrayElementType()), PtrVT);
+ SDValue Offset = DAG.getConstant(i * EltSize, PtrVT);
SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
- Chains.push_back(LowerConstantInitializer(Init->getAggregateElement(i),
- GV, Ptr, Chain, DAG));
+
+ Constant *Elt = Init->getAggregateElement(i);
+ Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
}
- return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- Chains.data(), Chains.size());
- } else {
- Init->dump();
- llvm_unreachable("Unhandled constant initializer");
+
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}
+
+ if (isa<UndefValue>(Init)) {
+ EVT VT = EVT::getEVT(InitTy);
+ PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
+ return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
+ MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
+ TD->getPrefTypeAlignment(InitTy));
+ }
+
+ Init->dump();
+ llvm_unreachable("Unhandled constant initializer");
}
SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
unsigned Size = TD->getTypeAllocSize(EltType);
unsigned Alignment = TD->getPrefTypeAlignment(EltType);
- const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV);
- const Constant *Init = Var->getInitializer();
+ MVT PrivPtrVT = getPointerTy(AMDGPUAS::PRIVATE_ADDRESS);
+ MVT ConstPtrVT = getPointerTy(AMDGPUAS::CONSTANT_ADDRESS);
+
int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
- SDValue InitPtr = DAG.getFrameIndex(FI,
- getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
+ SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);
+
+ const GlobalVariable *Var = cast<GlobalVariable>(GV);
+ if (!Var->hasInitializer()) {
+ // This has no use, but bugpoint will hit it.
+ return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
+ }
+
+ const Constant *Init = Var->getInitializer();
SmallVector<SDNode*, 8> WorkList;
for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
Ops.push_back((*I)->getOperand(i));
}
- DAG.UpdateNodeOperands(*I, Ops.data(), Ops.size());
+ DAG.UpdateNodeOperands(*I, Ops);
}
- return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
- getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
+ return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
}
}
}
DAG.ExtractVectorElements(A, Args);
DAG.ExtractVectorElements(B, Args);
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
- Args.data(), Args.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}
SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
VT.getVectorNumElements());
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
- Args.data(), Args.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}
SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
const AMDGPUFrameLowering *TFL =
static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());
- FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
- assert(FIN);
+ FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);
unsigned FrameIndex = FIN->getIndex();
unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
switch (IntrinsicID) {
default: return Op;
- case AMDGPUIntrinsic::AMDIL_abs:
+ case AMDGPUIntrinsic::AMDGPU_abs:
+ case AMDGPUIntrinsic::AMDIL_abs: // Legacy name.
return LowerIntrinsicIABS(Op, DAG);
- case AMDGPUIntrinsic::AMDIL_exp:
- return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
case AMDGPUIntrinsic::AMDGPU_lrp:
return LowerIntrinsicLRP(Op, DAG);
- case AMDGPUIntrinsic::AMDIL_fraction:
+ case AMDGPUIntrinsic::AMDGPU_fract:
+ case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
- case AMDGPUIntrinsic::AMDIL_max:
- return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
- Op.getOperand(2));
+
+ case AMDGPUIntrinsic::AMDGPU_clamp:
+ case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
+ return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+
case AMDGPUIntrinsic::AMDGPU_imax:
return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
Op.getOperand(2));
case AMDGPUIntrinsic::AMDGPU_umax:
return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
Op.getOperand(2));
- case AMDGPUIntrinsic::AMDIL_min:
- return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
- Op.getOperand(2));
case AMDGPUIntrinsic::AMDGPU_imin:
return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
Op.getOperand(2));
return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
Op.getOperand(2));
+ case AMDGPUIntrinsic::AMDGPU_umul24:
+ return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
+ Op.getOperand(1), Op.getOperand(2));
+
+ case AMDGPUIntrinsic::AMDGPU_imul24:
+ return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
+ Op.getOperand(1), Op.getOperand(2));
+
+ case AMDGPUIntrinsic::AMDGPU_umad24:
+ return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+
+ case AMDGPUIntrinsic::AMDGPU_imad24:
+ return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+
+ case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0:
+ return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1:
+ return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2:
+ return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3:
+ return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1));
+
case AMDGPUIntrinsic::AMDGPU_bfe_i32:
return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
Op.getOperand(1),
Op.getOperand(1),
Op.getOperand(2));
- case AMDGPUIntrinsic::AMDIL_round_nearest:
+ case AMDGPUIntrinsic::AMDGPU_brev:
+ return DAG.getNode(AMDGPUISD::BREV, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
+ return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
+
+ case AMDGPUIntrinsic::AMDIL_round_nearest: // Legacy name.
return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
}
}
/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
- SelectionDAG &DAG) const {
-
+ SelectionDAG &DAG) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
- SelectionDAG &DAG) const {
+ SelectionDAG &DAG) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
}
/// \brief Generate Min/Max node
-SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
+SDValue AMDGPUTargetLowering::CombineMinMax(SDNode *N,
+ SelectionDAG &DAG) const {
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- SDValue True = Op.getOperand(2);
- SDValue False = Op.getOperand(3);
- SDValue CC = Op.getOperand(4);
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ SDValue True = N->getOperand(2);
+ SDValue False = N->getOperand(3);
+ SDValue CC = N->getOperand(4);
if (VT != MVT::f32 ||
!((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
case ISD::SETOLT:
case ISD::SETLE:
case ISD::SETLT: {
- if (LHS == True)
- return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
- else
- return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
+ unsigned Opc = (LHS == True) ? AMDGPUISD::FMIN : AMDGPUISD::FMAX;
+ return DAG.getNode(Opc, DL, VT, LHS, RHS);
}
case ISD::SETGT:
case ISD::SETGE:
case ISD::SETOGE:
case ISD::SETUGT:
case ISD::SETOGT: {
- if (LHS == True)
- return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
- else
- return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
+ unsigned Opc = (LHS == True) ? AMDGPUISD::FMAX : AMDGPUISD::FMIN;
+ return DAG.getNode(Opc, DL, VT, LHS, RHS);
}
case ISD::SETCC_INVALID:
llvm_unreachable("Invalid setcc condcode!");
}
- return Op;
+ return SDValue();
}
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
Load->getAlignment()));
}
- return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(),
- Loads.data(), Loads.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(), Loads);
}
SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
SelectionDAG &DAG) const {
- StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
+ StoreSDNode *Store = cast<StoreSDNode>(Op);
EVT MemVT = Store->getMemoryVT();
unsigned MemBits = MemVT.getSizeInBits();
}
SDLoc DL(Op);
- const SDValue &Value = Store->getValue();
+ SDValue Value = Store->getValue();
EVT VT = Value.getValueType();
- const SDValue &Ptr = Store->getBasePtr();
+ EVT ElemVT = VT.getVectorElementType();
+ SDValue Ptr = Store->getBasePtr();
EVT MemEltVT = MemVT.getVectorElementType();
unsigned MemEltBits = MemEltVT.getSizeInBits();
unsigned MemNumElements = MemVT.getVectorNumElements();
- EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
- SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, PackedVT);
+ unsigned PackedSize = MemVT.getStoreSizeInBits();
+ SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, MVT::i32);
+
+ assert(Value.getValueType().getScalarSizeInBits() >= 32);
SDValue PackedValue;
for (unsigned i = 0; i < MemNumElements; ++i) {
- EVT ElemVT = VT.getVectorElementType();
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
DAG.getConstant(i, MVT::i32));
- Elt = DAG.getZExtOrTrunc(Elt, DL, PackedVT);
- Elt = DAG.getNode(ISD::AND, DL, PackedVT, Elt, Mask);
- SDValue Shift = DAG.getConstant(MemEltBits * i, PackedVT);
- Elt = DAG.getNode(ISD::SHL, DL, PackedVT, Elt, Shift);
+ Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
+ Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg
+
+ SDValue Shift = DAG.getConstant(MemEltBits * i, MVT::i32);
+ Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);
+
if (i == 0) {
PackedValue = Elt;
} else {
- PackedValue = DAG.getNode(ISD::OR, DL, PackedVT, PackedValue, Elt);
+ PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
}
}
+
+ if (PackedSize < 32) {
+ EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
+ return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
+ Store->getMemOperand()->getPointerInfo(),
+ PackedVT,
+ Store->isNonTemporal(), Store->isVolatile(),
+ Store->getAlignment());
+ }
+
return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
- MachinePointerInfo(Store->getMemOperand()->getValue()),
+ Store->getMemOperand()->getPointerInfo(),
Store->isVolatile(), Store->isNonTemporal(),
Store->getAlignment());
}
MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
Store->getAlignment()));
}
- return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains.data(), NumElts);
+ return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
}
SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
+SDValue AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT OVT = Op.getValueType();
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
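+
+ // This lowers a small signed divide through f32 arithmetic: an f32 mantissa
+ // holds 24 bits, so the float quotient of the sign-extended operands is
+ // exact apart from rounding, and jq (+/-1 depending on whether the operand
+ // signs match) corrects the truncated quotient when it is off by one.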
+ MVT INTTY;
+ MVT FLTTY;
+ if (!OVT.isVector()) {
+ INTTY = MVT::i32;
+ FLTTY = MVT::f32;
+ } else if (OVT.getVectorNumElements() == 2) {
+ INTTY = MVT::v2i32;
+ FLTTY = MVT::v2f32;
+ } else if (OVT.getVectorNumElements() == 4) {
+ INTTY = MVT::v4i32;
+ FLTTY = MVT::v4f32;
+ } else {
+ llvm_unreachable("Unsupported vector size for 24-bit SDIV");
+ }
+ unsigned bitsize = OVT.getScalarType().getSizeInBits();
+ // char|short jq = ia ^ ib;
+ SDValue jq = DAG.getNode(ISD::XOR, DL, OVT, LHS, RHS);
+
+ // jq = jq >> (bitsize - 2)
+ jq = DAG.getNode(ISD::SRA, DL, OVT, jq, DAG.getConstant(bitsize - 2, OVT));
+
+ // jq = jq | 0x1
+ jq = DAG.getNode(ISD::OR, DL, OVT, jq, DAG.getConstant(1, OVT));
+
+ // jq = (int)jq
+ jq = DAG.getSExtOrTrunc(jq, DL, INTTY);
+
+ // int ia = (int)LHS;
+ SDValue ia = DAG.getSExtOrTrunc(LHS, DL, INTTY);
+
+ // int ib = (int)RHS;
+ SDValue ib = DAG.getSExtOrTrunc(RHS, DL, INTTY);
+
+ // float fa = (float)ia;
+ SDValue fa = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ia);
+
+ // float fb = (float)ib;
+ SDValue fb = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ib);
+
+ // float fq = native_divide(fa, fb);
+ SDValue fq = DAG.getNode(AMDGPUISD::DIV_INF, DL, FLTTY, fa, fb);
+
+ // fq = trunc(fq);
+ fq = DAG.getNode(ISD::FTRUNC, DL, FLTTY, fq);
+
+ // float fqneg = -fq;
+ SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FLTTY, fq);
+
+ // float fr = mad(fqneg, fb, fa);
+ SDValue fr = DAG.getNode(ISD::FADD, DL, FLTTY,
+ DAG.getNode(ISD::MUL, DL, FLTTY, fqneg, fb), fa);
+
+ // int iq = (int)fq;
+ SDValue iq = DAG.getNode(ISD::FP_TO_SINT, DL, INTTY, fq);
+
+ // fr = fabs(fr);
+ fr = DAG.getNode(ISD::FABS, DL, FLTTY, fr);
+
+ // fb = fabs(fb);
+ fb = DAG.getNode(ISD::FABS, DL, FLTTY, fb);
+
+ // int cv = fr >= fb;
+ SDValue cv = DAG.getSetCC(DL, INTTY, fr, fb, ISD::SETOGE);
+ // jq = (cv ? jq : 0);
+ jq = DAG.getNode(ISD::SELECT, DL, OVT, cv, jq,
+ DAG.getConstant(0, OVT));
+ // dst = iq + jq;
+ iq = DAG.getSExtOrTrunc(iq, DL, OVT);
+ iq = DAG.getNode(ISD::ADD, DL, OVT, iq, jq);
+ return iq;
+}
+
+SDValue AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT OVT = Op.getValueType();
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ // The LowerSDIV32 function generates code equivalent to the following IL.
+ // mov r0, LHS
+ // mov r1, RHS
+ // ilt r10, r0, 0
+ // ilt r11, r1, 0
+ // iadd r0, r0, r10
+ // iadd r1, r1, r11
+ // ixor r0, r0, r10
+ // ixor r1, r1, r11
+ // udiv r0, r0, r1
+ // ixor r10, r10, r11
+ // iadd r0, r0, r10
+ // ixor DST, r0, r10
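+ //
+ // That is: conditionally negate both operands to their absolute values (add
+ // the sign mask r10/r11, then xor with it), divide unsigned, and negate the
+ // result when the operand signs differ (r10 ^ r11).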
+
+ // mov r0, LHS
+ SDValue r0 = LHS;
+
+ // mov r1, RHS
+ SDValue r1 = RHS;
+
+ // ilt r10, r0, 0
+ SDValue r10 = DAG.getSelectCC(DL,
+ r0, DAG.getConstant(0, OVT),
+ DAG.getConstant(-1, OVT),
+ DAG.getConstant(0, OVT),
+ ISD::SETLT);
+
+ // ilt r11, r1, 0
+ SDValue r11 = DAG.getSelectCC(DL,
+ r1, DAG.getConstant(0, OVT),
+ DAG.getConstant(-1, OVT),
+ DAG.getConstant(0, OVT),
+ ISD::SETLT);
+
+ // iadd r0, r0, r10
+ r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+ // iadd r1, r1, r11
+ r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);
+
+ // ixor r0, r0, r10
+ r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
+
+ // ixor r1, r1, r11
+ r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);
+
+ // udiv r0, r0, r1
+ r0 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);
+
+ // ixor r10, r10, r11
+ r10 = DAG.getNode(ISD::XOR, DL, OVT, r10, r11);
+
+ // iadd r0, r0, r10
+ r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+ // ixor DST, r0, r10
+ SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
+ return DST;
+}
+
+SDValue AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const {
+ return SDValue(Op.getNode(), 0);
+}
+
+SDValue AMDGPUTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const {
+ EVT OVT = Op.getValueType().getScalarType();
+
+ if (OVT == MVT::i64)
+ return LowerSDIV64(Op, DAG);
+
+ if (OVT == MVT::i32)
+ return LowerSDIV32(Op, DAG);
+
+ if (OVT == MVT::i16 || OVT == MVT::i8) {
+ // FIXME: We should be checking for the masked bits. This isn't reached
+ // because i8 and i16 are not legal types.
+ return LowerSDIV24(Op, DAG);
+ }
+
+ return SDValue(Op.getNode(), 0);
+}
+
+SDValue AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT OVT = Op.getValueType();
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ // The LowerSREM32 function generates code equivalent to the following IL.
+ // mov r0, LHS
+ // mov r1, RHS
+ // ilt r10, r0, 0
+ // ilt r11, r1, 0
+ // iadd r0, r0, r10
+ // iadd r1, r1, r11
+ // ixor r0, r0, r10
+ // ixor r1, r1, r11
+ // udiv r20, r0, r1
+ // umul r20, r20, r1
+ // sub r0, r0, r20
+ // iadd r0, r0, r10
+ // ixor DST, r0, r10
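+ //
+ // That is: take the unsigned remainder of the absolute values, then restore
+ // the dividend's sign (r10), since a remainder takes the sign of the
+ // dividend.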
+
+ // mov r0, LHS
+ SDValue r0 = LHS;
+
+ // mov r1, RHS
+ SDValue r1 = RHS;
+
+ // ilt r10, r0, 0
+ SDValue r10 = DAG.getSetCC(DL, OVT, r0, DAG.getConstant(0, OVT), ISD::SETLT);
+
+ // ilt r11, r1, 0
+ SDValue r11 = DAG.getSetCC(DL, OVT, r1, DAG.getConstant(0, OVT), ISD::SETLT);
+
+ // iadd r0, r0, r10
+ r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+ // iadd r1, r1, r11
+ r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);
+
+ // ixor r0, r0, r10
+ r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
+
+ // ixor r1, r1, r11
+ r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);
+
+ // udiv r20, r0, r1
+ SDValue r20 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);
+
+ // umul r20, r20, r1
+ r20 = DAG.getNode(AMDGPUISD::UMUL, DL, OVT, r20, r1);
+
+ // sub r0, r0, r20
+ r0 = DAG.getNode(ISD::SUB, DL, OVT, r0, r20);
+
+ // iadd r0, r0, r10
+ r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+ // ixor DST, r0, r10
+ SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
+ return DST;
+}
+
+SDValue AMDGPUTargetLowering::LowerSREM64(SDValue Op, SelectionDAG &DAG) const {
+ return SDValue(Op.getNode(), 0);
+}
+
+SDValue AMDGPUTargetLowering::LowerSREM(SDValue Op, SelectionDAG &DAG) const {
+ EVT OVT = Op.getValueType();
+
+ if (OVT.getScalarType() == MVT::i64)
+ return LowerSREM64(Op, DAG);
+
+ if (OVT.getScalarType() == MVT::i32)
+ return LowerSREM32(Op, DAG);
+
+ return SDValue(Op.getNode(), 0);
+}
+
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
- SelectionDAG &DAG) const {
+ SelectionDAG &DAG) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Num = Op.getOperand(0);
SDValue Den = Op.getOperand(1);
- SmallVector<SDValue, 8> Results;
-
// RCP = URECIP(Den) = 2^32 / Den + e
// e is rounding error.
SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
Div,
Rem
};
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
+}
+
+SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+ SDValue Src = Op.getOperand(0);
+
+ // result = trunc(src)
+ // if (src > 0.0 && src != result)
+ // result += 1.0
+
+ SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
+
+ const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
+ const SDValue One = DAG.getConstantFP(1.0, MVT::f64);
+
+ EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
+
+ SDValue Gt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
+ SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
+ SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Gt0, NeTrunc);
+
+ SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
+ return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
+}
+
+SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+ SDValue Src = Op.getOperand(0);
+
+ assert(Op.getValueType() == MVT::f64);
+
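+ // Truncate by clearing the fraction bits below the binary point: compute the
+ // unbiased exponent, mask off the low (52 - exp) fraction bits, and handle
+ // the boundary cases: a negative exponent (|x| < 1.0) yields a signed zero,
+ // and an exponent above 51 means the value is already integral.
+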
+ const SDValue Zero = DAG.getConstant(0, MVT::i32);
+ const SDValue One = DAG.getConstant(1, MVT::i32);
+
+ SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
+
+ // Extract the upper half, since this is where we will find the sign and
+ // exponent.
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
+
+ const unsigned FractBits = 52;
+ const unsigned ExpBits = 11;
+
+ // Extract the exponent.
+ SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_I32, SL, MVT::i32,
+ Hi,
+ DAG.getConstant(FractBits - 32, MVT::i32),
+ DAG.getConstant(ExpBits, MVT::i32));
+ SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
+ DAG.getConstant(1023, MVT::i32));
+
+ // Extract the sign bit.
+ const SDValue SignBitMask = DAG.getConstant(1ul << 31, MVT::i32);
+ SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
+
+ // Extend back to 64 bits.
+ SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
+ Zero, SignBit);
+ SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
+
+ SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
+ const SDValue FractMask = DAG.getConstant((1LL << FractBits) - 1, MVT::i64);
+
+ SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
+ SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
+ SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
+
+ EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::i32);
+
+ const SDValue FiftyOne = DAG.getConstant(FractBits - 1, MVT::i32);
+
+ SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
+ SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
+
+ SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
+ SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
+
+ return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
+}
+
+SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+ SDValue Src = Op.getOperand(0);
+
+ assert(Op.getValueType() == MVT::f64);
+
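+ // Round to nearest even using the 2^52 trick: adding and then subtracting
+ // 0x1.0p+52 (carrying the input's sign) forces rounding at integer
+ // granularity. Values with |x| > 0x1.fffffffffffffp+51 are already integral
+ // and pass through via the final select.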
+ APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
+ SDValue C1 = DAG.getConstantFP(C1Val, MVT::f64);
+ SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
+
+ SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
+ SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
+
+ SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
+
+ APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
+ SDValue C2 = DAG.getConstantFP(C2Val, MVT::f64);
+
+ EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
+ SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
+
+ return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
+}
+
+SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op,
+ SelectionDAG &DAG) const {
+ // FNEARBYINT and FRINT are the same, except in their handling of FP
+ // exceptions. Those aren't really meaningful for us, and OpenCL only has
+ // rint, so just treat them as equivalent.
+ return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
+}
+
+SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+ SDValue Src = Op.getOperand(0);
+
+ // result = trunc(src);
+ // if (src < 0.0 && src != result)
+ // result += -1.0.
+
+ SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
+
+ const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
+ const SDValue NegOne = DAG.getConstantFP(-1.0, MVT::f64);
+
+ EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
+
+ SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
+ SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
+ SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
+
+ SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
+ return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
-
}
SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
MVT VT = Op.getSimpleValueType();
MVT ScalarVT = VT.getScalarType();
- unsigned SrcBits = ExtraVT.getScalarType().getSizeInBits();
- unsigned DestBits = ScalarVT.getSizeInBits();
- unsigned BitsDiff = DestBits - SrcBits;
-
- if (!Subtarget->hasBFE())
- return ExpandSIGN_EXTEND_INREG(Op, BitsDiff, DAG);
+ if (!VT.isVector())
+ return SDValue();
SDValue Src = Op.getOperand(0);
- if (VT.isVector()) {
- SDLoc DL(Op);
- // Need to scalarize this, and revisit each of the scalars later.
- // TODO: Don't scalarize on Evergreen?
- unsigned NElts = VT.getVectorNumElements();
- SmallVector<SDValue, 8> Args;
- DAG.ExtractVectorElements(Src, Args);
-
- SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
- for (unsigned I = 0; I < NElts; ++I)
- Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
-
- return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args.data(), Args.size());
- }
-
- if (SrcBits == 32) {
- SDLoc DL(Op);
-
- // If the source is 32-bits, this is really half of a 2-register pair, and
- // we need to discard the unused half of the pair.
- SDValue TruncSrc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);
- return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, TruncSrc);
- }
-
- unsigned NElts = VT.isVector() ? VT.getVectorNumElements() : 1;
-
- // TODO: Match 64-bit BFE. SI has a 64-bit BFE, but it's scalar only so it
- // might not be worth the effort, and will need to expand to shifts when
- // fixing SGPR copies.
- if (SrcBits < 32 && DestBits <= 32) {
- SDLoc DL(Op);
- MVT ExtVT = (NElts == 1) ? MVT::i32 : MVT::getVectorVT(MVT::i32, NElts);
-
- if (DestBits != 32)
- Src = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVT, Src);
-
- // FIXME: This should use TargetConstant, but that hits assertions for
- // Evergreen.
- SDValue Ext = DAG.getNode(AMDGPUISD::BFE_I32, DL, ExtVT,
- Op.getOperand(0), // Operand
- DAG.getConstant(0, ExtVT), // Offset
- DAG.getConstant(SrcBits, ExtVT)); // Width
-
- // Truncate to the original type if necessary.
- if (ScalarVT == MVT::i32)
- return Ext;
- return DAG.getNode(ISD::TRUNCATE, DL, VT, Ext);
- }
-
- // For small types, extend to 32-bits first.
- if (SrcBits < 32) {
- SDLoc DL(Op);
- MVT ExtVT = (NElts == 1) ? MVT::i32 : MVT::getVectorVT(MVT::i32, NElts);
+ SDLoc DL(Op);
- SDValue TruncSrc = DAG.getNode(ISD::TRUNCATE, DL, ExtVT, Src);
- SDValue Ext32 = DAG.getNode(AMDGPUISD::BFE_I32,
- DL,
- ExtVT,
- TruncSrc, // Operand
- DAG.getConstant(0, ExtVT), // Offset
- DAG.getConstant(SrcBits, ExtVT)); // Width
+ // TODO: Don't scalarize on Evergreen?
+ unsigned NElts = VT.getVectorNumElements();
+ SmallVector<SDValue, 8> Args;
+ DAG.ExtractVectorElements(Src, Args, 0, NElts);
- return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Ext32);
- }
+ SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
+ for (unsigned I = 0; I < NElts; ++I)
+ Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
- // For everything else, use the standard bitshift expansion.
- return ExpandSIGN_EXTEND_INREG(Op, BitsDiff, DAG);
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
}
//===----------------------------------------------------------------------===//
static bool isU24(SDValue Op, SelectionDAG &DAG) {
APInt KnownZero, KnownOne;
EVT VT = Op.getValueType();
- DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);
+ DAG.computeKnownBits(Op, KnownZero, KnownOne);
return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}
DCI.CommitTargetLoweringOpt(TLO);
}
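+
+// Fold a BFE of a constant operand: shift the field up to the top of the
+// word, then shift back down (arithmetically for the signed variant,
+// logically for the unsigned one). E.g. constantFoldBFE<int32_t>(DAG,
+// 0x0000ff00, 8, 8) sign-extends the extracted 0xff byte and yields -1.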
+template <typename IntTy>
+static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
+ uint32_t Offset, uint32_t Width) {
+ if (Width + Offset < 32) {
+ IntTy Result = (Src0 << (32 - Offset - Width)) >> (32 - Width);
+ return DAG.getConstant(Result, MVT::i32);
+ }
+
+ return DAG.getConstant(Src0 >> Offset, MVT::i32);
+}
+
SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
break;
}
+ // We need to use sext even for MUL_U24, because MUL_U24 is used
+ // for signed multiply of 8 and 16-bit types.
SDValue Reg = DAG.getSExtOrTrunc(Mul, DL, VT);
return Reg;
simplifyI24(N1, DCI);
return SDValue();
}
+ case ISD::SELECT_CC: {
+ return CombineMinMax(N, DAG);
+ }
+ case AMDGPUISD::BFE_I32:
+ case AMDGPUISD::BFE_U32: {
+ assert(!N->getValueType(0).isVector() &&
+ "Vector handling of BFE not implemented");
+ ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
+ if (!Width)
+ break;
+
+ uint32_t WidthVal = Width->getZExtValue() & 0x1f;
+ if (WidthVal == 0)
+ return DAG.getConstant(0, MVT::i32);
+
+ ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (!Offset)
+ break;
+
+ SDValue BitsFrom = N->getOperand(0);
+ uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
+
+ bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
+
+ if (OffsetVal == 0) {
+ // This is already sign / zero extended, so try to fold away extra BFEs.
+ unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
+
+ unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
+ if (OpSignBits >= SignBits)
+ return BitsFrom;
+
+ EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
+ if (Signed) {
+ // This is a sign_extend_inreg. Replace it to take advantage of existing
+ // DAG Combines. If not eliminated, we will match back to BFE during
+ // selection.
+
+ // TODO: The sext_inreg of extended types ends up here, although we could
+ // handle them in a single BFE.
+ return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
+ DAG.getValueType(SmallVT));
+ }
+
+ return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
+ }
+
+ if (ConstantSDNode *Val = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
+ if (Signed) {
+ return constantFoldBFE<int32_t>(DAG,
+ Val->getSExtValue(),
+ OffsetVal,
+ WidthVal);
+ }
+
+ return constantFoldBFE<uint32_t>(DAG,
+ Val->getZExtValue(),
+ OffsetVal,
+ WidthVal);
+ }
+
+ APInt Demanded = APInt::getBitsSet(32,
+ OffsetVal,
+ OffsetVal + WidthVal);
+
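+ // When the field reaches bit 31 there are no bits above it to mask off, so
+ // the BFE reduces to a plain shift.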
+ if ((OffsetVal + WidthVal) >= 32) {
+ SDValue ShiftVal = DAG.getConstant(OffsetVal, MVT::i32);
+ return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
+ BitsFrom, ShiftVal);
+ }
+
+ APInt KnownZero, KnownOne;
+ TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+ !DCI.isBeforeLegalizeOps());
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
+ TLI.SimplifyDemandedBits(BitsFrom, Demanded, KnownZero, KnownOne, TLO)) {
+ DCI.CommitTargetLoweringOpt(TLO);
+ }
+
+ break;
+ }
}
return SDValue();
}
const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
- default: return 0;
+ default: return nullptr;
// AMDIL DAG nodes
NODE_NAME_CASE(CALL);
NODE_NAME_CASE(UMUL);
// AMDGPU DAG nodes
NODE_NAME_CASE(DWORDADDR)
NODE_NAME_CASE(FRACT)
+ NODE_NAME_CASE(CLAMP)
NODE_NAME_CASE(FMAX)
NODE_NAME_CASE(SMAX)
NODE_NAME_CASE(UMAX)
NODE_NAME_CASE(BFE_I32)
NODE_NAME_CASE(BFI)
NODE_NAME_CASE(BFM)
+ NODE_NAME_CASE(BREV)
NODE_NAME_CASE(MUL_U24)
NODE_NAME_CASE(MUL_I24)
+ NODE_NAME_CASE(MAD_U24)
+ NODE_NAME_CASE(MAD_I24)
NODE_NAME_CASE(URECIP)
NODE_NAME_CASE(DOT4)
NODE_NAME_CASE(EXPORT)
NODE_NAME_CASE(SAMPLEB)
NODE_NAME_CASE(SAMPLED)
NODE_NAME_CASE(SAMPLEL)
+ NODE_NAME_CASE(CVT_F32_UBYTE0)
+ NODE_NAME_CASE(CVT_F32_UBYTE1)
+ NODE_NAME_CASE(CVT_F32_UBYTE2)
+ NODE_NAME_CASE(CVT_F32_UBYTE3)
+ NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
NODE_NAME_CASE(STORE_MSKOR)
NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
}
}
-static void computeMaskedBitsForMinMax(const SDValue Op0,
- const SDValue Op1,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) {
+static void computeKnownBitsForMinMax(const SDValue Op0,
+ const SDValue Op1,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) {
APInt Op0Zero, Op0One;
APInt Op1Zero, Op1One;
- DAG.ComputeMaskedBits(Op0, Op0Zero, Op0One, Depth);
- DAG.ComputeMaskedBits(Op1, Op1Zero, Op1One, Depth);
+ DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
+ DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);
KnownZero = Op0Zero & Op1Zero;
KnownOne = Op0One & Op1One;
}
-void AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
+void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
const SDValue Op,
APInt &KnownZero,
APInt &KnownOne,
unsigned Depth) const {
KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
+
+ APInt KnownZero2;
+ APInt KnownOne2;
unsigned Opc = Op.getOpcode();
+
switch (Opc) {
+ default:
+ break;
case ISD::INTRINSIC_WO_CHAIN: {
// FIXME: The intrinsic should just use the node.
switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
case AMDGPUIntrinsic::AMDGPU_umax:
case AMDGPUIntrinsic::AMDGPU_imin:
case AMDGPUIntrinsic::AMDGPU_umin:
- computeMaskedBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
- KnownZero, KnownOne, DAG, Depth);
+ computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
+ KnownZero, KnownOne, DAG, Depth);
break;
default:
break;
case AMDGPUISD::UMAX:
case AMDGPUISD::SMIN:
case AMDGPUISD::UMIN:
- computeMaskedBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
- KnownZero, KnownOne, DAG, Depth);
+ computeKnownBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
+ KnownZero, KnownOne, DAG, Depth);
break;
- default:
+
+ case AMDGPUISD::BFE_I32:
+ case AMDGPUISD::BFE_U32: {
+ ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
+ if (!CWidth)
+ return;
+
+ unsigned BitWidth = 32;
+ uint32_t Width = CWidth->getZExtValue() & 0x1f;
+ if (Width == 0) {
+ KnownZero = APInt::getAllOnesValue(BitWidth);
+ KnownOne = APInt::getNullValue(BitWidth);
+ return;
+ }
+
+ // FIXME: This could do a lot more. If offset is 0, should be the same as
+ // sign_extend_inreg implementation, but that involves duplicating it.
+ // For BFE_I32 the high bits are copies of the field's sign bit, so they are
+ // only known if the sign bit is known; only the unsigned case guarantees
+ // zeros.
+ if (Opc == AMDGPUISD::BFE_U32)
+ KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
+
break;
}
+ }
+}
+
+unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
+ SDValue Op,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
+ switch (Op.getOpcode()) {
+ case AMDGPUISD::BFE_I32: {
+ ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
+ if (!Width)
+ return 1;
+
+ unsigned SignBits = 32 - Width->getZExtValue() + 1;
+ ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+ if (!Offset || !Offset->isNullValue())
+ return SignBits;
+
+ // TODO: Could probably figure something out with non-0 offsets.
+ unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
+ return std::max(SignBits, Op0SignBits);
+ }
+
+ case AMDGPUISD::BFE_U32: {
+ ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
+ return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
+ }
+
+ default:
+ return 1;
+ }
}