From 5411a3937f4303f9c3fc50be92f985a4532d95e6 Mon Sep 17 00:00:00 2001
From: Dale Johannesen
Date: Thu, 9 Aug 2007 01:04:01 +0000
Subject: [PATCH] long double 9 of N. This finishes up the X86-32 bits
 (constants are still not handled). Adds ConvertActions to control fp-to-fp
 conversions (these are currently defaulted for all other targets, so no
 changes there).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40958 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/Target/TargetLowering.h        | 35 +++++++++++
 lib/CodeGen/SelectionDAG/LegalizeDAG.cpp    | 69 ++++++++++++++-------
 lib/CodeGen/SelectionDAG/TargetLowering.cpp |  1 +
 lib/Target/X86/X86ISelLowering.cpp          | 14 ++++-
 4 files changed, 96 insertions(+), 23 deletions(-)

diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index c22d3995c58..62959391a92 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -327,6 +327,24 @@ public:
            getIndexedStoreAction(IdxMode, VT) == Custom;
   }
 
+  /// getConvertAction - Return how the conversion should be treated:
+  /// either it is legal, needs to be promoted to a larger size, needs to be
+  /// expanded to some other code sequence, or the target has a custom expander
+  /// for it.
+  LegalizeAction
+  getConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT) const {
+    if (MVT::isExtendedVT(ToVT) || MVT::isExtendedVT(FromVT))
+      return Expand;
+    return (LegalizeAction)((ConvertActions[FromVT] >> (2*ToVT)) & 3);
+  }
+
+  /// isConvertLegal - Return true if the specified conversion is legal
+  /// on this target.
+  bool isConvertLegal(MVT::ValueType FromVT, MVT::ValueType ToVT) const {
+    return getConvertAction(FromVT, ToVT) == Legal ||
+           getConvertAction(FromVT, ToVT) == Custom;
+  }
+
   /// getTypeToPromoteTo - If the action for this operation is to promote, this
   /// method returns the ValueType to promote to.
   MVT::ValueType getTypeToPromoteTo(unsigned Op, MVT::ValueType VT) const {
@@ -742,6 +760,16 @@ protected:
     IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT*2;
   }
 
+  /// setConvertAction - Indicate that the specified conversion does or does
+  /// not work with the specified type and indicate what to do about it.
+  void setConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT,
+                        LegalizeAction Action) {
+    assert(FromVT < MVT::LAST_VALUETYPE && ToVT < 32 &&
+           "Table isn't big enough!");
+    ConvertActions[FromVT] &= ~(uint64_t(3UL) << ToVT*2);
+    ConvertActions[FromVT] |= (uint64_t)Action << ToVT*2;
+  }
+
   /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
   /// promotion code defaults to trying a larger integer/fp until it can find
   /// one that works. If that default is insufficient, this method can be used
@@ -1081,6 +1109,13 @@ private:
   /// deal with the load / store.
   uint64_t IndexedModeActions[2][ISD::LAST_INDEXED_MODE];
 
+  /// ConvertActions - For each conversion from source type to destination type,
+  /// keep a LegalizeAction that indicates how instruction selection should
+  /// deal with the conversion.
+  /// Currently, this is used only for floating->floating conversions
+  /// (FP_EXTEND and FP_ROUND).
+  uint64_t ConvertActions[MVT::LAST_VALUETYPE];
+
   ValueTypeActionImpl ValueTypeActions;
 
   std::vector<double> LegalFPImmediates;
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 5c8c9e3e33d..8187b3c0803 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -3194,33 +3194,58 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
     }
     break;
-  case ISD::FP_ROUND:
-    if (TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)) ==
-        TargetLowering::Expand) {
-      // The only way we can lower this is to turn it into a TRUNCSTORE,
-      // EXTLOAD pair, targetting a temporary location (a stack slot).
-
-      // NOTE: there is a choice here between constantly creating new stack
-      // slots and always reusing the same one. We currently always create
-      // new ones, as reuse may inhibit scheduling.
-      MVT::ValueType VT = Op.getValueType(); // 32
-      const Type *Ty = MVT::getTypeForValueType(VT);
-      uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
-      unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
-      MachineFunction &MF = DAG.getMachineFunction();
-      int SSFI =
-        MF.getFrameInfo()->CreateStackObject(TySize, Align);
-      SDOperand StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
-      Result = DAG.getTruncStore(DAG.getEntryNode(), Node->getOperand(0),
-                                 StackSlot, NULL, 0, VT);
-      Result = DAG.getLoad(VT, Result, StackSlot, NULL, 0, VT);
-      break;
+  case ISD::FP_EXTEND: {
+    MVT::ValueType newVT = Op.getValueType();
+    MVT::ValueType oldVT = Op.getOperand(0).getValueType();
+    if (TLI.getConvertAction(oldVT, newVT) == TargetLowering::Expand) {
+      // The only way we can lower this is to turn it into a STORE,
+      // EXTLOAD pair, targetting a temporary location (a stack slot).
+
+      // NOTE: there is a choice here between constantly creating new stack
+      // slots and always reusing the same one. We currently always create
+      // new ones, as reuse may inhibit scheduling.
+      const Type *Ty = MVT::getTypeForValueType(oldVT);
+      uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+      unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
+      MachineFunction &MF = DAG.getMachineFunction();
+      int SSFI =
+        MF.getFrameInfo()->CreateStackObject(TySize, Align);
+      SDOperand StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
+      Result = DAG.getStore(DAG.getEntryNode(), Node->getOperand(0),
+                            StackSlot, NULL, 0);
+      Result = DAG.getExtLoad(ISD::EXTLOAD, newVT,
+                              Result, StackSlot, NULL, 0, oldVT);
+      break;
+    }
+  }
+  // FALL THROUGH (to ANY_EXTEND case)
+  case ISD::FP_ROUND: {
+    MVT::ValueType newVT = Op.getValueType();
+    MVT::ValueType oldVT = Op.getOperand(0).getValueType();
+    if (TLI.getConvertAction(oldVT, newVT) == TargetLowering::Expand) {
+      // The only way we can lower this is to turn it into a TRUNCSTORE,
+      // LOAD pair, targetting a temporary location (a stack slot).
+
+      // NOTE: there is a choice here between constantly creating new stack
+      // slots and always reusing the same one. We currently always create
+      // new ones, as reuse may inhibit scheduling.
+      const Type *Ty = MVT::getTypeForValueType(newVT);
+      uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+      unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
+      MachineFunction &MF = DAG.getMachineFunction();
+      int SSFI =
+        MF.getFrameInfo()->CreateStackObject(TySize, Align);
+      SDOperand StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
+      Result = DAG.getTruncStore(DAG.getEntryNode(), Node->getOperand(0),
+                                 StackSlot, NULL, 0, newVT);
+      Result = DAG.getLoad(newVT, Result, StackSlot, NULL, 0, newVT);
+      break;
+    }
+  }
   // FALL THROUGH
   case ISD::ANY_EXTEND:
   case ISD::ZERO_EXTEND:
   case ISD::SIGN_EXTEND:
-  case ISD::FP_EXTEND:
     switch (getTypeAction(Node->getOperand(0).getValueType())) {
     case Expand: assert(0 && "Shouldn't need to expand other operators here!");
     case Legal:
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 1b7b436b031..2e91359b430 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -129,6 +129,7 @@ TargetLowering::TargetLowering(TargetMachine &tm)
   memset(LoadXActions, 0, sizeof(LoadXActions));
   memset(&StoreXActions, 0, sizeof(StoreXActions));
   memset(&IndexedModeActions, 0, sizeof(IndexedModeActions));
+  memset(&ConvertActions, 0, sizeof(ConvertActions));
 
   // Set all indexed load / store to expand.
   for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index f8ff6a055c3..3af393484ee 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -298,6 +298,14 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
     setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
     setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
     addLegalFPImmediate(+0.0); // xorps / xorpd
+
+    // Conversions to long double (in X87) go through memory.
+    setConvertAction(MVT::f32, MVT::f80, Expand);
+    setConvertAction(MVT::f64, MVT::f80, Expand);
+
+    // Conversions from long double (in X87) go through memory.
+    setConvertAction(MVT::f80, MVT::f32, Expand);
+    setConvertAction(MVT::f80, MVT::f64, Expand);
   } else {
     // Set up the FP register classes.
     addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
@@ -307,7 +315,11 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
     setOperationAction(ISD::UNDEF, MVT::f32, Expand);
     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
-    setOperationAction(ISD::FP_ROUND, MVT::f32, Expand);
+
+    // Floating truncations need to go through memory.
+    setConvertAction(MVT::f80, MVT::f32, Expand);
+    setConvertAction(MVT::f64, MVT::f32, Expand);
+    setConvertAction(MVT::f80, MVT::f64, Expand);
 
     if (!UnsafeFPMath) {
       setOperationAction(ISD::FSIN , MVT::f64 , Expand);
-- 
2.34.1
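
Note (not part of the commit): the standalone C++ sketch below illustrates the packing
scheme the new ConvertActions table relies on: one uint64_t per source type, with each
destination type owning two bits that hold a LegalizeAction, so a lookup is just a shift
and a mask, the same trick TargetLowering already uses for its other action tables. The
toy ValueType enum, its values, and the hard-coded x87-style settings in main() are
simplified assumptions for illustration only; they are not LLVM code.

#include <cassert>
#include <cstdint>
#include <iostream>

enum LegalizeAction { Legal = 0, Promote = 1, Expand = 2, Custom = 3 };

// Toy value-type enumeration standing in for MVT::ValueType (assumption).
enum ValueType { f32 = 0, f64 = 1, f80 = 2, LAST_VALUETYPE = 3 };

// Zero-initialized, so every conversion defaults to Legal (action 0).
static uint64_t ConvertActions[LAST_VALUETYPE];

void setConvertAction(ValueType FromVT, ValueType ToVT, LegalizeAction Action) {
  assert(FromVT < LAST_VALUETYPE && ToVT < 32 && "Table isn't big enough!");
  ConvertActions[FromVT] &= ~(uint64_t(3) << (ToVT * 2));   // clear the two bits
  ConvertActions[FromVT] |= uint64_t(Action) << (ToVT * 2); // store the new action
}

LegalizeAction getConvertAction(ValueType FromVT, ValueType ToVT) {
  return LegalizeAction((ConvertActions[FromVT] >> (ToVT * 2)) & 3);
}

int main() {
  // Mirror the x87 setup from the patch: f80 <-> f32/f64 conversions go through memory.
  setConvertAction(f32, f80, Expand);
  setConvertAction(f64, f80, Expand);
  setConvertAction(f80, f32, Expand);
  setConvertAction(f80, f64, Expand);

  std::cout << "f80 -> f32: " << getConvertAction(f80, f32) << "\n"; // prints 2 (Expand)
  std::cout << "f32 -> f64: " << getConvertAction(f32, f64) << "\n"; // prints 0 (Legal)
  return 0;
}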