X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=include%2Fllvm%2FInstructions.h;h=955522539e01a49a6bf77e103989f9d0433e79c6;hb=37eeb058a30200101836d82098542d3d2fc4f3d5;hp=69538e954f401665544c0d6b0b11736dbae3bd07;hpb=10c6d12a9fd4dab411091f64db4db69670b88850;p=oota-llvm.git diff --git a/include/llvm/Instructions.h b/include/llvm/Instructions.h index 69538e954f4..955522539e0 100644 --- a/include/llvm/Instructions.h +++ b/include/llvm/Instructions.h @@ -20,6 +20,8 @@ #include "llvm/DerivedTypes.h" #include "llvm/Attributes.h" #include "llvm/CallingConv.h" +#include "llvm/Support/IntegersSubset.h" +#include "llvm/Support/IntegersSubsetMapping.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/ErrorHandling.h" @@ -142,12 +144,20 @@ public: LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false, Instruction *InsertBefore = 0); - LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, - unsigned Align, Instruction *InsertBefore = 0); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, BasicBlock *InsertAtEnd); + LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, + unsigned Align, Instruction *InsertBefore = 0); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, BasicBlock *InsertAtEnd); + LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, + unsigned Align, AtomicOrdering Order, + SynchronizationScope SynchScope = CrossThread, + Instruction *InsertBefore = 0); + LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, + unsigned Align, AtomicOrdering Order, + SynchronizationScope SynchScope, + BasicBlock *InsertAtEnd); LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore); LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd); @@ -171,11 +181,47 @@ public: /// getAlignment - Return the alignment of the access that is being performed /// unsigned getAlignment() const { - return (1 << (getSubclassDataFromInstruction() >> 1)) >> 1; + return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1; } void setAlignment(unsigned Align); + /// Returns the ordering effect of this fence. + AtomicOrdering getOrdering() const { + return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7); + } + + /// Set the ordering constraint on this load. May not be Release or + /// AcquireRelease. + void setOrdering(AtomicOrdering Ordering) { + setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) | + (Ordering << 7)); + } + + SynchronizationScope getSynchScope() const { + return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1); + } + + /// Specify whether this load is ordered with respect to all + /// concurrently executing threads, or only with respect to signal handlers + /// executing in the same thread. 
+ void setSynchScope(SynchronizationScope xthread) { + setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) | + (xthread << 6)); + } + + bool isAtomic() const { return getOrdering() != NotAtomic; } + void setAtomic(AtomicOrdering Ordering, + SynchronizationScope SynchScope = CrossThread) { + setOrdering(Ordering); + setSynchScope(SynchScope); + } + + bool isSimple() const { return !isAtomic() && !isVolatile(); } + bool isUnordered() const { + return getOrdering() <= Unordered && !isVolatile(); + } + Value *getPointerOperand() { return getOperand(0); } const Value *getPointerOperand() const { return getOperand(0); } static unsigned getPointerOperandIndex() { return 0U; } @@ -222,19 +268,27 @@ public: StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); StoreInst(Value *Val, Value *Ptr, bool isVolatile = false, Instruction *InsertBefore = 0); + StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); StoreInst(Value *Val, Value *Ptr, bool isVolatile, unsigned Align, Instruction *InsertBefore = 0); - StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); StoreInst(Value *Val, Value *Ptr, bool isVolatile, unsigned Align, BasicBlock *InsertAtEnd); + StoreInst(Value *Val, Value *Ptr, bool isVolatile, + unsigned Align, AtomicOrdering Order, + SynchronizationScope SynchScope = CrossThread, + Instruction *InsertBefore = 0); + StoreInst(Value *Val, Value *Ptr, bool isVolatile, + unsigned Align, AtomicOrdering Order, + SynchronizationScope SynchScope, + BasicBlock *InsertAtEnd); + - - /// isVolatile - Return true if this is a load from a volatile memory + /// isVolatile - Return true if this is a store to a volatile memory /// location. /// bool isVolatile() const { return getSubclassDataFromInstruction() & 1; } - /// setVolatile - Specify whether this is a volatile load or not. + /// setVolatile - Specify whether this is a volatile store or not. /// void setVolatile(bool V) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | @@ -247,11 +301,47 @@ public: /// getAlignment - Return the alignment of the access that is being performed /// unsigned getAlignment() const { - return (1 << (getSubclassDataFromInstruction() >> 1)) >> 1; + return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1; } void setAlignment(unsigned Align); + /// Returns the ordering effect of this store. + AtomicOrdering getOrdering() const { + return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7); + } + + /// Set the ordering constraint on this store. May not be Acquire or + /// AcquireRelease. + void setOrdering(AtomicOrdering Ordering) { + setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) | + (Ordering << 7)); + } + + SynchronizationScope getSynchScope() const { + return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1); + } + + /// Specify whether this store instruction is ordered with respect to all + /// concurrently executing threads, or only with respect to signal handlers + /// executing in the same thread. 
+ void setSynchScope(SynchronizationScope xthread) { + setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) | + (xthread << 6)); + } + + bool isAtomic() const { return getOrdering() != NotAtomic; } + void setAtomic(AtomicOrdering Ordering, + SynchronizationScope SynchScope = CrossThread) { + setOrdering(Ordering); + setSynchScope(SynchScope); + } + + bool isSimple() const { return !isAtomic() && !isVolatile(); } + bool isUnordered() const { + return getOrdering() <= Unordered && !isVolatile(); + } + Value *getValueOperand() { return getOperand(0); } const Value *getValueOperand() const { return getOperand(0); } @@ -319,18 +409,8 @@ public: /// Set the ordering constraint on this fence. May only be Acquire, Release, /// AcquireRelease, or SequentiallyConsistent. void setOrdering(AtomicOrdering Ordering) { - switch (Ordering) { - case Acquire: - case Release: - case AcquireRelease: - case SequentiallyConsistent: - setInstructionSubclassData((getSubclassDataFromInstruction() & 1) | - (Ordering << 1)); - return; - default: - llvm_unreachable("FenceInst ordering must be Acquire, Release," - " AcquireRelease, or SequentiallyConsistent"); - } + setInstructionSubclassData((getSubclassDataFromInstruction() & 1) | + (Ordering << 1)); } SynchronizationScope getSynchScope() const { @@ -555,7 +635,7 @@ public: void setOrdering(AtomicOrdering Ordering) { assert(Ordering != NotAtomic && "atomicrmw instructions can only be atomic."); - setInstructionSubclassData((getSubclassDataFromInstruction() & ~28) | + setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) | (Ordering << 2)); } @@ -569,7 +649,7 @@ public: /// Returns the ordering constraint on this RMW. AtomicOrdering getOrdering() const { - return AtomicOrdering((getSubclassDataFromInstruction() & 28) >> 2); + return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7); } /// Returns whether this RMW is atomic between threads or only within a @@ -621,7 +701,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value) // checkGEPType - Simple wrapper function to give a better assertion failure // message on bad indexes for a gep instruction. // -static inline Type *checkGEPType(Type *Ty) { +inline Type *checkGEPType(Type *Ty) { assert(Ty && "Invalid GetElementPtrInst indices for type!"); return Ty; } @@ -698,6 +778,10 @@ public: static Type *getIndexedType(Type *Ptr, ArrayRef IdxList); static Type *getIndexedType(Type *Ptr, ArrayRef IdxList); + /// getIndexedType - Returns the address space used by the GEP pointer. + /// + static unsigned getAddressSpace(Value *Ptr); + inline op_iterator idx_begin() { return op_begin()+1; } inline const_op_iterator idx_begin() const { return op_begin()+1; } inline op_iterator idx_end() { return op_end(); } @@ -710,7 +794,7 @@ public: return getOperand(0); } static unsigned getPointerOperandIndex() { - return 0U; // get index for modifying correct operand + return 0U; // get index for modifying correct operand. } unsigned getPointerAddressSpace() const { @@ -719,10 +803,25 @@ public: /// getPointerOperandType - Method to return the pointer operand as a /// PointerType. - PointerType *getPointerOperandType() const { - return reinterpret_cast(getPointerOperand()->getType()); + Type *getPointerOperandType() const { + return getPointerOperand()->getType(); } + /// GetGEPReturnType - Returns the pointer type returned by the GEP + /// instruction, which may be a vector of pointers. 
+ static Type *getGEPReturnType(Value *Ptr, ArrayRef IdxList) { + Type *PtrTy = PointerType::get(checkGEPType( + getIndexedType(Ptr->getType(), IdxList)), + getAddressSpace(Ptr)); + // Vector GEP + if (Ptr->getType()->isVectorTy()) { + unsigned NumElem = cast(Ptr->getType())->getNumElements(); + return VectorType::get(PtrTy, NumElem); + } + + // Scalar GEP + return PtrTy; + } unsigned getNumIndices() const { // Note: always non-negative return getNumOperands() - 1; @@ -769,10 +868,7 @@ GetElementPtrInst::GetElementPtrInst(Value *Ptr, unsigned Values, const Twine &NameStr, Instruction *InsertBefore) - : Instruction(PointerType::get(checkGEPType( - getIndexedType(Ptr->getType(), IdxList)), - cast(Ptr->getType()) - ->getAddressSpace()), + : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr, OperandTraits::op_end(this) - Values, Values, InsertBefore) { @@ -783,10 +879,7 @@ GetElementPtrInst::GetElementPtrInst(Value *Ptr, unsigned Values, const Twine &NameStr, BasicBlock *InsertAtEnd) - : Instruction(PointerType::get(checkGEPType( - getIndexedType(Ptr->getType(), IdxList)), - cast(Ptr->getType()) - ->getAddressSpace()), + : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr, OperandTraits::op_end(this) - Values, Values, InsertAtEnd) { @@ -827,7 +920,7 @@ public: "Both operands to ICmp instruction are not of the same type!"); // Check that the operands are the right type assert((getOperand(0)->getType()->isIntOrIntVectorTy() || - getOperand(0)->getType()->isPointerTy()) && + getOperand(0)->getType()->getScalarType()->isPointerTy()) && "Invalid operand types for ICmp instruction"); } @@ -867,7 +960,7 @@ public: "Both operands to ICmp instruction are not of the same type!"); // Check that the operands are the right type assert((getOperand(0)->getType()->isIntOrIntVectorTy() || - getOperand(0)->getType()->isPointerTy()) && + getOperand(0)->getType()->getScalarType()->isPointerTy()) && "Invalid operand types for ICmp instruction"); } @@ -1189,6 +1282,15 @@ public: else removeAttribute(~0, Attribute::NoInline); } + /// @brief Return true if the call can return twice + bool canReturnTwice() const { + return paramHasAttr(~0, Attribute::ReturnsTwice); + } + void setCanReturnTwice(bool Value = true) { + if (Value) addAttribute(~0, Attribute::ReturnsTwice); + else removeAttribute(~0, Attribute::ReturnsTwice); + } + /// @brief Determine if the call does not access memory. bool doesNotAccessMemory() const { return paramHasAttr(~0, Attribute::ReadNone); @@ -1570,10 +1672,33 @@ public: /// Transparently provide more efficient getOperand methods. DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + Constant *getMask() const { + return reinterpret_cast(getOperand(2)); + } + /// getMaskValue - Return the index from the shuffle mask for the specified /// output result. This is either -1 if the element is undef or a number less /// than 2*numelements. - int getMaskValue(unsigned i) const; + static int getMaskValue(Constant *Mask, unsigned i); + + int getMaskValue(unsigned i) const { + return getMaskValue(getMask(), i); + } + + /// getShuffleMask - Return the full mask for this instruction, where each + /// element is the element number and undef's are returned as -1. 
+ static void getShuffleMask(Constant *Mask, SmallVectorImpl &Result); + + void getShuffleMask(SmallVectorImpl &Result) const { + return getShuffleMask(getMask(), Result); + } + + SmallVector getShuffleMask() const { + SmallVector Mask; + getShuffleMask(Mask); + return Mask; + } + // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const ShuffleVectorInst *) { return true; } @@ -2028,6 +2153,111 @@ struct OperandTraits : public HungoffOperandTraits<2> { DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value) +//===----------------------------------------------------------------------===// +// LandingPadInst Class +//===----------------------------------------------------------------------===// + +//===--------------------------------------------------------------------------- +/// LandingPadInst - The landingpad instruction holds all of the information +/// necessary to generate correct exception handling. The landingpad instruction +/// cannot be moved from the top of a landing pad block, which itself is +/// accessible only from the 'unwind' edge of an invoke. This uses the +/// SubclassData field in Value to store whether or not the landingpad is a +/// cleanup. +/// +class LandingPadInst : public Instruction { + /// ReservedSpace - The number of operands actually allocated. NumOperands is + /// the number actually in use. + unsigned ReservedSpace; + LandingPadInst(const LandingPadInst &LP); +public: + enum ClauseType { Catch, Filter }; +private: + void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + // Allocate space for exactly zero operands. + void *operator new(size_t s) { + return User::operator new(s, 0); + } + void growOperands(unsigned Size); + void init(Value *PersFn, unsigned NumReservedValues, const Twine &NameStr); + + explicit LandingPadInst(Type *RetTy, Value *PersonalityFn, + unsigned NumReservedValues, const Twine &NameStr, + Instruction *InsertBefore); + explicit LandingPadInst(Type *RetTy, Value *PersonalityFn, + unsigned NumReservedValues, const Twine &NameStr, + BasicBlock *InsertAtEnd); +protected: + virtual LandingPadInst *clone_impl() const; +public: + /// Constructors - NumReservedClauses is a hint for the number of incoming + /// clauses that this landingpad will have (use 0 if you really have no idea). + static LandingPadInst *Create(Type *RetTy, Value *PersonalityFn, + unsigned NumReservedClauses, + const Twine &NameStr = "", + Instruction *InsertBefore = 0); + static LandingPadInst *Create(Type *RetTy, Value *PersonalityFn, + unsigned NumReservedClauses, + const Twine &NameStr, BasicBlock *InsertAtEnd); + ~LandingPadInst(); + + /// Provide fast operand accessors + DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); + + /// getPersonalityFn - Get the personality function associated with this + /// landing pad. + Value *getPersonalityFn() const { return getOperand(0); } + + /// isCleanup - Return 'true' if this landingpad instruction is a + /// cleanup. I.e., it should be run when unwinding even if its landing pad + /// doesn't catch the exception. + bool isCleanup() const { return getSubclassDataFromInstruction() & 1; } + + /// setCleanup - Indicate that this landingpad instruction is a cleanup. + void setCleanup(bool V) { + setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | + (V ? 1 : 0)); + } + + /// addClause - Add a catch or filter clause to the landing pad. + void addClause(Value *ClauseVal); + + /// getClause - Get the value of the clause at index Idx. 
Use isCatch/isFilter + /// to determine what type of clause this is. + Value *getClause(unsigned Idx) const { return OperandList[Idx + 1]; } + + /// isCatch - Return 'true' if the clause and index Idx is a catch clause. + bool isCatch(unsigned Idx) const { + return !isa(OperandList[Idx + 1]->getType()); + } + + /// isFilter - Return 'true' if the clause and index Idx is a filter clause. + bool isFilter(unsigned Idx) const { + return isa(OperandList[Idx + 1]->getType()); + } + + /// getNumClauses - Get the number of clauses for this landing pad. + unsigned getNumClauses() const { return getNumOperands() - 1; } + + /// reserveClauses - Grow the size of the operand list to accommodate the new + /// number of clauses. + void reserveClauses(unsigned Size) { growOperands(Size); } + + // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const LandingPadInst *) { return true; } + static inline bool classof(const Instruction *I) { + return I->getOpcode() == Instruction::LandingPad; + } + static inline bool classof(const Value *V) { + return isa(V) && classof(cast(V)); + } +}; + +template <> +struct OperandTraits : public HungoffOperandTraits<2> { +}; + +DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value) //===----------------------------------------------------------------------===// // ReturnInst Class @@ -2175,6 +2405,13 @@ public: *(&Op<-1>() - idx) = (Value*)NewSucc; } + /// \brief Swap the successors of this branch instruction. + /// + /// Swaps the successors of the branch instruction. This also swaps any + /// branch weight metadata associated with the instruction so that it + /// continues to map correctly to each operand. + void swapSuccessors(); + // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const BranchInst *) { return true; } static inline bool classof(const Instruction *I) { @@ -2232,6 +2469,18 @@ class SwitchInst : public TerminatorInst { protected: virtual SwitchInst *clone_impl() const; public: + + template + class CaseIteratorT; + + typedef CaseIteratorT + ConstCaseIt; + + class CaseIt; + + // -2 + static const unsigned DefaultPseudoIndex = static_cast(~0L-1); + static SwitchInst *Create(Value *Value, BasicBlock *Default, unsigned NumCases, Instruction *InsertBefore = 0) { return new SwitchInst(Value, Default, NumCases, InsertBefore); @@ -2240,6 +2489,7 @@ public: unsigned NumCases, BasicBlock *InsertAtEnd) { return new SwitchInst(Value, Default, NumCases, InsertAtEnd); } + ~SwitchInst(); /// Provide fast operand accessors @@ -2253,61 +2503,101 @@ public: return cast(getOperand(1)); } - /// getNumCases - return the number of 'cases' in this switch instruction. - /// Note that case #0 is always the default case. - unsigned getNumCases() const { - return getNumOperands()/2; + void setDefaultDest(BasicBlock *DefaultCase) { + setOperand(1, reinterpret_cast(DefaultCase)); } - /// getCaseValue - Return the specified case value. Note that case #0, the - /// default destination, does not have a case value. - ConstantInt *getCaseValue(unsigned i) { - assert(i && i < getNumCases() && "Illegal case value to get!"); - return getSuccessorValue(i); + /// getNumCases - return the number of 'cases' in this switch instruction, + /// except the default case + unsigned getNumCases() const { + return getNumOperands()/2 - 1; } - /// getCaseValue - Return the specified case value. Note that case #0, the - /// default destination, does not have a case value. 
- const ConstantInt *getCaseValue(unsigned i) const { - assert(i && i < getNumCases() && "Illegal case value to get!"); - return getSuccessorValue(i); + /// Returns a read/write iterator that points to the first + /// case in SwitchInst. + CaseIt case_begin() { + return CaseIt(this, 0); } - + /// Returns a read-only iterator that points to the first + /// case in the SwitchInst. + ConstCaseIt case_begin() const { + return ConstCaseIt(this, 0); + } + + /// Returns a read/write iterator that points one past the last + /// in the SwitchInst. + CaseIt case_end() { + return CaseIt(this, getNumCases()); + } + /// Returns a read-only iterator that points one past the last + /// in the SwitchInst. + ConstCaseIt case_end() const { + return ConstCaseIt(this, getNumCases()); + } + /// Returns an iterator that points to the default case. + /// Note: this iterator allows to resolve successor only. Attempt + /// to resolve case value causes an assertion. + /// Also note, that increment and decrement also causes an assertion and + /// makes iterator invalid. + CaseIt case_default() { + return CaseIt(this, DefaultPseudoIndex); + } + ConstCaseIt case_default() const { + return ConstCaseIt(this, DefaultPseudoIndex); + } + /// findCaseValue - Search all of the case values for the specified constant. - /// If it is explicitly handled, return the case number of it, otherwise - /// return 0 to indicate that it is handled by the default handler. - unsigned findCaseValue(const ConstantInt *C) const { - for (unsigned i = 1, e = getNumCases(); i != e; ++i) - if (getCaseValue(i) == C) + /// If it is explicitly handled, return the case iterator of it, otherwise + /// return default case iterator to indicate + /// that it is handled by the default handler. + CaseIt findCaseValue(const ConstantInt *C) { + for (CaseIt i = case_begin(), e = case_end(); i != e; ++i) + if (i.getCaseValueEx().isSatisfies(IntItem::fromConstantInt(C))) return i; - return 0; + return case_default(); } - + ConstCaseIt findCaseValue(const ConstantInt *C) const { + for (ConstCaseIt i = case_begin(), e = case_end(); i != e; ++i) + if (i.getCaseValueEx().isSatisfies(IntItem::fromConstantInt(C))) + return i; + return case_default(); + } + /// findCaseDest - Finds the unique case value for a given successor. Returns /// null if the successor is not found, not unique, or is the default case. ConstantInt *findCaseDest(BasicBlock *BB) { if (BB == getDefaultDest()) return NULL; ConstantInt *CI = NULL; - for (unsigned i = 1, e = getNumCases(); i != e; ++i) { - if (getSuccessor(i) == BB) { + for (CaseIt i = case_begin(), e = case_end(); i != e; ++i) { + if (i.getCaseSuccessor() == BB) { if (CI) return NULL; // Multiple cases lead to BB. - else CI = getCaseValue(i); + else CI = i.getCaseValue(); } } return CI; } /// addCase - Add an entry to the switch instruction... - /// + /// @Deprecated + /// Note: + /// This action invalidates case_end(). Old case_end() iterator will + /// point to the added case. void addCase(ConstantInt *OnVal, BasicBlock *Dest); - - /// removeCase - This method removes the specified successor from the switch - /// instruction. Note that this cannot be used to remove the default - /// destination (successor #0). Also note that this operation may reorder the + + /// addCase - Add an entry to the switch instruction. + /// Note: + /// This action invalidates case_end(). Old case_end() iterator will + /// point to the added case. 
+ void addCase(IntegersSubset& OnVal, BasicBlock *Dest); + + /// removeCase - This method removes the specified case and its successor + /// from the switch instruction. Note that this operation may reorder the /// remaining cases at index idx and above. - /// - void removeCase(unsigned idx); + /// Note: + /// This action invalidates iterators for all cases following the one removed, + /// including the case_end() iterator. + void removeCase(CaseIt i); unsigned getNumSuccessors() const { return getNumOperands()/2; } BasicBlock *getSuccessor(unsigned idx) const { @@ -2318,15 +2608,156 @@ public: assert(idx < getNumSuccessors() && "Successor # out of range for switch!"); setOperand(idx*2+1, (Value*)NewSucc); } + + uint16_t hash() const { + uint32_t NumberOfCases = (uint32_t)getNumCases(); + uint16_t Hash = (0xFFFF & NumberOfCases) ^ (NumberOfCases >> 16); + for (ConstCaseIt i = case_begin(), e = case_end(); + i != e; ++i) { + uint32_t NumItems = (uint32_t)i.getCaseValueEx().getNumItems(); + Hash = (Hash << 1) ^ (0xFFFF & NumItems) ^ (NumItems >> 16); + } + return Hash; + } + + // Case iterators definition. + + template + class CaseIteratorT { + protected: + + SwitchInstTy *SI; + unsigned Index; + + public: + + typedef CaseIteratorT Self; + + /// Initializes case iterator for given SwitchInst and for given + /// case number. + CaseIteratorT(SwitchInstTy *SI, unsigned CaseNum) { + this->SI = SI; + Index = CaseNum; + } + + /// Initializes case iterator for given SwitchInst and for given + /// TerminatorInst's successor index. + static Self fromSuccessorIndex(SwitchInstTy *SI, unsigned SuccessorIndex) { + assert(SuccessorIndex < SI->getNumSuccessors() && + "Successor index # out of range!"); + return SuccessorIndex != 0 ? + Self(SI, SuccessorIndex - 1) : + Self(SI, DefaultPseudoIndex); + } + + /// Resolves case value for current case. + /// @Deprecated + ConstantIntTy *getCaseValue() { + assert(Index < SI->getNumCases() && "Index out the number of cases."); + IntegersSubset CaseRanges = + reinterpret_cast(SI->getOperand(2 + Index*2)); + IntegersSubset::Range R = CaseRanges.getItem(0); + + // FIXME: Currently we work with ConstantInt based cases. + // So return CaseValue as ConstantInt. + return R.getLow().toConstantInt(); + } - // getSuccessorValue - Return the value associated with the specified - // successor. - ConstantInt *getSuccessorValue(unsigned idx) const { - assert(idx < getNumSuccessors() && "Successor # out of range!"); - return reinterpret_cast(getOperand(idx*2)); - } + /// Resolves case value for current case. + IntegersSubset getCaseValueEx() { + assert(Index < SI->getNumCases() && "Index out the number of cases."); + return reinterpret_cast(SI->getOperand(2 + Index*2)); + } + + /// Resolves successor for current case. + BasicBlockTy *getCaseSuccessor() { + assert((Index < SI->getNumCases() || + Index == DefaultPseudoIndex) && + "Index out the number of cases."); + return SI->getSuccessor(getSuccessorIndex()); + } + + /// Returns number of current case. + unsigned getCaseIndex() const { return Index; } + + /// Returns TerminatorInst's successor index for current case successor. + unsigned getSuccessorIndex() const { + assert((Index == DefaultPseudoIndex || Index < SI->getNumCases()) && + "Index out the number of cases."); + return Index != DefaultPseudoIndex ? Index + 1 : 0; + } + + Self operator++() { + // Check index correctness after increment. + // Note: Index == getNumCases() means end(). 
+ assert(Index+1 <= SI->getNumCases() && "Index out the number of cases."); + ++Index; + return *this; + } + Self operator++(int) { + Self tmp = *this; + ++(*this); + return tmp; + } + Self operator--() { + // Check index correctness after decrement. + // Note: Index == getNumCases() means end(). + // Also allow "-1" iterator here. That will became valid after ++. + assert((Index == 0 || Index-1 <= SI->getNumCases()) && + "Index out the number of cases."); + --Index; + return *this; + } + Self operator--(int) { + Self tmp = *this; + --(*this); + return tmp; + } + bool operator==(const Self& RHS) const { + assert(RHS.SI == SI && "Incompatible operators."); + return RHS.Index == Index; + } + bool operator!=(const Self& RHS) const { + assert(RHS.SI == SI && "Incompatible operators."); + return RHS.Index != Index; + } + }; + + class CaseIt : public CaseIteratorT { + + typedef CaseIteratorT ParentTy; + + public: + + CaseIt(const ParentTy& Src) : ParentTy(Src) {} + CaseIt(SwitchInst *SI, unsigned CaseNum) : ParentTy(SI, CaseNum) {} + + /// Sets the new value for current case. + /// @Deprecated. + void setValue(ConstantInt *V) { + assert(Index < SI->getNumCases() && "Index out the number of cases."); + IntegersSubsetToBB Mapping; + // FIXME: Currently we work with ConstantInt based cases. + // So inititalize IntItem container directly from ConstantInt. + Mapping.add(IntItem::fromConstantInt(V)); + SI->setOperand(2 + Index*2, + reinterpret_cast((Constant*)Mapping.getCase())); + } + + /// Sets the new value for current case. + void setValueEx(IntegersSubset& V) { + assert(Index < SI->getNumCases() && "Index out the number of cases."); + SI->setOperand(2 + Index*2, reinterpret_cast((Constant*)V)); + } + + /// Sets the new successor for current case. + void setSuccessor(BasicBlock *S) { + SI->setSuccessor(getSuccessorIndex(), S); + } + }; // Methods for support type inquiry through isa, cast, and dyn_cast: + static inline bool classof(const SwitchInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Switch; @@ -2617,6 +3048,10 @@ public: Op<-1>() = reinterpret_cast(B); } + /// getLandingPadInst - Get the landingpad instruction from the landing pad + /// block (the unwind destination). + LandingPadInst *getLandingPadInst() const; + BasicBlock *getSuccessor(unsigned i) const { assert(i < 2 && "Successor # out of range for invoke!"); return i == 0 ? getNormalDest() : getUnwindDest(); @@ -2680,31 +3115,39 @@ InvokeInst::InvokeInst(Value *Func, DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InvokeInst, Value) //===----------------------------------------------------------------------===// -// UnwindInst Class +// ResumeInst Class //===----------------------------------------------------------------------===// //===--------------------------------------------------------------------------- -/// UnwindInst - Immediately exit the current function, unwinding the stack -/// until an invoke instruction is found. +/// ResumeInst - Resume the propagation of an exception. 
 ///
-class UnwindInst : public TerminatorInst {
-  void *operator new(size_t, unsigned);  // DO NOT IMPLEMENT
+class ResumeInst : public TerminatorInst {
+  ResumeInst(const ResumeInst &RI);
+
+  explicit ResumeInst(Value *Exn, Instruction *InsertBefore=0);
+  ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
 protected:
-  virtual UnwindInst *clone_impl() const;
+  virtual ResumeInst *clone_impl() const;
 public:
-  // allocate space for exactly zero operands
-  void *operator new(size_t s) {
-    return User::operator new(s, 0);
+  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = 0) {
+    return new(1) ResumeInst(Exn, InsertBefore);
+  }
+  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
+    return new(1) ResumeInst(Exn, InsertAtEnd);
   }
-  explicit UnwindInst(LLVMContext &C, Instruction *InsertBefore = 0);
-  explicit UnwindInst(LLVMContext &C, BasicBlock *InsertAtEnd);
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+  /// Convenience accessor.
+  Value *getValue() const { return Op<0>(); }
 
   unsigned getNumSuccessors() const { return 0; }
 
   // Methods for support type inquiry through isa, cast, and dyn_cast:
-  static inline bool classof(const UnwindInst *) { return true; }
+  static inline bool classof(const ResumeInst *) { return true; }
   static inline bool classof(const Instruction *I) {
-    return I->getOpcode() == Instruction::Unwind;
+    return I->getOpcode() == Instruction::Resume;
   }
   static inline bool classof(const Value *V) {
     return isa<Instruction>(V) && classof(cast<Instruction>(V));
   }
@@ -2715,6 +3158,13 @@ private:
   virtual void setSuccessorV(unsigned idx, BasicBlock *B);
 };
 
+template <>
+struct OperandTraits<ResumeInst> :
+  public FixedNumOperandTraits<ResumeInst, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
+
 //===----------------------------------------------------------------------===//
 //                           UnreachableInst Class
 //===----------------------------------------------------------------------===//
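
A minimal usage sketch of the interfaces introduced above (the atomic LoadInst/StoreInst constructors, the SwitchInst case iterators, and the LandingPadInst/ResumeInst pair), assuming an LLVM 3.1-era tree. The basic block, pointer, condition, landing pad type, and personality function arguments are hypothetical placeholders supplied by the caller; the snippet is illustrative and not part of the patch itself.

#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"

using namespace llvm;

// Exercises the APIs declared in the diff above. All parameters are
// hypothetical: Cond is assumed to be an i32 value, Ptr an i32*, and
// PersFn a personality function appropriate for LPadTy.
static void exampleUses(LLVMContext &Ctx, BasicBlock *BB, Value *Ptr,
                        Value *Cond, BasicBlock *DefaultBB,
                        BasicBlock *CaseBB, BasicBlock *LPadBB,
                        Type *LPadTy, Value *PersFn) {
  // Acquire load with explicit alignment, ordered across threads.
  LoadInst *LI = new LoadInst(Ptr, "val", /*isVolatile=*/false, /*Align=*/4,
                              Acquire, CrossThread, BB);

  // Matching release store back to the same location.
  new StoreInst(LI, Ptr, /*isVolatile=*/false, /*Align=*/4,
                Release, CrossThread, BB);

  // A switch with one explicit case, walked with the new case iterators;
  // the default destination is not visited by case_begin()/case_end().
  SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, BB);
  SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 42), CaseBB);
  for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
       i != e; ++i) {
    ConstantInt *CV = i.getCaseValue();       // ConstantInt view (deprecated)
    BasicBlock *Succ = i.getCaseSuccessor();  // target block for this case
    (void)CV; (void)Succ;
  }

  // Cleanup-only landing pad followed by a resume, replacing the removed
  // 'unwind' terminator.
  LandingPadInst *LP = LandingPadInst::Create(LPadTy, PersFn,
                                              /*NumReservedClauses=*/0,
                                              "lpad", LPadBB);
  LP->setCleanup(true);
  ResumeInst::Create(LP, LPadBB);
}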