From: Evan Cheng
Date: Wed, 7 Dec 2011 07:15:52 +0000 (+0000)
Subject: Add bundle aware API for querying instruction properties and switch the code
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=5a96b3dad2f634c9081c8b2b6c2575441dc5a2bd;p=oota-llvm.git

Add bundle aware API for querying instruction properties and switch the code
generator to it. For non-bundle instructions, these behave exactly the same
as the MC layer API.

For properties like mayLoad / mayStore, the query looks into the bundle and
returns true if any of the bundled instructions has the property.

For properties like isPredicable, the query returns true only if *all* of
the bundled instructions have the property.

For properties like canFoldAsLoad and isCompare, the query conservatively
returns false for bundles. (Two short illustrative sketches of these
semantics follow the patch.)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@146026 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h
index 1558c4f9573..00b41e74c20 100644
--- a/include/llvm/CodeGen/MachineInstr.h
+++ b/include/llvm/CodeGen/MachineInstr.h
@@ -274,14 +274,267 @@ public:
 return MemRefsEnd - MemRefs == 1;
 }
- /// API for querying MachineInstr properties. These are bundle aware.
+ /// API for querying MachineInstr properties. They are the same as MCInstrDesc
+ /// queries but they are bundle aware.
+
+ /// hasProperty - Return true if the instruction (or in the case of a bundle,
+ /// the instructions inside the bundle) has the specified property.
+ /// The first argument is the property being queried.
+ /// The second argument indicates whether the query should look inside
+ /// instruction bundles.
+ /// If the third argument is true, then the query returns true when *any*
+ /// of the bundled instructions has the queried property. If it's false, then
+ /// the query returns true only if *all* of the instructions have the property.
+ bool hasProperty(unsigned Flag,
+ bool PeekInBundle = true, bool IsOr = true) const;
+
+ /// isVariadic - Return true if this instruction can have a variable number of
+ /// operands. In this case, the variable operands will be after the normal
+ /// operands but before the implicit definitions and uses (if any are
+ /// present).
+ bool isVariadic() const {
+ return hasProperty(MCID::Variadic, false);
+ }
+
+ /// hasOptionalDef - Return true if this instruction has an optional
+ /// definition, e.g. ARM instructions which can set the condition code if the
+ /// 's' bit is set.
+ bool hasOptionalDef() const {
+ return hasProperty(MCID::HasOptionalDef, false);
+ }
+
+ /// isPseudo - Return true if this is a pseudo instruction that doesn't
+ /// correspond to a real machine instruction.
 ///
- bool hasProperty(unsigned short Flag) const;
+ bool isPseudo() const {
+ return hasProperty(MCID::Pseudo, false);
+ }
+
+ bool isReturn() const {
+ return hasProperty(MCID::Return);
+ }
+
+ bool isCall() const {
+ return hasProperty(MCID::Call);
+ }
+
+ /// isBarrier - Returns true if the specified instruction stops control flow
+ /// from executing the instruction immediately following it. Examples include
+ /// unconditional branches and return instructions.
+ bool isBarrier() const {
+ return hasProperty(MCID::Barrier);
+ }
+
+ /// isTerminator - Returns true if this instruction is part of the terminator
+ /// for a basic block. Typically this is things like return and branch
+ /// instructions.
+ ///
+ /// Various passes use this to insert code into the bottom of a basic block,
+ /// but before control flow occurs.
+ bool isTerminator() const {
+ return hasProperty(MCID::Terminator);
+ }
+
+ /// isBranch - Returns true if this is a conditional, unconditional, or
+ /// indirect branch. Predicates below can be used to discriminate between
+ /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
+ /// get more information.
+ bool isBranch() const {
+ return hasProperty(MCID::Branch);
+ }
+
+ /// isIndirectBranch - Return true if this is an indirect branch, such as a
+ /// branch through a register.
+ bool isIndirectBranch() const {
+ return hasProperty(MCID::IndirectBranch);
+ }
+
+ /// isConditionalBranch - Return true if this is a branch which may fall
+ /// through to the next instruction or may transfer control flow to some other
+ /// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
+ /// information about this branch.
+ bool isConditionalBranch() const {
+ return isBranch() && !isBarrier() && !isIndirectBranch();
+ }
+
+ /// isUnconditionalBranch - Return true if this is a branch which always
+ /// transfers control flow to some other block. The
+ /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
+ /// about this branch.
+ bool isUnconditionalBranch() const {
+ return isBranch() && isBarrier() && !isIndirectBranch();
+ }
+
+ /// isPredicable - Return true if this instruction has a predicate operand
+ /// that controls execution. It may be set to 'always', or may be set to other
+ /// values. There are various methods in TargetInstrInfo that can be used to
+ /// control and modify the predicate in this instruction.
+ bool isPredicable() const {
+ // If it's a bundle then all bundled instructions must be predicable for this
+ // to return true.
+ return hasProperty(MCID::Predicable, true, false);
+ }
+
+ /// isCompare - Return true if this instruction is a comparison.
+ bool isCompare() const {
+ return hasProperty(MCID::Compare, false);
+ }
+
+ /// isMoveImmediate - Return true if this instruction is a move immediate
+ /// (including conditional moves) instruction.
+ bool isMoveImmediate() const {
+ return hasProperty(MCID::MoveImm, false);
+ }
+
+ /// isBitcast - Return true if this instruction is a bitcast instruction.
+ ///
+ bool isBitcast() const {
+ return hasProperty(MCID::Bitcast, false);
+ }
+
+ /// isNotDuplicable - Return true if this instruction cannot be safely
+ /// duplicated. For example, if the instruction has a unique label attached
+ /// to it, duplicating it would cause multiple definition errors.
+ bool isNotDuplicable() const {
+ return hasProperty(MCID::NotDuplicable);
+ }
+
+ /// hasDelaySlot - Returns true if the specified instruction has a delay slot
+ /// which must be filled by the code generator.
+ bool hasDelaySlot() const {
+ return hasProperty(MCID::DelaySlot);
+ }
+
+ /// canFoldAsLoad - Return true for instructions that can be folded as
+ /// memory operands in other instructions. The most common use for this
+ /// is instructions that are simple loads from memory that don't modify
+ /// the loaded value in any way, but it can also be used for instructions
+ /// that can be expressed as constant-pool loads, such as V_SETALLONES
+ /// on x86, to allow them to be folded when it is beneficial.
+ /// This should only be set on instructions that return a value in their
+ /// only virtual register definition.
+ bool canFoldAsLoad() const {
+ return hasProperty(MCID::FoldableAsLoad, false);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Side Effect Analysis
+ //===--------------------------------------------------------------------===//
+
+ /// mayLoad - Return true if this instruction could possibly read memory.
+ /// Instructions with this flag set are not necessarily simple load
+ /// instructions, they may load a value and modify it, for example.
+ bool mayLoad() const {
+ return hasProperty(MCID::MayLoad);
+ }
+
+
+ /// mayStore - Return true if this instruction could possibly modify memory.
+ /// Instructions with this flag set are not necessarily simple store
+ /// instructions, they may store a modified value based on their operands, or
+ /// may not actually modify anything, for example.
+ bool mayStore() const {
+ return hasProperty(MCID::MayStore);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Flags that indicate whether an instruction can be modified by a method.
+ //===--------------------------------------------------------------------===//
+
+ /// isCommutable - Return true if this may be a 2- or 3-address
+ /// instruction (of the form "X = op Y, Z, ..."), which produces the same
+ /// result if Y and Z are exchanged. If this flag is set, then the
+ /// TargetInstrInfo::commuteInstruction method may be used to hack on the
+ /// instruction.
+ ///
+ /// Note that this flag may be set on instructions that are only commutable
+ /// sometimes. In these cases, the call to commuteInstruction will fail.
+ /// Also note that some instructions require non-trivial modification to
+ /// commute them.
+ bool isCommutable() const {
+ return hasProperty(MCID::Commutable, false);
+ }
+
+ /// isConvertibleTo3Addr - Return true if this is a 2-address instruction
+ /// which can be changed into a 3-address instruction if needed. Doing this
+ /// transformation can be profitable in the register allocator, because it
+ /// means that the instruction can use a 2-address form if possible, but
+ /// degrade into a less efficient form if the source and dest register cannot
+ /// be assigned to the same register. For example, this allows the x86
+ /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
+ /// is the same speed as the shift but has bigger code size.
+ ///
+ /// If this returns true, then the target must implement the
+ /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
+ /// is allowed to fail if the transformation isn't valid for this specific
+ /// instruction (e.g. shl reg, 4 on x86).
+ ///
+ bool isConvertibleTo3Addr() const {
+ return hasProperty(MCID::ConvertibleTo3Addr, false);
+ }
+
+ /// usesCustomInsertionHook - Return true if this instruction requires
+ /// custom insertion support when the DAG scheduler is inserting it into a
+ /// machine basic block. If this is true for the instruction, it basically
+ /// means that it is a pseudo instruction used at SelectionDAG time that is
+ /// expanded out into magic code by the target when MachineInstrs are formed.
+ ///
+ /// If this is true, the TargetLowering::EmitInstrWithCustomInserter method
+ /// is used to insert this into the MachineBasicBlock.
+ bool usesCustomInsertionHook() const {
+ return hasProperty(MCID::UsesCustomInserter, false);
+ }
+
+ /// hasPostISelHook - Return true if this instruction requires *adjustment*
+ /// after instruction selection by calling a target hook. For example, this
+ /// can be used to fill in the ARM 's' optional operand depending on whether
+ /// the conditional flag register is used.
+ bool hasPostISelHook() const {
+ return hasProperty(MCID::HasPostISelHook, false);
+ }
+
+ /// isRematerializable - Returns true if this instruction is a candidate for
+ /// remat. This flag is deprecated, please don't use it anymore. If this
+ /// flag is set, the isReallyTriviallyReMaterializable() method is called to
+ /// verify the instruction is really rematable.
+ bool isRematerializable() const {
+ // It's only possible to re-mat a bundle if all bundled instructions are
+ // re-materializable.
+ return hasProperty(MCID::Rematerializable, true, false);
+ }
+
+ /// isAsCheapAsAMove - Returns true if this instruction has the same cost as
+ /// (or less than) a move instruction. This is useful during certain types of
+ /// optimizations (e.g., remat during two-address conversion or machine licm)
+ /// where we would like to remat or hoist the instruction, but not if it costs
+ /// more than moving the instruction into the appropriate register. Note, we
+ /// are not marking copies from and to the same register class with this flag.
+ bool isAsCheapAsAMove() const {
+ // Only returns true for a bundle if all bundled instructions are cheap.
+ // FIXME: This probably requires a target hook.
+ return hasProperty(MCID::CheapAsAMove, true, true);
+ }
+
+ /// hasExtraSrcRegAllocReq - Returns true if this instruction's source operands
+ /// have special register allocation requirements that are not captured by the
+ /// operand register classes. e.g. ARM::STRD's two source registers must be an
+ /// even / odd pair, ARM::STM registers have to be in ascending order.
+ /// Post-register allocation passes should not attempt to change allocations
+ /// for sources of instructions with this flag.
+ bool hasExtraSrcRegAllocReq() const {
+ return hasProperty(MCID::ExtraSrcRegAllocReq);
+ }
+
+ /// hasExtraDefRegAllocReq - Returns true if this instruction's def operands
+ /// have special register allocation requirements that are not captured by the
+ /// operand register classes. e.g. ARM::LDRD's two def registers must be an
+ /// even / odd pair, ARM::LDM registers have to be in ascending order.
+ /// Post-register allocation passes should not attempt to change allocations
+ /// for definitions of instructions with this flag.
+ bool hasExtraDefRegAllocReq() const {
+ return hasProperty(MCID::ExtraDefRegAllocReq);
+ }
+
+
 enum MICheckType {
 CheckDefs, // Check all operands for equality
 CheckKillDead, // Check all operands including kill / dead markers
diff --git a/include/llvm/MC/MCInstrDesc.h b/include/llvm/MC/MCInstrDesc.h
index 6c33bfa2ecd..6d71cf570ae 100644
--- a/include/llvm/MC/MCInstrDesc.h
+++ b/include/llvm/MC/MCInstrDesc.h
@@ -186,7 +186,7 @@ public:
 /// getFlags - Return flags of this instruction.
 ///
- unsigned short getFlags() const { return Flags; }
+ unsigned getFlags() const { return Flags; }
 /// isVariadic - Return true if this instruction can have a variable number of
 /// operands. In this case, the variable operands will be after the normal
@@ -202,84 +202,6 @@ public:
 return Flags & (1 << MCID::HasOptionalDef);
 }
- /// getImplicitUses - Return a list of registers that are potentially
- /// read by any instance of this machine instruction. For example, on X86,
- /// the "adc" instruction adds two register operands and adds the carry bit in
- /// from the flags register. In this case, the instruction is marked as
- /// implicitly reading the flags.
Likewise, the variable shift instruction on - /// X86 is marked as implicitly reading the 'CL' register, which it always - /// does. - /// - /// This method returns null if the instruction has no implicit uses. - const unsigned *getImplicitUses() const { - return ImplicitUses; - } - - /// getNumImplicitUses - Return the number of implicit uses this instruction - /// has. - unsigned getNumImplicitUses() const { - if (ImplicitUses == 0) return 0; - unsigned i = 0; - for (; ImplicitUses[i]; ++i) /*empty*/; - return i; - } - - /// getImplicitDefs - Return a list of registers that are potentially - /// written by any instance of this machine instruction. For example, on X86, - /// many instructions implicitly set the flags register. In this case, they - /// are marked as setting the FLAGS. Likewise, many instructions always - /// deposit their result in a physical register. For example, the X86 divide - /// instruction always deposits the quotient and remainder in the EAX/EDX - /// registers. For that instruction, this will return a list containing the - /// EAX/EDX/EFLAGS registers. - /// - /// This method returns null if the instruction has no implicit defs. - const unsigned *getImplicitDefs() const { - return ImplicitDefs; - } - - /// getNumImplicitDefs - Return the number of implicit defs this instruction - /// has. - unsigned getNumImplicitDefs() const { - if (ImplicitDefs == 0) return 0; - unsigned i = 0; - for (; ImplicitDefs[i]; ++i) /*empty*/; - return i; - } - - /// hasImplicitUseOfPhysReg - Return true if this instruction implicitly - /// uses the specified physical register. - bool hasImplicitUseOfPhysReg(unsigned Reg) const { - if (const unsigned *ImpUses = ImplicitUses) - for (; *ImpUses; ++ImpUses) - if (*ImpUses == Reg) return true; - return false; - } - - /// hasImplicitDefOfPhysReg - Return true if this instruction implicitly - /// defines the specified physical register. - bool hasImplicitDefOfPhysReg(unsigned Reg) const { - if (const unsigned *ImpDefs = ImplicitDefs) - for (; *ImpDefs; ++ImpDefs) - if (*ImpDefs == Reg) return true; - return false; - } - - /// getSchedClass - Return the scheduling class for this instruction. The - /// scheduling class is an index into the InstrItineraryData table. This - /// returns zero if there is no known scheduling information for the - /// instruction. - /// - unsigned getSchedClass() const { - return SchedClass; - } - - /// getSize - Return the number of bytes in the encoding of this instruction, - /// or zero if the encoding size cannot be known from the opcode. - unsigned getSize() const { - return Size; - } - /// isPseudo - Return true if this is a pseudo instruction that doesn't /// correspond to a real machine instruction. /// @@ -302,18 +224,6 @@ public: return Flags & (1 << MCID::Barrier); } - /// findFirstPredOperandIdx() - Find the index of the first operand in the - /// operand list that is used to represent the predicate. It returns -1 if - /// none is found. - int findFirstPredOperandIdx() const { - if (isPredicable()) { - for (unsigned i = 0, e = getNumOperands(); i != e; ++i) - if (OpInfo[i].isPredicate()) - return i; - } - return -1; - } - /// isTerminator - Returns true if this instruction part of the terminator for /// a basic block. Typically this is things like return and branch /// instructions. 
@@ -534,6 +444,97 @@ public: bool hasExtraDefRegAllocReq() const { return Flags & (1 << MCID::ExtraDefRegAllocReq); } + + + /// getImplicitUses - Return a list of registers that are potentially + /// read by any instance of this machine instruction. For example, on X86, + /// the "adc" instruction adds two register operands and adds the carry bit in + /// from the flags register. In this case, the instruction is marked as + /// implicitly reading the flags. Likewise, the variable shift instruction on + /// X86 is marked as implicitly reading the 'CL' register, which it always + /// does. + /// + /// This method returns null if the instruction has no implicit uses. + const unsigned *getImplicitUses() const { + return ImplicitUses; + } + + /// getNumImplicitUses - Return the number of implicit uses this instruction + /// has. + unsigned getNumImplicitUses() const { + if (ImplicitUses == 0) return 0; + unsigned i = 0; + for (; ImplicitUses[i]; ++i) /*empty*/; + return i; + } + + /// getImplicitDefs - Return a list of registers that are potentially + /// written by any instance of this machine instruction. For example, on X86, + /// many instructions implicitly set the flags register. In this case, they + /// are marked as setting the FLAGS. Likewise, many instructions always + /// deposit their result in a physical register. For example, the X86 divide + /// instruction always deposits the quotient and remainder in the EAX/EDX + /// registers. For that instruction, this will return a list containing the + /// EAX/EDX/EFLAGS registers. + /// + /// This method returns null if the instruction has no implicit defs. + const unsigned *getImplicitDefs() const { + return ImplicitDefs; + } + + /// getNumImplicitDefs - Return the number of implicit defs this instruction + /// has. + unsigned getNumImplicitDefs() const { + if (ImplicitDefs == 0) return 0; + unsigned i = 0; + for (; ImplicitDefs[i]; ++i) /*empty*/; + return i; + } + + /// hasImplicitUseOfPhysReg - Return true if this instruction implicitly + /// uses the specified physical register. + bool hasImplicitUseOfPhysReg(unsigned Reg) const { + if (const unsigned *ImpUses = ImplicitUses) + for (; *ImpUses; ++ImpUses) + if (*ImpUses == Reg) return true; + return false; + } + + /// hasImplicitDefOfPhysReg - Return true if this instruction implicitly + /// defines the specified physical register. + bool hasImplicitDefOfPhysReg(unsigned Reg) const { + if (const unsigned *ImpDefs = ImplicitDefs) + for (; *ImpDefs; ++ImpDefs) + if (*ImpDefs == Reg) return true; + return false; + } + + /// getSchedClass - Return the scheduling class for this instruction. The + /// scheduling class is an index into the InstrItineraryData table. This + /// returns zero if there is no known scheduling information for the + /// instruction. + /// + unsigned getSchedClass() const { + return SchedClass; + } + + /// getSize - Return the number of bytes in the encoding of this instruction, + /// or zero if the encoding size cannot be known from the opcode. + unsigned getSize() const { + return Size; + } + + /// findFirstPredOperandIdx() - Find the index of the first operand in the + /// operand list that is used to represent the predicate. It returns -1 if + /// none is found. 
+ int findFirstPredOperandIdx() const { + if (isPredicable()) { + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + if (OpInfo[i].isPredicate()) + return i; + } + return -1; + } }; } // end namespace llvm diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/lib/CodeGen/AggressiveAntiDepBreaker.cpp index 25842a7876a..6cf4571d730 100644 --- a/lib/CodeGen/AggressiveAntiDepBreaker.cpp +++ b/lib/CodeGen/AggressiveAntiDepBreaker.cpp @@ -148,7 +148,7 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) { assert(State == NULL); State = new AggressiveAntiDepState(TRI->getNumRegs(), BB); - bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn()); + bool IsReturnBlock = (!BB->empty() && BB->back().isReturn()); std::vector &KillIndices = State->GetKillIndices(); std::vector &DefIndices = State->GetDefIndices(); @@ -384,7 +384,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI, // If MI's defs have a special allocation requirement, don't allow // any def registers to be changed. Also assume all registers // defined in a call must not be changed (ABI). - if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() || + if (MI->isCall() || MI->hasExtraDefRegAllocReq() || TII->isPredicated(MI)) { DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)"); State->UnionGroups(Reg, 0); @@ -451,8 +451,8 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI, // instruction which may not be executed. The second R6 def may or may not // re-define R6 so it's not safe to change it since the last R6 use cannot be // changed. - bool Special = MI->getDesc().isCall() || - MI->getDesc().hasExtraSrcRegAllocReq() || + bool Special = MI->isCall() || + MI->hasExtraSrcRegAllocReq() || TII->isPredicated(MI); // Scan the register uses for this instruction and update diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 23bf602bade..d13eae098f4 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -2087,7 +2087,7 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const { MachineInstr &MI = *II; // If it is not a simple branch, we are in a table somewhere. - if (!MI.getDesc().isBranch() || MI.getDesc().isIndirectBranch()) + if (!MI.isBranch() || MI.isIndirectBranch()) return false; // If we are the operands of one of the branches, this is not diff --git a/lib/CodeGen/AsmPrinter/DwarfException.cpp b/lib/CodeGen/AsmPrinter/DwarfException.cpp index e0f2e85f623..bf7f7eec9fe 100644 --- a/lib/CodeGen/AsmPrinter/DwarfException.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfException.cpp @@ -184,7 +184,7 @@ ComputeActionsTable(const SmallVectorImpl &LandingPads, /// CallToNoUnwindFunction - Return `true' if this is a call to a function /// marked `nounwind'. Return `false' otherwise. 
bool DwarfException::CallToNoUnwindFunction(const MachineInstr *MI) { - assert(MI->getDesc().isCall() && "This should be a call instruction!"); + assert(MI->isCall() && "This should be a call instruction!"); bool MarkedNoUnwind = false; bool SawFunc = false; @@ -243,7 +243,7 @@ ComputeCallSiteTable(SmallVectorImpl &CallSites, for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end(); MI != E; ++MI) { if (!MI->isLabel()) { - if (MI->getDesc().isCall()) + if (MI->isCall()) SawPotentiallyThrowing |= !CallToNoUnwindFunction(MI); continue; } diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp index 5dec3681127..0d88e6c2119 100644 --- a/lib/CodeGen/BranchFolding.cpp +++ b/lib/CodeGen/BranchFolding.cpp @@ -432,10 +432,9 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I, for (; I != E; ++I) { if (I->isDebugValue()) continue; - const MCInstrDesc &MCID = I->getDesc(); - if (MCID.isCall()) + if (I->isCall()) Time += 10; - else if (MCID.mayLoad() || MCID.mayStore()) + else if (I->mayLoad() || I->mayStore()) Time += 2; else ++Time; @@ -502,7 +501,7 @@ static unsigned CountTerminators(MachineBasicBlock *MBB, break; } --I; - if (!I->getDesc().isTerminator()) break; + if (!I->isTerminator()) break; ++NumTerms; } return NumTerms; @@ -550,8 +549,8 @@ static bool ProfitableToMerge(MachineBasicBlock *MBB1, // heuristics. unsigned EffectiveTailLen = CommonTailLen; if (SuccBB && MBB1 != PredBB && MBB2 != PredBB && - !MBB1->back().getDesc().isBarrier() && - !MBB2->back().getDesc().isBarrier()) + !MBB1->back().isBarrier() && + !MBB2->back().isBarrier()) ++EffectiveTailLen; // Check if the common tail is long enough to be worthwhile. @@ -983,7 +982,7 @@ static bool IsBranchOnlyBlock(MachineBasicBlock *MBB) { if (!MBBI->isDebugValue()) break; } - return (MBBI->getDesc().isBranch()); + return (MBBI->isBranch()); } /// IsBetterFallthrough - Return true if it would be clearly better to @@ -1011,7 +1010,7 @@ static bool IsBetterFallthrough(MachineBasicBlock *MBB1, MachineBasicBlock::iterator MBB2I = --MBB2->end(); while (MBB2I->isDebugValue()) --MBB2I; - return MBB2I->getDesc().isCall() && !MBB1I->getDesc().isCall(); + return MBB2I->isCall() && !MBB1I->isCall(); } /// OptimizeBlock - Analyze and optimize control flow related to the specified diff --git a/lib/CodeGen/CriticalAntiDepBreaker.cpp b/lib/CodeGen/CriticalAntiDepBreaker.cpp index 84c4d59c0e4..128143e70fb 100644 --- a/lib/CodeGen/CriticalAntiDepBreaker.cpp +++ b/lib/CodeGen/CriticalAntiDepBreaker.cpp @@ -54,7 +54,7 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) { // Clear "do not change" set. KeepRegs.clear(); - bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn()); + bool IsReturnBlock = (!BB->empty() && BB->back().isReturn()); // Determine the live-out physregs for this block. if (IsReturnBlock) { @@ -193,8 +193,8 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) { // instruction which may not be executed. The second R6 def may or may not // re-define R6 so it's not safe to change it since the last R6 use cannot be // changed. - bool Special = MI->getDesc().isCall() || - MI->getDesc().hasExtraSrcRegAllocReq() || + bool Special = MI->isCall() || + MI->hasExtraSrcRegAllocReq() || TII->isPredicated(MI); // Scan the register operands for this instruction and update @@ -572,7 +572,7 @@ BreakAntiDependencies(const std::vector& SUnits, // If MI's defs have a special allocation requirement, don't allow // any def registers to be changed. 
Also assume all registers // defined in a call must not be changed (ABI). - if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() || + if (MI->isCall() || MI->hasExtraDefRegAllocReq() || TII->isPredicated(MI)) // If this instruction's defs have special allocation requirement, don't // break this anti-dependency. diff --git a/lib/CodeGen/DeadMachineInstructionElim.cpp b/lib/CodeGen/DeadMachineInstructionElim.cpp index 6de6c0cb81b..ba135e19f4d 100644 --- a/lib/CodeGen/DeadMachineInstructionElim.cpp +++ b/lib/CodeGen/DeadMachineInstructionElim.cpp @@ -102,7 +102,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) { LivePhysRegs = ReservedRegs; // Also add any explicit live-out physregs for this block. - if (!MBB->empty() && MBB->back().getDesc().isReturn()) + if (!MBB->empty() && MBB->back().isReturn()) for (MachineRegisterInfo::liveout_iterator LOI = MRI->liveout_begin(), LOE = MRI->liveout_end(); LOI != LOE; ++LOI) { unsigned Reg = *LOI; diff --git a/lib/CodeGen/ExecutionDepsFix.cpp b/lib/CodeGen/ExecutionDepsFix.cpp index 300f0371211..4ec75cdfec9 100644 --- a/lib/CodeGen/ExecutionDepsFix.cpp +++ b/lib/CodeGen/ExecutionDepsFix.cpp @@ -454,7 +454,7 @@ void ExeDepsFix::processDefs(MachineInstr *MI, bool Kill) { assert(!MI->isDebugValue() && "Won't process debug values"); const MCInstrDesc &MCID = MI->getDesc(); for (unsigned i = 0, - e = MCID.isVariadic() ? MI->getNumOperands() : MCID.getNumDefs(); + e = MI->isVariadic() ? MI->getNumOperands() : MCID.getNumDefs(); i != e; ++i) { MachineOperand &MO = MI->getOperand(i); if (!MO.isReg()) diff --git a/lib/CodeGen/ExpandISelPseudos.cpp b/lib/CodeGen/ExpandISelPseudos.cpp index a67140ece4a..b5f107d5cdf 100644 --- a/lib/CodeGen/ExpandISelPseudos.cpp +++ b/lib/CodeGen/ExpandISelPseudos.cpp @@ -62,8 +62,7 @@ bool ExpandISelPseudos::runOnMachineFunction(MachineFunction &MF) { MachineInstr *MI = MBBI++; // If MI is a pseudo, expand it. - const MCInstrDesc &MCID = MI->getDesc(); - if (MCID.usesCustomInsertionHook()) { + if (MI->usesCustomInsertionHook()) { Changed = true; MachineBasicBlock *NewMBB = TLI->EmitInstrWithCustomInserter(MI, MBB); diff --git a/lib/CodeGen/ExpandPostRAPseudos.cpp b/lib/CodeGen/ExpandPostRAPseudos.cpp index e2a14a8dfd9..3d23db0b8fb 100644 --- a/lib/CodeGen/ExpandPostRAPseudos.cpp +++ b/lib/CodeGen/ExpandPostRAPseudos.cpp @@ -207,7 +207,7 @@ bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) { ++mi; // Only expand pseudos. - if (!MI->getDesc().isPseudo()) + if (!MI->isPseudo()) continue; // Give targets a chance to expand even standard pseudos. diff --git a/lib/CodeGen/GCStrategy.cpp b/lib/CodeGen/GCStrategy.cpp index 9349797e228..e2c71322da5 100644 --- a/lib/CodeGen/GCStrategy.cpp +++ b/lib/CodeGen/GCStrategy.cpp @@ -386,7 +386,7 @@ void MachineCodeAnalysis::FindSafePoints(MachineFunction &MF) { BBE = MF.end(); BBI != BBE; ++BBI) for (MachineBasicBlock::iterator MI = BBI->begin(), ME = BBI->end(); MI != ME; ++MI) - if (MI->getDesc().isCall()) + if (MI->isCall()) VisitCallPoint(MI); } diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp index d888939d4c2..bd31fdf53a6 100644 --- a/lib/CodeGen/IfConversion.cpp +++ b/lib/CodeGen/IfConversion.cpp @@ -573,12 +573,12 @@ bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI, // blocks, move the end iterators up past any branch instructions. 
while (TIE != TIB) { --TIE; - if (!TIE->getDesc().isBranch()) + if (!TIE->isBranch()) break; } while (FIE != FIB) { --FIE; - if (!FIE->getDesc().isBranch()) + if (!FIE->isBranch()) break; } @@ -651,12 +651,11 @@ void IfConverter::ScanInstructions(BBInfo &BBI) { if (I->isDebugValue()) continue; - const MCInstrDesc &MCID = I->getDesc(); - if (MCID.isNotDuplicable()) + if (I->isNotDuplicable()) BBI.CannotBeCopied = true; bool isPredicated = TII->isPredicated(I); - bool isCondBr = BBI.IsBrAnalyzable && MCID.isConditionalBranch(); + bool isCondBr = BBI.IsBrAnalyzable && I->isConditionalBranch(); if (!isCondBr) { if (!isPredicated) { @@ -1395,9 +1394,8 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI, for (MachineBasicBlock::iterator I = FromBBI.BB->begin(), E = FromBBI.BB->end(); I != E; ++I) { - const MCInstrDesc &MCID = I->getDesc(); // Do not copy the end of the block branches. - if (IgnoreBr && MCID.isBranch()) + if (IgnoreBr && I->isBranch()) break; MachineInstr *MI = MF.CloneMachineInstr(I); diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp index 59907d9c4e0..8f38887cb1b 100644 --- a/lib/CodeGen/InlineSpiller.cpp +++ b/lib/CodeGen/InlineSpiller.cpp @@ -759,7 +759,7 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) { // Find all spills and copies of VNI. for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg); MachineInstr *MI = UI.skipInstruction();) { - if (!MI->isCopy() && !MI->getDesc().mayStore()) + if (!MI->isCopy() && !MI->mayStore()) continue; SlotIndex Idx = LIS.getInstructionIndex(MI); if (LI->getVNInfoAt(Idx) != VNI) @@ -878,7 +878,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, // Before rematerializing into a register for a single instruction, try to // fold a load into the instruction. That avoids allocating a new register. - if (RM.OrigMI->getDesc().canFoldAsLoad() && + if (RM.OrigMI->canFoldAsLoad() && foldMemoryOperand(MI, Ops, RM.OrigMI)) { Edit->markRematerialized(RM.ParentVNI); ++NumFoldedLoads; diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp index eb54baa7294..c35302a0501 100644 --- a/lib/CodeGen/LiveDebugVariables.cpp +++ b/lib/CodeGen/LiveDebugVariables.cpp @@ -920,8 +920,8 @@ findInsertLocation(MachineBasicBlock *MBB, SlotIndex Idx, } // Don't insert anything after the first terminator, though. - return MI->getDesc().isTerminator() ? MBB->getFirstTerminator() : - llvm::next(MachineBasicBlock::iterator(MI)); + return MI->isTerminator() ? MBB->getFirstTerminator() : + llvm::next(MachineBasicBlock::iterator(MI)); } DebugLoc UserValue::findDebugLoc() { diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index edcfebe866e..1e58173c186 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -794,7 +794,7 @@ LiveIntervals::getLastSplitPoint(const LiveInterval &li, MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin(); while (I != B) { --I; - if (I->getDesc().isCall()) + if (I->isCall()) return I; } // The block contains no calls that can throw, so use the first terminator. diff --git a/lib/CodeGen/LiveRangeEdit.cpp b/lib/CodeGen/LiveRangeEdit.cpp index 2f283b24432..428b252f97d 100644 --- a/lib/CodeGen/LiveRangeEdit.cpp +++ b/lib/CodeGen/LiveRangeEdit.cpp @@ -129,7 +129,7 @@ bool LiveRangeEdit::canRematerializeAt(Remat &RM, } // If only cheap remats were requested, bail out early. 
- if (cheapAsAMove && !RM.OrigMI->getDesc().isAsCheapAsAMove()) + if (cheapAsAMove && !RM.OrigMI->isAsCheapAsAMove()) return false; // Verify that all used registers are available with the same values. @@ -174,7 +174,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI, if (MO.isDef()) { if (DefMI && DefMI != MI) return false; - if (!MI->getDesc().canFoldAsLoad()) + if (!MI->canFoldAsLoad()) return false; DefMI = MI; } else if (!MO.isUndef()) { diff --git a/lib/CodeGen/LiveVariables.cpp b/lib/CodeGen/LiveVariables.cpp index 03f8221e1f6..7477d919302 100644 --- a/lib/CodeGen/LiveVariables.cpp +++ b/lib/CodeGen/LiveVariables.cpp @@ -590,8 +590,8 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) { // them. The tail callee need not take the same registers as input // that it produces as output, and there are dependencies for its input // registers elsewhere. - if (!MBB->empty() && MBB->back().getDesc().isReturn() - && !MBB->back().getDesc().isCall()) { + if (!MBB->empty() && MBB->back().isReturn() + && !MBB->back().isCall()) { MachineInstr *Ret = &MBB->back(); for (MachineRegisterInfo::liveout_iterator diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp index 46ed082ad2d..4dc8173bd76 100644 --- a/lib/CodeGen/MachineBasicBlock.cpp +++ b/lib/CodeGen/MachineBasicBlock.cpp @@ -538,8 +538,8 @@ bool MachineBasicBlock::canFallThrough() { // Barrier is predicated and thus no longer an actual control barrier. This // is over-conservative though, because if an instruction isn't actually // predicated we could still treat it like a barrier. - return empty() || !back().getDesc().isBarrier() || - back().getDesc().isPredicable(); + return empty() || !back().isBarrier() || + back().isPredicable(); } // If there is no branch, control always falls through. @@ -737,7 +737,7 @@ void MachineBasicBlock::ReplaceUsesOfBlockWith(MachineBasicBlock *Old, MachineBasicBlock::insn_iterator I = insn_end(); while (I != insn_begin()) { --I; - if (!I->getDesc().isTerminator()) break; + if (!I->isTerminator()) break; // Scan the operands of this machine instruction, replacing any uses of Old // with New. diff --git a/lib/CodeGen/MachineCSE.cpp b/lib/CodeGen/MachineCSE.cpp index 7eda8c129dc..8c02cd7ddba 100644 --- a/lib/CodeGen/MachineCSE.cpp +++ b/lib/CodeGen/MachineCSE.cpp @@ -260,12 +260,11 @@ bool MachineCSE::isCSECandidate(MachineInstr *MI) { return false; // Ignore stuff that we obviously can't move. - const MCInstrDesc &MCID = MI->getDesc(); - if (MCID.mayStore() || MCID.isCall() || MCID.isTerminator() || + if (MI->mayStore() || MI->isCall() || MI->isTerminator() || MI->hasUnmodeledSideEffects()) return false; - if (MCID.mayLoad()) { + if (MI->mayLoad()) { // Okay, this instruction does a load. As a refinement, we allow the target // to decide whether the loaded value is actually a constant. If so, we can // actually use it as a load. @@ -287,7 +286,7 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg, // Heuristics #1: Don't CSE "cheap" computation if the def is not local or in // an immediate predecessor. We don't want to increase register pressure and // end up causing other computation to be spilled. - if (MI->getDesc().isAsCheapAsAMove()) { + if (MI->isAsCheapAsAMove()) { MachineBasicBlock *CSBB = CSMI->getParent(); MachineBasicBlock *BB = MI->getParent(); if (CSBB != BB && !CSBB->isSuccessor(BB)) @@ -376,7 +375,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) { // Commute commutable instructions. 
 bool Commuted = false;
- if (!FoundCSE && MI->getDesc().isCommutable()) {
+ if (!FoundCSE && MI->isCommutable()) {
 MachineInstr *NewMI = TII->commuteInstruction(MI);
 if (NewMI) {
 Commuted = true;
diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp
index ee36fc6ae15..0471cf2e6ef 100644
--- a/lib/CodeGen/MachineInstr.cpp
+++ b/lib/CodeGen/MachineInstr.cpp
@@ -748,18 +748,25 @@ void MachineInstr::addMemOperand(MachineFunction &MF,
 MemRefsEnd = NewMemRefsEnd;
 }
-bool MachineInstr::hasProperty(unsigned short MCFlag) const {
- if (getOpcode() != TargetOpcode::BUNDLE)
+bool
+MachineInstr::hasProperty(unsigned MCFlag, bool PeekInBundle, bool IsOr) const {
+ if (!PeekInBundle || getOpcode() != TargetOpcode::BUNDLE)
 return getDesc().getFlags() & (1 << MCFlag);
 const MachineBasicBlock *MBB = getParent();
 MachineBasicBlock::const_insn_iterator MII = *this;
 ++MII;
 while (MII != MBB->end() && MII->isInsideBundle()) {
- if (MII->getDesc().getFlags() & (1 << MCFlag))
- return true;
+ if (MII->getDesc().getFlags() & (1 << MCFlag)) {
+ if (IsOr)
+ return true;
+ } else {
+ if (!IsOr)
+ return false;
+ }
 ++MII;
 }
- return false;
+
+ return !IsOr;
 }
 bool MachineInstr::isIdenticalTo(const MachineInstr *Other,
@@ -1017,6 +1024,9 @@ MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
 /// operand list that is used to represent the predicate. It returns -1 if
 /// none is found.
 int MachineInstr::findFirstPredOperandIdx() const {
+ assert(getOpcode() != TargetOpcode::BUNDLE &&
+ "MachineInstr::findFirstPredOperandIdx() can't handle bundles");
+
 // Don't call MCID.findFirstPredOperandIdx() because this variant
 // is sometimes called on an instruction that's not yet complete, and
 // so the number of operands is less than the MCID indicates. In
@@ -1166,6 +1176,9 @@ void MachineInstr::copyKillDeadInfo(const MachineInstr *MI) {
 /// copyPredicates - Copies predicate operand(s) from MI.
 void MachineInstr::copyPredicates(const MachineInstr *MI) {
+ assert(getOpcode() != TargetOpcode::BUNDLE &&
+ "MachineInstr::copyPredicates() can't handle bundles");
+
 const MCInstrDesc &MCID = MI->getDesc();
 if (!MCID.isPredicable())
 return;
@@ -1207,13 +1220,13 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
 AliasAnalysis *AA,
 bool &SawStore) const {
 // Ignore stuff that we obviously can't move.
- if (MCID->mayStore() || MCID->isCall()) {
+ if (mayStore() || isCall()) {
 SawStore = true;
 return false;
 }
 if (isLabel() || isDebugValue() ||
- MCID->isTerminator() || hasUnmodeledSideEffects())
+ isTerminator() || hasUnmodeledSideEffects())
 return false;
 // See if this instruction does a load. If so, we have to guarantee that the
 // loaded value doesn't change between the load and its intended
 // destination. The check for isInvariantLoad gives the target the chance to
 // classify the load as always returning a constant, e.g. a constant pool
 // load.
- if (MCID->mayLoad() && !isInvariantLoad(AA))
+ if (mayLoad() && !isInvariantLoad(AA))
 // Otherwise, this is a real load. If there is a store between the load and
 // end of block, or if the load is volatile, we can't move it.
 return !SawStore && !hasVolatileMemoryRef();
@@ -1261,9 +1274,9 @@ bool MachineInstr::isSafeToReMat(const TargetInstrInfo *TII,
 /// have no volatile memory references.
 bool MachineInstr::hasVolatileMemoryRef() const {
 // An instruction known never to access memory won't have a volatile access.
- if (!MCID->mayStore() && - !MCID->mayLoad() && - !MCID->isCall() && + if (!mayStore() && + !mayLoad() && + !isCall() && !hasUnmodeledSideEffects()) return false; @@ -1287,7 +1300,7 @@ bool MachineInstr::hasVolatileMemoryRef() const { /// *all* loads the instruction does are invariant (if it does multiple loads). bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const { // If the instruction doesn't load at all, it isn't an invariant load. - if (!MCID->mayLoad()) + if (!mayLoad()) return false; // If the instruction has lost its memoperands, conservatively assume that @@ -1340,7 +1353,7 @@ unsigned MachineInstr::isConstantValuePHI() const { } bool MachineInstr::hasUnmodeledSideEffects() const { - if (getDesc().hasUnmodeledSideEffects()) + if (hasProperty(MCID::UnmodeledSideEffects)) return true; if (isInlineAsm()) { unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); @@ -1468,7 +1481,7 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const { // call instructions much less noisy on targets where calls clobber lots // of registers. Don't rely on MO.isDead() because we may be called before // LiveVariables is run, or we may be looking at a non-allocatable reg. - if (MF && getDesc().isCall() && + if (MF && isCall() && MO.isReg() && MO.isImplicit() && MO.isDef()) { unsigned Reg = MO.getReg(); if (TargetRegisterInfo::isPhysicalRegister(Reg)) { diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp index f88b730c6d8..764429dbf12 100644 --- a/lib/CodeGen/MachineLICM.cpp +++ b/lib/CodeGen/MachineLICM.cpp @@ -765,7 +765,7 @@ void MachineLICM::UpdateRegPressure(const MachineInstr *MI) { /// isLoadFromGOTOrConstantPool - Return true if this machine instruction /// loads from global offset table or constant pool. static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) { - assert (MI.getDesc().mayLoad() && "Expected MI that loads!"); + assert (MI.mayLoad() && "Expected MI that loads!"); for (MachineInstr::mmo_iterator I = MI.memoperands_begin(), E = MI.memoperands_end(); I != E; ++I) { if (const Value *V = (*I)->getValue()) { @@ -792,7 +792,7 @@ bool MachineLICM::IsLICMCandidate(MachineInstr &I) { // from constant memory are not safe to speculate all the time, for example // indexed load from a jump table. // Stores and side effects are already checked by isSafeToMove. - if (I.getDesc().mayLoad() && !isLoadFromGOTOrConstantPool(I) && + if (I.mayLoad() && !isLoadFromGOTOrConstantPool(I) && !IsGuaranteedToExecute(I.getParent())) return false; @@ -921,7 +921,7 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI, /// IsCheapInstruction - Return true if the instruction is marked "cheap" or /// the operand latency between its def and a use is one or less. bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const { - if (MI.getDesc().isAsCheapAsAMove() || MI.isCopyLike()) + if (MI.isAsCheapAsAMove() || MI.isCopyLike()) return true; if (!InstrItins || InstrItins->isEmpty()) return false; @@ -1105,7 +1105,7 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) { MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) { // Don't unfold simple loads. - if (MI->getDesc().canFoldAsLoad()) + if (MI->canFoldAsLoad()) return 0; // If not, we may be able to unfold a load and hoist that. 
diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp index 29cfb49953b..8a342377890 100644 --- a/lib/CodeGen/MachineSink.cpp +++ b/lib/CodeGen/MachineSink.cpp @@ -291,7 +291,7 @@ bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI, if (!CEBCandidates.insert(std::make_pair(From, To))) return true; - if (!MI->isCopy() && !MI->getDesc().isAsCheapAsAMove()) + if (!MI->isCopy() && !MI->isAsCheapAsAMove()) return true; // MI is cheap, we probably don't want to break the critical edge for it. diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp index f231e3c239b..1abc61b96d4 100644 --- a/lib/CodeGen/MachineVerifier.cpp +++ b/lib/CodeGen/MachineVerifier.cpp @@ -435,7 +435,7 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { report("MBB exits via unconditional fall-through but its successor " "differs from its CFG successor!", MBB); } - if (!MBB->empty() && MBB->back().getDesc().isBarrier() && + if (!MBB->empty() && MBB->back().isBarrier() && !TII->isPredicated(&MBB->back())) { report("MBB exits via unconditional fall-through but ends with a " "barrier instruction!", MBB); @@ -456,10 +456,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { if (MBB->empty()) { report("MBB exits via unconditional branch but doesn't contain " "any instructions!", MBB); - } else if (!MBB->back().getDesc().isBarrier()) { + } else if (!MBB->back().isBarrier()) { report("MBB exits via unconditional branch but doesn't end with a " "barrier instruction!", MBB); - } else if (!MBB->back().getDesc().isTerminator()) { + } else if (!MBB->back().isTerminator()) { report("MBB exits via unconditional branch but the branch isn't a " "terminator instruction!", MBB); } @@ -479,10 +479,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { if (MBB->empty()) { report("MBB exits via conditional branch/fall-through but doesn't " "contain any instructions!", MBB); - } else if (MBB->back().getDesc().isBarrier()) { + } else if (MBB->back().isBarrier()) { report("MBB exits via conditional branch/fall-through but ends with a " "barrier instruction!", MBB); - } else if (!MBB->back().getDesc().isTerminator()) { + } else if (!MBB->back().isTerminator()) { report("MBB exits via conditional branch/fall-through but the branch " "isn't a terminator instruction!", MBB); } @@ -499,10 +499,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { if (MBB->empty()) { report("MBB exits via conditional branch/branch but doesn't " "contain any instructions!", MBB); - } else if (!MBB->back().getDesc().isBarrier()) { + } else if (!MBB->back().isBarrier()) { report("MBB exits via conditional branch/branch but doesn't end with a " "barrier instruction!", MBB); - } else if (!MBB->back().getDesc().isTerminator()) { + } else if (!MBB->back().isTerminator()) { report("MBB exits via conditional branch/branch but the branch " "isn't a terminator instruction!", MBB); } @@ -555,9 +555,9 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) { // Check the MachineMemOperands for basic consistency. 
for (MachineInstr::mmo_iterator I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I) { - if ((*I)->isLoad() && !MCID.mayLoad()) + if ((*I)->isLoad() && !MI->mayLoad()) report("Missing mayLoad flag", MI); - if ((*I)->isStore() && !MCID.mayStore()) + if ((*I)->isStore() && !MI->mayStore()) report("Missing mayStore flag", MI); } @@ -575,7 +575,7 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) { } // Ensure non-terminators don't follow terminators. - if (MCID.isTerminator()) { + if (MI->isTerminator()) { if (!FirstTerminator) FirstTerminator = MI; } else if (FirstTerminator) { @@ -606,7 +606,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) { // Don't check if it's the last operand in a variadic instruction. See, // e.g., LDM_RET in the arm back end. if (MO->isReg() && - !(MCID.isVariadic() && MONum == MCID.getNumOperands()-1)) { + !(MI->isVariadic() && MONum == MCID.getNumOperands()-1)) { if (MO->isDef() && !MCOI.isOptionalDef()) report("Explicit operand marked as def", MO, MONum); if (MO->isImplicit()) @@ -614,7 +614,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) { } } else { // ARM adds %reg0 operands to indicate predicates. We'll allow that. - if (MO->isReg() && !MO->isImplicit() && !MCID.isVariadic() && MO->getReg()) + if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg()) report("Extra explicit operand on non-variadic instruction", MO, MONum); } @@ -800,11 +800,11 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) { LiveInts && !LiveInts->isNotInMIMap(MI)) { LiveInterval &LI = LiveStks->getInterval(MO->getIndex()); SlotIndex Idx = LiveInts->getInstructionIndex(MI); - if (MCID.mayLoad() && !LI.liveAt(Idx.getRegSlot(true))) { + if (MI->mayLoad() && !LI.liveAt(Idx.getRegSlot(true))) { report("Instruction loads from dead spill slot", MO, MONum); *OS << "Live stack: " << LI << '\n'; } - if (MCID.mayStore() && !LI.liveAt(Idx.getRegSlot())) { + if (MI->mayStore() && !LI.liveAt(Idx.getRegSlot())) { report("Instruction stores to dead spill slot", MO, MONum); *OS << "Live stack: " << LI << '\n'; } diff --git a/lib/CodeGen/PeepholeOptimizer.cpp b/lib/CodeGen/PeepholeOptimizer.cpp index bbc7ce2d0a4..2a5652ac3d3 100644 --- a/lib/CodeGen/PeepholeOptimizer.cpp +++ b/lib/CodeGen/PeepholeOptimizer.cpp @@ -292,7 +292,7 @@ bool PeepholeOptimizer::OptimizeBitcastInstr(MachineInstr *MI, assert(Def && Src && "Malformed bitcast instruction!"); MachineInstr *DefMI = MRI->getVRegDef(Src); - if (!DefMI || !DefMI->getDesc().isBitcast()) + if (!DefMI || !DefMI->isBitcast()) return false; unsigned SrcSrc = 0; @@ -353,7 +353,7 @@ bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI, SmallSet &ImmDefRegs, DenseMap &ImmDefMIs) { const MCInstrDesc &MCID = MI->getDesc(); - if (!MCID.isMoveImmediate()) + if (!MI->isMoveImmediate()) return false; if (MCID.getNumDefs() != 1) return false; @@ -428,9 +428,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) { continue; } - const MCInstrDesc &MCID = MI->getDesc(); - - if (MCID.isBitcast()) { + if (MI->isBitcast()) { if (OptimizeBitcastInstr(MI, MBB)) { // MI is deleted. LocalMIs.erase(MI); @@ -438,7 +436,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) { MII = First ? I->begin() : llvm::next(PMII); continue; } - } else if (MCID.isCompare()) { + } else if (MI->isCompare()) { if (OptimizeCmpInstr(MI, MBB)) { // MI is deleted. 
LocalMIs.erase(MI); diff --git a/lib/CodeGen/PostRASchedulerList.cpp b/lib/CodeGen/PostRASchedulerList.cpp index 7205ed6bfab..126a3685602 100644 --- a/lib/CodeGen/PostRASchedulerList.cpp +++ b/lib/CodeGen/PostRASchedulerList.cpp @@ -364,7 +364,7 @@ void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) { KillIndices[i] = ~0u; // Determine the live-out physregs for this block. - if (!BB->empty() && BB->back().getDesc().isReturn()) { + if (!BB->empty() && BB->back().isReturn()) { // In a return block, examine the function live-out regs. for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(), E = MRI.liveout_end(); I != E; ++I) { diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp index c8b02e8c9f7..b4fd1cb4f52 100644 --- a/lib/CodeGen/PrologEpilogInserter.cpp +++ b/lib/CodeGen/PrologEpilogInserter.cpp @@ -332,7 +332,7 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) { // Skip over all terminator instructions, which are part of the return // sequence. MachineBasicBlock::iterator I2 = I; - while (I2 != MBB->begin() && (--I2)->getDesc().isTerminator()) + while (I2 != MBB->begin() && (--I2)->isTerminator()) I = I2; bool AtStart = I == MBB->begin(); @@ -426,11 +426,11 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) { // Skip over all terminator instructions, which are part of the // return sequence. - if (! I->getDesc().isTerminator()) { + if (! I->isTerminator()) { ++I; } else { MachineBasicBlock::iterator I2 = I; - while (I2 != MBB->begin() && (--I2)->getDesc().isTerminator()) + while (I2 != MBB->begin() && (--I2)->isTerminator()) I = I2; } } @@ -698,7 +698,7 @@ void PEI::insertPrologEpilogCode(MachineFunction &Fn) { // Add epilogue to restore the callee-save registers in each exiting block for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) { // If last instruction is a return instruction, add an epilogue - if (!I->empty() && I->back().getDesc().isReturn()) + if (!I->empty() && I->back().isReturn()) TFI.emitEpilogue(Fn, *I); } diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp index 4664a3c4299..c2656c5e5c8 100644 --- a/lib/CodeGen/RegAllocFast.cpp +++ b/lib/CodeGen/RegAllocFast.cpp @@ -748,8 +748,8 @@ void RAFast::AllocateBasicBlock() { // and return are tail calls; do not do this for them. The tail callee need // not take the same registers as input that it produces as output, and there // are dependencies for its input registers elsewhere. - if (!MBB->empty() && MBB->back().getDesc().isReturn() && - !MBB->back().getDesc().isCall()) { + if (!MBB->empty() && MBB->back().isReturn() && + !MBB->back().isCall()) { MachineInstr *Ret = &MBB->back(); for (MachineRegisterInfo::liveout_iterator @@ -968,7 +968,7 @@ void RAFast::AllocateBasicBlock() { } unsigned DefOpEnd = MI->getNumOperands(); - if (MCID.isCall()) { + if (MI->isCall()) { // Spill all virtregs before a call. This serves two purposes: 1. If an // exception is thrown, the landing pad is going to expect to find // registers in their spill slots, and 2. 
we don't have to wade through diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp index 3866fc08691..24bcace0739 100644 --- a/lib/CodeGen/RegisterCoalescer.cpp +++ b/lib/CodeGen/RegisterCoalescer.cpp @@ -643,8 +643,7 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP, MachineInstr *DefMI = LIS->getInstructionFromIndex(AValNo->def); if (!DefMI) return false; - const MCInstrDesc &MCID = DefMI->getDesc(); - if (!MCID.isCommutable()) + if (!DefMI->isCommutable()) return false; // If DefMI is a two-address instruction then commuting it will change the // destination register. @@ -802,14 +801,14 @@ bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt, if (!DefMI) return false; assert(DefMI && "Defining instruction disappeared"); - const MCInstrDesc &MCID = DefMI->getDesc(); - if (!MCID.isAsCheapAsAMove()) + if (!DefMI->isAsCheapAsAMove()) return false; if (!TII->isTriviallyReMaterializable(DefMI, AA)) return false; bool SawStore = false; if (!DefMI->isSafeToMove(TII, AA, SawStore)) return false; + const MCInstrDesc &MCID = DefMI->getDesc(); if (MCID.getNumDefs() != 1) return false; if (!DefMI->isImplicitDef()) { diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index b382169a361..2bb173b5c4b 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -157,7 +157,7 @@ void ScheduleDAGInstrs::AddSchedBarrierDeps() { MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0; ExitSU.setInstr(ExitMI); bool AllDepKnown = ExitMI && - (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier()); + (ExitMI->isCall() || ExitMI->isBarrier()); if (ExitMI && AllDepKnown) { // If it's a call or a barrier, add dependencies on the defs and uses of // instruction. @@ -238,13 +238,12 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) { continue; } - const MCInstrDesc &MCID = MI->getDesc(); - assert(!MCID.isTerminator() && !MI->isLabel() && + assert(!MI->isTerminator() && !MI->isLabel() && "Cannot schedule terminators or labels!"); // Create the SUnit for this MI. SUnit *SU = NewSUnit(MI); - SU->isCall = MCID.isCall(); - SU->isCommutable = MCID.isCommutable(); + SU->isCall = MI->isCall(); + SU->isCommutable = MI->isCommutable(); // Assign the Latency field of SU using target-provided information. if (UnitLatencies) @@ -315,7 +314,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) { int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg); assert(RegUseIndex >= 0 && "UseMI doesn's use register!"); if (RegUseIndex >= 0 && - (UseMCID.mayLoad() || UseMCID.mayStore()) && + (UseMI->mayLoad() || UseMI->mayStore()) && (unsigned)RegUseIndex < UseMCID.getNumOperands() && UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass()) LDataLatency += SpecialAddressLatency; @@ -419,9 +418,9 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) { // produce more precise dependence information. #define STORE_LOAD_LATENCY 1 unsigned TrueMemOrderLatency = 0; - if (MCID.isCall() || MI->hasUnmodeledSideEffects() || + if (MI->isCall() || MI->hasUnmodeledSideEffects() || (MI->hasVolatileMemoryRef() && - (!MCID.mayLoad() || !MI->isInvariantLoad(AA)))) { + (!MI->mayLoad() || !MI->isInvariantLoad(AA)))) { // Be conservative with these and add dependencies on all memory // references, even those that are known to not alias. 
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index b382169a361..2bb173b5c4b 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -157,7 +157,7 @@ void ScheduleDAGInstrs::AddSchedBarrierDeps() {
   MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
   ExitSU.setInstr(ExitMI);
   bool AllDepKnown = ExitMI &&
-    (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
+    (ExitMI->isCall() || ExitMI->isBarrier());
   if (ExitMI && AllDepKnown) {
     // If it's a call or a barrier, add dependencies on the defs and uses of
     // instruction.
@@ -238,13 +238,12 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
       continue;
     }
 
-    const MCInstrDesc &MCID = MI->getDesc();
-    assert(!MCID.isTerminator() && !MI->isLabel() &&
+    assert(!MI->isTerminator() && !MI->isLabel() &&
            "Cannot schedule terminators or labels!");
     // Create the SUnit for this MI.
     SUnit *SU = NewSUnit(MI);
-    SU->isCall = MCID.isCall();
-    SU->isCommutable = MCID.isCommutable();
+    SU->isCall = MI->isCall();
+    SU->isCommutable = MI->isCommutable();
 
     // Assign the Latency field of SU using target-provided information.
     if (UnitLatencies)
@@ -315,7 +314,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
             int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
             assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
             if (RegUseIndex >= 0 &&
-                (UseMCID.mayLoad() || UseMCID.mayStore()) &&
+                (UseMI->mayLoad() || UseMI->mayStore()) &&
                 (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
                 UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
               LDataLatency += SpecialAddressLatency;
@@ -419,9 +418,9 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
     // produce more precise dependence information.
 #define STORE_LOAD_LATENCY 1
     unsigned TrueMemOrderLatency = 0;
-    if (MCID.isCall() || MI->hasUnmodeledSideEffects() ||
+    if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
         (MI->hasVolatileMemoryRef() &&
-         (!MCID.mayLoad() || !MI->isInvariantLoad(AA)))) {
+         (!MI->mayLoad() || !MI->isInvariantLoad(AA)))) {
       // Be conservative with these and add dependencies on all memory
       // references, even those that are known to not alias.
       for (std::map<const Value *, SUnit *>::iterator I =
@@ -460,7 +459,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
       PendingLoads.clear();
       AliasMemDefs.clear();
       AliasMemUses.clear();
-    } else if (MCID.mayStore()) {
+    } else if (MI->mayStore()) {
       bool MayAlias = true;
       TrueMemOrderLatency = STORE_LOAD_LATENCY;
       if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
@@ -516,7 +515,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
                             /*Reg=*/0, /*isNormalMemory=*/false,
                             /*isMustAlias=*/false,
                             /*isArtificial=*/true));
-    } else if (MCID.mayLoad()) {
+    } else if (MI->mayLoad()) {
       bool MayAlias = true;
       TrueMemOrderLatency = 0;
       if (MI->isInvariantLoad(AA)) {
@@ -576,7 +575,7 @@ void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
 
     // Simplistic target-independent heuristic: assume that loads take
     // extra time.
-    if (SU->getInstr()->getDesc().mayLoad())
+    if (SU->getInstr()->mayLoad())
       SU->Latency += 2;
   } else {
     SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 2ff66f8f871..cb6fd53db07 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -294,7 +294,7 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
   const TargetRegisterClass *DstRC = 0;
   if (IIOpNum < II->getNumOperands())
     DstRC = TII->getRegClass(*II, IIOpNum, TRI);
-  assert((DstRC || (MCID.isVariadic() && IIOpNum >= MCID.getNumOperands())) &&
+  assert((DstRC || (MI->isVariadic() && IIOpNum >= MCID.getNumOperands())) &&
          "Don't have operand info for this instruction!");
   if (DstRC && !MRI->constrainRegClass(VReg, DstRC, MinRCSize)) {
     unsigned NewVReg = MRI->createVirtualRegister(DstRC);
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index ddbeaeb7cb7..53f6b3d086b 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -177,7 +177,7 @@ TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
 
 void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
                                                    SDNode *Node) const {
-  assert(!MI->getDesc().hasPostISelHook() &&
+  assert(!MI->hasPostISelHook() &&
          "If a target marks an instruction with 'hasPostISelHook', "
          "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
 }
diff --git a/lib/CodeGen/ShrinkWrapping.cpp b/lib/CodeGen/ShrinkWrapping.cpp
index 13f269eb520..70fcf55cc47 100644
--- a/lib/CodeGen/ShrinkWrapping.cpp
+++ b/lib/CodeGen/ShrinkWrapping.cpp
@@ -124,7 +124,7 @@ MachineLoop* PEI::getTopLevelLoopParent(MachineLoop *LP) {
 }
 
 bool PEI::isReturnBlock(MachineBasicBlock* MBB) {
-  return (MBB && !MBB->empty() && MBB->back().getDesc().isReturn());
+  return (MBB && !MBB->empty() && MBB->back().isReturn());
 }
 
 // Initialize shrink wrapping DFA sets, called before iterations.
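
For the scheduler the conservative direction of each query matters: a bundle must count as a store if any member may store, but is only predicable if every member is. A standalone sketch of the two folds, using stand-in types rather than LLVM's:

    #include <vector>

    // Stand-in per-instruction flags (hypothetical type, illustration only).
    struct InsnFlags { bool MayStore; bool Predicable; };

    // "Or" fold: one storing member makes the whole bundle a store barrier.
    static bool bundleMayStore(const std::vector<InsnFlags> &Bundle) {
      for (size_t i = 0, e = Bundle.size(); i != e; ++i)
        if (Bundle[i].MayStore)
          return true;
      return false;
    }

    // "And" fold: the bundle is predicable only if every member is.
    static bool bundleIsPredicable(const std::vector<InsnFlags> &Bundle) {
      for (size_t i = 0, e = Bundle.size(); i != e; ++i)
        if (!Bundle[i].Predicable)
          return false;
      return !Bundle.empty();
    }
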
diff --git a/lib/CodeGen/SplitKit.cpp b/lib/CodeGen/SplitKit.cpp
index 751d604cd3d..c0860732b4e 100644
--- a/lib/CodeGen/SplitKit.cpp
+++ b/lib/CodeGen/SplitKit.cpp
@@ -80,7 +80,7 @@ SlotIndex SplitAnalysis::computeLastSplitPoint(unsigned Num) {
   for (MachineBasicBlock::const_iterator I = MBB->end(), E = MBB->begin();
        I != E;) {
     --I;
-    if (I->getDesc().isCall()) {
+    if (I->isCall()) {
       LSP.second = LIS.getInstructionIndex(I);
       break;
     }
diff --git a/lib/CodeGen/TailDuplication.cpp b/lib/CodeGen/TailDuplication.cpp
index adfce863d0d..031377b36b5 100644
--- a/lib/CodeGen/TailDuplication.cpp
+++ b/lib/CodeGen/TailDuplication.cpp
@@ -553,7 +553,7 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
 
   bool HasIndirectbr = false;
   if (!TailBB.empty())
-    HasIndirectbr = TailBB.back().getDesc().isIndirectBranch();
+    HasIndirectbr = TailBB.back().isIndirectBranch();
 
   if (HasIndirectbr && PreRegAlloc)
     MaxDuplicateCount = 20;
@@ -563,19 +563,19 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
   unsigned InstrCount = 0;
   for (MachineBasicBlock::iterator I = TailBB.begin(); I != TailBB.end(); ++I) {
     // Non-duplicable things shouldn't be tail-duplicated.
-    if (I->getDesc().isNotDuplicable())
+    if (I->isNotDuplicable())
       return false;
 
     // Do not duplicate 'return' instructions if this is a pre-regalloc run.
     // A return may expand into a lot more instructions (e.g. reload of callee
     // saved registers) after PEI.
-    if (PreRegAlloc && I->getDesc().isReturn())
+    if (PreRegAlloc && I->isReturn())
       return false;
 
     // Avoid duplicating calls before register allocation. Calls present a
     // barrier to register allocation so duplicating them may end up increasing
     // spills.
-    if (PreRegAlloc && I->getDesc().isCall())
+    if (PreRegAlloc && I->isCall())
       return false;
 
     if (!I->isPHI() && !I->isDebugValue())
@@ -610,7 +610,7 @@ TailDuplicatePass::isSimpleBB(MachineBasicBlock *TailBB) {
   ++I;
   if (I == E)
     return true;
-  return I->getDesc().isUnconditionalBranch();
+  return I->isUnconditionalBranch();
 }
 
 static bool
diff --git a/lib/CodeGen/TargetInstrInfoImpl.cpp b/lib/CodeGen/TargetInstrInfoImpl.cpp
index 08e2b168da7..5115d5bd07a 100644
--- a/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -121,6 +121,9 @@ MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
 bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
                                                 unsigned &SrcOpIdx1,
                                                 unsigned &SrcOpIdx2) const {
+  assert(MI->getOpcode() != TargetOpcode::BUNDLE &&
+         "TargetInstrInfoImpl::findCommutedOpIndices() can't handle bundles");
+
   const MCInstrDesc &MCID = MI->getDesc();
   if (!MCID.isCommutable())
     return false;
@@ -139,8 +142,12 @@ bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
 bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
                             const SmallVectorImpl<MachineOperand> &Pred) const {
   bool MadeChange = false;
+
+  assert(MI->getOpcode() != TargetOpcode::BUNDLE &&
+         "TargetInstrInfoImpl::PredicateInstruction() can't handle bundles");
+
   const MCInstrDesc &MCID = MI->getDesc();
-  if (!MCID.isPredicable())
+  if (!MI->isPredicable())
     return false;
 
   for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -218,7 +225,7 @@ TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
 
 MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
                                              MachineFunction &MF) const {
-  assert(!Orig->getDesc().isNotDuplicable() &&
+  assert(!Orig->isNotDuplicable() &&
          "Instruction cannot be duplicated");
   return MF.CloneMachineInstr(Orig);
 }
@@ -288,10 +295,10 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
   if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
     // Add a memory operand, foldMemoryOperandImpl doesn't do that.
     assert((!(Flags & MachineMemOperand::MOStore) ||
-            NewMI->getDesc().mayStore()) &&
+            NewMI->mayStore()) &&
            "Folded a def to a non-store!");
     assert((!(Flags & MachineMemOperand::MOLoad) ||
-            NewMI->getDesc().mayLoad()) &&
+            NewMI->mayLoad()) &&
            "Folded a use to a non-load!");
     const MachineFrameInfo &MFI = *MF.getFrameInfo();
     assert(MFI.getObjectOffset(FI) != -1);
@@ -331,7 +338,7 @@ MachineInstr*
 TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                    const SmallVectorImpl<unsigned> &Ops,
                                    MachineInstr* LoadMI) const {
-  assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
+  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
 #ifndef NDEBUG
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
@@ -382,10 +389,8 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
       MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
     return true;
 
-  const MCInstrDesc &MCID = MI->getDesc();
-
   // Avoid instructions obviously unsafe for remat.
-  if (MCID.isNotDuplicable() || MCID.mayStore() ||
+  if (MI->isNotDuplicable() || MI->mayStore() ||
       MI->hasUnmodeledSideEffects())
     return false;
 
@@ -395,7 +400,7 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
     return false;
 
   // Avoid instructions which load from potentially varying memory.
-  if (MCID.mayLoad() && !MI->isInvariantLoad(AA))
+  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
     return false;
 
   // If any of the registers accessed are non-constant, conservatively assume
@@ -456,7 +461,7 @@ bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const{
   // Terminators and labels can't be scheduled around.
-  if (MI->getDesc().isTerminator() || MI->isLabel())
+  if (MI->isTerminator() || MI->isLabel())
     return true;
 
   // Don't attempt to schedule around any instruction that defines
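
The two new asserts above set the convention for helpers that rewrite operands in place: refuse the bundle marker outright rather than misbehave on it. The same guard in a standalone sketch (hypothetical helper; header locations as of this era):

    #include <cassert>
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetOpcodes.h"

    // Illustration only: operand-rewriting helpers must see a real
    // instruction, never the BUNDLE marker that heads a bundle.
    static void rewriteOperandsInPlace(llvm::MachineInstr *MI) {
      assert(MI->getOpcode() != llvm::TargetOpcode::BUNDLE &&
             "operand rewriting must be applied to a real instruction");
      // ... mutate MI's operands here ...
    }
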
diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp
index 956b7065a95..6a63335cf80 100644
--- a/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -242,7 +242,7 @@ bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
   // appropriate location, we can try to sink the current instruction
   // past it.
   if (!KillMI || KillMI->getParent() != MBB || KillMI == MI ||
-      KillMI->getDesc().isTerminator())
+      KillMI->isTerminator())
     return false;
 
   // If any of the definitions are used by another instruction between the
@@ -816,10 +816,9 @@ void TwoAddressInstructionPass::ProcessCopy(MachineInstr *MI,
 static bool isSafeToDelete(MachineInstr *MI,
                            const TargetInstrInfo *TII,
                            SmallVector<unsigned, 4> &Kills) {
-  const MCInstrDesc &MCID = MI->getDesc();
-  if (MCID.mayStore() || MCID.isCall())
+  if (MI->mayStore() || MI->isCall())
     return false;
-  if (MCID.isTerminator() || MI->hasUnmodeledSideEffects())
+  if (MI->isTerminator() || MI->hasUnmodeledSideEffects())
     return false;
 
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -917,9 +916,8 @@ TwoAddressInstructionPass::RescheduleMIBelowKill(MachineBasicBlock *MBB,
     // Don't mess with copies, they may be coalesced later.
     return false;
 
-  const MCInstrDesc &MCID = KillMI->getDesc();
-  if (MCID.hasUnmodeledSideEffects() || MCID.isCall() || MCID.isBranch() ||
-      MCID.isTerminator())
+  if (KillMI->hasUnmodeledSideEffects() || KillMI->isCall() ||
+      KillMI->isBranch() || KillMI->isTerminator())
     // Don't move past calls, etc.
     return false;
 
@@ -974,9 +972,8 @@ TwoAddressInstructionPass::RescheduleMIBelowKill(MachineBasicBlock *MBB,
     if (NumVisited > 10)  // FIXME: Arbitrary limit to reduce compile time cost.
       return false;
     ++NumVisited;
-    const MCInstrDesc &OMCID = OtherMI->getDesc();
-    if (OMCID.hasUnmodeledSideEffects() || OMCID.isCall() || OMCID.isBranch() ||
-        OMCID.isTerminator())
+    if (OtherMI->hasUnmodeledSideEffects() || OtherMI->isCall() ||
+        OtherMI->isBranch() || OtherMI->isTerminator())
       // Don't move past calls, etc.
       return false;
     for (unsigned i = 0, e = OtherMI->getNumOperands(); i != e; ++i) {
@@ -1118,9 +1115,8 @@ TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB,
     if (NumVisited > 10)  // FIXME: Arbitrary limit to reduce compile time cost.
       return false;
     ++NumVisited;
-    const MCInstrDesc &MCID = OtherMI->getDesc();
-    if (MCID.hasUnmodeledSideEffects() || MCID.isCall() || MCID.isBranch() ||
-        MCID.isTerminator())
+    if (OtherMI->hasUnmodeledSideEffects() || OtherMI->isCall() ||
+        OtherMI->isBranch() || OtherMI->isTerminator())
      // Don't move past calls, etc.
      return false;
     SmallVector<unsigned, 2> OtherDefs;
@@ -1200,7 +1196,6 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
     return false;
 
   MachineInstr &MI = *mi;
-  const MCInstrDesc &MCID = MI.getDesc();
   unsigned regA = MI.getOperand(DstIdx).getReg();
   unsigned regB = MI.getOperand(SrcIdx).getReg();
 
@@ -1222,7 +1217,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
   unsigned regCIdx = ~0U;
   bool TryCommute = false;
   bool AggressiveCommute = false;
-  if (MCID.isCommutable() && MI.getNumOperands() >= 3 &&
+  if (MI.isCommutable() && MI.getNumOperands() >= 3 &&
       TII->findCommutedOpIndices(&MI, SrcOp1, SrcOp2)) {
     if (SrcIdx == SrcOp1)
       regCIdx = SrcOp2;
@@ -1260,7 +1255,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
   if (TargetRegisterInfo::isVirtualRegister(regA))
     ScanUses(regA, &*mbbi, Processed);
 
-  if (MCID.isConvertibleTo3Addr()) {
+  if (MI.isConvertibleTo3Addr()) {
     // This instruction is potentially convertible to a true
     // three-address instruction.  Check if it is profitable.
     if (!regBKilled || isProfitableToConv3Addr(regA, regB)) {
@@ -1287,7 +1282,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
   //   movq (%rax), %rcx
   //   addq %rdx, %rcx
   // because it's preferable to schedule a load than a register copy.
-  if (MCID.mayLoad() && !regBKilled) {
+  if (MI.mayLoad() && !regBKilled) {
     // Determine if a load can be unfolded.
     unsigned LoadRegIndex;
     unsigned NewOpc =
@@ -1530,7 +1525,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
           // If it's safe and profitable, remat the definition instead of
           // copying it.
           if (DefMI &&
-              DefMI->getDesc().isAsCheapAsAMove() &&
+              DefMI->isAsCheapAsAMove() &&
               DefMI->isSafeToReMat(TII, AA, regB) &&
               isProfitableToReMat(regB, rc, mi, DefMI, mbbi, Dist)){
             DEBUG(dbgs() << "2addr: REMATTING : " << *DefMI << "\n");
diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
index 8f84ac7b412..42a136e72d4 100644
--- a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
+++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
@@ -313,7 +313,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
     for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
          MI != E; ++MI) {
       if (!MI->isLabel()) {
-        MayThrow |= MI->getDesc().isCall();
+        MayThrow |= MI->isCall();
         continue;
       }
diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp
index 6d37416d0c1..876575c4ba2 100644
--- a/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -1069,7 +1069,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
   }
 
   // Try to figure out the unwinding opcode out of src / dst regs.
-  if (MI->getDesc().mayStore()) {
+  if (MI->mayStore()) {
     // Register saves.
     assert(DstReg == ARM::SP &&
            "Only stack pointer as a destination reg is supported");
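
isSafeToDelete() above is typical of how these flag queries compose into a safety predicate: one conservative answer anywhere vetoes the transformation. A generic sketch of that shape (stand-in type, illustration only):

    // Stand-in instruction summary (hypothetical).
    struct InsnSummary {
      bool MayStore, IsCall, IsTerminator, HasSideEffects;
    };

    // Deletion is allowed only when every conservative flag is clear;
    // with bundles, a single flagged member vetoes the whole bundle.
    static bool safeToDeleteSketch(const InsnSummary &S) {
      if (S.MayStore || S.IsCall)
        return false;
      if (S.IsTerminator || S.HasSideEffects)
        return false;
      return true; // dead-operand checks would follow here
    }
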
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 45325157a8a..08f1de3f7d2 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -146,7 +146,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
   unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
   const MCInstrDesc &MCID = MI->getDesc();
   unsigned NumOps = MCID.getNumOperands();
-  bool isLoad = !MCID.mayStore();
+  bool isLoad = !MI->mayStore();
   const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
   const MachineOperand &Base = MI->getOperand(2);
   const MachineOperand &Offset = MI->getOperand(NumOps-3);
@@ -491,7 +491,7 @@ bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                     std::vector<MachineOperand> &Pred) const {
   // FIXME: This confuses implicit_def with optional CPSR def.
   const MCInstrDesc &MCID = MI->getDesc();
-  if (!MCID.getImplicitDefs() && !MCID.hasOptionalDef())
+  if (!MCID.getImplicitDefs() && !MI->hasOptionalDef())
     return false;
 
   bool Found = false;
@@ -510,11 +510,10 @@ bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
 /// By default, this returns true for every instruction with a
 /// PredicateOperand.
 bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
-  const MCInstrDesc &MCID = MI->getDesc();
-  if (!MCID.isPredicable())
+  if (!MI->isPredicable())
     return false;
 
-  if ((MCID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
+  if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
     ARMFunctionInfo *AFI =
       MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
     return AFI->isThumb2Function();
@@ -593,7 +592,7 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
       ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
     unsigned NumOps = MCID.getNumOperands();
     MachineOperand JTOP =
-      MI->getOperand(NumOps - (MCID.isPredicable() ? 3 : 2));
+      MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2));
     unsigned JTI = JTOP.getIndex();
     const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
     assert(MJTI != 0);
@@ -845,7 +844,7 @@ ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
 unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
   const MachineMemOperand *Dummy;
-  return MI->getDesc().mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
+  return MI->mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
 }
 
 void ARMBaseInstrInfo::
@@ -991,7 +990,7 @@ ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
 unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                      int &FrameIndex) const {
   const MachineMemOperand *Dummy;
-  return MI->getDesc().mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
+  return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
 }
 
 bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{
@@ -1357,7 +1356,7 @@ bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
     return false;
 
   // Terminators and labels can't be scheduled around.
-  if (MI->getDesc().isTerminator() || MI->isLabel())
+  if (MI->isTerminator() || MI->isLabel())
     return true;
 
   // Treat the start of the IT block as a scheduling boundary, but schedule
@@ -2339,10 +2338,10 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
       DefMI->isRegSequence() || DefMI->isImplicitDef())
     return 1;
 
-  const MCInstrDesc &DefMCID = DefMI->getDesc();
   if (!ItinData || ItinData->isEmpty())
-    return DefMCID.mayLoad() ? 3 : 1;
+    return DefMI->mayLoad() ? 3 : 1;
 
+  const MCInstrDesc &DefMCID = DefMI->getDesc();
   const MCInstrDesc &UseMCID = UseMI->getDesc();
   const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
   if (DefMO.getReg() == ARM::CPSR) {
@@ -2352,7 +2351,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
     }
 
     // CPSR set and branch can be paired in the same cycle.
-    if (UseMCID.isBranch())
+    if (UseMI->isBranch())
       return 0;
   }
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp
index 568d8e4cdde..fcf87fb65c4 100644
--- a/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -495,7 +495,7 @@ void ARMConstantIslands::JumpTableFunctionScan(MachineFunction &MF) {
 
     for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
          I != E; ++I)
-      if (I->getDesc().isBranch() && I->getOpcode() == ARM::t2BR_JT)
+      if (I->isBranch() && I->getOpcode() == ARM::t2BR_JT)
         T2JumpTables.push_back(I);
   }
 }
@@ -547,7 +547,7 @@ void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
         BBI.Unalign = isThumb ? 1 : 2;
 
       int Opc = I->getOpcode();
-      if (I->getDesc().isBranch()) {
+      if (I->isBranch()) {
         bool isCond = false;
         unsigned Bits = 0;
         unsigned Scale = 1;
@@ -1738,7 +1738,7 @@ bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
     MachineInstr *MI = T2JumpTables[i];
     const MCInstrDesc &MCID = MI->getDesc();
     unsigned NumOps = MCID.getNumOperands();
-    unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 3 : 2);
+    unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 3 : 2);
     MachineOperand JTOP = MI->getOperand(JTOpIdx);
     unsigned JTI = JTOP.getIndex();
     assert(JTI < JT.size());
@@ -1861,7 +1861,7 @@ bool ARMConstantIslands::ReorderThumb2JumpTables(MachineFunction &MF) {
     MachineInstr *MI = T2JumpTables[i];
     const MCInstrDesc &MCID = MI->getDesc();
     unsigned NumOps = MCID.getNumOperands();
-    unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 3 : 2);
+    unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 3 : 2);
     MachineOperand JTOP = MI->getOperand(JTOpIdx);
     unsigned JTI = JTOP.getIndex();
     assert(JTI < JT.size());
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index de366f7bc8f..d3ebf36b28e 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -228,8 +228,7 @@ class ARMFastISel : public FastISel {
 // we don't care about implicit defs here, just places we'll need to add a
 // default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
 bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
-  const MCInstrDesc &MCID = MI->getDesc();
-  if (!MCID.hasOptionalDef())
+  if (!MI->hasOptionalDef())
     return false;
 
   // Look to see if our OptionalDef is defining CPSR or CCR.
diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp
index f2eacdc6324..06944b1ca78 100644
--- a/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/lib/Target/ARM/ARMFrameLowering.cpp
@@ -310,8 +310,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
 void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
                                     MachineBasicBlock &MBB) const {
   MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
-  assert(MBBI->getDesc().isReturn() &&
-         "Can only insert epilog into returning blocks");
+  assert(MBBI->isReturn() && "Can only insert epilog into returning blocks");
   unsigned RetOpcode = MBBI->getOpcode();
   DebugLoc dl = MBBI->getDebugLoc();
   MachineFrameInfo *MFI = MF.getFrameInfo();
diff --git a/lib/Target/ARM/ARMHazardRecognizer.cpp b/lib/Target/ARM/ARMHazardRecognizer.cpp
index 787f6a27918..2393544dec6 100644
--- a/lib/Target/ARM/ARMHazardRecognizer.cpp
+++ b/lib/Target/ARM/ARMHazardRecognizer.cpp
@@ -21,7 +21,7 @@ static bool hasRAWHazard(MachineInstr *DefMI, MachineInstr *MI,
   // FIXME: Detect integer instructions properly.
   const MCInstrDesc &MCID = MI->getDesc();
   unsigned Domain = MCID.TSFlags & ARMII::DomainMask;
-  if (MCID.mayStore())
+  if (MI->mayStore())
     return false;
   unsigned Opcode = MCID.getOpcode();
   if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
@@ -48,9 +48,9 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
     MachineInstr *DefMI = LastMI;
     const MCInstrDesc &LastMCID = LastMI->getDesc();
     // Skip over one non-VFP / NEON instruction.
-    if (!LastMCID.isBarrier() &&
+    if (!LastMI->isBarrier() &&
         // On A9, AGU and NEON/FPU are muxed.
-        !(STI.isCortexA9() && (LastMCID.mayLoad() || LastMCID.mayStore())) &&
+        !(STI.isCortexA9() && (LastMI->mayLoad() || LastMI->mayStore())) &&
         (LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
       MachineBasicBlock::iterator I = LastMI;
       if (I != LastMI->getParent()->begin()) {
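
The NumOps - (isPredicable ? 3 : 2) expression recurs in GetInstSizeInBytes and in both constant-island hunks: the operand tail of a predicable jump-table branch is one operand longer, so the jump-table operand's index, counted from the end, shifts by one. A hypothetical helper capturing the idiom:

    #include "llvm/CodeGen/MachineInstr.h"

    // Hypothetical refactoring of the repeated expression above.
    static unsigned jumpTableOpIdx(const llvm::MachineInstr *MI) {
      unsigned NumOps = MI->getDesc().getNumOperands();
      // Predicable encodings carry an extra trailing operand.
      return NumOps - (MI->isPredicable() ? 3 : 2);
    }
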
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index e9f70d5acea..f09a1c7e4f7 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -6015,7 +6015,7 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
     // executed.
     for (MachineBasicBlock::reverse_iterator
            II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
-      if (!II->getDesc().isCall()) continue;
+      if (!II->isCall()) continue;
 
       DenseMap<unsigned, bool> DefRegs;
       for (MachineInstr::mop_iterator
@@ -6426,13 +6426,13 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
 
 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
                                                       SDNode *Node) const {
-  const MCInstrDesc *MCID = &MI->getDesc();
-  if (!MCID->hasPostISelHook()) {
+  if (!MI->hasPostISelHook()) {
     assert(!convertAddSubFlagsOpcode(MI->getOpcode()) &&
            "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'");
     return;
   }
 
+  const MCInstrDesc *MCID = &MI->getDesc();
   // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
   // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
   // operand is still set to noreg. If needed, set the optional operand's
@@ -6459,7 +6459,7 @@ void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
 
   // Any ARM instruction that sets the 's' bit should specify an optional
   // "cc_out" operand in the last operand position.
-  if (!MCID->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
+  if (!MI->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
     assert(!NewOpc && "Optional cc_out operand required");
     return;
   }
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 392c0909e4d..6712fb6e9ad 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1472,19 +1472,18 @@ static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
   while (++I != E) {
     if (I->isDebugValue() || MemOps.count(&*I))
       continue;
-    const MCInstrDesc &MCID = I->getDesc();
-    if (MCID.isCall() || MCID.isTerminator() || I->hasUnmodeledSideEffects())
+    if (I->isCall() || I->isTerminator() || I->hasUnmodeledSideEffects())
       return false;
-    if (isLd && MCID.mayStore())
+    if (isLd && I->mayStore())
       return false;
     if (!isLd) {
-      if (MCID.mayLoad())
+      if (I->mayLoad())
         return false;
       // It's not safe to move the first 'str' down.
       // str r1, [r0]
       // strh r5, [r0]
       // str r4, [r0, #+4]
-      if (MCID.mayStore())
+      if (I->mayStore())
         return false;
     }
     for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
@@ -1774,8 +1773,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
   while (MBBI != E) {
     for (; MBBI != E; ++MBBI) {
       MachineInstr *MI = MBBI;
-      const MCInstrDesc &MCID = MI->getDesc();
-      if (MCID.isCall() || MCID.isTerminator()) {
+      if (MI->isCall() || MI->isTerminator()) {
         // Stop at barriers.
         ++MBBI;
         break;
diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp
index 2df00538b39..a187f8e92a3 100644
--- a/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/lib/Target/ARM/MLxExpansionPass.cpp
@@ -139,7 +139,7 @@ bool MLxExpansion::hasRAWHazard(unsigned Reg, MachineInstr *MI) const {
   // FIXME: Detect integer instructions properly.
   const MCInstrDesc &MCID = MI->getDesc();
   unsigned Domain = MCID.TSFlags & ARMII::DomainMask;
-  if (MCID.mayStore())
+  if (MI->mayStore())
     return false;
   unsigned Opcode = MCID.getOpcode();
   if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
@@ -274,7 +274,7 @@ bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) {
     }
 
     const MCInstrDesc &MCID = MI->getDesc();
-    if (MCID.isBarrier()) {
+    if (MI->isBarrier()) {
       clearStack();
       Skip = 0;
       ++MII;
diff --git a/lib/Target/ARM/Thumb1RegisterInfo.cpp b/lib/Target/ARM/Thumb1RegisterInfo.cpp
index e8ed482a66f..e61c0a733b0 100644
--- a/lib/Target/ARM/Thumb1RegisterInfo.cpp
+++ b/lib/Target/ARM/Thumb1RegisterInfo.cpp
@@ -643,14 +643,13 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   assert(Offset && "This code isn't needed if offset already handled!");
 
   unsigned Opcode = MI.getOpcode();
-  const MCInstrDesc &Desc = MI.getDesc();
 
   // Remove predicate first.
   int PIdx = MI.findFirstPredOperandIdx();
   if (PIdx != -1)
     removeOperands(MI, PIdx);
 
-  if (Desc.mayLoad()) {
+  if (MI.mayLoad()) {
     // Use the destination register to materialize sp + offset.
     unsigned TmpReg = MI.getOperand(0).getReg();
     bool UseRR = false;
@@ -673,7 +672,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
       // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
       // register. The offset is already handled in the vreg value.
       MI.getOperand(i+1).ChangeToRegister(FrameReg, false, false, false);
-  } else if (Desc.mayStore()) {
+  } else if (MI.mayStore()) {
       VReg = MF.getRegInfo().createVirtualRegister(ARM::tGPRRegisterClass);
       bool UseRR = false;
 
@@ -699,7 +698,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   }
 
   // Add predicate back if it's needed.
-  if (MI.getDesc().isPredicable()) {
+  if (MI.isPredicable()) {
     MachineInstrBuilder MIB(&MI);
     AddDefaultPred(MIB);
   }
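
In eliminateFrameIndex above, the now bundle-aware memory flags pick the rewrite strategy. A hedged sketch of that dispatch (hypothetical function; comments summarize the two branches above):

    #include "llvm/CodeGen/MachineInstr.h"

    // Illustration only: choosing a frame-index rewrite strategy from the
    // instruction's memory flags, as eliminateFrameIndex does.
    static void rewriteFrameAccess(llvm::MachineInstr &MI) {
      if (MI.mayLoad()) {
        // A load can reuse its destination register to materialize sp+offset.
      } else if (MI.mayStore()) {
        // A store needs a fresh scratch register for the address instead.
      }
    }
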
diff --git a/lib/Target/ARM/Thumb2ITBlockPass.cpp b/lib/Target/ARM/Thumb2ITBlockPass.cpp
index b6274007b23..d1acb6f83f7 100644
--- a/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -141,7 +141,7 @@ Thumb2ITBlockPass::MoveCopyOutOfITBlock(MachineInstr *MI,
   //   rsb   r2, 0
   //
   const MCInstrDesc &MCID = MI->getDesc();
-  if (MCID.hasOptionalDef() &&
+  if (MI->hasOptionalDef() &&
      MI->getOperand(MCID.getNumOperands() - 1).getReg() == ARM::CPSR)
    return false;
 
@@ -198,7 +198,7 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
     // Branches, including tricky ones like LDM_RET, need to end an IT
     // block so check the instruction we just put in the block.
     for (; MBBI != E && Pos &&
-           (!MI->getDesc().isBranch() && !MI->getDesc().isReturn()) ; ++MBBI) {
+           (!MI->isBranch() && !MI->isReturn()) ; ++MBBI) {
       if (MBBI->isDebugValue())
         continue;
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index e5fc8b4fdd5..c0e3ac69bcb 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -533,8 +533,7 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
   if (Entry.LowRegs1 && !VerifyLowRegs(MI))
     return false;
 
-  const MCInstrDesc &MCID = MI->getDesc();
-  if (MCID.mayLoad() || MCID.mayStore())
+  if (MI->mayLoad() || MI->mayStore())
     return ReduceLoadStore(MBB, MI, Entry);
 
   switch (Opc) {
@@ -877,7 +876,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
   ProcessNext:
     bool DefCPSR = false;
     LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
-    if (MI->getDesc().isCall()) {
+    if (MI->isCall()) {
       // Calls don't really set CPSR.
       CPSRDef = 0;
       IsSelfLoop = false;
diff --git a/lib/Target/MBlaze/MBlazeAsmPrinter.cpp b/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
index ff051e331f9..c751dd8caa5 100644
--- a/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
+++ b/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
@@ -310,9 +310,9 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
 
   // Check if the last terminator is an unconditional branch.
   MachineBasicBlock::const_iterator I = Pred->end();
-  while (I != Pred->begin() && !(--I)->getDesc().isTerminator())
+  while (I != Pred->begin() && !(--I)->isTerminator())
     ; // Noop
-  return I == Pred->end() || !I->getDesc().isBarrier();
+  return I == Pred->end() || !I->isBarrier();
 }
 
 // Force static initialization.
diff --git a/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp b/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
index e4b295d5b77..19e787d8622 100644
--- a/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
+++ b/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
@@ -107,7 +107,6 @@ static bool delayHasHazard(MachineBasicBlock::iterator &candidate,
   // Hazard check
   MachineBasicBlock::iterator a = candidate;
   MachineBasicBlock::iterator b = slot;
-  MCInstrDesc desc = candidate->getDesc();
 
   // MBB layout:-
   //    candidate := a0 = operation(a1, a2)
@@ -121,7 +120,7 @@ static bool delayHasHazard(MachineBasicBlock::iterator &candidate,
   //  4. b0 is one or more of {a1, a2}
   //  5. a accesses memory, and the middle bit
   //     contains a store operation.
-  bool a_is_memory = desc.mayLoad() || desc.mayStore();
+  bool a_is_memory = candidate->mayLoad() || candidate->mayStore();
 
   // Determine the number of operands in the slot instruction and in the
   // candidate instruction.
@@ -154,7 +153,7 @@ static bool delayHasHazard(MachineBasicBlock::iterator &candidate,
     }
 
     // Check hazard type 5
-    if (a_is_memory && m->getDesc().mayStore())
+    if (a_is_memory && m->mayStore())
       return true;
   }
 
@@ -181,8 +180,8 @@ static bool isDelayFiller(MachineBasicBlock &MBB,
   if (candidate == MBB.begin())
     return false;
 
-  MCInstrDesc brdesc = (--candidate)->getDesc();
-  return (brdesc.hasDelaySlot());
+  --candidate;
+  return (candidate->hasDelaySlot());
 }
 
 static bool hasUnknownSideEffects(MachineBasicBlock::iterator &I) {
@@ -209,9 +208,8 @@ findDelayInstr(MachineBasicBlock &MBB,MachineBasicBlock::iterator slot) {
       break;
 
     --I;
-    MCInstrDesc desc = I->getDesc();
-    if (desc.hasDelaySlot() || desc.isBranch() || isDelayFiller(MBB,I) ||
-        desc.isCall() || desc.isReturn() || desc.isBarrier() ||
+    if (I->hasDelaySlot() || I->isBranch() || isDelayFiller(MBB,I) ||
+        I->isCall() || I->isReturn() || I->isBarrier() ||
         hasUnknownSideEffects(I))
       break;
 
@@ -230,7 +228,7 @@ findDelayInstr(MachineBasicBlock &MBB,MachineBasicBlock::iterator slot) {
 bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
   bool Changed = false;
   for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
-    if (I->getDesc().hasDelaySlot()) {
+    if (I->hasDelaySlot()) {
       MachineBasicBlock::iterator D = MBB.end();
       MachineBasicBlock::iterator J = I;
diff --git a/lib/Target/MSP430/MSP430FrameLowering.cpp b/lib/Target/MSP430/MSP430FrameLowering.cpp
index 3fc7c10c36a..e406ff25aa4 100644
--- a/lib/Target/MSP430/MSP430FrameLowering.cpp
+++ b/lib/Target/MSP430/MSP430FrameLowering.cpp
@@ -140,7 +140,7 @@ void MSP430FrameLowering::emitEpilogue(MachineFunction &MF,
   while (MBBI != MBB.begin()) {
     MachineBasicBlock::iterator PI = prior(MBBI);
     unsigned Opc = PI->getOpcode();
-    if (Opc != MSP430::POP16r && !PI->getDesc().isTerminator())
+    if (Opc != MSP430::POP16r && !PI->isTerminator())
       break;
     --MBBI;
   }
diff --git a/lib/Target/MSP430/MSP430InstrInfo.cpp b/lib/Target/MSP430/MSP430InstrInfo.cpp
index 81f766e5910..9d3c7e9d7f3 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -158,13 +158,12 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
 }
 
 bool MSP430InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
-  const MCInstrDesc &MCID = MI->getDesc();
-  if (!MCID.isTerminator()) return false;
+  if (!MI->isTerminator()) return false;
 
   // Conditional branch is a special case.
-  if (MCID.isBranch() && !MCID.isBarrier())
+  if (MI->isBranch() && !MI->isBarrier())
     return true;
-  if (!MCID.isPredicable())
+  if (!MI->isPredicable())
     return true;
   return !isPredicated(MI);
 }
@@ -189,7 +188,7 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
 
     // A terminator that isn't a branch can't easily be handled
     // by this analysis.
-    if (!I->getDesc().isBranch())
+    if (!I->isBranch())
       return true;
 
     // Cannot handle indirect branches.
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index d27e3abd7f3..cc25c4c7bd2 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -317,9 +317,9 @@ bool MipsAsmPrinter::isBlockOnlyReachableByFallthrough(const MachineBasicBlock*
   // Otherwise, check the last instruction.
   // Check if the last terminator is an unconditional branch.
   MachineBasicBlock::const_iterator I = Pred->end();
-  while (I != Pred->begin() && !(--I)->getDesc().isTerminator()) ;
+  while (I != Pred->begin() && !(--I)->isTerminator()) ;
 
-  return !I->getDesc().isBarrier();
+  return !I->isBarrier();
 }
 
 // Print out an operand for an inline asm expression.
diff --git a/lib/Target/Mips/MipsCodeEmitter.cpp b/lib/Target/Mips/MipsCodeEmitter.cpp
index 17cc0c533e6..6b26e24c6e6 100644
--- a/lib/Target/Mips/MipsCodeEmitter.cpp
+++ b/lib/Target/Mips/MipsCodeEmitter.cpp
@@ -161,7 +161,7 @@ unsigned MipsCodeEmitter::getRelocation(const MachineInstr &MI,
   if (Form == MipsII::FrmJ)
     return Mips::reloc_mips_26;
   if ((Form == MipsII::FrmI || Form == MipsII::FrmFI)
-       && MI.getDesc().isBranch())
+       && MI.isBranch())
     return Mips::reloc_mips_branch;
   if (Form == MipsII::FrmI && MI.getOpcode() == Mips::LUi)
     return Mips::reloc_mips_hi;
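
MBlaze, Mips, and Sparc carry a near-identical fallthrough test, and all three change the same way. A condensed sketch of that shared logic (hypothetical free function):

    #include "llvm/CodeGen/MachineBasicBlock.h"

    // Rewind to the predecessor's last terminator; the block can only fall
    // through if that terminator is not a barrier (illustration only).
    static bool predMayFallThrough(const llvm::MachineBasicBlock *Pred) {
      llvm::MachineBasicBlock::const_iterator I = Pred->end();
      while (I != Pred->begin() && !(--I)->isTerminator())
        ; // skip non-terminators
      return I == Pred->end() || !I->isBarrier();
    }
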
diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp
index be3b7a02ec3..1d9e9b08617 100644
--- a/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -96,7 +96,7 @@ runOnMachineBasicBlock(MachineBasicBlock &MBB) {
   LastFiller = MBB.end();
 
   for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
-    if (I->getDesc().hasDelaySlot()) {
+    if (I->hasDelaySlot()) {
       ++FilledSlots;
       Changed = true;
@@ -146,7 +146,7 @@ bool Filler::findDelayInstr(MachineBasicBlock &MBB,
         || I->isInlineAsm()
         || I->isLabel()
         || FI == LastFiller
-        || I->getDesc().isPseudo()
+        || I->isPseudo()
        //
        // Should not allow:
        // ERET, DERET or WAIT, PAUSE. Need to add these to instruction
@@ -174,16 +174,15 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
   if (candidate->isImplicitDef() || candidate->isKill())
     return true;
 
-  MCInstrDesc MCID = candidate->getDesc();
   // Loads or stores cannot be moved past a store to the delay slot
   // and stores cannot be moved past a load.
-  if (MCID.mayLoad()) {
+  if (candidate->mayLoad()) {
     if (sawStore)
       return true;
     sawLoad = true;
   }
 
-  if (MCID.mayStore()) {
+  if (candidate->mayStore()) {
     if (sawStore)
       return true;
     sawStore = true;
@@ -191,7 +190,7 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
       return true;
   }
 
-  assert((!MCID.isCall() && !MCID.isReturn()) &&
+  assert((!candidate->isCall() && !candidate->isReturn()) &&
          "Cannot put calls or returns in delay slot.");
 
   for (unsigned i = 0, e = candidate->getNumOperands(); i!= e; ++i) {
@@ -221,11 +220,11 @@ void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
                             SmallSet<unsigned, 32>& RegUses) {
   // If MI is a call or return, just examine the explicit non-variadic operands.
   MCInstrDesc MCID = MI->getDesc();
-  unsigned e = MCID.isCall() || MCID.isReturn() ? MCID.getNumOperands() :
-                                                  MI->getNumOperands();
+  unsigned e = MI->isCall() || MI->isReturn() ? MCID.getNumOperands() :
+                                                MI->getNumOperands();
 
   // Add RA to RegDefs to prevent users of RA from going into delay slot.
-  if (MCID.isCall())
+  if (MI->isCall())
     RegDefs.insert(Mips::RA);
 
   for (unsigned i = 0; i != e; ++i) {
diff --git a/lib/Target/PTX/PTXInstrInfo.cpp b/lib/Target/PTX/PTXInstrInfo.cpp
index 0190a8572a4..871b3a76bb4 100644
--- a/lib/Target/PTX/PTXInstrInfo.cpp
+++ b/lib/Target/PTX/PTXInstrInfo.cpp
@@ -116,7 +116,7 @@ bool PTXInstrInfo::isPredicated(const MachineInstr *MI) const {
 }
 
 bool PTXInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
-  return !isPredicated(MI) && get(MI->getOpcode()).isTerminator();
+  return !isPredicated(MI) && MI->isTerminator();
 }
 
 bool PTXInstrInfo::
@@ -186,13 +186,11 @@ AnalyzeBranch(MachineBasicBlock &MBB,
   MachineBasicBlock::iterator iter = MBB.end();
   const MachineInstr& instLast1 = *--iter;
-  const MCInstrDesc &desc1 = instLast1.getDesc();
   // for special case that MBB has only 1 instruction
   const bool IsSizeOne = MBB.size() == 1;
   // if IsSizeOne is true, *--iter and instLast2 are invalid
   // we put a dummy value in instLast2 and desc2 since they are used
   const MachineInstr& instLast2 = IsSizeOne ? instLast1 : *--iter;
-  const MCInstrDesc &desc2 = IsSizeOne ? desc1 : instLast2.getDesc();
 
   DEBUG(dbgs() << "\n");
   DEBUG(dbgs() << "AnalyzeBranch: opcode: " << instLast1.getOpcode() << "\n");
@@ -207,7 +205,7 @@ AnalyzeBranch(MachineBasicBlock &MBB,
   }
 
   // this block ends with only an unconditional branch
-  if (desc1.isUnconditionalBranch() &&
+  if (instLast1.isUnconditionalBranch() &&
       // when IsSizeOne is true, it "absorbs" the evaluation of instLast2
       (IsSizeOne || !IsAnyKindOfBranch(instLast2))) {
     DEBUG(dbgs() << "AnalyzeBranch: ends with only uncond branch\n");
@@ -217,7 +215,7 @@ AnalyzeBranch(MachineBasicBlock &MBB,
 
   // this block ends with a conditional branch and
   // it falls through to a successor block
-  if (desc1.isConditionalBranch() &&
+  if (instLast1.isConditionalBranch() &&
       IsAnySuccessorAlsoLayoutSuccessor(MBB)) {
     DEBUG(dbgs() << "AnalyzeBranch: ends with cond branch and fall through\n");
     TBB = GetBranchTarget(instLast1);
@@ -233,8 +231,8 @@ AnalyzeBranch(MachineBasicBlock &MBB,
 
   // this block ends with a conditional branch
   // followed by an unconditional branch
-  if (desc2.isConditionalBranch() &&
-      desc1.isUnconditionalBranch()) {
+  if (instLast2.isConditionalBranch() &&
+      instLast1.isUnconditionalBranch()) {
     DEBUG(dbgs() << "AnalyzeBranch: ends with cond and uncond branch\n");
     TBB = GetBranchTarget(instLast2);
     FBB = GetBranchTarget(instLast1);
@@ -341,8 +339,7 @@ void PTXInstrInfo::AddDefaultPredicate(MachineInstr *MI) {
 }
 
 bool PTXInstrInfo::IsAnyKindOfBranch(const MachineInstr& inst) {
-  const MCInstrDesc &desc = inst.getDesc();
-  return desc.isTerminator() || desc.isBranch() || desc.isIndirectBranch();
+  return inst.isTerminator() || inst.isBranch() || inst.isIndirectBranch();
 }
 
 bool PTXInstrInfo::
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index e9a4290bae1..5c45018eba3 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -64,7 +64,7 @@ static void RemoveVRSaveCode(MachineInstr *MI) {
   // epilog blocks.
   for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I) {
     // If last instruction is a return instruction, add an epilogue
-    if (!I->empty() && I->back().getDesc().isReturn()) {
+    if (!I->empty() && I->back().isReturn()) {
       bool FoundIt = false;
       for (MBBI = I->end(); MBBI != I->begin(); ) {
         --MBBI;
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 3dee4067af9..038cecade57 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -210,13 +210,13 @@ void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
   // Find all return blocks, outputting a restore in each epilog.
   for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
-    if (!BB->empty() && BB->back().getDesc().isReturn()) {
+    if (!BB->empty() && BB->back().isReturn()) {
       IP = BB->end(); --IP;
 
       // Skip over all terminator instructions, which are part of the return
       // sequence.
       MachineBasicBlock::iterator I2 = IP;
-      while (I2 != BB->begin() && (--I2)->getDesc().isTerminator())
+      while (I2 != BB->begin() && (--I2)->isTerminator())
         IP = I2;
 
       // Emit: MTVRSAVE InVRSave
diff --git a/lib/Target/Sparc/DelaySlotFiller.cpp b/lib/Target/Sparc/DelaySlotFiller.cpp
index dab35e5e4e6..9295408605f 100644
--- a/lib/Target/Sparc/DelaySlotFiller.cpp
+++ b/lib/Target/Sparc/DelaySlotFiller.cpp
@@ -100,7 +100,7 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
   bool Changed = false;
 
   for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
-    if (I->getDesc().hasDelaySlot()) {
+    if (I->hasDelaySlot()) {
       MachineBasicBlock::iterator D = MBB.end();
       MachineBasicBlock::iterator J = I;
 
@@ -149,7 +149,7 @@ Filler::findDelayInstr(MachineBasicBlock &MBB,
   }
 
   //Call's delay filler can def some of call's uses.
-  if (slot->getDesc().isCall())
+  if (slot->isCall())
     insertCallUses(slot, RegUses);
   else
     insertDefsUses(slot, RegDefs, RegUses);
@@ -170,7 +170,7 @@ Filler::findDelayInstr(MachineBasicBlock &MBB,
     if (I->hasUnmodeledSideEffects()
         || I->isInlineAsm()
         || I->isLabel()
-        || I->getDesc().hasDelaySlot()
+        || I->hasDelaySlot()
         || isDelayFiller(MBB, I))
       break;
 
@@ -194,13 +194,13 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
   if (candidate->isImplicitDef() || candidate->isKill())
     return true;
 
-  if (candidate->getDesc().mayLoad()) {
+  if (candidate->mayLoad()) {
     sawLoad = true;
     if (sawStore)
       return true;
   }
 
-  if (candidate->getDesc().mayStore()) {
+  if (candidate->mayStore()) {
     if (sawStore)
       return true;
     sawStore = true;
@@ -298,13 +298,13 @@ bool Filler::isDelayFiller(MachineBasicBlock &MBB,
     return false;
   if (candidate->getOpcode() == SP::UNIMP)
     return true;
-  const MCInstrDesc &prevdesc = (--candidate)->getDesc();
-  return prevdesc.hasDelaySlot();
+  --candidate;
+  return candidate->hasDelaySlot();
 }
 
 bool Filler::needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize)
 {
-  if (!I->getDesc().isCall())
+  if (!I->isCall())
     return false;
 
   unsigned structSizeOpNum = 0;
diff --git a/lib/Target/Sparc/SparcAsmPrinter.cpp b/lib/Target/Sparc/SparcAsmPrinter.cpp
index deb39d9e2e7..7548bbf6ebe 100644
--- a/lib/Target/Sparc/SparcAsmPrinter.cpp
+++ b/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -236,9 +236,9 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
 
   // Check if the last terminator is an unconditional branch.
   MachineBasicBlock::const_iterator I = Pred->end();
-  while (I != Pred->begin() && !(--I)->getDesc().isTerminator())
+  while (I != Pred->begin() && !(--I)->isTerminator())
     ; // Noop
-  return I == Pred->end() || !I->getDesc().isBarrier();
+  return I == Pred->end() || !I->isBarrier();
 }
diff --git a/lib/Target/Sparc/SparcInstrInfo.cpp b/lib/Target/Sparc/SparcInstrInfo.cpp
index 1c48c047397..5290d42a0a6 100644
--- a/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -133,7 +133,7 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
       break;
 
     //Terminator is not a branch
-    if (!I->getDesc().isBranch())
+    if (!I->isBranch())
       return true;
 
     //Handle Unconditional branches
diff --git a/lib/Target/TargetInstrInfo.cpp b/lib/Target/TargetInstrInfo.cpp
index d52ecb32cf7..2097a18cf72 100644
--- a/lib/Target/TargetInstrInfo.cpp
+++ b/lib/Target/TargetInstrInfo.cpp
@@ -130,13 +130,12 @@ void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
 
 bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
-  const MCInstrDesc &MCID = MI->getDesc();
-  if (!MCID.isTerminator()) return false;
+  if (!MI->isTerminator()) return false;
 
   // Conditional branch is a special case.
-  if (MCID.isBranch() && !MCID.isBarrier())
+  if (MI->isBranch() && !MI->isBarrier())
     return true;
-  if (!MCID.isPredicable())
+  if (!MI->isPredicable())
     return true;
   return !isPredicated(MI);
 }
diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp
index ba615a89b40..ed16e882adf 100644
--- a/lib/Target/X86/X86CodeEmitter.cpp
+++ b/lib/Target/X86/X86CodeEmitter.cpp
@@ -1004,7 +1004,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
     break;
   }
 
-  if (!Desc->isVariadic() && CurOp != NumOps) {
+  if (!MI.isVariadic() && CurOp != NumOps) {
 #ifndef NDEBUG
     dbgs() << "Cannot encode all operands of: " << MI << "\n";
 #endif
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index f8371e000d5..158acc90c10 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -991,7 +991,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
     unsigned Opc = PI->getOpcode();
 
     if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
-        !PI->getDesc().isTerminator())
+        !PI->isTerminator())
       break;
 
     --MBBI;
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index af8303d872e..ed28a8573ce 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -2040,13 +2040,12 @@ X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
 }
 
 bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
-  const MCInstrDesc &MCID = MI->getDesc();
-  if (!MCID.isTerminator()) return false;
+  if (!MI->isTerminator()) return false;
 
   // Conditional branch is a special case.
-  if (MCID.isBranch() && !MCID.isBarrier())
+  if (MI->isBranch() && !MI->isBarrier())
     return true;
-  if (!MCID.isPredicable())
+  if (!MI->isPredicable())
     return true;
   return !isPredicated(MI);
 }
@@ -2072,7 +2071,7 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
 
     // A terminator that isn't a branch can't easily be handled by this
     // analysis.
-    if (!I->getDesc().isBranch())
+    if (!I->isBranch())
       return true;
 
     // Handle unconditional branches.
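
MSP430, the generic TargetInstrInfo, and X86 all implement isUnpredicatedTerminator with the same three-step test. An annotated sketch (isPredicatedFn stands in for the target's isPredicated hook):

    #include "llvm/CodeGen/MachineInstr.h"

    static bool unpredicatedTerminatorSketch(
        const llvm::MachineInstr *MI,
        bool (*isPredicatedFn)(const llvm::MachineInstr *)) {
      if (!MI->isTerminator())
        return false;             // not a terminator at all
      if (MI->isBranch() && !MI->isBarrier())
        return true;              // a conditional branch counts as unpredicated
      if (!MI->isPredicable())
        return true;              // cannot carry a predicate operand
      return !isPredicatedFn(MI); // otherwise ask the target
    }
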
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index bf1ed08841a..4e804325a39 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -583,7 +583,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
     // sure we restore the stack pointer immediately after the call, there may
     // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
     MachineBasicBlock::iterator B = MBB.begin();
-    while (I != B && !llvm::prior(I)->getDesc().isCall())
+    while (I != B && !llvm::prior(I)->isCall())
       --I;
     MBB.insert(I, New);
   }
diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp
index 9bb54a826bd..f8c30eb9b90 100644
--- a/lib/Target/X86/X86VZeroUpper.cpp
+++ b/lib/Target/X86/X86VZeroUpper.cpp
@@ -220,7 +220,7 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
   for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
     MachineInstr *MI = I;
     DebugLoc dl = I->getDebugLoc();
-    bool isControlFlow = MI->getDesc().isCall() || MI->getDesc().isReturn();
+    bool isControlFlow = MI->isCall() || MI->isReturn();
 
     // Shortcut: don't need to check regular instructions in dirty state.
     if (!isControlFlow && CurState == ST_DIRTY)