//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//
14 #include "llvm/IR/Instruction.h"
15 #include "llvm/IR/CallSite.h"
16 #include "llvm/IR/Constants.h"
17 #include "llvm/IR/Instructions.h"
18 #include "llvm/IR/Module.h"
19 #include "llvm/IR/Operator.h"
20 #include "llvm/IR/Type.h"
21 #include "llvm/Support/LeakDetector.h"
24 Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
25 Instruction *InsertBefore)
26 : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(0) {
27 // Make sure that we get added to a basicblock
28 LeakDetector::addGarbageObject(this);
30 // If requested, insert this instruction into a basic block...
32 assert(InsertBefore->getParent() &&
33 "Instruction to insert before is not in a basic block!");
34 InsertBefore->getParent()->getInstList().insert(InsertBefore, this);
38 const DataLayout *Instruction::getDataLayout() const {
39 return getParent()->getDataLayout();
42 Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
43 BasicBlock *InsertAtEnd)
44 : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(0) {
45 // Make sure that we get added to a basicblock
46 LeakDetector::addGarbageObject(this);
48 // append this instruction into the basic block
49 assert(InsertAtEnd && "Basic block to append to may not be NULL!");
50 InsertAtEnd->getInstList().push_back(this);
54 // Out of line virtual method, so the vtable, etc has a home.
55 Instruction::~Instruction() {
56 assert(Parent == 0 && "Instruction still linked in the program!");
57 if (hasMetadataHashEntry())
58 clearMetadataHashEntries();
62 void Instruction::setParent(BasicBlock *P) {
64 if (!P) LeakDetector::addGarbageObject(this);
66 if (P) LeakDetector::removeGarbageObject(this);
72 void Instruction::removeFromParent() {
73 getParent()->getInstList().remove(this);
76 void Instruction::eraseFromParent() {
77 getParent()->getInstList().erase(this);
80 /// insertBefore - Insert an unlinked instructions into a basic block
81 /// immediately before the specified instruction.
82 void Instruction::insertBefore(Instruction *InsertPos) {
83 InsertPos->getParent()->getInstList().insert(InsertPos, this);
86 /// insertAfter - Insert an unlinked instructions into a basic block
87 /// immediately after the specified instruction.
88 void Instruction::insertAfter(Instruction *InsertPos) {
89 InsertPos->getParent()->getInstList().insertAfter(InsertPos, this);
92 /// moveBefore - Unlink this instruction from its current basic block and
93 /// insert it into the basic block that MovePos lives in, right before
95 void Instruction::moveBefore(Instruction *MovePos) {
96 MovePos->getParent()->getInstList().splice(MovePos,getParent()->getInstList(),
100 /// Set or clear the unsafe-algebra flag on this instruction, which must be an
101 /// operator which supports this flag. See LangRef.html for the meaning of this
103 void Instruction::setHasUnsafeAlgebra(bool B) {
104 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
105 cast<FPMathOperator>(this)->setHasUnsafeAlgebra(B);
108 /// Set or clear the NoNaNs flag on this instruction, which must be an operator
109 /// which supports this flag. See LangRef.html for the meaning of this flag.
110 void Instruction::setHasNoNaNs(bool B) {
111 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
112 cast<FPMathOperator>(this)->setHasNoNaNs(B);
115 /// Set or clear the no-infs flag on this instruction, which must be an operator
116 /// which supports this flag. See LangRef.html for the meaning of this flag.
117 void Instruction::setHasNoInfs(bool B) {
118 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
119 cast<FPMathOperator>(this)->setHasNoInfs(B);
122 /// Set or clear the no-signed-zeros flag on this instruction, which must be an
123 /// operator which supports this flag. See LangRef.html for the meaning of this
125 void Instruction::setHasNoSignedZeros(bool B) {
126 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
127 cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
130 /// Set or clear the allow-reciprocal flag on this instruction, which must be an
131 /// operator which supports this flag. See LangRef.html for the meaning of this
133 void Instruction::setHasAllowReciprocal(bool B) {
134 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
135 cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
138 /// Convenience function for setting all the fast-math flags on this
139 /// instruction, which must be an operator which supports these flags. See
140 /// LangRef.html for the meaning of these flats.
141 void Instruction::setFastMathFlags(FastMathFlags FMF) {
142 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
143 cast<FPMathOperator>(this)->setFastMathFlags(FMF);
146 /// Determine whether the unsafe-algebra flag is set.
147 bool Instruction::hasUnsafeAlgebra() const {
148 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
149 return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
152 /// Determine whether the no-NaNs flag is set.
153 bool Instruction::hasNoNaNs() const {
154 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
155 return cast<FPMathOperator>(this)->hasNoNaNs();
158 /// Determine whether the no-infs flag is set.
159 bool Instruction::hasNoInfs() const {
160 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
161 return cast<FPMathOperator>(this)->hasNoInfs();
164 /// Determine whether the no-signed-zeros flag is set.
165 bool Instruction::hasNoSignedZeros() const {
166 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
167 return cast<FPMathOperator>(this)->hasNoSignedZeros();
170 /// Determine whether the allow-reciprocal flag is set.
171 bool Instruction::hasAllowReciprocal() const {
172 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
173 return cast<FPMathOperator>(this)->hasAllowReciprocal();
176 /// Convenience function for getting all the fast-math flags, which must be an
177 /// operator which supports these flags. See LangRef.html for the meaning of
179 FastMathFlags Instruction::getFastMathFlags() const {
180 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
181 return cast<FPMathOperator>(this)->getFastMathFlags();
184 /// Copy I's fast-math flags
185 void Instruction::copyFastMathFlags(const Instruction *I) {
186 setFastMathFlags(I->getFastMathFlags());
190 const char *Instruction::getOpcodeName(unsigned OpCode) {
193 case Ret: return "ret";
194 case Br: return "br";
195 case Switch: return "switch";
196 case IndirectBr: return "indirectbr";
197 case Invoke: return "invoke";
198 case Resume: return "resume";
199 case Unreachable: return "unreachable";
201 // Standard binary operators...
202 case Add: return "add";
203 case FAdd: return "fadd";
204 case Sub: return "sub";
205 case FSub: return "fsub";
206 case Mul: return "mul";
207 case FMul: return "fmul";
208 case UDiv: return "udiv";
209 case SDiv: return "sdiv";
210 case FDiv: return "fdiv";
211 case URem: return "urem";
212 case SRem: return "srem";
213 case FRem: return "frem";
215 // Logical operators...
216 case And: return "and";
217 case Or : return "or";
218 case Xor: return "xor";
220 // Memory instructions...
221 case Alloca: return "alloca";
222 case Load: return "load";
223 case Store: return "store";
224 case AtomicCmpXchg: return "cmpxchg";
225 case AtomicRMW: return "atomicrmw";
226 case Fence: return "fence";
227 case GetElementPtr: return "getelementptr";
229 // Convert instructions...
230 case Trunc: return "trunc";
231 case ZExt: return "zext";
232 case SExt: return "sext";
233 case FPTrunc: return "fptrunc";
234 case FPExt: return "fpext";
235 case FPToUI: return "fptoui";
236 case FPToSI: return "fptosi";
237 case UIToFP: return "uitofp";
238 case SIToFP: return "sitofp";
239 case IntToPtr: return "inttoptr";
240 case PtrToInt: return "ptrtoint";
241 case BitCast: return "bitcast";
242 case AddrSpaceCast: return "addrspacecast";
244 // Other instructions...
245 case ICmp: return "icmp";
246 case FCmp: return "fcmp";
247 case PHI: return "phi";
248 case Select: return "select";
249 case Call: return "call";
250 case Shl: return "shl";
251 case LShr: return "lshr";
252 case AShr: return "ashr";
253 case VAArg: return "va_arg";
254 case ExtractElement: return "extractelement";
255 case InsertElement: return "insertelement";
256 case ShuffleVector: return "shufflevector";
257 case ExtractValue: return "extractvalue";
258 case InsertValue: return "insertvalue";
259 case LandingPad: return "landingpad";
261 default: return "<Invalid operator> ";
265 /// isIdenticalTo - Return true if the specified instruction is exactly
266 /// identical to the current one. This means that all operands match and any
267 /// extra information (e.g. load is volatile) agree.
268 bool Instruction::isIdenticalTo(const Instruction *I) const {
269 return isIdenticalToWhenDefined(I) &&
270 SubclassOptionalData == I->SubclassOptionalData;
273 /// isIdenticalToWhenDefined - This is like isIdenticalTo, except that it
274 /// ignores the SubclassOptionalData flags, which specify conditions
275 /// under which the instruction's result is undefined.
276 bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
277 if (getOpcode() != I->getOpcode() ||
278 getNumOperands() != I->getNumOperands() ||
279 getType() != I->getType())
282 // We have two instructions of identical opcode and #operands. Check to see
283 // if all operands are the same.
284 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
285 if (getOperand(i) != I->getOperand(i))
288 // Check special state that is a part of some instructions.
289 if (const LoadInst *LI = dyn_cast<LoadInst>(this))
290 return LI->isVolatile() == cast<LoadInst>(I)->isVolatile() &&
291 LI->getAlignment() == cast<LoadInst>(I)->getAlignment() &&
292 LI->getOrdering() == cast<LoadInst>(I)->getOrdering() &&
293 LI->getSynchScope() == cast<LoadInst>(I)->getSynchScope();
294 if (const StoreInst *SI = dyn_cast<StoreInst>(this))
295 return SI->isVolatile() == cast<StoreInst>(I)->isVolatile() &&
296 SI->getAlignment() == cast<StoreInst>(I)->getAlignment() &&
297 SI->getOrdering() == cast<StoreInst>(I)->getOrdering() &&
298 SI->getSynchScope() == cast<StoreInst>(I)->getSynchScope();
299 if (const CmpInst *CI = dyn_cast<CmpInst>(this))
300 return CI->getPredicate() == cast<CmpInst>(I)->getPredicate();
301 if (const CallInst *CI = dyn_cast<CallInst>(this))
302 return CI->isTailCall() == cast<CallInst>(I)->isTailCall() &&
303 CI->getCallingConv() == cast<CallInst>(I)->getCallingConv() &&
304 CI->getAttributes() == cast<CallInst>(I)->getAttributes();
305 if (const InvokeInst *CI = dyn_cast<InvokeInst>(this))
306 return CI->getCallingConv() == cast<InvokeInst>(I)->getCallingConv() &&
307 CI->getAttributes() == cast<InvokeInst>(I)->getAttributes();
308 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(this))
309 return IVI->getIndices() == cast<InsertValueInst>(I)->getIndices();
310 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(this))
311 return EVI->getIndices() == cast<ExtractValueInst>(I)->getIndices();
312 if (const FenceInst *FI = dyn_cast<FenceInst>(this))
313 return FI->getOrdering() == cast<FenceInst>(FI)->getOrdering() &&
314 FI->getSynchScope() == cast<FenceInst>(FI)->getSynchScope();
315 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
316 return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
317 CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() &&
318 CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
319 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
320 return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
321 RMWI->isVolatile() == cast<AtomicRMWInst>(I)->isVolatile() &&
322 RMWI->getOrdering() == cast<AtomicRMWInst>(I)->getOrdering() &&
323 RMWI->getSynchScope() == cast<AtomicRMWInst>(I)->getSynchScope();
324 if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
325 const PHINode *otherPHI = cast<PHINode>(I);
326 for (unsigned i = 0, e = thisPHI->getNumOperands(); i != e; ++i) {
327 if (thisPHI->getIncomingBlock(i) != otherPHI->getIncomingBlock(i))
336 // This should be kept in sync with isEquivalentOperation in
337 // lib/Transforms/IPO/MergeFunctions.cpp.
338 bool Instruction::isSameOperationAs(const Instruction *I,
339 unsigned flags) const {
340 bool IgnoreAlignment = flags & CompareIgnoringAlignment;
341 bool UseScalarTypes = flags & CompareUsingScalarTypes;
343 if (getOpcode() != I->getOpcode() ||
344 getNumOperands() != I->getNumOperands() ||
346 getType()->getScalarType() != I->getType()->getScalarType() :
347 getType() != I->getType()))
350 // We have two instructions of identical opcode and #operands. Check to see
351 // if all operands are the same type
352 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
354 getOperand(i)->getType()->getScalarType() !=
355 I->getOperand(i)->getType()->getScalarType() :
356 getOperand(i)->getType() != I->getOperand(i)->getType())
359 // Check special state that is a part of some instructions.
360 if (const LoadInst *LI = dyn_cast<LoadInst>(this))
361 return LI->isVolatile() == cast<LoadInst>(I)->isVolatile() &&
362 (LI->getAlignment() == cast<LoadInst>(I)->getAlignment() ||
364 LI->getOrdering() == cast<LoadInst>(I)->getOrdering() &&
365 LI->getSynchScope() == cast<LoadInst>(I)->getSynchScope();
366 if (const StoreInst *SI = dyn_cast<StoreInst>(this))
367 return SI->isVolatile() == cast<StoreInst>(I)->isVolatile() &&
368 (SI->getAlignment() == cast<StoreInst>(I)->getAlignment() ||
370 SI->getOrdering() == cast<StoreInst>(I)->getOrdering() &&
371 SI->getSynchScope() == cast<StoreInst>(I)->getSynchScope();
372 if (const CmpInst *CI = dyn_cast<CmpInst>(this))
373 return CI->getPredicate() == cast<CmpInst>(I)->getPredicate();
374 if (const CallInst *CI = dyn_cast<CallInst>(this))
375 return CI->isTailCall() == cast<CallInst>(I)->isTailCall() &&
376 CI->getCallingConv() == cast<CallInst>(I)->getCallingConv() &&
377 CI->getAttributes() == cast<CallInst>(I)->getAttributes();
378 if (const InvokeInst *CI = dyn_cast<InvokeInst>(this))
379 return CI->getCallingConv() == cast<InvokeInst>(I)->getCallingConv() &&
380 CI->getAttributes() ==
381 cast<InvokeInst>(I)->getAttributes();
382 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(this))
383 return IVI->getIndices() == cast<InsertValueInst>(I)->getIndices();
384 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(this))
385 return EVI->getIndices() == cast<ExtractValueInst>(I)->getIndices();
386 if (const FenceInst *FI = dyn_cast<FenceInst>(this))
387 return FI->getOrdering() == cast<FenceInst>(I)->getOrdering() &&
388 FI->getSynchScope() == cast<FenceInst>(I)->getSynchScope();
389 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
390 return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
391 CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() &&
392 CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
393 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
394 return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
395 RMWI->isVolatile() == cast<AtomicRMWInst>(I)->isVolatile() &&
396 RMWI->getOrdering() == cast<AtomicRMWInst>(I)->getOrdering() &&
397 RMWI->getSynchScope() == cast<AtomicRMWInst>(I)->getSynchScope();
402 /// isUsedOutsideOfBlock - Return true if there are any uses of I outside of the
403 /// specified block. Note that PHI nodes are considered to evaluate their
404 /// operands in the corresponding predecessor block.
405 bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
406 for (const_use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
407 // PHI nodes uses values in the corresponding predecessor block. For other
408 // instructions, just check to see whether the parent of the use matches up.
410 const PHINode *PN = dyn_cast<PHINode>(U);
412 if (cast<Instruction>(U)->getParent() != BB)
417 if (PN->getIncomingBlock(UI) != BB)
423 /// mayReadFromMemory - Return true if this instruction may read memory.
425 bool Instruction::mayReadFromMemory() const {
426 switch (getOpcode()) {
427 default: return false;
428 case Instruction::VAArg:
429 case Instruction::Load:
430 case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
431 case Instruction::AtomicCmpXchg:
432 case Instruction::AtomicRMW:
434 case Instruction::Call:
435 return !cast<CallInst>(this)->doesNotAccessMemory();
436 case Instruction::Invoke:
437 return !cast<InvokeInst>(this)->doesNotAccessMemory();
438 case Instruction::Store:
439 return !cast<StoreInst>(this)->isUnordered();
443 /// mayWriteToMemory - Return true if this instruction may modify memory.
445 bool Instruction::mayWriteToMemory() const {
446 switch (getOpcode()) {
447 default: return false;
448 case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
449 case Instruction::Store:
450 case Instruction::VAArg:
451 case Instruction::AtomicCmpXchg:
452 case Instruction::AtomicRMW:
454 case Instruction::Call:
455 return !cast<CallInst>(this)->onlyReadsMemory();
456 case Instruction::Invoke:
457 return !cast<InvokeInst>(this)->onlyReadsMemory();
458 case Instruction::Load:
459 return !cast<LoadInst>(this)->isUnordered();
463 bool Instruction::mayThrow() const {
464 if (const CallInst *CI = dyn_cast<CallInst>(this))
465 return !CI->doesNotThrow();
466 return isa<ResumeInst>(this);
469 bool Instruction::mayReturn() const {
470 if (const CallInst *CI = dyn_cast<CallInst>(this))
471 return !CI->doesNotReturn();
475 /// isAssociative - Return true if the instruction is associative:
477 /// Associative operators satisfy: x op (y op z) === (x op y) op z
479 /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
481 bool Instruction::isAssociative(unsigned Opcode) {
482 return Opcode == And || Opcode == Or || Opcode == Xor ||
483 Opcode == Add || Opcode == Mul;
486 bool Instruction::isAssociative() const {
487 unsigned Opcode = getOpcode();
488 if (isAssociative(Opcode))
494 return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
500 /// isCommutative - Return true if the instruction is commutative:
502 /// Commutative operators satisfy: (x op y) === (y op x)
504 /// In LLVM, these are the associative operators, plus SetEQ and SetNE, when
505 /// applied to any type.
507 bool Instruction::isCommutative(unsigned op) {
522 /// isIdempotent - Return true if the instruction is idempotent:
524 /// Idempotent operators satisfy: x op x === x
526 /// In LLVM, the And and Or operators are idempotent.
528 bool Instruction::isIdempotent(unsigned Opcode) {
529 return Opcode == And || Opcode == Or;
532 /// isNilpotent - Return true if the instruction is nilpotent:
534 /// Nilpotent operators satisfy: x op x === Id,
536 /// where Id is the identity for the operator, i.e. a constant such that
537 /// x op Id === x and Id op x === x for all x.
539 /// In LLVM, the Xor operator is nilpotent.
541 bool Instruction::isNilpotent(unsigned Opcode) {
542 return Opcode == Xor;
545 Instruction *Instruction::clone() const {
546 Instruction *New = clone_impl();
547 New->SubclassOptionalData = SubclassOptionalData;
551 // Otherwise, enumerate and copy over metadata from the old instruction to the
553 SmallVector<std::pair<unsigned, MDNode*>, 4> TheMDs;
554 getAllMetadataOtherThanDebugLoc(TheMDs);
555 for (unsigned i = 0, e = TheMDs.size(); i != e; ++i)
556 New->setMetadata(TheMDs[i].first, TheMDs[i].second);
558 New->setDebugLoc(getDebugLoc());