//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/Instruction.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LeakDetector.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
using namespace llvm;
24 Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
25 Instruction *InsertBefore)
26 : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
27 // Make sure that we get added to a basicblock
28 LeakDetector::addGarbageObject(this);
30 // If requested, insert this instruction into a basic block...
32 assert(InsertBefore->getParent() &&
33 "Instruction to insert before is not in a basic block!");
34 InsertBefore->getParent()->getInstList().insert(InsertBefore, this);
38 const DataLayout *Instruction::getDataLayout() const {
39 return getParent()->getDataLayout();
42 Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
43 BasicBlock *InsertAtEnd)
44 : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
45 // Make sure that we get added to a basicblock
46 LeakDetector::addGarbageObject(this);
48 // append this instruction into the basic block
49 assert(InsertAtEnd && "Basic block to append to may not be NULL!");
50 InsertAtEnd->getInstList().push_back(this);
54 // Out of line virtual method, so the vtable, etc has a home.
55 Instruction::~Instruction() {
56 assert(!Parent && "Instruction still linked in the program!");
57 if (hasMetadataHashEntry())
58 clearMetadataHashEntries();
62 void Instruction::setParent(BasicBlock *P) {
64 if (!P) LeakDetector::addGarbageObject(this);
66 if (P) LeakDetector::removeGarbageObject(this);
72 void Instruction::removeFromParent() {
73 getParent()->getInstList().remove(this);
76 void Instruction::eraseFromParent() {
77 getParent()->getInstList().erase(this);
80 /// insertBefore - Insert an unlinked instructions into a basic block
81 /// immediately before the specified instruction.
82 void Instruction::insertBefore(Instruction *InsertPos) {
83 InsertPos->getParent()->getInstList().insert(InsertPos, this);
86 /// insertAfter - Insert an unlinked instructions into a basic block
87 /// immediately after the specified instruction.
88 void Instruction::insertAfter(Instruction *InsertPos) {
89 InsertPos->getParent()->getInstList().insertAfter(InsertPos, this);
92 /// moveBefore - Unlink this instruction from its current basic block and
93 /// insert it into the basic block that MovePos lives in, right before
95 void Instruction::moveBefore(Instruction *MovePos) {
96 MovePos->getParent()->getInstList().splice(MovePos,getParent()->getInstList(),
100 /// Set or clear the unsafe-algebra flag on this instruction, which must be an
101 /// operator which supports this flag. See LangRef.html for the meaning of this
103 void Instruction::setHasUnsafeAlgebra(bool B) {
104 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
105 cast<FPMathOperator>(this)->setHasUnsafeAlgebra(B);
108 /// Set or clear the NoNaNs flag on this instruction, which must be an operator
109 /// which supports this flag. See LangRef.html for the meaning of this flag.
110 void Instruction::setHasNoNaNs(bool B) {
111 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
112 cast<FPMathOperator>(this)->setHasNoNaNs(B);
115 /// Set or clear the no-infs flag on this instruction, which must be an operator
116 /// which supports this flag. See LangRef.html for the meaning of this flag.
117 void Instruction::setHasNoInfs(bool B) {
118 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
119 cast<FPMathOperator>(this)->setHasNoInfs(B);
122 /// Set or clear the no-signed-zeros flag on this instruction, which must be an
123 /// operator which supports this flag. See LangRef.html for the meaning of this
125 void Instruction::setHasNoSignedZeros(bool B) {
126 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
127 cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
130 /// Set or clear the allow-reciprocal flag on this instruction, which must be an
131 /// operator which supports this flag. See LangRef.html for the meaning of this
133 void Instruction::setHasAllowReciprocal(bool B) {
134 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
135 cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
138 /// Convenience function for setting all the fast-math flags on this
139 /// instruction, which must be an operator which supports these flags. See
140 /// LangRef.html for the meaning of these flats.
141 void Instruction::setFastMathFlags(FastMathFlags FMF) {
142 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
143 cast<FPMathOperator>(this)->setFastMathFlags(FMF);
146 /// Determine whether the unsafe-algebra flag is set.
147 bool Instruction::hasUnsafeAlgebra() const {
148 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
149 return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
152 /// Determine whether the no-NaNs flag is set.
153 bool Instruction::hasNoNaNs() const {
154 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
155 return cast<FPMathOperator>(this)->hasNoNaNs();
158 /// Determine whether the no-infs flag is set.
159 bool Instruction::hasNoInfs() const {
160 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
161 return cast<FPMathOperator>(this)->hasNoInfs();
164 /// Determine whether the no-signed-zeros flag is set.
165 bool Instruction::hasNoSignedZeros() const {
166 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
167 return cast<FPMathOperator>(this)->hasNoSignedZeros();
170 /// Determine whether the allow-reciprocal flag is set.
171 bool Instruction::hasAllowReciprocal() const {
172 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
173 return cast<FPMathOperator>(this)->hasAllowReciprocal();
176 /// Convenience function for getting all the fast-math flags, which must be an
177 /// operator which supports these flags. See LangRef.html for the meaning of
179 FastMathFlags Instruction::getFastMathFlags() const {
180 assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
181 return cast<FPMathOperator>(this)->getFastMathFlags();
184 /// Copy I's fast-math flags
185 void Instruction::copyFastMathFlags(const Instruction *I) {
186 setFastMathFlags(I->getFastMathFlags());
190 const char *Instruction::getOpcodeName(unsigned OpCode) {
193 case Ret: return "ret";
194 case Br: return "br";
195 case Switch: return "switch";
196 case IndirectBr: return "indirectbr";
197 case Invoke: return "invoke";
198 case Resume: return "resume";
199 case Unreachable: return "unreachable";
201 // Standard binary operators...
202 case Add: return "add";
203 case FAdd: return "fadd";
204 case Sub: return "sub";
205 case FSub: return "fsub";
206 case Mul: return "mul";
207 case FMul: return "fmul";
208 case UDiv: return "udiv";
209 case SDiv: return "sdiv";
210 case FDiv: return "fdiv";
211 case URem: return "urem";
212 case SRem: return "srem";
213 case FRem: return "frem";
215 // Logical operators...
216 case And: return "and";
217 case Or : return "or";
218 case Xor: return "xor";
220 // Memory instructions...
221 case Alloca: return "alloca";
222 case Load: return "load";
223 case Store: return "store";
224 case AtomicCmpXchg: return "cmpxchg";
225 case AtomicRMW: return "atomicrmw";
226 case Fence: return "fence";
227 case GetElementPtr: return "getelementptr";
229 // Convert instructions...
230 case Trunc: return "trunc";
231 case ZExt: return "zext";
232 case SExt: return "sext";
233 case FPTrunc: return "fptrunc";
234 case FPExt: return "fpext";
235 case FPToUI: return "fptoui";
236 case FPToSI: return "fptosi";
237 case UIToFP: return "uitofp";
238 case SIToFP: return "sitofp";
239 case IntToPtr: return "inttoptr";
240 case PtrToInt: return "ptrtoint";
241 case BitCast: return "bitcast";
242 case AddrSpaceCast: return "addrspacecast";
244 // Other instructions...
245 case ICmp: return "icmp";
246 case FCmp: return "fcmp";
247 case PHI: return "phi";
248 case Select: return "select";
249 case Call: return "call";
250 case Shl: return "shl";
251 case LShr: return "lshr";
252 case AShr: return "ashr";
253 case VAArg: return "va_arg";
254 case ExtractElement: return "extractelement";
255 case InsertElement: return "insertelement";
256 case ShuffleVector: return "shufflevector";
257 case ExtractValue: return "extractvalue";
258 case InsertValue: return "insertvalue";
259 case LandingPad: return "landingpad";
261 default: return "<Invalid operator> ";
265 /// isIdenticalTo - Return true if the specified instruction is exactly
266 /// identical to the current one. This means that all operands match and any
267 /// extra information (e.g. load is volatile) agree.
268 bool Instruction::isIdenticalTo(const Instruction *I) const {
269 return isIdenticalToWhenDefined(I) &&
270 SubclassOptionalData == I->SubclassOptionalData;
273 /// isIdenticalToWhenDefined - This is like isIdenticalTo, except that it
274 /// ignores the SubclassOptionalData flags, which specify conditions
275 /// under which the instruction's result is undefined.
276 bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
277 if (getOpcode() != I->getOpcode() ||
278 getNumOperands() != I->getNumOperands() ||
279 getType() != I->getType())
282 // We have two instructions of identical opcode and #operands. Check to see
283 // if all operands are the same.
284 if (!std::equal(op_begin(), op_end(), I->op_begin()))
287 // Check special state that is a part of some instructions.
288 if (const LoadInst *LI = dyn_cast<LoadInst>(this))
289 return LI->isVolatile() == cast<LoadInst>(I)->isVolatile() &&
290 LI->getAlignment() == cast<LoadInst>(I)->getAlignment() &&
291 LI->getOrdering() == cast<LoadInst>(I)->getOrdering() &&
292 LI->getSynchScope() == cast<LoadInst>(I)->getSynchScope();
293 if (const StoreInst *SI = dyn_cast<StoreInst>(this))
294 return SI->isVolatile() == cast<StoreInst>(I)->isVolatile() &&
295 SI->getAlignment() == cast<StoreInst>(I)->getAlignment() &&
296 SI->getOrdering() == cast<StoreInst>(I)->getOrdering() &&
297 SI->getSynchScope() == cast<StoreInst>(I)->getSynchScope();
298 if (const CmpInst *CI = dyn_cast<CmpInst>(this))
299 return CI->getPredicate() == cast<CmpInst>(I)->getPredicate();
300 if (const CallInst *CI = dyn_cast<CallInst>(this))
301 return CI->isTailCall() == cast<CallInst>(I)->isTailCall() &&
302 CI->getCallingConv() == cast<CallInst>(I)->getCallingConv() &&
303 CI->getAttributes() == cast<CallInst>(I)->getAttributes();
304 if (const InvokeInst *CI = dyn_cast<InvokeInst>(this))
305 return CI->getCallingConv() == cast<InvokeInst>(I)->getCallingConv() &&
306 CI->getAttributes() == cast<InvokeInst>(I)->getAttributes();
307 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(this))
308 return IVI->getIndices() == cast<InsertValueInst>(I)->getIndices();
309 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(this))
310 return EVI->getIndices() == cast<ExtractValueInst>(I)->getIndices();
311 if (const FenceInst *FI = dyn_cast<FenceInst>(this))
312 return FI->getOrdering() == cast<FenceInst>(FI)->getOrdering() &&
313 FI->getSynchScope() == cast<FenceInst>(FI)->getSynchScope();
314 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
315 return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
316 CXI->getSuccessOrdering() ==
317 cast<AtomicCmpXchgInst>(I)->getSuccessOrdering() &&
318 CXI->getFailureOrdering() ==
319 cast<AtomicCmpXchgInst>(I)->getFailureOrdering() &&
320 CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
321 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
322 return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
323 RMWI->isVolatile() == cast<AtomicRMWInst>(I)->isVolatile() &&
324 RMWI->getOrdering() == cast<AtomicRMWInst>(I)->getOrdering() &&
325 RMWI->getSynchScope() == cast<AtomicRMWInst>(I)->getSynchScope();
326 if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
327 const PHINode *otherPHI = cast<PHINode>(I);
328 return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
329 otherPHI->block_begin());
335 // This should be kept in sync with isEquivalentOperation in
336 // lib/Transforms/IPO/MergeFunctions.cpp.
337 bool Instruction::isSameOperationAs(const Instruction *I,
338 unsigned flags) const {
339 bool IgnoreAlignment = flags & CompareIgnoringAlignment;
340 bool UseScalarTypes = flags & CompareUsingScalarTypes;
342 if (getOpcode() != I->getOpcode() ||
343 getNumOperands() != I->getNumOperands() ||
345 getType()->getScalarType() != I->getType()->getScalarType() :
346 getType() != I->getType()))
349 // We have two instructions of identical opcode and #operands. Check to see
350 // if all operands are the same type
351 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
353 getOperand(i)->getType()->getScalarType() !=
354 I->getOperand(i)->getType()->getScalarType() :
355 getOperand(i)->getType() != I->getOperand(i)->getType())
358 // Check special state that is a part of some instructions.
359 if (const LoadInst *LI = dyn_cast<LoadInst>(this))
360 return LI->isVolatile() == cast<LoadInst>(I)->isVolatile() &&
361 (LI->getAlignment() == cast<LoadInst>(I)->getAlignment() ||
363 LI->getOrdering() == cast<LoadInst>(I)->getOrdering() &&
364 LI->getSynchScope() == cast<LoadInst>(I)->getSynchScope();
365 if (const StoreInst *SI = dyn_cast<StoreInst>(this))
366 return SI->isVolatile() == cast<StoreInst>(I)->isVolatile() &&
367 (SI->getAlignment() == cast<StoreInst>(I)->getAlignment() ||
369 SI->getOrdering() == cast<StoreInst>(I)->getOrdering() &&
370 SI->getSynchScope() == cast<StoreInst>(I)->getSynchScope();
371 if (const CmpInst *CI = dyn_cast<CmpInst>(this))
372 return CI->getPredicate() == cast<CmpInst>(I)->getPredicate();
373 if (const CallInst *CI = dyn_cast<CallInst>(this))
374 return CI->isTailCall() == cast<CallInst>(I)->isTailCall() &&
375 CI->getCallingConv() == cast<CallInst>(I)->getCallingConv() &&
376 CI->getAttributes() == cast<CallInst>(I)->getAttributes();
377 if (const InvokeInst *CI = dyn_cast<InvokeInst>(this))
378 return CI->getCallingConv() == cast<InvokeInst>(I)->getCallingConv() &&
379 CI->getAttributes() ==
380 cast<InvokeInst>(I)->getAttributes();
381 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(this))
382 return IVI->getIndices() == cast<InsertValueInst>(I)->getIndices();
383 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(this))
384 return EVI->getIndices() == cast<ExtractValueInst>(I)->getIndices();
385 if (const FenceInst *FI = dyn_cast<FenceInst>(this))
386 return FI->getOrdering() == cast<FenceInst>(I)->getOrdering() &&
387 FI->getSynchScope() == cast<FenceInst>(I)->getSynchScope();
388 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
389 return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
390 CXI->getSuccessOrdering() ==
391 cast<AtomicCmpXchgInst>(I)->getSuccessOrdering() &&
392 CXI->getFailureOrdering() ==
393 cast<AtomicCmpXchgInst>(I)->getFailureOrdering() &&
394 CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
395 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
396 return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
397 RMWI->isVolatile() == cast<AtomicRMWInst>(I)->isVolatile() &&
398 RMWI->getOrdering() == cast<AtomicRMWInst>(I)->getOrdering() &&
399 RMWI->getSynchScope() == cast<AtomicRMWInst>(I)->getSynchScope();
404 /// isUsedOutsideOfBlock - Return true if there are any uses of I outside of the
405 /// specified block. Note that PHI nodes are considered to evaluate their
406 /// operands in the corresponding predecessor block.
407 bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
408 for (const Use &U : uses()) {
409 // PHI nodes uses values in the corresponding predecessor block. For other
410 // instructions, just check to see whether the parent of the use matches up.
411 const Instruction *I = cast<Instruction>(U.getUser());
412 const PHINode *PN = dyn_cast<PHINode>(I);
414 if (I->getParent() != BB)
419 if (PN->getIncomingBlock(U) != BB)
425 /// mayReadFromMemory - Return true if this instruction may read memory.
427 bool Instruction::mayReadFromMemory() const {
428 switch (getOpcode()) {
429 default: return false;
430 case Instruction::VAArg:
431 case Instruction::Load:
432 case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
433 case Instruction::AtomicCmpXchg:
434 case Instruction::AtomicRMW:
436 case Instruction::Call:
437 return !cast<CallInst>(this)->doesNotAccessMemory();
438 case Instruction::Invoke:
439 return !cast<InvokeInst>(this)->doesNotAccessMemory();
440 case Instruction::Store:
441 return !cast<StoreInst>(this)->isUnordered();
445 /// mayWriteToMemory - Return true if this instruction may modify memory.
447 bool Instruction::mayWriteToMemory() const {
448 switch (getOpcode()) {
449 default: return false;
450 case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
451 case Instruction::Store:
452 case Instruction::VAArg:
453 case Instruction::AtomicCmpXchg:
454 case Instruction::AtomicRMW:
456 case Instruction::Call:
457 return !cast<CallInst>(this)->onlyReadsMemory();
458 case Instruction::Invoke:
459 return !cast<InvokeInst>(this)->onlyReadsMemory();
460 case Instruction::Load:
461 return !cast<LoadInst>(this)->isUnordered();
465 bool Instruction::mayThrow() const {
466 if (const CallInst *CI = dyn_cast<CallInst>(this))
467 return !CI->doesNotThrow();
468 return isa<ResumeInst>(this);
471 bool Instruction::mayReturn() const {
472 if (const CallInst *CI = dyn_cast<CallInst>(this))
473 return !CI->doesNotReturn();
477 /// isAssociative - Return true if the instruction is associative:
479 /// Associative operators satisfy: x op (y op z) === (x op y) op z
481 /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
483 bool Instruction::isAssociative(unsigned Opcode) {
484 return Opcode == And || Opcode == Or || Opcode == Xor ||
485 Opcode == Add || Opcode == Mul;
488 bool Instruction::isAssociative() const {
489 unsigned Opcode = getOpcode();
490 if (isAssociative(Opcode))
496 return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
502 /// isCommutative - Return true if the instruction is commutative:
504 /// Commutative operators satisfy: (x op y) === (y op x)
506 /// In LLVM, these are the associative operators, plus SetEQ and SetNE, when
507 /// applied to any type.
509 bool Instruction::isCommutative(unsigned op) {
524 /// isIdempotent - Return true if the instruction is idempotent:
526 /// Idempotent operators satisfy: x op x === x
528 /// In LLVM, the And and Or operators are idempotent.
530 bool Instruction::isIdempotent(unsigned Opcode) {
531 return Opcode == And || Opcode == Or;
534 /// isNilpotent - Return true if the instruction is nilpotent:
536 /// Nilpotent operators satisfy: x op x === Id,
538 /// where Id is the identity for the operator, i.e. a constant such that
539 /// x op Id === x and Id op x === x for all x.
541 /// In LLVM, the Xor operator is nilpotent.
543 bool Instruction::isNilpotent(unsigned Opcode) {
544 return Opcode == Xor;
547 Instruction *Instruction::clone() const {
548 Instruction *New = clone_impl();
549 New->SubclassOptionalData = SubclassOptionalData;
553 // Otherwise, enumerate and copy over metadata from the old instruction to the
555 SmallVector<std::pair<unsigned, MDNode*>, 4> TheMDs;
556 getAllMetadataOtherThanDebugLoc(TheMDs);
557 for (const auto &MD : TheMDs)
558 New->setMetadata(MD.first, MD.second);
560 New->setDebugLoc(getDebugLoc());