//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>
#include <cmath>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}
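
// For example, an f64 node holding +0.0 satisfies isExactlyValue(APFloat(0.0))
// but not isExactlyValue(APFloat(-0.0)), even though operator== on doubles
// treats the two as equal.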

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
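
// For example, (BUILD_VECTOR undef, (i32 -1), (i32 -1), (i32 -1)) is accepted
// as all-ones, while an entirely-undef BUILD_VECTOR or one containing a zero
// element is rejected.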

/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
    if (!CN->isNullValue())
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
    if (!CFPN->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantSDNode or undef.
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
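
// For example, swapping the operands of (X < Y) gives (Y > X):
//   getSetCCSwappedOperands(ISD::SETLT)  == ISD::SETGT
//   getSetCCSwappedOperands(ISD::SETULE) == ISD::SETUGE
// The E and U bits are untouched, so signedness and equality carry over.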

/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
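
// For example, getSetCCInverse(ISD::SETLT, /*isInteger=*/true) == ISD::SETGE,
// while getSetCCInverse(ISD::SETOLT, /*isInteger=*/false) == ISD::SETUGE:
// inverting an ordered FP comparison must also flip the U (unordered) bit.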

/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if the result is an unsigned comparison. Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
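
// For example, for integer comparisons (X < Y) | (X > Y) folds to X != Y:
//   getSetCCOrOperation(ISD::SETLT, ISD::SETGT, true) == ISD::SETNE
// whereas mixing signedness, such as SETLT | SETULT, yields SETCC_INVALID.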

/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns zero if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
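
// For example, for integer comparisons (X <= Y) & (X >= Y) folds to X == Y:
//   getSetCCAndOperation(ISD::SETLE, ISD::SETGE, true) == ISD::SETEQ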

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}

/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leafs with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}
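
// The encoding above packs, from the least significant bit upward: the
// extension/truncation type (2 bits), the indexing mode (3 bits), and then
// one bit each for volatile, non-temporal, and invariant.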

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops, unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// VerifyNodeCommon - Sanity check the given node. Aborts if it is invalid.
static void VerifyNodeCommon(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}

/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  // The SDNode allocators cannot be used to allocate nodes with fields that are
  // not present in an SDNode!
  assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
  assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
  assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
  assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
  assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
  assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
  assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
  assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
  assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
  assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
  assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
  assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
  assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
  assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
  assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
  assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
  assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
  assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
  assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");

  VerifyNodeCommon(N);
}

/// VerifyMachineNode - Sanity check the given MachineNode. Aborts if it is
/// invalid.
static void VerifyMachineNode(SDNode *N) {
  // The MachineNode allocators cannot be used to allocate nodes with fields
  // that are not present in a MachineNode!
  // Currently there are no such nodes.

  VerifyNodeCommon(N);
}

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
  : TM(tm), TSI(*tm.getSelectionDAGInfo()), TLI(nullptr), OptLevel(OL),
    EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
    Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
    UpdateListeners(nullptr) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf, const TargetLowering *tli) {
  MF = &mf;
  TLI = tli;
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}
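
// For example, getNOT(DL, Val, MVT::v4i32) produces
// (XOR Val, (BUILD_VECTOR -1, -1, -1, -1)), since getConstant splats the
// all-ones element value across vector types.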

SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT, bool isO)
{
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT,
                                  bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  const TargetLowering *TLI = TM.getTargetLowering();

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
           TargetLowering::TypeExpandInteger) {
    APInt NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                           .trunc(ViaEltSizeInBits),
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (TLI->isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
                             getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
                                     Ops));
    return Result;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), nullptr, 0);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
  }
  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), nullptr, 0);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME SDLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");
  const TargetLowering *TLI = TM.getTargetLowering();

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->getAliasedGlobal());
  }

  unsigned Opc;
  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), nullptr, 0);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), nullptr, 0);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If Identity shuffle return that node.
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
  }
  if (Identity && NElts)
    return N1;

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
                                            dl.getDebugLoc(), N1, N2,
                                            MaskAlloc);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}
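
// As an illustration of the canonicalizations above: the v4i32 shuffle
// (shuffle undef, V, <4,5,6,7>) is commuted to (shuffle V, undef, <0,1,2,3>),
// which is then recognized as an identity shuffle, so V itself is returned
// without creating a new node.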

SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
                                                           dl.getDebugLoc(),
                                                           Ops, 5, Code);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), nullptr, 0);
  ID.AddInteger(RegNo);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), nullptr, 0);
  ID.AddPointer(RegMask);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
  ID.AddPointer(Label);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
                                                dl.getDebugLoc(), Root, Label);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
                                                     TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), nullptr, 0);
  ID.AddPointer(V);

  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), nullptr, 0);
  ID.AddPointer(MD);

  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), &Ops[0], 1);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
                                                      dl.getDebugLoc(),
                                                      VT, Ptr, SrcAS, DestAS);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, SDLoc(Op), ShTy, Op);
}

/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned StackAlign =
    std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  const DataLayout *TD = TLI->getDataLayout();
  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                            TD->getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
                                SDValue N2, ISD::CondCode Cond, SDLoc dl) {
  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: {
    const TargetLowering *TLI = TM.getTargetLowering();
    TargetLowering::BooleanContent Cnt = TLI->getBooleanContents(VT.isVector());
    return getConstant(
        Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
  }

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
      case ISD::SETNE:  return getConstant(C1 != C2, VT);
      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
      }
    }
  }
  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      switch (Cond) {
      default: break;
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
      }
    } else {
      // Ensure that the constant occurs on the RHS.
      ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
      MVT CompVT = N1.getValueType().getSimpleVT();
      if (!TM.getTargetLowering()->isCondCodeLegal(SwappedCond, CompVT))
        return SDValue();

      return getSetCC(dl, VT, N2, N1, SwappedCond);
    }
  }

  // Could not fold it.
  return SDValue();
}
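
// Illustrative example (hypothetical values): with
//   N1 = getConstant(7, MVT::i32) and N2 = getConstant(9, MVT::i32),
// FoldSetCC(MVT::i1, N1, N2, ISD::SETULT, dl) takes the integer-constant
// path above and returns getConstant(1, MVT::i1), since 7 <u 9.  Operands
// it cannot fold fall through to the null SDValue.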

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero.
/// We use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  // This predicate is not safe for vector operations.
  if (Op.getValueType().isVector())
    return false;

  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We
/// use this predicate to simplify operations downstream.  Mask is known to
/// be zero for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
                                     unsigned Depth) const {
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}
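
// Illustrative example (hypothetical values): for an i32 value
// Op = (and X, 0xFF), ComputeMaskedBits below reports the top 24 bits as
// known zero, so MaskedValueIsZero(Op, 0xFFFFFF00) returns true, and so
// does SignBitIsZero(Op).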

/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bitsets.  This code only analyzes bits in Mask, in order to short-circuit
/// processing.
void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, unsigned Depth) const {
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();

  KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
  if (Depth == 6)
    return;  // Limit search depth.

  APInt KnownZero2, KnownOne2;

  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
    KnownZero = ~KnownOne;
    break;
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR: {
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    break;
  }
  case ISD::MUL: {
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
    KnownOne.clearAllBits();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
                               KnownZero2.countLeadingOnes(),
                               BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
  case ISD::UDIV: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
  case ISD::SELECT:
    ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.  Fall through.
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
        TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ISD::SHL:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShAmt;
      KnownOne  <<= ShAmt;
      // low bits known zero.
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
    }
    break;
  case ISD::SRL:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      KnownZero |= HighBits;  // High bits known zero.
    }
    break;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      // If any of the demanded bits are produced by the sign extension, we
      // also demand the input sign bit.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      // Handle the sign bits.
      APInt SignBit = APInt::getSignBit(BitWidth);
      SignBit = SignBit.lshr(ShAmt);  // Adjust to where it is now in the mask.

      if (KnownZero.intersects(SignBit)) {
        KnownZero |= HighBits;  // New bits are known zero.
      } else if (KnownOne.intersects(SignBit)) {
        KnownOne  |= HighBits;  // New bits are known one.
      }
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned EBits = EVT.getScalarType().getSizeInBits();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

    APInt InSignBit = APInt::getSignBit(EBits);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InSignBit = InSignBit.zext(BitWidth);
    if (NewBits.getBoolValue())
      InputDemandedBits |= InSignBit;

    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownOne &= InputDemandedBits;
    KnownZero &= InputDemandedBits;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero.intersects(InSignBit)) {        // Input sign bit known clear
      KnownZero |= NewBits;
      KnownOne  &= ~NewBits;
    } else if (KnownOne.intersects(InSignBit)) {  // Input sign bit known set
      KnownOne  |= NewBits;
      KnownZero &= ~NewBits;
    } else {                                      // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne  &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTPOP: {
    unsigned LowBits = Log2_32(BitWidth)+1;
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
    KnownOne.clearAllBits();
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If this is a ZEXTLoad and we are looking at the loaded value.
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarType().getSizeInBits();
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      computeMaskedBitsLoad(*Ranges, KnownZero);
    }
    break;
  }
  case ISD::ZERO_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);

    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

    // Note if the sign bit is known to be zero or one.
    bool SignBitKnownZero = KnownZero.isNegative();
    bool SignBitKnownOne  = KnownOne.isNegative();
    assert(!(SignBitKnownZero && SignBitKnownOne) &&
           "Sign bit can't be known to be both zero and one!");

    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit is known zero or one, the top bits match.
    if (SignBitKnownZero)
      KnownZero |= NewBits;
    else if (SignBitKnownOne)
      KnownOne |= NewBits;
    break;
  }
  case ISD::ANY_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.trunc(InBits);
    KnownOne = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.zext(InBits);
    KnownOne = KnownOne.zext(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);
    break;
  }
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero |= (~InMask);
    KnownOne  &= (~KnownZero);
    break;
  }
  case ISD::FGETSIGN:
    // All bits are zero except the low bit.
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;

  case ISD::SUB: {
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen).  For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (CLHS->getAPIntValue().isNonNegative()) {
        unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }
  // fall through
  case ISD::ADD:
  case ISD::ADDE: {
    // Output known-0 bits are known if clear or set in both the low clear bits
    // common to both LHS & RHS.  For example, 8+(X<<3) is known to have the
    // low 3 bits clear.
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    unsigned KnownZeroOut = KnownZero2.countTrailingOnes();

    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    KnownZeroOut = std::min(KnownZeroOut,
                            KnownZero2.countTrailingOnes());

    if (Op.getOpcode() == ISD::ADD) {
      KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
      break;
    }

    // With ADDE, a carry bit may be added in, so we can only use this
    // information if we know (at least) that the low two bits are clear.  We
    // then return to the caller that the low bit is unknown but that other
    // bits are known zero.
    if (KnownZeroOut >= 2) // ADDE
      KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
    break;
  }
  case ISD::SREM:
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }
    break;
  case ISD::UREM: {
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        KnownZero |= ~LowBits;
        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

    uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    if (unsigned Align = InferPtrAlignment(Op)) {
      // The low bits are known zero if the pointer is aligned.
      KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
    }
    break;

  default:
    if (Op.getOpcode() < ISD::BUILTIN_OP_END)
      break;
    // Fallthrough
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Allow the target to implement this method for its nodes.
    TLI->computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
    break;
  }
}
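
// Illustrative example (hypothetical values): for an i32 value
// Op = (shl X, 4), the SHL case above shifts whatever is known about X left
// by 4 and then marks the low four bits as known zero, so KnownZero always
// contains 0xF; a caller such as MaskedValueIsZero can then conclude that
// (Op & 0xF) == 0, which is the fact alignment reasoning relies on.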

/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits.  We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information.  For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
  const TargetLowering *TLI = TM.getTargetLowering();
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && "Invalid VT!");
  unsigned VTBits = VT.getScalarType().getSizeInBits();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;
  if (Depth == 6)
    return 1;  // Limit search depth.

  switch (Op.getOpcode()) {
  default: break;
  case ISD::AssertSext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp+1;
  case ISD::AssertZext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp;

  case ISD::Constant: {
    const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
    return Val.getNumSignBits();
  }

  case ISD::SIGN_EXTEND:
    Tmp =
      VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
    return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;

  case ISD::SIGN_EXTEND_INREG:
    // Max of the input and what this extends.
    Tmp =
      cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
    Tmp = VTBits-Tmp+1;

    Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    return std::max(Tmp, Tmp2);

  case ISD::SRA:
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    // SRA X, C   -> adds C sign bits.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Tmp += C->getZExtValue();
      if (Tmp > VTBits) Tmp = VTBits;
    }
    return Tmp;
  case ISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (C->getZExtValue() >= VTBits ||      // Bad shift.
          C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
      return Tmp - C->getZExtValue();
    }
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // ComputeMaskedBits, and pick whichever answer is better.
    }
    break;

  case ISD::SELECT:
    Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
    return std::min(Tmp, Tmp2);

  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.  Fall through.
  case ISD::SETCC:
    // If setcc returns 0/-1, all bits are sign bits.
    if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned RotAmt = C->getZExtValue() & (VTBits-1);

      // Handle rotate right by N like a rotate left by 32-N.
      if (Op.getOpcode() == ISD::ROTR)
        RotAmt = (VTBits-RotAmt) & (VTBits-1);

      // If we aren't rotating out all of the known-in sign bits, return the
      // number that are left.  This handles rotl(sext(x), 1) for example.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (Tmp > RotAmt+1) return Tmp-RotAmt;
    }
    break;
  case ISD::ADD:
    // Add can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero, KnownOne;
        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;

  case ISD::SUB:
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero, KnownOne;
        ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;
  case ISD::TRUNCATE:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }

  // If we are looking at the loaded value of the SDNode.
  if (Op.getResNo() == 0) {
    // Handle LOADX separately here. EXTLOAD case will fallthrough.
    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
      unsigned ExtType = LD->getExtensionType();
      switch (ExtType) {
      default: break;
      case ISD::SEXTLOAD:    // '17' bits known
        Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
        return VTBits-Tmp+1;
      case ISD::ZEXTLOAD:    // '16' bits known
        Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
        return VTBits-Tmp;
      }
    }
  }

  // Allow the target to implement this method for its nodes.
  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
      Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_VOID) {
    unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
    if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);

  APInt Mask;
  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1;
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-VTBits;
  // Return # leading zeros.  We use 'min' here in case Val was zero before
  // shifting.  We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
}
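
// Illustrative example (hypothetical values): for an i32 value
// Op = (sra (sign_extend_inreg X, i8), 2), the SIGN_EXTEND_INREG case
// guarantees at least 32-8+1 = 25 sign bits, and the SRA case then adds the
// shift amount, giving min(25+2, 32) = 27 replicated sign bits.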

/// isBaseWithConstantOffset - Return true if the specified operand is an
/// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
/// ISD::OR with a ConstantSDNode that is guaranteed to have the same
/// semantics as an ADD.  This handles the equivalence:
///     X|Cst == X+Cst iff X&Cst = 0.
bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
  if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
      !isa<ConstantSDNode>(Op.getOperand(1)))
    return false;

  if (Op.getOpcode() == ISD::OR &&
      !MaskedValueIsZero(Op.getOperand(0),
                    cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
    return false;

  return true;
}
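
// Illustrative example (hypothetical values): for Op = (or X, 4) where X is
// known 8-byte aligned, the low three bits of X are zero, so
// MaskedValueIsZero(X, 4) holds, the OR behaves exactly like (add X, 4),
// and this predicate returns true; addressing-mode matching can then treat
// the node as base-plus-offset.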

bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
  // If we're told that NaNs won't happen, assume they won't.
  if (getTarget().Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->getValueAPF().isNaN();

  // TODO: Recognize more cases here.

  return false;
}

bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
  // If the value is a constant, we can obviously see if it is a zero or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->isZero();

  // TODO: Recognize more cases here.
  switch (Op.getOpcode()) {
  default: break;
  case ISD::OR:
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      return !C->isNullValue();
    break;
  }

  return false;
}

bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
  // Check the obvious case.
  if (A == B) return true;

  // Check for negative and positive zero.
  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
      if (CA->isZero() && CB->isZero()) return true;

  // Otherwise they may not be equal.
  return false;
}

/// getNode - Gets or creates the specified node.
///
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, getVTList(VT), nullptr, 0);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), getVTList(VT));
  CSEMap.InsertNode(N, IP);

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
                              EVT VT, SDValue Operand) {
  // Constant fold unary operations with an integer constant operand. Even
  // opaque constants will be folded, because the folding of unary operations
  // doesn't create new constants with different values. Nevertheless, the
  // opaque flag is preserved during folding to prevent future folding with
  // the same operand.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
    const APInt &Val = C->getAPIntValue();
    switch (Opcode) {
    default: break;
    case ISD::SIGN_EXTEND:
      return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT,
                         C->isTargetOpcode(), C->isOpaque());
    case ISD::ANY_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::TRUNCATE:
      return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT,
                         C->isTargetOpcode(), C->isOpaque());
    case ISD::UINT_TO_FP:
    case ISD::SINT_TO_FP: {
      APFloat apf(EVTToAPFloatSemantics(VT),
                  APInt::getNullValue(VT.getSizeInBits()));
      (void)apf.convertFromAPInt(Val,
                                 Opcode==ISD::SINT_TO_FP,
                                 APFloat::rmNearestTiesToEven);
      return getConstantFP(apf, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
        return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
      else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
        return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
      break;
    case ISD::BSWAP:
      return getConstant(Val.byteSwap(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTPOP:
      return getConstant(Val.countPopulation(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTLZ:
    case ISD::CTLZ_ZERO_UNDEF:
      return getConstant(Val.countLeadingZeros(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTTZ:
    case ISD::CTTZ_ZERO_UNDEF:
      return getConstant(Val.countTrailingZeros(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    }
  }

  // Constant fold unary operations with a floating point constant operand.
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
    APFloat V = C->getValueAPF();    // make copy
    switch (Opcode) {
    case ISD::FNEG:
      V.changeSign();
      return getConstantFP(V, VT);
    case ISD::FABS:
      V.clearSign();
      return getConstantFP(V, VT);
    case ISD::FCEIL: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FTRUNC: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FFLOOR: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FP_EXTEND: {
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, VT);
    }
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT: {
      integerPart x[2];
      bool ignored;
      assert(integerPartWidth >= 64);
      // FIXME need to be more flexible about rounding mode.
      APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
                            Opcode==ISD::FP_TO_SINT,
                            APFloat::rmTowardZero, &ignored);
      if (s==APFloat::opInvalidOp)     // inexact is OK, in fact usual
        break;
      APInt api(VT.getSizeInBits(), x);
      return getConstant(api, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
        return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
      else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
        return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
      break;
    }
  }

  unsigned OpOpcode = Operand.getNode()->getOpcode();
  switch (Opcode) {
  case ISD::TokenFactor:
  case ISD::MERGE_VALUES:
  case ISD::CONCAT_VECTORS:
    return Operand;         // Factor, merge or concat of one node?  No need.
  case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
  case ISD::FP_EXTEND:
    assert(VT.isFloatingPoint() &&
           Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
    if (Operand.getValueType() == VT) return Operand;  // noop conversion.
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (Operand.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SIGN_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid SIGN_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid sext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // sext(undef) = 0, because the top bits will all be the same.
      return getConstant(0, VT);
    break;
  case ISD::ZERO_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ZERO_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid zext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
      return getNode(ISD::ZERO_EXTEND, DL, VT,
                     Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // zext(undef) = 0, because the top bits will be zero.
      return getConstant(0, VT);
    break;
  case ISD::ANY_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ANY_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid anyext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");

    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND)
      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);

    // (ext (trunc x)) -> x
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = Operand.getNode()->getOperand(0);
      if (OpOp.getValueType() == VT)
        return OpOp;
    }
    break;
  case ISD::TRUNCATE:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid TRUNCATE!");
    if (Operand.getValueType() == VT) return Operand;   // noop truncate
    assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
           "Invalid truncate node, src < dst!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::TRUNCATE)
      return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND) {
      // If the source is smaller than the dest, we still need an extend.
      if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
            .bitsLT(VT.getScalarType()))
        return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
      if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
        return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
      return Operand.getNode()->getOperand(0);
    }
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BITCAST:
    // Basic sanity checking.
    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
           && "Cannot BITCAST between types of different sizes!");
    if (VT == Operand.getValueType()) return Operand;  // noop conversion.
    if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
      return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SCALAR_TO_VECTOR:
    assert(VT.isVector() && !Operand.getValueType().isVector() &&
           (VT.getVectorElementType() == Operand.getValueType() ||
            (VT.getVectorElementType().isInteger() &&
             Operand.getValueType().isInteger() &&
             VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
           "Illegal SCALAR_TO_VECTOR node!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
    if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
        isa<ConstantSDNode>(Operand.getOperand(1)) &&
        Operand.getConstantOperandVal(1) == 0 &&
        Operand.getOperand(0).getValueType() == VT)
      return Operand.getOperand(0);
    break;
  case ISD::FNEG:
    // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
    if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
      return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
                     Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::FNEG)  // --X -> X
      return Operand.getNode()->getOperand(0);
    break;
  case ISD::FABS:
    if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
      return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
    break;
  }

  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) { // Don't CSE flag producing nodes
    FoldingSetNodeID ID;
    SDValue Ops[1] = { Operand };
    AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
    void *IP = nullptr;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}
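
// Illustrative example (hypothetical values):
//   getNode(ISD::ZERO_EXTEND, DL, MVT::i64, getConstant(0x80, MVT::i8))
// is folded by the integer-constant block above into
// getConstant(0x80, MVT::i64); no ZERO_EXTEND node is ever created.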

SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
                                             SDNode *Cst1, SDNode *Cst2) {
  // If the opcode is a target-specific ISD node, there's nothing we can
  // do here and the operand rules may not line up with the below, so
  // bail early.
  if (Opcode >= ISD::BUILTIN_OP_END)
    return SDValue();

  SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
  SmallVector<SDValue, 4> Outputs;
  EVT SVT = VT.getScalarType();

  ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
  ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
  if (Scalar1 && Scalar2 && (Scalar1->isOpaque() || Scalar2->isOpaque()))
    return SDValue();

  if (Scalar1 && Scalar2)
    // Scalar instruction.
    Inputs.push_back(std::make_pair(Scalar1, Scalar2));
  else {
    // For vectors extract each constant element into Inputs so we can constant
    // fold them individually.
    BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
    BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
    if (!BV1 || !BV2)
      return SDValue();

    assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");

    for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
      ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
      ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
      if (!V1 || !V2) // Not a constant, bail.
        return SDValue();

      if (V1->isOpaque() || V2->isOpaque())
        return SDValue();

      // Avoid BUILD_VECTOR nodes that perform implicit truncation.
      // FIXME: This is valid and could be handled by truncating the APInts.
      if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
        return SDValue();

      Inputs.push_back(std::make_pair(V1, V2));
    }
  }

  // We have a number of constant values, constant fold them element by
  // element.
  for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
    const APInt &C1 = Inputs[I].first->getAPIntValue();
    const APInt &C2 = Inputs[I].second->getAPIntValue();

    switch (Opcode) {
    case ISD::ADD:
      Outputs.push_back(getConstant(C1 + C2, SVT));
      break;
    case ISD::SUB:
      Outputs.push_back(getConstant(C1 - C2, SVT));
      break;
    case ISD::MUL:
      Outputs.push_back(getConstant(C1 * C2, SVT));
      break;
    case ISD::UDIV:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.udiv(C2), SVT));
      break;
    case ISD::UREM:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.urem(C2), SVT));
      break;
    case ISD::SDIV:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
      break;
    case ISD::SREM:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.srem(C2), SVT));
      break;
    case ISD::AND:
      Outputs.push_back(getConstant(C1 & C2, SVT));
      break;
    case ISD::OR:
      Outputs.push_back(getConstant(C1 | C2, SVT));
      break;
    case ISD::XOR:
      Outputs.push_back(getConstant(C1 ^ C2, SVT));
      break;
    case ISD::SHL:
      Outputs.push_back(getConstant(C1 << C2, SVT));
      break;
    case ISD::SRL:
      Outputs.push_back(getConstant(C1.lshr(C2), SVT));
      break;
    case ISD::SRA:
      Outputs.push_back(getConstant(C1.ashr(C2), SVT));
      break;
    case ISD::ROTL:
      Outputs.push_back(getConstant(C1.rotl(C2), SVT));
      break;
    case ISD::ROTR:
      Outputs.push_back(getConstant(C1.rotr(C2), SVT));
      break;
    default:
      return SDValue();
    }
  }

  // Handle the scalar case first.
  if (Scalar1 && Scalar2)
    return Outputs.back();

  // Otherwise build a big vector out of the scalar elements we generated.
  return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
}
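
// Illustrative example (hypothetical values): given two v4i32 BUILD_VECTOR
// constants, FoldConstantArithmetic(ISD::MUL, VT, Cst1, Cst2) multiplies
// the four element pairs with APInt arithmetic and rebuilds one
// BUILD_VECTOR; a zero divisor in the ISD::UDIV case instead bails with an
// empty SDValue so the caller keeps the original node.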

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
                              SDValue N2) {
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());

  switch (Opcode) {
  default: break;
  case ISD::TokenFactor:
    assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
           N2.getValueType() == MVT::Other && "Invalid token factor!");
    // Fold trivial token factors.
    if (N1.getOpcode() == ISD::EntryToken) return N2;
    if (N2.getOpcode() == ISD::EntryToken) return N1;
    if (N1 == N2) return N1;
    break;
  case ISD::CONCAT_VECTORS:
    // Concat of UNDEFs is UNDEF.
    if (N1.getOpcode() == ISD::UNDEF &&
        N2.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
    // one big BUILD_VECTOR.
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
      return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
    }
    break;
  case ISD::AND:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so
    // it's worth handling here.
    if (N2C && N2C->isNullValue())
      return N2;
    if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
      return N1;
    break;
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
    // it's worth handling here.
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::UDIV:
  case ISD::UREM:
  case ISD::MULHU:
  case ISD::MULHS:
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::SREM:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    if (getTarget().Options.UnsafeFPMath) {
      if (Opcode == ISD::FADD) {
        // 0+x --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
          if (CFP->getValueAPF().isZero())
            return N2;
        // x+0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FSUB) {
        // x-0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FMUL) {
        ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
        SDValue V = N2;

        // If the first operand isn't the constant, try the second.
        if (!CFP) {
          CFP = dyn_cast<ConstantFPSDNode>(N2);
          V = N1;
        }

        if (CFP) {
          // 0*x --> 0
          if (CFP->isZero())
            return SDValue(CFP,0);
          // 1*x --> x
          if (CFP->isExactlyValue(1.0))
            return V;
        }
      }
    }
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
    assert(N1.getValueType() == VT &&
           N1.getValueType().isFloatingPoint() &&
           N2.getValueType().isFloatingPoint() &&
           "Invalid FCOPYSIGN!");
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    assert(VT == N1.getValueType() &&
           "Shift operators return type must be the same as their first arg");
    assert(VT.isInteger() && N2.getValueType().isInteger() &&
           "Shifts only work on integers");
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must have the same type as their first arg");
    // Verify that the shift amount VT is big enough to hold valid shift
    // amounts.  This catches things like trying to shift an i1024 value by an
    // i8, which is easy to fall into in generic code that uses
    // TLI.getShiftAmount().
    assert(N2.getValueType().getSizeInBits() >=
           Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
           "Invalid use of small shift amount with oversized value!");

    // Always fold shifts of i1 values so the code generator doesn't need to
    // handle them.  Since we know the size of the shift has to be less than
    // the size of the value, the shift/rotate count is guaranteed to be zero.
    if (VT == MVT::i1)
      return N1;
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::FP_ROUND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg round!");
    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
           "Cannot FP_ROUND_INREG integer types");
    assert(EVT.isVector() == VT.isVector() &&
           "FP_ROUND_INREG type should be vector iff the operand "
           "type is a vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in FP_ROUND_INREG");
    assert(EVT.bitsLE(VT) && "Not rounding down!");
    (void)EVT;
    if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
    break;
  }
  case ISD::FP_ROUND:
    assert(VT.isFloatingPoint() &&
           N1.getValueType().isFloatingPoint() &&
           VT.bitsLE(N1.getValueType()) &&
           isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
    if (N1.getValueType() == VT) return N1;  // noop conversion.
    break;
  case ISD::AssertSext:
  case ISD::AssertZext: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(!EVT.isVector() &&
           "AssertSExt/AssertZExt type should be the vector element type "
           "rather than the vector type!");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (VT == EVT) return N1; // noop assertion.
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(EVT.isVector() == VT.isVector() &&
           "SIGN_EXTEND_INREG type should be vector iff the operand "
           "type is a vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in SIGN_EXTEND_INREG");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (EVT == VT) return N1;  // Not actually extending

    if (N1C) {
      APInt Val = N1C->getAPIntValue();
      unsigned FromBits = EVT.getScalarType().getSizeInBits();
      Val <<= Val.getBitWidth()-FromBits;
      Val = Val.ashr(Val.getBitWidth()-FromBits);
      return getConstant(Val, VT);
    }
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
    if (N1.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
    // expanding copies of large vectors from registers.
    if (N2C &&
        N1.getOpcode() == ISD::CONCAT_VECTORS &&
        N1.getNumOperands() > 0) {
      unsigned Factor =
        N1.getOperand(0).getValueType().getVectorNumElements();
      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                     N1.getOperand(N2C->getZExtValue() / Factor),
                     getConstant(N2C->getZExtValue() % Factor,
                                 N2.getValueType()));
    }

    // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
    // expanding large vector constants.
    if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue Elt = N1.getOperand(N2C->getZExtValue());

      if (VT != Elt.getValueType())
        // If the vector element type is not legal, the BUILD_VECTOR operands
        // are promoted and implicitly truncated, and the result implicitly
        // extended. Make that explicit here.
        Elt = getAnyExtOrTrunc(Elt, DL, VT);

      return Elt;
    }

    // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
    // operations are lowered to scalars.
    if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      // If the indices are the same, return the inserted element else
      // if the indices are known different, extract the element from
      // the original vector.
      SDValue N1Op2 = N1.getOperand(2);
      ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());

      if (N1Op2C && N2C) {
        if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
          if (VT == N1.getOperand(1).getValueType())
            return N1.getOperand(1);
          else
            return getSExtOrTrunc(N1.getOperand(1), DL, VT);
        }

        return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
      }
    }
    break;
  case ISD::EXTRACT_ELEMENT:
    assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
    assert(!N1.getValueType().isVector() && !VT.isVector() &&
           (N1.getValueType().isInteger() == VT.isInteger()) &&
           N1.getValueType() != VT &&
           "Wrong types for EXTRACT_ELEMENT!");

    // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is
    // expanding 64-bit integers into 32-bit parts.  Instead of building the
    // extract of the BUILD_PAIR, only to have legalize rip it apart, just do
    // it now.
    if (N1.getOpcode() == ISD::BUILD_PAIR)
      return N1.getOperand(N2C->getZExtValue());

    // EXTRACT_ELEMENT of a constant int is also very common.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      unsigned ElementSize = VT.getSizeInBits();
      unsigned Shift = ElementSize * N2C->getZExtValue();
      APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
      return getConstant(ShiftedVal.trunc(ElementSize), VT);
    }
    break;
  case ISD::EXTRACT_SUBVECTOR: {
    SDValue Index = N2;
    if (VT.isSimple() && N1.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             "Extract subvector VTs must be vectors!");
      assert(VT.getVectorElementType() ==
             N1.getValueType().getVectorElementType() &&
             "Extract subvector VTs must have the same element type!");
      assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
             "Extract subvector must be from larger vector to smaller vector!");

      if (isa<ConstantSDNode>(Index.getNode())) {
        assert((VT.getVectorNumElements() +
                cast<ConstantSDNode>(Index.getNode())->getZExtValue()
                <= N1.getValueType().getVectorNumElements())
               && "Extract subvector overflow!");
      }

      // Trivial extraction.
      if (VT.getSimpleVT() == N1.getSimpleValueType())
        return N1;
    }
    break;
  }
  }

  // Perform trivial constant folding.
  SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
  if (SV.getNode()) return SV;

  // Canonicalize constant to RHS if commutative.
  if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
    std::swap(N1C, N2C);
    std::swap(N1, N2);
  }

  // Constant fold FP operations.
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
  ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
  if (N1CFP) {
    if (!N2CFP && isCommutativeBinOp(Opcode)) {
      // Canonicalize constant to RHS if commutative.
      std::swap(N1CFP, N2CFP);
      std::swap(N1, N2);
    } else if (N2CFP) {
      APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
      APFloat::opStatus s;
      switch (Opcode) {
      case ISD::FADD:
        s = V1.add(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FSUB:
        s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
        if (s!=APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FMUL:
        s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
        if (s!=APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FDIV:
        s = V1.divide(V2, APFloat::rmNearestTiesToEven);
        if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
          return getConstantFP(V1, VT);
        break;
      case ISD::FREM:
        s = V1.mod(V2, APFloat::rmNearestTiesToEven);
        if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
          return getConstantFP(V1, VT);
        break;
      case ISD::FCOPYSIGN:
        V1.copySign(V2);
        return getConstantFP(V1, VT);
      default: break;
      }
    }

    if (Opcode == ISD::FP_ROUND) {
      APFloat V = N1CFP->getValueAPF();    // make copy
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, VT);
    }
  }

  // Canonicalize an UNDEF to the RHS, even over a constant.
  if (N1.getOpcode() == ISD::UNDEF) {
    if (isCommutativeBinOp(Opcode)) {
      std::swap(N1, N2);
    } else {
      switch (Opcode) {
      case ISD::FP_ROUND_INREG:
      case ISD::SIGN_EXTEND_INREG:
      case ISD::SUB:
      case ISD::FSUB:
      case ISD::FDIV:
      case ISD::FREM:
      case ISD::SRA:
        return N1;     // fold op(undef, arg2) -> undef
      case ISD::UDIV:
      case ISD::SDIV:
      case ISD::UREM:
      case ISD::SREM:
      case ISD::SRL:
      case ISD::SHL:
        if (!VT.isVector())
          return getConstant(0, VT);    // fold op(undef, arg2) -> 0
        // For vectors, we can't easily build an all zero vector, just return
        // the LHS.
        return N2;
      }
    }
  }

  // Fold a bunch of operators when the RHS is undef.
  if (N2.getOpcode() == ISD::UNDEF) {
    switch (Opcode) {
    case ISD::XOR:
      if (N1.getOpcode() == ISD::UNDEF)
        // Handle undef ^ undef -> 0 special case. This is a common
        // idiom (misuse).
        return getConstant(0, VT);
      // fallthrough
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::SUB:
    case ISD::UDIV:
    case ISD::SDIV:
    case ISD::UREM:
    case ISD::SREM:
      return N2;       // fold op(arg1, undef) -> undef
    case ISD::FADD:
    case ISD::FSUB:
    case ISD::FMUL:
    case ISD::FDIV:
    case ISD::FREM:
      if (getTarget().Options.UnsafeFPMath)
        return N2;
      break;
    case ISD::MUL:
    case ISD::AND:
    case ISD::SRL:
    case ISD::SHL:
      if (!VT.isVector())
        return getConstant(0, VT);  // fold op(arg1, undef) -> 0
      // For vectors, we can't easily build an all zero vector, just return
      // the LHS.
      return N1;
    case ISD::OR:
      if (!VT.isVector())
        return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
      // For vectors, we can't easily build an all one vector, just return
      // the LHS.
      return N1;
    case ISD::SRA:
      return N1;
    }
  }

  // Memoize this node if possible.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) {
    SDValue Ops[] = { N1, N2 };
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
    void *IP = nullptr;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), VTs, N1, N2);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), VTs, N1, N2);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}
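
// Illustrative example (hypothetical values): for the SIGN_EXTEND_INREG
// constant fold above, N1 = getConstant(0xFF, MVT::i32) extended
// in-register from i8 computes Val = (0xFF << 24).ashr(24) = 0xFFFFFFFF,
// i.e. getConstant(-1, MVT::i32), because bit 7 of the narrow value is
// treated as its sign bit.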

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3) {
  // Perform various simplifications.
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  switch (Opcode) {
  case ISD::FMA: {
    ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
    ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
    ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
    if (N1CFP && N2CFP && N3CFP) {
      APFloat V1 = N1CFP->getValueAPF();
      const APFloat &V2 = N2CFP->getValueAPF();
      const APFloat &V3 = N3CFP->getValueAPF();
      APFloat::opStatus s =
        V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
      if (s != APFloat::opInvalidOp)
        return getConstantFP(V1, VT);
    }
    break;
  }
  case ISD::CONCAT_VECTORS:
    // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
    // one big BUILD_VECTOR.
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR &&
        N3.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
      Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
      return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
    }
    break;
  case ISD::SETCC: {
    // Use FoldSetCC to simplify SETCC's.
    SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
    if (Simp.getNode()) return Simp;
    break;
  }
  case ISD::SELECT:
    if (N1C) {
      if (N1C->getZExtValue())
        return N2;             // select true, X, Y -> X
      return N3;               // select false, X, Y -> Y
    }

    if (N2 == N3) return N2;   // select C, X, X -> X
    break;
  case ISD::VECTOR_SHUFFLE:
    llvm_unreachable("should use getVectorShuffle constructor!");
  case ISD::INSERT_SUBVECTOR: {
    SDValue Index = N3;
    if (VT.isSimple() && N1.getValueType().isSimple()
        && N2.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             N2.getValueType().isVector() &&
             "Insert subvector VTs must be vectors");
      assert(VT == N1.getValueType() &&
             "Dest and insert subvector source types must match!");
      assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
             "Insert subvector must be from smaller vector to larger vector!");
      if (isa<ConstantSDNode>(Index.getNode())) {
        assert((N2.getValueType().getVectorNumElements() +
                cast<ConstantSDNode>(Index.getNode())->getZExtValue()
                <= VT.getVectorNumElements())
               && "Insert subvector overflow!");
      }

      // Trivial insertion.
      if (VT.getSimpleVT() == N2.getSimpleValueType())
        return N2;
    }
    break;
  }
  case ISD::BITCAST:
    // Fold bit_convert nodes from a type to themselves.
    if (N1.getValueType() == VT)
      return N1;
    break;
  }

  // Memoize node if it doesn't produce a flag.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) {
    SDValue Ops[] = { N1, N2, N3 };
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
    void *IP = nullptr;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
                                          DL.getDebugLoc(), VTs, N1, N2, N3);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
                                          DL.getDebugLoc(), VTs, N1, N2, N3);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}
3505 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3506 SDValue N1, SDValue N2, SDValue N3,
3508 SDValue Ops[] = { N1, N2, N3, N4 };
3509 return getNode(Opcode, DL, VT, Ops);
3512 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3513 SDValue N1, SDValue N2, SDValue N3,
3514 SDValue N4, SDValue N5) {
3515 SDValue Ops[] = { N1, N2, N3, N4, N5 };
3516 return getNode(Opcode, DL, VT, Ops);
3519 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3520 /// the incoming stack arguments to be loaded from the stack.
3521 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3522 SmallVector<SDValue, 8> ArgChains;
3524 // Include the original chain at the beginning of the list. When this is
3525 // used by target LowerCall hooks, this helps the legalizer find the
3526 // CALLSEQ_BEGIN node.
3527 ArgChains.push_back(Chain);
3529 // Add a chain value for each stack argument.
3530 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3531 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3532 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3533 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3534 if (FI->getIndex() < 0)
3535 ArgChains.push_back(SDValue(L, 1));
3537 // Build a TokenFactor for all the chains.
3538 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
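// A typical use, sketched, from a target's LowerCall handling of tail calls:
//   // Make sure incoming stack arguments are (re)loaded before the outgoing
//   // call sequence can clobber their slots.
//   Chain = DAG.getStackArgumentTokenFactor(Chain);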
3541 /// getMemsetValue - Vectorized representation of the memset value operand.
3543 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3545 assert(Value.getOpcode() != ISD::UNDEF);
3547 unsigned NumBits = VT.getScalarType().getSizeInBits();
3548 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3549 assert(C->getAPIntValue().getBitWidth() == 8);
3550 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
3552 return DAG.getConstant(Val, VT);
3553 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3556 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3558 // Use a multiplication with 0x010101... to extend the input byte to the required length.
3560 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3561 Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
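// For example, with NumBits == 32 and a runtime memset value v, this emits
// (mul (zext v), 0x01010101); for v == 0xAB the product is 0xABABABAB,
// i.e. the byte splatted across the whole word.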
3567 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
3568 /// used when a memcpy is turned into a memset because the source is a constant string.
3570 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
3571 const TargetLowering &TLI, StringRef Str) {
3572 // Handle vector with all elements zero.
3575 return DAG.getConstant(0, VT);
3576 else if (VT == MVT::f32 || VT == MVT::f64)
3577 return DAG.getConstantFP(0.0, VT);
3578 else if (VT.isVector()) {
3579 unsigned NumElts = VT.getVectorNumElements();
3580 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3581 return DAG.getNode(ISD::BITCAST, dl, VT,
3582 DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3585 llvm_unreachable("Expected type!");
3588 assert(!VT.isVector() && "Can't handle vector type here!");
3589 unsigned NumVTBits = VT.getSizeInBits();
3590 unsigned NumVTBytes = NumVTBits / 8;
3591 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3593 APInt Val(NumVTBits, 0);
3594 if (TLI.isLittleEndian()) {
3595 for (unsigned i = 0; i != NumBytes; ++i)
3596 Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3598 for (unsigned i = 0; i != NumBytes; ++i)
3599 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
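// For example, packing "abcd" into an i32 yields 0x64636261 on a
// little-endian target and 0x61626364 on a big-endian one.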
3602 // If the "cost" of materializing the integer immediate is less than the cost
3603 // of a load, then it is cost effective to turn the load into the immediate.
3604 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
3605 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
3606 return DAG.getConstant(Val, VT);
3607 return SDValue(nullptr, 0);
3610 /// getMemBasePlusOffset - Returns an ADD node computing Base plus the given byte Offset.
3612 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
3613 SelectionDAG &DAG) {
3614 EVT VT = Base.getValueType();
3615 return DAG.getNode(ISD::ADD, dl,
3616 VT, Base, DAG.getConstant(Offset, VT));
3619 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
3621 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3622 unsigned SrcDelta = 0;
3623 GlobalAddressSDNode *G = nullptr;
3624 if (Src.getOpcode() == ISD::GlobalAddress)
3625 G = cast<GlobalAddressSDNode>(Src);
3626 else if (Src.getOpcode() == ISD::ADD &&
3627 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3628 Src.getOperand(1).getOpcode() == ISD::Constant) {
3629 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3630 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3635 return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
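// For example, Src == (add (GlobalAddress @str) (Constant 3)) is accepted
// with SrcDelta == 3, so Str is set to the contents of @str starting at
// byte 3.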
3638 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3639 /// to replace the memset / memcpy. Returns true if the number of memory ops
3640 /// is below the threshold. It returns the types of the sequence of
3641 /// memory ops to perform memset / memcpy by reference.
3642 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3643 unsigned Limit, uint64_t Size,
3644 unsigned DstAlign, unsigned SrcAlign,
3650 const TargetLowering &TLI) {
3651 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3652 "Expecting memcpy / memset source to meet alignment requirement!");
3653 // If 'SrcAlign' is zero, that means the memory operation does not need to
3654 // load the value, i.e. memset or memcpy from constant string. Otherwise,
3655 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3656 // is the specified alignment of the memory operation. If it is zero, that
3657 // means it's possible to change the alignment of the destination.
3658 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3659 // not need to be loaded.
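// For example, a memcpy from a constant string is queried with SrcAlign == 0
// and MemcpyStrSrc == true: the source bytes can be materialized as
// immediates, so no loads are required.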
3660 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3661 IsMemset, ZeroMemset, MemcpyStrSrc,
3662 DAG.getMachineFunction());
3664 if (VT == MVT::Other) {
3666 if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
3667 TLI.allowsUnalignedMemoryAccesses(VT, AS)) {
3668 VT = TLI.getPointerTy();
3670 switch (DstAlign & 7) {
3671 case 0: VT = MVT::i64; break;
3672 case 4: VT = MVT::i32; break;
3673 case 2: VT = MVT::i16; break;
3674 default: VT = MVT::i8; break;
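// For example, DstAlign == 12 gives (12 & 7) == 4, so i32 is used; any odd
// alignment falls through to the i8 default.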
3679 while (!TLI.isTypeLegal(LVT))
3680 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3681 assert(LVT.isInteger());
3687 unsigned NumMemOps = 0;
3689 unsigned VTSize = VT.getSizeInBits() / 8;
3690 while (VTSize > Size) {
3691 // For now, only use non-vector loads / stores for the left-over pieces.
3696 if (VT.isVector() || VT.isFloatingPoint()) {
3697 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
3698 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
3699 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
3701 else if (NewVT == MVT::i64 &&
3702 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
3703 TLI.isSafeMemOpType(MVT::f64)) {
3704 // i64 is usually not legal on 32-bit targets, but f64 may be.
3712 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
3713 if (NewVT == MVT::i8)
3715 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
3717 NewVTSize = NewVT.getSizeInBits() / 8;
3719 // If the new VT cannot cover all of the remaining bits, then consider
3720 // issuing one (or a pair of) unaligned, overlapping loads / stores.
3721 // FIXME: This is only done for 64-bit or wider ops, since we don't have a
3722 // proper cost model for unaligned loads / stores.
3725 if (NumMemOps && AllowOverlap &&
3726 VTSize >= 8 && NewVTSize < Size &&
3727 TLI.allowsUnalignedMemoryAccesses(VT, AS, &Fast) && Fast)
3735 if (++NumMemOps > Limit)
3738 MemOps.push_back(VT);
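// For example, assuming the target chose VT == i32 above, Size == 7 produces
// MemOps == { i32, i16, i8 }; no overlapping op is issued, since that path
// requires VTSize >= 8.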
3745 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3746 SDValue Chain, SDValue Dst,
3747 SDValue Src, uint64_t Size,
3748 unsigned Align, bool isVol,
3750 MachinePointerInfo DstPtrInfo,
3751 MachinePointerInfo SrcPtrInfo) {
3752 // Turn a memcpy of undef to nop.
3753 if (Src.getOpcode() == ISD::UNDEF)
3756 // Expand memcpy to a series of load and store ops if the size operand falls
3757 // below a certain threshold.
3758 // TODO: In the AlwaysInline case, if the size is big then generate a loop
3759 // rather than a potentially huge number of loads and stores.
3760 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3761 std::vector<EVT> MemOps;
3762 bool DstAlignCanChange = false;
3763 MachineFunction &MF = DAG.getMachineFunction();
3764 MachineFrameInfo *MFI = MF.getFrameInfo();
3766 MF.getFunction()->getAttributes().
3767 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3768 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3769 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3770 DstAlignCanChange = true;
3771 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3772 if (Align > SrcAlign)
3775 bool CopyFromStr = isMemSrcFromString(Src, Str);
3776 bool isZeroStr = CopyFromStr && Str.empty();
3777 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
3779 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3780 (DstAlignCanChange ? 0 : Align),
3781 (isZeroStr ? 0 : SrcAlign),
3782 false, false, CopyFromStr, true, DAG, TLI))
3785 if (DstAlignCanChange) {
3786 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3787 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3789 // Don't promote to an alignment that would require dynamic stack realignment.
3791 const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
3792 if (!TRI->needsStackRealignment(MF))
3793 while (NewAlign > Align &&
3794 TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
3797 if (NewAlign > Align) {
3798 // Give the stack frame object a larger alignment if needed.
3799 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3800 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3805 SmallVector<SDValue, 8> OutChains;
3806 unsigned NumMemOps = MemOps.size();
3807 uint64_t SrcOff = 0, DstOff = 0;
3808 for (unsigned i = 0; i != NumMemOps; ++i) {
3810 unsigned VTSize = VT.getSizeInBits() / 8;
3811 SDValue Value, Store;
3813 if (VTSize > Size) {
3814 // Issuing an unaligned load / store pair that overlaps with the previous
3815 // pair. Adjust the offset accordingly.
3816 assert(i == NumMemOps-1 && i != 0);
3817 SrcOff -= VTSize - Size;
3818 DstOff -= VTSize - Size;
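// For example, a 15-byte copy lowered as MemOps == { i64, i64 } places the
// second i64 at offset 7 instead of 8, overlapping the first op by one byte.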
3822 (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
3823 // It's unlikely a store of a vector immediate can be done in a single
3824 // instruction. It would require a load from a constant pool first.
3825 // We only handle zero vectors here.
3826 // FIXME: Handle other cases where store of vector immediate is done in
3827 // a single instruction.
3828 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
3829 if (Value.getNode())
3830 Store = DAG.getStore(Chain, dl, Value,
3831 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3832 DstPtrInfo.getWithOffset(DstOff), isVol,
3836 if (!Store.getNode()) {
3837 // The type might not be legal for the target. This should only happen
3838 // if the type is smaller than a legal type, as on PPC, so the right
3839 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
3840 // to Load/Store if NVT==VT.
3841 // FIXME: Does the case above also need this?
3842 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
3843 assert(NVT.bitsGE(VT));
3844 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
3845 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3846 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
3847 MinAlign(SrcAlign, SrcOff));
3848 Store = DAG.getTruncStore(Chain, dl, Value,
3849 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3850 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
3853 OutChains.push_back(Store);
3859 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3862 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
3863 SDValue Chain, SDValue Dst,
3864 SDValue Src, uint64_t Size,
3865 unsigned Align, bool isVol,
3867 MachinePointerInfo DstPtrInfo,
3868 MachinePointerInfo SrcPtrInfo) {
3869 // Turn a memmove of undef to nop.
3870 if (Src.getOpcode() == ISD::UNDEF)
3873 // Expand memmove to a series of load and store ops if the size operand falls
3874 // below a certain threshold.
3875 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3876 std::vector<EVT> MemOps;
3877 bool DstAlignCanChange = false;
3878 MachineFunction &MF = DAG.getMachineFunction();
3879 MachineFrameInfo *MFI = MF.getFrameInfo();
3880 bool OptSize = MF.getFunction()->getAttributes().
3881 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3882 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3883 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3884 DstAlignCanChange = true;
3885 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3886 if (Align > SrcAlign)
3888 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
3890 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3891 (DstAlignCanChange ? 0 : Align), SrcAlign,
3892 false, false, false, false, DAG, TLI))
3895 if (DstAlignCanChange) {
3896 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3897 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3898 if (NewAlign > Align) {
3899 // Give the stack frame object a larger alignment if needed.
3900 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3901 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3906 uint64_t SrcOff = 0, DstOff = 0;
3907 SmallVector<SDValue, 8> LoadValues;
3908 SmallVector<SDValue, 8> LoadChains;
3909 SmallVector<SDValue, 8> OutChains;
3910 unsigned NumMemOps = MemOps.size();
3911 for (unsigned i = 0; i < NumMemOps; i++) {
3913 unsigned VTSize = VT.getSizeInBits() / 8;
3916 Value = DAG.getLoad(VT, dl, Chain,
3917 getMemBasePlusOffset(Src, SrcOff, dl, DAG),
3918 SrcPtrInfo.getWithOffset(SrcOff), isVol,
3919 false, false, SrcAlign);
3920 LoadValues.push_back(Value);
3921 LoadChains.push_back(Value.getValue(1));
3924 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
3926 for (unsigned i = 0; i < NumMemOps; i++) {
3928 unsigned VTSize = VT.getSizeInBits() / 8;
3931 Store = DAG.getStore(Chain, dl, LoadValues[i],
3932 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
3933 DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
3934 OutChains.push_back(Store);
3938 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3941 /// \brief Lower the call to the 'memset' intrinsic into a series of store operations.
3944 /// \param DAG Selection DAG where lowered code is placed.
3945 /// \param dl Link to corresponding IR location.
3946 /// \param Chain Control flow dependency.
3947 /// \param Dst Pointer to destination memory location.
3948 /// \param Src Value of byte to write into the memory.
3949 /// \param Size Number of bytes to write.
3950 /// \param Align Alignment of the destination in bytes.
3951 /// \param isVol True if destination is volatile.
3952 /// \param DstPtrInfo IR information on the memory pointer.
3953 /// \returns New head in the control flow, if lowering was successful, empty
3954 /// SDValue otherwise.
3956 /// The function tries to replace 'llvm.memset' intrinsic with several store
3957 /// operations and value calculation code. This is usually profitable for small memory sizes.
3959 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
3960 SDValue Chain, SDValue Dst,
3961 SDValue Src, uint64_t Size,
3962 unsigned Align, bool isVol,
3963 MachinePointerInfo DstPtrInfo) {
3964 // Turn a memset of undef to nop.
3965 if (Src.getOpcode() == ISD::UNDEF)
3968 // Expand memset to a series of store ops if the size operand
3969 // falls below a certain threshold.
3970 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3971 std::vector<EVT> MemOps;
3972 bool DstAlignCanChange = false;
3973 MachineFunction &MF = DAG.getMachineFunction();
3974 MachineFrameInfo *MFI = MF.getFrameInfo();
3975 bool OptSize = MF.getFunction()->getAttributes().
3976 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3977 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3978 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3979 DstAlignCanChange = true;
3981 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
3982 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
3983 Size, (DstAlignCanChange ? 0 : Align), 0,
3984 true, IsZeroVal, false, true, DAG, TLI))
3987 if (DstAlignCanChange) {
3988 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3989 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3990 if (NewAlign > Align) {
3991 // Give the stack frame object a larger alignment if needed.
3992 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3993 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3998 SmallVector<SDValue, 8> OutChains;
3999 uint64_t DstOff = 0;
4000 unsigned NumMemOps = MemOps.size();
4002 // Find the largest store and generate the bit pattern for it.
4003 EVT LargestVT = MemOps[0];
4004 for (unsigned i = 1; i < NumMemOps; i++)
4005 if (MemOps[i].bitsGT(LargestVT))
4006 LargestVT = MemOps[i];
4007 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
4009 for (unsigned i = 0; i < NumMemOps; i++) {
4011 unsigned VTSize = VT.getSizeInBits() / 8;
4012 if (VTSize > Size) {
4013 // Issuing an unaligned load / store pair that overlaps with the previous
4014 // pair. Adjust the offset accordingly.
4015 assert(i == NumMemOps-1 && i != 0);
4016 DstOff -= VTSize - Size;
4019 // If this store is smaller than the largest store, see whether we can get
4020 // the smaller value for free with a truncate.
4021 SDValue Value = MemSetValue;
4022 if (VT.bitsLT(LargestVT)) {
4023 if (!LargestVT.isVector() && !VT.isVector() &&
4024 TLI.isTruncateFree(LargestVT, VT))
4025 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
4027 Value = getMemsetValue(Src, VT, DAG, dl);
4029 assert(Value.getValueType() == VT && "Value with wrong type.");
4030 SDValue Store = DAG.getStore(Chain, dl, Value,
4031 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
4032 DstPtrInfo.getWithOffset(DstOff),
4033 isVol, false, Align);
4034 OutChains.push_back(Store);
4035 DstOff += VT.getSizeInBits() / 8;
4039 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4042 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
4043 SDValue Src, SDValue Size,
4044 unsigned Align, bool isVol, bool AlwaysInline,
4045 MachinePointerInfo DstPtrInfo,
4046 MachinePointerInfo SrcPtrInfo) {
4047 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4049 // Check to see if we should lower the memcpy to loads and stores first.
4050 // For cases within the target-specified limits, this is the best choice.
4051 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4053 // Memcpy with size zero? Just return the original chain.
4054 if (ConstantSize->isNullValue())
4057 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4058 ConstantSize->getZExtValue(),Align,
4059 isVol, false, DstPtrInfo, SrcPtrInfo);
4060 if (Result.getNode())
4064 // Then check to see if we should lower the memcpy with target-specific
4065 // code. If the target chooses to do this, this is the next best.
4067 TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
4068 isVol, AlwaysInline,
4069 DstPtrInfo, SrcPtrInfo);
4070 if (Result.getNode())
4073 // If we really need inline code and the target declined to provide it,
4074 // use a (potentially long) sequence of loads and stores.
4076 assert(ConstantSize && "AlwaysInline requires a constant size!");
4077 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4078 ConstantSize->getZExtValue(), Align, isVol,
4079 true, DstPtrInfo, SrcPtrInfo);
4082 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
4083 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
4084 // respect volatile, so they may do things like read or write memory
4085 // beyond the given memory regions. But fixing this isn't easy, and most
4086 // people don't care.
4088 const TargetLowering *TLI = TM.getTargetLowering();
4090 // Emit a library call.
4091 TargetLowering::ArgListTy Args;
4092 TargetLowering::ArgListEntry Entry;
4093 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4094 Entry.Node = Dst; Args.push_back(Entry);
4095 Entry.Node = Src; Args.push_back(Entry);
4096 Entry.Node = Size; Args.push_back(Entry);
4097 // FIXME: pass in SDLoc
4099 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4100 false, false, false, false, 0,
4101 TLI->getLibcallCallingConv(RTLIB::MEMCPY),
4102 /*isTailCall=*/false,
4103 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4104 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
4105 TLI->getPointerTy()),
4107 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4109 return CallResult.second;
4112 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
4113 SDValue Src, SDValue Size,
4114 unsigned Align, bool isVol,
4115 MachinePointerInfo DstPtrInfo,
4116 MachinePointerInfo SrcPtrInfo) {
4117 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4119 // Check to see if we should lower the memmove to loads and stores first.
4120 // For cases within the target-specified limits, this is the best choice.
4121 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4123 // Memmove with size zero? Just return the original chain.
4124 if (ConstantSize->isNullValue())
4128 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
4129 ConstantSize->getZExtValue(), Align, isVol,
4130 false, DstPtrInfo, SrcPtrInfo);
4131 if (Result.getNode())
4135 // Then check to see if we should lower the memmove with target-specific
4136 // code. If the target chooses to do this, this is the next best.
4138 TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4139 DstPtrInfo, SrcPtrInfo);
4140 if (Result.getNode())
4143 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
4144 // not be safe. See memcpy above for more details.
4146 const TargetLowering *TLI = TM.getTargetLowering();
4148 // Emit a library call.
4149 TargetLowering::ArgListTy Args;
4150 TargetLowering::ArgListEntry Entry;
4151 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4152 Entry.Node = Dst; Args.push_back(Entry);
4153 Entry.Node = Src; Args.push_back(Entry);
4154 Entry.Node = Size; Args.push_back(Entry);
4155 // FIXME: pass in SDLoc
4157 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4158 false, false, false, false, 0,
4159 TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
4160 /*isTailCall=*/false,
4161 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4162 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
4163 TLI->getPointerTy()),
4165 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4167 return CallResult.second;
4170 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
4171 SDValue Src, SDValue Size,
4172 unsigned Align, bool isVol,
4173 MachinePointerInfo DstPtrInfo) {
4174 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4176 // Check to see if we should lower the memset to stores first.
4177 // For cases within the target-specified limits, this is the best choice.
4178 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4180 // Memset with size zero? Just return the original chain.
4181 if (ConstantSize->isNullValue())
4185 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4186 Align, isVol, DstPtrInfo);
4188 if (Result.getNode())
4192 // Then check to see if we should lower the memset with target-specific
4193 // code. If the target chooses to do this, this is the next best.
4195 TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4197 if (Result.getNode())
4200 // Emit a library call.
4201 const TargetLowering *TLI = TM.getTargetLowering();
4202 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
4203 TargetLowering::ArgListTy Args;
4204 TargetLowering::ArgListEntry Entry;
4205 Entry.Node = Dst; Entry.Ty = IntPtrTy;
4206 Args.push_back(Entry);
4207 // Extend or truncate the argument to be an i32 value for the call.
4208 if (Src.getValueType().bitsGT(MVT::i32))
4209 Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
4211 Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
4213 Entry.Ty = Type::getInt32Ty(*getContext());
4214 Entry.isSExt = true;
4215 Args.push_back(Entry);
4217 Entry.Ty = IntPtrTy;
4218 Entry.isSExt = false;
4219 Args.push_back(Entry);
4220 // FIXME: pass in SDLoc
4222 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4223 false, false, false, false, 0,
4224 TLI->getLibcallCallingConv(RTLIB::MEMSET),
4225 /*isTailCall=*/false,
4226 /*doesNotReturn*/false, /*isReturnValueUsed=*/false,
4227 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
4228 TLI->getPointerTy()),
4230 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4232 return CallResult.second;
4235 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4236 SDVTList VTList, SDValue *Ops, unsigned NumOps,
4237 MachineMemOperand *MMO,
4238 AtomicOrdering SuccessOrdering,
4239 AtomicOrdering FailureOrdering,
4240 SynchronizationScope SynchScope) {
4241 FoldingSetNodeID ID;
4242 ID.AddInteger(MemVT.getRawBits());
4243 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4244 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4246 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4247 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4248 return SDValue(E, 0);
4251 // Allocate the operands array for the node out of the BumpPtrAllocator, since
4252 // SDNode doesn't have access to it. This memory will be "leaked" when
4253 // the node is deallocated, but recovered when the allocator is released.
4254 // If the number of operands is less than 5 we use AtomicSDNode's internal storage.
4256 SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps) : nullptr;
4258 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
4259 dl.getDebugLoc(), VTList, MemVT,
4260 Ops, DynOps, NumOps, MMO,
4261 SuccessOrdering, FailureOrdering,
4263 CSEMap.InsertNode(N, IP);
4264 AllNodes.push_back(N);
4265 return SDValue(N, 0);
4268 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4269 SDVTList VTList, SDValue *Ops, unsigned NumOps,
4270 MachineMemOperand *MMO,
4271 AtomicOrdering Ordering,
4272 SynchronizationScope SynchScope) {
4273 return getAtomic(Opcode, dl, MemVT, VTList, Ops, NumOps, MMO, Ordering,
4274 Ordering, SynchScope);
4277 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4278 SDValue Chain, SDValue Ptr, SDValue Cmp,
4279 SDValue Swp, MachinePointerInfo PtrInfo,
4281 AtomicOrdering SuccessOrdering,
4282 AtomicOrdering FailureOrdering,
4283 SynchronizationScope SynchScope) {
4284 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4285 Alignment = getEVTAlignment(MemVT);
4287 MachineFunction &MF = getMachineFunction();
4289 // All atomics are load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
4290 // For now, atomics are considered to be volatile always.
4291 // FIXME: Volatile isn't really correct; we should keep track of atomic
4292 // orderings in the memoperand.
4293 unsigned Flags = MachineMemOperand::MOVolatile;
4294 if (Opcode != ISD::ATOMIC_STORE)
4295 Flags |= MachineMemOperand::MOLoad;
4296 if (Opcode != ISD::ATOMIC_LOAD)
4297 Flags |= MachineMemOperand::MOStore;
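// For example, an ATOMIC_CMP_SWAP is neither ATOMIC_STORE nor ATOMIC_LOAD,
// so its memoperand is tagged MOVolatile | MOLoad | MOStore.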
4299 MachineMemOperand *MMO =
4300 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4302 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
4303 SuccessOrdering, FailureOrdering, SynchScope);
4306 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4308 SDValue Ptr, SDValue Cmp,
4309 SDValue Swp, MachineMemOperand *MMO,
4310 AtomicOrdering SuccessOrdering,
4311 AtomicOrdering FailureOrdering,
4312 SynchronizationScope SynchScope) {
4313 assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
4314 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4316 EVT VT = Cmp.getValueType();
4318 SDVTList VTs = getVTList(VT, MVT::Other);
4319 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4320 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 4, MMO, SuccessOrdering,
4321 FailureOrdering, SynchScope);
4324 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4326 SDValue Ptr, SDValue Val,
4327 const Value* PtrVal,
4329 AtomicOrdering Ordering,
4330 SynchronizationScope SynchScope) {
4331 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4332 Alignment = getEVTAlignment(MemVT);
4334 MachineFunction &MF = getMachineFunction();
4335 // An atomic store does not load. An atomic load does not store.
4336 // (An atomicrmw obviously both loads and stores.)
4337 // For now, atomics are considered to be volatile always, and they are chained as such.
4339 // FIXME: Volatile isn't really correct; we should keep track of atomic
4340 // orderings in the memoperand.
4341 unsigned Flags = MachineMemOperand::MOVolatile;
4342 if (Opcode != ISD::ATOMIC_STORE)
4343 Flags |= MachineMemOperand::MOLoad;
4344 if (Opcode != ISD::ATOMIC_LOAD)
4345 Flags |= MachineMemOperand::MOStore;
4347 MachineMemOperand *MMO =
4348 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4349 MemVT.getStoreSize(), Alignment);
4351 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
4352 Ordering, SynchScope);
4355 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4357 SDValue Ptr, SDValue Val,
4358 MachineMemOperand *MMO,
4359 AtomicOrdering Ordering,
4360 SynchronizationScope SynchScope) {
4361 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
4362 Opcode == ISD::ATOMIC_LOAD_SUB ||
4363 Opcode == ISD::ATOMIC_LOAD_AND ||
4364 Opcode == ISD::ATOMIC_LOAD_OR ||
4365 Opcode == ISD::ATOMIC_LOAD_XOR ||
4366 Opcode == ISD::ATOMIC_LOAD_NAND ||
4367 Opcode == ISD::ATOMIC_LOAD_MIN ||
4368 Opcode == ISD::ATOMIC_LOAD_MAX ||
4369 Opcode == ISD::ATOMIC_LOAD_UMIN ||
4370 Opcode == ISD::ATOMIC_LOAD_UMAX ||
4371 Opcode == ISD::ATOMIC_SWAP ||
4372 Opcode == ISD::ATOMIC_STORE) &&
4373 "Invalid Atomic Op");
4375 EVT VT = Val.getValueType();
4377 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
4378 getVTList(VT, MVT::Other);
4379 SDValue Ops[] = {Chain, Ptr, Val};
4380 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 3, MMO, Ordering, SynchScope);
4383 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4384 EVT VT, SDValue Chain,
4386 MachineMemOperand *MMO,
4387 AtomicOrdering Ordering,
4388 SynchronizationScope SynchScope) {
4389 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
4391 SDVTList VTs = getVTList(VT, MVT::Other);
4392 SDValue Ops[] = {Chain, Ptr};
4393 return getAtomic(Opcode, dl, MemVT, VTs, Ops, 2, MMO, Ordering, SynchScope);
4396 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
4397 SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
4402 SmallVector<EVT, 4> VTs;
4403 VTs.reserve(NumOps);
4404 for (unsigned i = 0; i < NumOps; ++i)
4405 VTs.push_back(Ops[i].getValueType());
4406 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs),
4407 ArrayRef<SDValue>(Ops, NumOps));
4411 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4412 ArrayRef<SDValue> Ops,
4413 EVT MemVT, MachinePointerInfo PtrInfo,
4414 unsigned Align, bool Vol,
4415 bool ReadMem, bool WriteMem) {
4416 if (Align == 0) // Ensure that codegen never sees alignment 0
4417 Align = getEVTAlignment(MemVT);
4419 MachineFunction &MF = getMachineFunction();
4422 Flags |= MachineMemOperand::MOStore;
4424 Flags |= MachineMemOperand::MOLoad;
4426 Flags |= MachineMemOperand::MOVolatile;
4427 MachineMemOperand *MMO =
4428 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
4430 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
4434 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
4435 ArrayRef<SDValue> Ops, EVT MemVT,
4436 MachineMemOperand *MMO) {
4437 assert((Opcode == ISD::INTRINSIC_VOID ||
4438 Opcode == ISD::INTRINSIC_W_CHAIN ||
4439 Opcode == ISD::PREFETCH ||
4440 Opcode == ISD::LIFETIME_START ||
4441 Opcode == ISD::LIFETIME_END ||
4442 (Opcode <= INT_MAX &&
4443 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
4444 "Opcode is not a memory-accessing opcode!");
4446 // Memoize the node unless it returns a flag.
4447 MemIntrinsicSDNode *N;
4448 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4449 FoldingSetNodeID ID;
4450 AddNodeIDNode(ID, Opcode, VTList, Ops.data(), Ops.size());
4451 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4453 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4454 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
4455 return SDValue(E, 0);
4458 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4459 dl.getDebugLoc(), VTList, Ops,
4461 CSEMap.InsertNode(N, IP);
4463 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
4464 dl.getDebugLoc(), VTList, Ops,
4467 AllNodes.push_back(N);
4468 return SDValue(N, 0);
4471 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4472 /// MachinePointerInfo record from it. This is particularly useful because the
4473 /// code generator has many cases where it doesn't bother passing in a
4474 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4475 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
4476 // If this is FI+Offset, we can model it.
4477 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
4478 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
4480 // If this is (FI+Offset1)+Offset2, we can model it.
4481 if (Ptr.getOpcode() != ISD::ADD ||
4482 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
4483 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
4484 return MachinePointerInfo();
4486 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4487 return MachinePointerInfo::getFixedStack(FI, Offset+
4488 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
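// For example, Ptr == (add (FrameIndex 1), (Constant 8)) with Offset == 4
// yields MachinePointerInfo::getFixedStack(1, 12).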
4491 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4492 /// MachinePointerInfo record from it. This is particularly useful because the
4493 /// code generator has many cases where it doesn't bother passing in a
4494 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4495 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
4496 // If the 'Offset' value isn't a constant, we can't handle this.
4497 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
4498 return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
4499 if (OffsetOp.getOpcode() == ISD::UNDEF)
4500 return InferPointerInfo(Ptr);
4501 return MachinePointerInfo();
4506 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4507 EVT VT, SDLoc dl, SDValue Chain,
4508 SDValue Ptr, SDValue Offset,
4509 MachinePointerInfo PtrInfo, EVT MemVT,
4510 bool isVolatile, bool isNonTemporal, bool isInvariant,
4511 unsigned Alignment, const MDNode *TBAAInfo,
4512 const MDNode *Ranges) {
4513 assert(Chain.getValueType() == MVT::Other &&
4514 "Invalid chain type");
4515 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4516 Alignment = getEVTAlignment(VT);
4518 unsigned Flags = MachineMemOperand::MOLoad;
4520 Flags |= MachineMemOperand::MOVolatile;
4522 Flags |= MachineMemOperand::MONonTemporal;
4524 Flags |= MachineMemOperand::MOInvariant;
4526 // If we don't have a PtrInfo, infer the trivial frame index case to simplify the code below.
4528 if (PtrInfo.V.isNull())
4529 PtrInfo = InferPointerInfo(Ptr, Offset);
4531 MachineFunction &MF = getMachineFunction();
4532 MachineMemOperand *MMO =
4533 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
4535 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
4539 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4540 EVT VT, SDLoc dl, SDValue Chain,
4541 SDValue Ptr, SDValue Offset, EVT MemVT,
4542 MachineMemOperand *MMO) {
4544 ExtType = ISD::NON_EXTLOAD;
4545 } else if (ExtType == ISD::NON_EXTLOAD) {
4546 assert(VT == MemVT && "Non-extending load from different memory type!");
4549 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
4550 "Should only be an extending load, not truncating!");
4551 assert(VT.isInteger() == MemVT.isInteger() &&
4552 "Cannot convert from FP to Int or Int to FP!");
4553 assert(VT.isVector() == MemVT.isVector() &&
4554 "Cannot use an extending load to convert to or from a vector!");
4555 assert((!VT.isVector() ||
4556 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
4557 "Cannot use an extending load to change the number of vector elements!");
4560 bool Indexed = AM != ISD::UNINDEXED;
4561 assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
4562 "Unindexed load with an offset!");
4564 SDVTList VTs = Indexed ?
4565 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
4566 SDValue Ops[] = { Chain, Ptr, Offset };
4567 FoldingSetNodeID ID;
4568 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
4569 ID.AddInteger(MemVT.getRawBits());
4570 ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
4571 MMO->isNonTemporal(),
4572 MMO->isInvariant()));
4573 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4575 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4576 cast<LoadSDNode>(E)->refineAlignment(MMO);
4577 return SDValue(E, 0);
4579 SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
4580 dl.getDebugLoc(), VTs, AM, ExtType,
4582 CSEMap.InsertNode(N, IP);
4583 AllNodes.push_back(N);
4584 return SDValue(N, 0);
4587 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4588 SDValue Chain, SDValue Ptr,
4589 MachinePointerInfo PtrInfo,
4590 bool isVolatile, bool isNonTemporal,
4591 bool isInvariant, unsigned Alignment,
4592 const MDNode *TBAAInfo,
4593 const MDNode *Ranges) {
4594 SDValue Undef = getUNDEF(Ptr.getValueType());
4595 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4596 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
4600 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4601 SDValue Chain, SDValue Ptr,
4602 MachineMemOperand *MMO) {
4603 SDValue Undef = getUNDEF(Ptr.getValueType());
4604 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4608 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4609 SDValue Chain, SDValue Ptr,
4610 MachinePointerInfo PtrInfo, EVT MemVT,
4611 bool isVolatile, bool isNonTemporal,
4612 unsigned Alignment, const MDNode *TBAAInfo) {
4613 SDValue Undef = getUNDEF(Ptr.getValueType());
4614 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4615 PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
4620 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4621 SDValue Chain, SDValue Ptr, EVT MemVT,
4622 MachineMemOperand *MMO) {
4623 SDValue Undef = getUNDEF(Ptr.getValueType());
4624 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4629 SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
4630 SDValue Offset, ISD::MemIndexedMode AM) {
4631 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
4632 assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
4633 "Load is already an indexed load!");
4634 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
4635 LD->getChain(), Base, Offset, LD->getPointerInfo(),
4636 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4637 false, LD->getAlignment());
4640 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4641 SDValue Ptr, MachinePointerInfo PtrInfo,
4642 bool isVolatile, bool isNonTemporal,
4643 unsigned Alignment, const MDNode *TBAAInfo) {
4644 assert(Chain.getValueType() == MVT::Other &&
4645 "Invalid chain type");
4646 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4647 Alignment = getEVTAlignment(Val.getValueType());
4649 unsigned Flags = MachineMemOperand::MOStore;
4651 Flags |= MachineMemOperand::MOVolatile;
4653 Flags |= MachineMemOperand::MONonTemporal;
4655 if (PtrInfo.V.isNull())
4656 PtrInfo = InferPointerInfo(Ptr);
4658 MachineFunction &MF = getMachineFunction();
4659 MachineMemOperand *MMO =
4660 MF.getMachineMemOperand(PtrInfo, Flags,
4661 Val.getValueType().getStoreSize(), Alignment,
4664 return getStore(Chain, dl, Val, Ptr, MMO);
4667 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4668 SDValue Ptr, MachineMemOperand *MMO) {
4669 assert(Chain.getValueType() == MVT::Other &&
4670 "Invalid chain type");
4671 EVT VT = Val.getValueType();
4672 SDVTList VTs = getVTList(MVT::Other);
4673 SDValue Undef = getUNDEF(Ptr.getValueType());
4674 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4675 FoldingSetNodeID ID;
4676 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4677 ID.AddInteger(VT.getRawBits());
4678 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
4679 MMO->isNonTemporal(), MMO->isInvariant()));
4680 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4682 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4683 cast<StoreSDNode>(E)->refineAlignment(MMO);
4684 return SDValue(E, 0);
4686 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4687 dl.getDebugLoc(), VTs,
4688 ISD::UNINDEXED, false, VT, MMO);
4689 CSEMap.InsertNode(N, IP);
4690 AllNodes.push_back(N);
4691 return SDValue(N, 0);
4694 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4695 SDValue Ptr, MachinePointerInfo PtrInfo,
4696 EVT SVT,bool isVolatile, bool isNonTemporal,
4698 const MDNode *TBAAInfo) {
4699 assert(Chain.getValueType() == MVT::Other &&
4700 "Invalid chain type");
4701 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4702 Alignment = getEVTAlignment(SVT);
4704 unsigned Flags = MachineMemOperand::MOStore;
4706 Flags |= MachineMemOperand::MOVolatile;
4708 Flags |= MachineMemOperand::MONonTemporal;
4710 if (PtrInfo.V.isNull())
4711 PtrInfo = InferPointerInfo(Ptr);
4713 MachineFunction &MF = getMachineFunction();
4714 MachineMemOperand *MMO =
4715 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
4718 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
4721 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
4722 SDValue Ptr, EVT SVT,
4723 MachineMemOperand *MMO) {
4724 EVT VT = Val.getValueType();
4726 assert(Chain.getValueType() == MVT::Other &&
4727 "Invalid chain type");
4729 return getStore(Chain, dl, Val, Ptr, MMO);
4731 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
4732 "Should only be a truncating store, not extending!");
4733 assert(VT.isInteger() == SVT.isInteger() &&
4734 "Can't do FP-INT conversion!");
4735 assert(VT.isVector() == SVT.isVector() &&
4736 "Cannot use trunc store to convert to or from a vector!");
4737 assert((!VT.isVector() ||
4738 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
4739 "Cannot use trunc store to change the number of vector elements!");
4741 SDVTList VTs = getVTList(MVT::Other);
4742 SDValue Undef = getUNDEF(Ptr.getValueType());
4743 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4744 FoldingSetNodeID ID;
4745 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4746 ID.AddInteger(SVT.getRawBits());
4747 ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
4748 MMO->isNonTemporal(), MMO->isInvariant()));
4749 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4751 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4752 cast<StoreSDNode>(E)->refineAlignment(MMO);
4753 return SDValue(E, 0);
4755 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4756 dl.getDebugLoc(), VTs,
4757 ISD::UNINDEXED, true, SVT, MMO);
4758 CSEMap.InsertNode(N, IP);
4759 AllNodes.push_back(N);
4760 return SDValue(N, 0);
4764 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
4765 SDValue Offset, ISD::MemIndexedMode AM) {
4766 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
4767 assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
4768 "Store is already an indexed store!");
4769 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
4770 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
4771 FoldingSetNodeID ID;
4772 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4773 ID.AddInteger(ST->getMemoryVT().getRawBits());
4774 ID.AddInteger(ST->getRawSubclassData());
4775 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
4777 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4778 return SDValue(E, 0);
4780 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
4781 dl.getDebugLoc(), VTs, AM,
4782 ST->isTruncatingStore(),
4784 ST->getMemOperand());
4785 CSEMap.InsertNode(N, IP);
4786 AllNodes.push_back(N);
4787 return SDValue(N, 0);
4790 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
4791 SDValue Chain, SDValue Ptr,
4794 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
4795 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
4798 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4799 const SDUse *Ops, unsigned NumOps) {
4801 case 0: return getNode(Opcode, DL, VT);
4802 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4803 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4804 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4808 // Copy from an SDUse array into an SDValue array for use with
4809 // the regular getNode logic.
4810 SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
4811 return getNode(Opcode, DL, VT, NewOps);
4814 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4815 ArrayRef<SDValue> Ops) {
4816 unsigned NumOps = Ops.size();
4818 case 0: return getNode(Opcode, DL, VT);
4819 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4820 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4821 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4827 case ISD::SELECT_CC: {
4828 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
4829 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
4830 "LHS and RHS of condition must have same type!");
4831 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4832 "True and False arms of SelectCC must have same type!");
4833 assert(Ops[2].getValueType() == VT &&
4834 "select_cc node must be of same type as true and false value!");
4838 assert(NumOps == 5 && "BR_CC takes 5 operands!");
4839 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4840 "LHS/RHS of comparison should match types!");
4847 SDVTList VTs = getVTList(VT);
4849 if (VT != MVT::Glue) {
4850 FoldingSetNodeID ID;
4851 AddNodeIDNode(ID, Opcode, VTs, Ops.data(), NumOps);
4854 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4855 return SDValue(E, 0);
4857 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4858 VTs, Ops.data(), NumOps);
4859 CSEMap.InsertNode(N, IP);
4861 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4862 VTs, Ops.data(), NumOps);
4865 AllNodes.push_back(N);
4869 return SDValue(N, 0);
4872 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4873 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
4874 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
4877 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4878 ArrayRef<SDValue> Ops) {
4879 if (VTList.NumVTs == 1)
4880 return getNode(Opcode, DL, VTList.VTs[0], Ops);
4884 // FIXME: figure out how to safely handle things like
4885 // int foo(int x) { return 1 << (x & 255); }
4886 // int bar() { return foo(256); }
4887 case ISD::SRA_PARTS:
4888 case ISD::SRL_PARTS:
4889 case ISD::SHL_PARTS:
4890 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4891 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
4892 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4893 else if (N3.getOpcode() == ISD::AND)
4894 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
4895 // If the AND is only masking out bits that cannot affect the shift,
4896 // eliminate the AND.
4897 unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
4898 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
4899 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
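// For example, when an i64 shift is expanded into i32 *_PARTS, NumBits is
// 64, so (and x, 63) clears only amount bits that a 64-bit shift cannot use
// anyway, and the AND is removed.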
4905 // Memoize the node unless it returns a flag.
4907 unsigned NumOps = Ops.size();
4908 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4909 FoldingSetNodeID ID;
4910 AddNodeIDNode(ID, Opcode, VTList, Ops.data(), NumOps);
4912 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4913 return SDValue(E, 0);
4916 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
4917 DL.getDebugLoc(), VTList, Ops[0]);
4918 } else if (NumOps == 2) {
4919 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
4920 DL.getDebugLoc(), VTList, Ops[0],
4922 } else if (NumOps == 3) {
4923 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
4924 DL.getDebugLoc(), VTList, Ops[0],
4927 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4928 VTList, Ops.data(), NumOps);
4930 CSEMap.InsertNode(N, IP);
4933 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
4934 DL.getDebugLoc(), VTList, Ops[0]);
4935 } else if (NumOps == 2) {
4936 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
4937 DL.getDebugLoc(), VTList, Ops[0],
4939 } else if (NumOps == 3) {
4940 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
4941 DL.getDebugLoc(), VTList, Ops[0],
4944 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4945 VTList, Ops.data(), NumOps);
4948 AllNodes.push_back(N);
4952 return SDValue(N, 0);
4955 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
4956 return getNode(Opcode, DL, VTList, ArrayRef<SDValue>());
4959 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4961 SDValue Ops[] = { N1 };
4962 return getNode(Opcode, DL, VTList, Ops);
4965 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4966 SDValue N1, SDValue N2) {
4967 SDValue Ops[] = { N1, N2 };
4968 return getNode(Opcode, DL, VTList, Ops);
4971 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4972 SDValue N1, SDValue N2, SDValue N3) {
4973 SDValue Ops[] = { N1, N2, N3 };
4974 return getNode(Opcode, DL, VTList, Ops);
4977 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4978 SDValue N1, SDValue N2, SDValue N3,
4980 SDValue Ops[] = { N1, N2, N3, N4 };
4981 return getNode(Opcode, DL, VTList, Ops);
4984 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4985 SDValue N1, SDValue N2, SDValue N3,
4986 SDValue N4, SDValue N5) {
4987 SDValue Ops[] = { N1, N2, N3, N4, N5 };
4988 return getNode(Opcode, DL, VTList, Ops);
4991 SDVTList SelectionDAG::getVTList(EVT VT) {
4992 return makeVTList(SDNode::getValueTypeList(VT), 1);
4995 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
4996 FoldingSetNodeID ID;
4998 ID.AddInteger(VT1.getRawBits());
4999 ID.AddInteger(VT2.getRawBits());
5002 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5004 EVT *Array = Allocator.Allocate<EVT>(2);
5007 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
5008 VTListMap.InsertNode(Result, IP);
5010 return Result->getSDVTList();
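// Because VT lists are uniqued in VTListMap, repeated calls such as
//   SDVTList VTs = getVTList(MVT::i32, MVT::Other);
// hand back the same interned array, so two SDVTLists for the same types
// compare equal by pointer.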
5013 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
5014 FoldingSetNodeID ID;
5016 ID.AddInteger(VT1.getRawBits());
5017 ID.AddInteger(VT2.getRawBits());
5018 ID.AddInteger(VT3.getRawBits());
5021 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5023 EVT *Array = Allocator.Allocate<EVT>(3);
5027 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
5028 VTListMap.InsertNode(Result, IP);
5030 return Result->getSDVTList();
5033 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
5034 FoldingSetNodeID ID;
5036 ID.AddInteger(VT1.getRawBits());
5037 ID.AddInteger(VT2.getRawBits());
5038 ID.AddInteger(VT3.getRawBits());
5039 ID.AddInteger(VT4.getRawBits());
5042 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5044 EVT *Array = Allocator.Allocate<EVT>(4);
5049 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
5050 VTListMap.InsertNode(Result, IP);
5052 return Result->getSDVTList();
5055 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
5056 unsigned NumVTs = VTs.size();
5057 FoldingSetNodeID ID;
5058 ID.AddInteger(NumVTs);
5059 for (unsigned index = 0; index < NumVTs; index++) {
5060 ID.AddInteger(VTs[index].getRawBits());
5064 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5066 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
5067 std::copy(VTs.begin(), VTs.end(), Array);
5068 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
5069 VTListMap.InsertNode(Result, IP);
5071 return Result->getSDVTList();
5075 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
5076 /// specified operands. If the resultant node already exists in the DAG,
5077 /// this does not modify the specified node, instead it returns the node that
5078 /// already exists. If the resultant node does not exist in the DAG, the
5079 /// input node is returned. As a degenerate case, if you specify the same
5080 /// input operands as the node already has, the input node is returned.
5081 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
5082 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
5084 // Check to see if there is no change.
5085 if (Op == N->getOperand(0)) return N;
5087 // See if the modified node already exists.
5088 void *InsertPos = nullptr;
5089 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
5092 // Nope it doesn't. Remove the node from its current place in the maps.
5094 if (!RemoveNodeFromCSEMaps(N))
5095 InsertPos = nullptr;
5097 // Now we update the operands.
5098 N->OperandList[0].set(Op);
5100 // If this gets put into a CSE map, add it.
5101 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5105 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
5106 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
5108 // Check to see if there is no change.
5109 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
5110 return N; // No operands changed, just return the input node.
5112 // See if the modified node already exists.
5113 void *InsertPos = nullptr;
5114 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
5117 // Nope it doesn't. Remove the node from its current place in the maps.
5119 if (!RemoveNodeFromCSEMaps(N))
5120 InsertPos = nullptr;
5122 // Now we update the operands.
5123 if (N->OperandList[0] != Op1)
5124 N->OperandList[0].set(Op1);
5125 if (N->OperandList[1] != Op2)
5126 N->OperandList[1].set(Op2);
5128 // If this gets put into a CSE map, add it.
5129 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5133 SDNode *SelectionDAG::
5134 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
5135 SDValue Ops[] = { Op1, Op2, Op3 };
5136 return UpdateNodeOperands(N, Ops, 3);
5139 SDNode *SelectionDAG::
5140 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5141 SDValue Op3, SDValue Op4) {
5142 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
5143 return UpdateNodeOperands(N, Ops, 4);
5146 SDNode *SelectionDAG::
5147 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
5148 SDValue Op3, SDValue Op4, SDValue Op5) {
5149 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
5150 return UpdateNodeOperands(N, Ops, 5);
SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
  assert(N->getNumOperands() == NumOps &&
         "Update with wrong number of operands");

  // Check to see if there is no change.
  bool AnyChange = false;
  for (unsigned i = 0; i != NumOps; ++i) {
    if (Ops[i] != N->getOperand(i)) {
      AnyChange = true;
      break;
    }
  }

  // No operands changed, just return the input node.
  if (!AnyChange) return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  for (unsigned i = 0; i != NumOps; ++i)
    if (N->OperandList[i] != Ops[i])
      N->OperandList[i].set(Ops[i]);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

/// DropOperands - Release the operands and set this node to have
/// zero operands.
void SDNode::DropOperands() {
  // Unlike the code in MorphNodeTo that does this, we don't need to
  // watch for dead nodes here.
  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
    SDUse &Use = *I++;
    Use.set(SDValue());
  }
}

/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
///
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, nullptr, 0);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, const SDValue *Ops,
                                   unsigned NumOps) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, const SDValue *Ops,
                                   unsigned NumOps) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)nullptr, 0);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   const SDValue *Ops, unsigned NumOps) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3, EVT VT4,
                                   const SDValue *Ops, unsigned NumOps) {
  SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
  return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2,
                                   SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   SDValue Op1, SDValue Op2,
                                   SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs, const SDValue *Ops,
                                   unsigned NumOps) {
  // Machine opcodes are stored complemented in SDNodes, so the ~MachineOpc
  // here is what SDNode::getMachineOpcode() later undoes.
  N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
  // Reset the NodeID to -1.
  N->setNodeId(-1);
  return N;
}

/// UpdadeSDLocOnMergedSDNode - At -O0, throw away the line number information
/// on the merged node, since it is not possible to record that an operation
/// is associated with multiple lines. This makes the debugger work better at
/// -O0, where there is a higher probability of having other instructions
/// associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
      (OLoc.getDebugLoc() != NLoc)) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node.  If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.  Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
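///
/// A minimal sketch of the usual pattern (hypothetical target and opcode;
/// the SelectNodeTo wrappers above complement the machine opcode before
/// forwarding here):
///
///   SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
///   SDNode *Res = CurDAG->SelectNodeTo(N, X86::ADD32rr, MVT::i32,
///                                      Ops[0], Ops[1]);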
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, const SDValue *Ops,
                                  unsigned NumOps) {
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
    if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
      return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list.  Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
    // Initialize the memory references information.
    MN->setMemRefs(nullptr, nullptr);
    // If NumOps is larger than the # of operands we can have in a
    // MachineSDNode, reallocate the operand list.
    if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
      if (MN->OperandsNeedDelete)
        delete[] MN->OperandList;
      if (NumOps > array_lengthof(MN->LocalOperands))
        // We're creating a final node that will live unmorphed for the
        // remainder of the current SelectionDAG iteration, so we can allocate
        // the operands directly out of a pool with no recycling metadata.
        MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
                         Ops, NumOps);
      else
        MN->InitOperands(MN->LocalOperands, Ops, NumOps);
      MN->OperandsNeedDelete = false;
    } else
      MN->InitOperands(MN->OperandList, Ops, NumOps);
  } else {
    // If NumOps is larger than the # of operands we currently have, reallocate
    // the operand list.
    if (NumOps > N->NumOperands) {
      if (N->OperandsNeedDelete)
        delete[] N->OperandList;
      N->InitOperands(new SDUse[NumOps], Ops, NumOps);
      N->OperandsNeedDelete = true;
    } else
      N->InitOperands(N->OperandList, Ops, NumOps);
  }

  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
  if (!DeadNodeSet.empty()) {
    SmallVector<SDNode *, 16> DeadNodes;
    for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
         E = DeadNodeSet.end(); I != E; ++I)
      if ((*I)->use_empty())
        DeadNodes.push_back(*I);
    RemoveDeadNodes(DeadNodes);
  }

  if (IP)
    CSEMap.InsertNode(N, IP);   // Memoize the new node.
  return N;
}

/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node.  If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
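///
/// For example, a (hypothetical) target selector might materialize a
/// constant with a move-immediate machine instruction:
///
///   MachineSDNode *Mov =
///       CurDAG->getMachineNode(X86::MOV32ri, dl, MVT::i32,
///                              CurDAG->getTargetConstant(42, MVT::i32));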
MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
                             SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
                             SDValue Op1, SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
                             ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, SDValue Op1) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, SDValue Op1,
                             SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2,
                             ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, EVT VT3,
                             SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, EVT VT3,
                             SDValue Op1, SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             EVT VT1, EVT VT2, EVT VT3,
                             ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
                             EVT VT2, EVT VT3, EVT VT4,
                             ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
                             ArrayRef<EVT> ResultTys,
                             ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(ResultTys);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *
SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
                             ArrayRef<SDValue> OpsArray) {
  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
  MachineSDNode *N;
  void *IP = nullptr;
  const SDValue *Ops = OpsArray.data();
  unsigned NumOps = OpsArray.size();

  if (DoCSE) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
      return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
    }
  }

  // Allocate a new MachineSDNode.
  N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs);

  // Initialize the operands list.
  if (NumOps > array_lengthof(N->LocalOperands))
    // We're creating a final node that will live unmorphed for the
    // remainder of the current SelectionDAG iteration, so we can allocate
    // the operands directly out of a pool with no recycling metadata.
    N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
                    Ops, NumOps);
  else
    N->InitOperands(N->LocalOperands, Ops, NumOps);
  N->OperandsNeedDelete = false;

  if (DoCSE)
    CSEMap.InsertNode(N, IP);

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifyMachineNode(N);
#endif
  return N;
}

/// getTargetExtractSubreg - A convenience function for creating
/// TargetOpcode::EXTRACT_SUBREG nodes.
SDValue
SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
                                     SDValue Operand) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
  SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                  VT, Operand, SRIdxVal);
  return SDValue(Subreg, 0);
}

/// getTargetInsertSubreg - A convenience function for creating
/// TargetOpcode::INSERT_SUBREG nodes.
SDValue
SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
                                    SDValue Operand, SDValue Subreg) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
  SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                  VT, Operand, Subreg, SRIdxVal);
  return SDValue(Result, 0);
}

/// getNodeIfExists - Get the specified node if it's already available, or
/// else return null.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
                                      const SDValue *Ops, unsigned NumOps) {
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
    void *IP = nullptr;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return E;
  }
  return nullptr;
}

/// getDbgValue - Creates a SDDbgValue node.
///
/// SDNode
SDDbgValue *
SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R,
                          bool IsIndirect, uint64_t Off,
                          DebugLoc DL, unsigned O) {
  return new (Allocator) SDDbgValue(MDPtr, N, R, IsIndirect, Off, DL, O);
}

/// Constant
SDDbgValue *
SelectionDAG::getConstantDbgValue(MDNode *MDPtr, const Value *C,
                                  uint64_t Off,
                                  DebugLoc DL, unsigned O) {
  return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
}

/// FrameIndex
SDDbgValue *
SelectionDAG::getFrameIndexDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
                                    DebugLoc DL, unsigned O) {
  return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
}

namespace {

/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SDNode::use_iterator &UI;
  SDNode::use_iterator &UE;

  void NodeDeleted(SDNode *N, SDNode *E) override {
    // Increment the iterator as needed.
    while (UI != UE && N == *UI)
      ++UI;
  }

public:
  RAUWUpdateListener(SelectionDAG &d,
                     SDNode::use_iterator &ui,
                     SDNode::use_iterator &ue)
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};

}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
///
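/// For example, a (hypothetical) DAG combine that folds (add X, 0) forwards
/// all uses of the add's single result to X:
///
///   DAG.ReplaceAllUsesWith(SDValue(AddNode, 0), X);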
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
  SDNode *From = FromN.getNode();
  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
         "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace uses of with self");

  // Iterate over all the existing uses of From. New uses will be added
  // to the beginning of the use list, which we avoid visiting.
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // too. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values.  To must match the
/// number and types of values returned by From.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
  if (From->getNumValues() == 1)  // Handle the simple case efficiently.
    return ReplaceAllUsesWith(SDValue(From, 0), To[0]);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      const SDValue &ToOp = To[Use.getResNo()];
      ++UI;
      Use.set(ToOp);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To[getRoot().getResNo()]));
}

/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.
///
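/// For example, a (hypothetical) caller can re-route just the chain result
/// of a load (result 1), leaving uses of the loaded value (result 0)
/// untouched:
///
///   DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain);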
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
  // Handle the really simple, really trivial case efficiently.
  if (From == To) return;

  // Handle the simple, trivial, case efficiently.
  if (From.getNode()->getNumValues() == 1) {
    ReplaceAllUsesWith(From, To);
    return;
  }

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From.getNode()->use_begin(),
                       UE = From.getNode()->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;
    bool UserRemovedFromCSEMaps = false;

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();

      // Skip uses of different values from the same node.
      if (Use.getResNo() != From.getResNo()) {
        ++UI;
        continue;
      }

      // If this node hasn't been modified yet, it's still in the CSE maps,
      // so remove its old self from the CSE maps.
      if (!UserRemovedFromCSEMaps) {
        RemoveNodeFromCSEMaps(User);
        UserRemovedFromCSEMaps = true;
      }

      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // We are iterating over all uses of the From node, so if a use
    // doesn't use the specific value, no changes are made.
    if (!UserRemovedFromCSEMaps)
      continue;

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot())
    setRoot(To);
}

namespace {
  /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
  /// to record information about a use.
  struct UseMemo {
    SDNode *User;
    unsigned Index;
    SDUse *Use;
  };

  /// operator< - Sort Memos by User.
  bool operator<(const UseMemo &L, const UseMemo &R) {
    return (intptr_t)L.User < (intptr_t)R.User;
  }
}

/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.  The same value
/// may appear in both the From and To list.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  // Read up all the uses and make records of them. This helps
  // processing new uses that are introduced during the
  // replacement process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
         E = FromNode->use_end(); UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  std::sort(Uses.begin(), Uses.end());

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From.  If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}

/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order, moving the nodes into that order in the
/// DAG's node list as it goes. It returns the maximum id.
unsigned SelectionDAG::AssignTopologicalOrder() {

  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values. After, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
    SDNode *N = I++;
    checkForCycles(N);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands; add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q = N;
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
    SDNode *N = I;
    checkForCycles(N);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (I == SortedPos) {
#ifndef NDEBUG
      SDNode *S = ++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull();
#endif
      llvm_unreachable(nullptr);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  DbgInfo->add(DB, SD, isParameter);
  if (SD)
    SD->setHasDebugValue(true);
}

/// TransferDbgValues - Transfer SDDbgValues from From to To, cloning any
/// SDNODE-kind debug values so that they refer to To's node.
void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
  if (From == To || !From.getNode()->getHasDebugValue())
    return;
  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
       I != E; ++I) {
    SDDbgValue *Dbg = *I;
    if (Dbg->getKind() == SDDbgValue::SDNODE) {
      SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
                                      Dbg->isIndirect(),
                                      Dbg->getOffset(), Dbg->getDebugLoc(),
                                      Dbg->getOrder());
      ClonedDVs.push_back(Clone);
    }
  }
  for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
         E = ClonedDVs.end(); I != E; ++I)
    AddDbgValue(*I, ToNode, false);
}

//===----------------------------------------------------------------------===//
//                              SDNode Class
//===----------------------------------------------------------------------===//

HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         DebugLoc DL, const GlobalValue *GA,
                                         EVT VT, int64_t o, unsigned char TF)
  : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
                                         SDValue X, unsigned SrcAS,
                                         unsigned DestAS)
  : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
    SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
                     EVT memvt, MachineMemOperand *mmo)
 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
                                      MMO->isNonTemporal(), MMO->isInvariant());
  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
  assert(isNonTemporal() == MMO->isNonTemporal() &&
         "Non-temporal encoding error!");
  assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
                     const SDValue *Ops, unsigned NumOps, EVT memvt,
                     MachineMemOperand *mmo)
   : SDNode(Opc, Order, dl, VTs, Ops, NumOps),
     MemoryVT(memvt), MMO(mmo) {
  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
                                      MMO->isNonTemporal(), MMO->isInvariant());
  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
  assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
}

/// Profile - Gather unique data for the node.
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {
  struct EVTArray {
    std::vector<EVT> VTs;

    EVTArray() {
      VTs.reserve(MVT::LAST_VALUETYPE);
      for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
        VTs.push_back(MVT((MVT::SimpleValueType)i));
    }
  };
}

static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true> > VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}

/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there are any uses of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only use of N.
///
bool SDNode::isOnlyUserOf(SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this value is an operand of N.
///
bool SDValue::isOperandOf(SDNode *N) const {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (*this == N->getOperand(i))
      return true;
  return false;
}

bool SDNode::isOperandOf(SDNode *N) const {
  for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
    if (this == N->OperandList[i].getNode())
      return true;
  return false;
}

/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path.  In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in; it does not do an exhaustive search.
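///
/// For example, a (hypothetical) combine might use this as a guard before
/// folding across the chain:
///
///   if (Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
///     // Safe: no side-effecting node sits between Chain and the load.
///   }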
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply, we just want to be able to see through
  // TokenFactor's etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel. If any
  // of the operands of the TF does not reach dest, then we cannot do the xform.
  if (getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
        return false;
    return true;
  }

  // Loads don't have side effects, look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
  }
  return false;
}

/// hasPredecessor - Return true if N is a predecessor of this node.
/// N is either an operand of this node, or can be reached by recursively
/// traversing up the operands.
/// NOTE: This is an expensive method. Use it carefully.
bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  return hasPredecessorHelper(N, Visited, Worklist);
}

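/// hasPredecessorHelper - Worklist-driven implementation of hasPredecessor.
/// The caller owns Visited and Worklist, so repeated queries against the
/// same node can share the already-explored search state instead of
/// restarting from scratch each time.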
bool
SDNode::hasPredecessorHelper(const SDNode *N,
                             SmallPtrSet<const SDNode *, 32> &Visited,
                             SmallVectorImpl<const SDNode *> &Worklist) const {
  if (Visited.empty()) {
    Worklist.push_back(this);
  } else {
    // Take a look in the visited set. If we've already encountered this node
    // we needn't search further.
    if (Visited.count(N))
      return true;
  }

  // Haven't visited N yet. Continue the search.
  while (!Worklist.empty()) {
    const SDNode *M = Worklist.pop_back_val();
    for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
      SDNode *Op = M->getOperand(i).getNode();
      if (Visited.insert(Op))
        Worklist.push_back(Op);
      if (Op == N)
        return true;
    }
  }

  return false;
}

uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
  assert(Num < NumOperands && "Invalid child # of SDNode!");
  return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
}

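/// UnrollVectorOp - Turn a vector operation into a sequence of scalar
/// operations on the element type, padding any extra lanes with UNDEF and
/// recombining the scalars with BUILD_VECTOR. If ResNE is zero, the full
/// element count is unrolled; otherwise at most ResNE lanes are produced.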
SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        const TargetLowering *TLI = TM.getTargetLowering();
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                              OperandEltVT,
                              Operand,
                              getConstant(i, TLI->getVectorIdxTy()));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands));
      break;
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                               getShiftAmountOperand(Operands[0].getValueType(),
                                                     Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  return getNode(ISD::BUILD_VECTOR, dl,
                 EVT::getVectorVT(*getContext(), EltVT, ResNE), Scalars);
}

/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
/// location that is 'Dist' units away from the location that the 'Base' load
/// is loading from.
bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
                                     unsigned Bytes, int Dist) const {
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }

  // Handle X + C.
  if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
      cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
    return true;

  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  const TargetLowering *TLI = TM.getTargetLowering();
  bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}

/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  const TargetLowering *TLI = TM.getTargetLowering();
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
    APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
    llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
                            TLI->getDataLayout());
    unsigned AlignBits = KnownZero.countTrailingOnes();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = 1 << 31;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst.
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != (1 << 31)) {
    const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector()) {
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  } else {
    unsigned NumElements = VT.getVectorNumElements();
    assert(!(NumElements & 1) && "Splitting vector, but not in half!");
    LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                   NumElements/2);
  }
  return std::make_pair(LoVT, HiVT);
}

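// A minimal sketch of the usual pairing (hypothetical caller): compute the
// destination types first, then split the value itself:
//
//   EVT LoVT, HiVT;
//   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(Op.getValueType());
//   SDValue Lo, Hi;
//   std::tie(Lo, Hi) = DAG.SplitVector(Op, SDLoc(Op), LoVT, HiVT);
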
/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
         N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, TLI->getVectorIdxTy()));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), TLI->getVectorIdxTy()));
  return std::make_pair(Lo, Hi);
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy();
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, IdxTy)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
                                        APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool isBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned sz = VT.getSizeInBits();
  if (MinSplatBits > sz)
    return false;

  SplatValue = APInt(sz, 0);
  SplatUndef = APInt(sz, 0);

  // Get the bits.  Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue.  If any of the values are not constant, give up and return
  // false.
  unsigned int nOps = getNumOperands();
  assert(nOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();

  for (unsigned j = 0; j < nOps; ++j) {
    unsigned i = isBigEndian ? nOps-1-j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltBitSize;

    if (OpVal.getOpcode() == ISD::UNDEF)
      SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
    else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
                    zextOrTrunc(sz) << BitPos;
    else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
    else
      return false;
  }

  // The build_vector is all constants or undefs.  Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);
  while (sz > 8) {
    unsigned HalfSize = sz / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    sz = HalfSize;
  }

  SplatBitSize = sz;
  return true;
}

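// For example, a (hypothetical) target query can test whether a BUILD_VECTOR
// node BVN splats at least 8 meaningful bits:
//
//   APInt SplatBits, SplatUndef;
//   unsigned SplatBitSize;
//   bool HasAnyUndefs;
//   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
//                            HasAnyUndefs, 8)) {
//     // SplatBits holds the splatted value.
//   }
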
/// getConstantSplatValue - If this BUILD_VECTOR is a constant splat (every
/// operand is the same ISD::Constant), return that constant; otherwise
/// return null.
ConstantSDNode *BuildVectorSDNode::getConstantSplatValue() const {
  SDValue Op0 = getOperand(0);
  if (Op0.getOpcode() != ISD::Constant)
    return nullptr;

  for (unsigned i = 1, e = getNumOperands(); i != e; ++i)
    if (getOperand(i) != Op0)
      return nullptr;

  return cast<ConstantSDNode>(Op0);
}

/// isConstant - Return true if every operand is either a constant, an FP
/// constant, or undef.
bool BuildVectorSDNode::isConstant() const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    unsigned Opc = getOperand(i).getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

#ifdef XDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSet<const SDNode*, 32> &Visited,
                                 SmallPtrSet<const SDNode*, 32> &Checked) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N)) {
    dbgs() << "Offending node:\n";
    N->dumprFull();
    errs() << "Detected cycle in SelectionDAG\n";
    abort();
  }

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);

  // Remember that this node has been checked, and take it off the current
  // depth-first path.
  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N) {
#ifdef XDEBUG
  assert(N && "Checking nonexistent SDNode");
  SmallPtrSet<const SDNode*, 32> visited;
  SmallPtrSet<const SDNode*, 32> checked;
  checkForCyclesHelper(N, visited, checked);
#endif
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
  checkForCycles(DAG->getRoot().getNode());
}