#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include <queue>
#include <set>
STATISTIC(NumFPKill , "Number of FP_REG_KILL instructions added");
STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
-
//===----------------------------------------------------------------------===//
// Pattern Matcher Implementation
//===----------------------------------------------------------------------===//
/// getTruncate - return an SDNode that implements a subreg based truncate
/// of the specified operand to the specified value type.
- SDNode *getTruncate(SDOperand N0, MVT::ValueType VT);
+ SDNode *getTruncate(SDOperand N0, MVT VT);
#ifndef NDEBUG
unsigned Indent;
};
}
+/// findFlagUse - Return use of MVT::Flag value produced by the specified SDNode.
+///
static SDNode *findFlagUse(SDNode *N) {
unsigned FlagResNo = N->getNumValues()-1;
for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
- SDNode *User = *I;
+ SDNode *User = I->getUser();
for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
SDOperand Op = User->getOperand(i);
if (Op.Val == N && Op.ResNo == FlagResNo)
return NULL;
}
+/// findNonImmUse - Return true by reference in "found" if "Use" is a
+/// non-immediate use of "Def". This function recursively traverses
+/// up the operand chain ignoring certain nodes.
static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
                          SDNode *Root, SDNode *Skip, bool &found,
-                         std::set<SDNode *> &Visited) {
+                         SmallPtrSet<SDNode*, 16> &Visited) {
  if (found ||
      Use->getNodeId() > Def->getNodeId() ||
-      !Visited.insert(Use).second)
+      !Visited.insert(Use))
    return;
-
+
  for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
    SDNode *N = Use->getOperand(i).Val;
    if (N == Skip)
      continue;
    if (N == Def) {
      if (Use == ImmedUse)
-        continue; // Immediate use is ok.
+        continue; // We are not looking for immediate use.
      if (Use == Root) {
+        // Must be a chain reading node where it is possible to reach its own
+        // chain operand through a path started from another operand.
        assert(Use->getOpcode() == ISD::STORE ||
-               Use->getOpcode() == X86ISD::CMP);
+               Use->getOpcode() == X86ISD::CMP ||
+               Use->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
+               Use->getOpcode() == ISD::INTRINSIC_VOID);
        continue;
      }
      found = true;
      break;
    }
+
+    // Traverse up the operand chain.
    findNonImmUse(N, Def, ImmedUse, Root, Skip, found, Visited);
  }
}
/// its chain operand.
static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
SDNode *Skip = NULL) {
- std::set<SDNode *> Visited;
+ SmallPtrSet<SDNode*, 16> Visited;
bool found = false;
findNonImmUse(Root, Def, ImmedUse, Root, Skip, found, Visited);
return found;
// NU), then TF is a predecessor of FU and a successor of NU. But since
// NU and FU are flagged together, this effectively creates a cycle.
bool HasFlagUse = false;
- MVT::ValueType VT = Root->getValueType(Root->getNumValues()-1);
+ MVT VT = Root->getValueType(Root->getNumValues()-1);
while ((VT == MVT::Flag && !Root->use_empty())) {
SDNode *FU = findFlagUse(Root);
if (FU == NULL)
Store.getOperand(2), Store.getOperand(3));
}
+/// isRMWLoad - Return true if N is a load that's part of RMW sub-DAG.
+/// N may be wrapped in a BIT_CONVERT. To qualify, the load must be
+/// non-volatile and unindexed, be a non-extending or any-extending load,
+/// have a single use, load from the given Address, and be an operand of
+/// the Chain node. On success the (possibly bitcast-wrapped) load is
+/// returned in Load.
+static bool isRMWLoad(SDOperand N, SDOperand Chain, SDOperand Address,
+                      SDOperand &Load) {
+  // Look through a bitcast to the underlying load.
+  if (N.getOpcode() == ISD::BIT_CONVERT)
+    N = N.getOperand(0);
+
+  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
+  if (!LD || LD->isVolatile())
+    return false;
+  if (LD->getAddressingMode() != ISD::UNINDEXED)
+    return false;
+
+  // Only non-extending and any-extending loads can be folded into the
+  // read-modify-write instruction.
+  ISD::LoadExtType ExtType = LD->getExtensionType();
+  if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
+    return false;
+
+  if (N.hasOneUse() &&
+      N.getOperand(1) == Address &&
+      N.Val->isOperandOf(Chain.Val)) {
+    Load = N;
+    return true;
+  }
+  return false;
+}
+
/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -fast mode (aka -O0).
/// This allows the instruction selector to pick more read-modify-write
SDOperand N1 = I->getOperand(1);
SDOperand N2 = I->getOperand(2);
- if (MVT::isFloatingPoint(N1.getValueType()) ||
- MVT::isVector(N1.getValueType()) ||
+ if ((N1.getValueType().isFloatingPoint() &&
+ !N1.getValueType().isVector()) ||
!N1.hasOneUse())
continue;
case ISD::OR:
case ISD::XOR:
case ISD::ADDC:
- case ISD::ADDE: {
+ case ISD::ADDE:
+ case ISD::VECTOR_SHUFFLE: {
SDOperand N10 = N1.getOperand(0);
SDOperand N11 = N1.getOperand(1);
- if (ISD::isNON_EXTLoad(N10.Val))
- RModW = true;
- else if (ISD::isNON_EXTLoad(N11.Val)) {
- RModW = true;
- std::swap(N10, N11);
- }
- RModW = RModW && N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
- (N10.getOperand(1) == N2) &&
- (N10.Val->getValueType(0) == N1.getValueType());
- if (RModW)
- Load = N10;
+ RModW = isRMWLoad(N10, Chain, N2, Load);
+ if (!RModW)
+ RModW = isRMWLoad(N11, Chain, N2, Load);
break;
}
case ISD::SUB:
case X86ISD::SHLD:
case X86ISD::SHRD: {
SDOperand N10 = N1.getOperand(0);
- if (ISD::isNON_EXTLoad(N10.Val))
- RModW = N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
- (N10.getOperand(1) == N2) &&
- (N10.Val->getValueType(0) == N1.getValueType());
- if (RModW)
- Load = N10;
+ RModW = isRMWLoad(N10, Chain, N2, Load);
break;
}
}
// If the source and destination are SSE registers, then this is a legal
// conversion that should not be lowered.
- MVT::ValueType SrcVT = N->getOperand(0).getValueType();
- MVT::ValueType DstVT = N->getValueType(0);
+ MVT SrcVT = N->getOperand(0).getValueType();
+ MVT DstVT = N->getValueType(0);
bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
if (SrcIsSSE && DstIsSSE)
continue;
- // If this is an FPStack extension (but not a truncation), it is a noop.
- if (!SrcIsSSE && !DstIsSSE && N->getOpcode() == ISD::FP_EXTEND)
- continue;
-
+ if (!SrcIsSSE && !DstIsSSE) {
+ // If this is an FPStack extension, it is a noop.
+ if (N->getOpcode() == ISD::FP_EXTEND)
+ continue;
+ // If this is a value-preserving FPStack truncation, it is a noop.
+ if (N->getConstantOperandVal(1))
+ continue;
+ }
+
// Here we could have an FP stack truncation or an FPStack <-> SSE convert.
// FPStack has extload and truncstore. SSE can fold direct loads into other
// operations. Based on this, decide what we want to do.
- MVT::ValueType MemVT;
+ MVT MemVT;
if (N->getOpcode() == ISD::FP_ROUND)
MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
else
DAG.RemoveDeadNodes();
- // Emit machine code to BB.
+ // Emit machine code to BB. This can change 'BB' to the last block being
+ // inserted into.
ScheduleAndEmitDAG(DAG);
// If we are emitting FP stack code, scan the basic block to determine if this
// Scan all of the machine instructions in these MBBs, checking for FP
// stores. (RFP32 and RFP64 will not exist in SSE mode, but RFP80 might.)
MachineFunction::iterator MBBI = FirstMBB;
- do {
+ MachineFunction::iterator EndMBB = BB; ++EndMBB;
+ for (; MBBI != EndMBB; ++MBBI) {
+ MachineBasicBlock *MBB = MBBI;
+
+ // If this block returns, ignore it. We don't want to insert an FP_REG_KILL
+ // before the return.
+ if (!MBB->empty()) {
+ MachineBasicBlock::iterator EndI = MBB->end();
+ --EndI;
+ if (EndI->getDesc().isReturn())
+ continue;
+ }
+
bool ContainsFPCode = false;
- for (MachineBasicBlock::iterator I = MBBI->begin(), E = MBBI->end();
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
!ContainsFPCode && I != E; ++I) {
if (I->getNumOperands() != 0 && I->getOperand(0).isRegister()) {
const TargetRegisterClass *clas;
for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
if (I->getOperand(op).isRegister() && I->getOperand(op).isDef() &&
- TargetRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
+ TargetRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
((clas = RegInfo->getRegClass(I->getOperand(0).getReg())) ==
X86::RFP32RegisterClass ||
clas == X86::RFP64RegisterClass ||
}
// Finally, if we found any FP code, emit the FP_REG_KILL instruction.
if (ContainsFPCode) {
- BuildMI(*MBBI, MBBI->getFirstTerminator(),
+ BuildMI(*MBB, MBBI->getFirstTerminator(),
TM.getInstrInfo()->get(X86::FP_REG_KILL));
++NumFPKill;
}
- } while (&*(MBBI++) != BB);
+ }
}
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
// On x86-64, the resultant disp must fit in 32-bits.
isInt32(AM.Disp + CN->getSignExtended()) &&
// Check to see if the LHS & C is zero.
- CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getValue())) {
+ CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
AM.Disp += CN->getValue();
return false;
}
if (MatchAddress(N, AM))
return false;
- MVT::ValueType VT = N.getValueType();
+ MVT VT = N.getValueType();
if (AM.BaseType == X86ISelAddressMode::RegBase) {
if (!AM.Base.Reg.Val)
AM.Base.Reg = CurDAG->getRegister(0, VT);
// Also handle the case where we explicitly require zeros in the top
// elements. This is a vector shuffle from the zero vector.
- if (N.getOpcode() == ISD::VECTOR_SHUFFLE && N.Val->hasOneUse() &&
+ if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.Val->hasOneUse() &&
// Check to see if the top elements are all zeros (or bitcast of zeros).
- ISD::isBuildVectorAllZeros(N.getOperand(0).Val) &&
- N.getOperand(1).getOpcode() == ISD::SCALAR_TO_VECTOR &&
- N.getOperand(1).Val->hasOneUse() &&
- ISD::isNON_EXTLoad(N.getOperand(1).getOperand(0).Val) &&
- N.getOperand(1).getOperand(0).hasOneUse()) {
- // Check to see if the shuffle mask is 4/L/L/L or 2/L, where L is something
- // from the LHS.
- unsigned VecWidth=MVT::getVectorNumElements(N.getOperand(0).getValueType());
- SDOperand ShufMask = N.getOperand(2);
- assert(ShufMask.getOpcode() == ISD::BUILD_VECTOR && "Invalid shuf mask!");
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(ShufMask.getOperand(0))) {
- if (C->getValue() == VecWidth) {
- for (unsigned i = 1; i != VecWidth; ++i) {
- if (ShufMask.getOperand(i).getOpcode() == ISD::UNDEF) {
- // ok.
- } else {
- ConstantSDNode *C = cast<ConstantSDNode>(ShufMask.getOperand(i));
- if (C->getValue() >= VecWidth) return false;
- }
- }
- }
-
- // Okay, this is a zero extending load. Fold it.
- LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(1).getOperand(0));
- if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
- return false;
- OutChain = LD->getChain();
- InChain = SDOperand(LD, 1);
- return true;
- }
+ N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
+ N.getOperand(0).Val->hasOneUse() &&
+ ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).Val) &&
+ N.getOperand(0).getOperand(0).hasOneUse()) {
+ // Okay, this is a zero extending load. Fold it.
+ LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
+ if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
+ return false;
+ OutChain = LD->getChain();
+ InChain = SDOperand(LD, 1);
+ return true;
}
return false;
}
if (MatchAddress(N, AM))
return false;
- MVT::ValueType VT = N.getValueType();
+ MVT VT = N.getValueType();
unsigned Complexity = 0;
if (AM.BaseType == X86ISelAddressMode::RegBase)
if (AM.Base.Reg.Val)
return FindCallStartFromCall(Node->getOperand(0).Val);
}
-SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT::ValueType VT) {
+SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT VT) {
SDOperand SRIdx;
- switch (VT) {
+ switch (VT.getSimpleVT()) {
+ default: assert(0 && "Unknown truncate!");
case MVT::i8:
SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
// Ensure that the source register has an 8-bit subreg on 32-bit targets
if (!Subtarget->is64Bit()) {
unsigned Opc;
- MVT::ValueType VT;
- switch (N0.getValueType()) {
+ MVT VT;
+ switch (N0.getValueType().getSimpleVT()) {
default: assert(0 && "Unknown truncate!");
case MVT::i16:
Opc = X86::MOV16to16_;
case MVT::i32:
SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3
break;
- default: assert(0 && "Unknown truncate!"); break;
}
return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, VT, N0, SRIdx);
}
SDNode *X86DAGToDAGISel::Select(SDOperand N) {
SDNode *Node = N.Val;
- MVT::ValueType NVT = Node->getValueType(0);
+ MVT NVT = Node->getValueType(0);
unsigned Opc, MOpc;
unsigned Opcode = Node->getOpcode();
case X86ISD::GlobalBaseReg:
return getGlobalBaseReg();
- case X86ISD::FP_GET_RESULT2: {
- SDOperand Chain = N.getOperand(0);
- SDOperand InFlag = N.getOperand(1);
- AddToISelQueue(Chain);
- AddToISelQueue(InFlag);
- std::vector<MVT::ValueType> Tys;
- Tys.push_back(MVT::f80);
- Tys.push_back(MVT::f80);
- Tys.push_back(MVT::Other);
- Tys.push_back(MVT::Flag);
- SDOperand Ops[] = { Chain, InFlag };
- SDNode *ResNode = CurDAG->getTargetNode(X86::FpGETRESULT80x2, Tys,
- Ops, 2);
- Chain = SDOperand(ResNode, 2);
- InFlag = SDOperand(ResNode, 3);
- ReplaceUses(SDOperand(N.Val, 2), Chain);
- ReplaceUses(SDOperand(N.Val, 3), InFlag);
- return ResNode;
- }
-
case ISD::ADD: {
// Turn ADD X, c to MOV32ri X+c. This cannot be done with tblgen'd
// code and is matched first so to prevent it from being turned into
// RIP-relative addressing.
if (TM.getCodeModel() != CodeModel::Small)
break;
- MVT::ValueType PtrVT = TLI.getPointerTy();
+ MVT PtrVT = TLI.getPointerTy();
SDOperand N0 = N.getOperand(0);
SDOperand N1 = N.getOperand(1);
if (N.Val->getValueType(0) == PtrVT &&
SDOperand N0 = Node->getOperand(0);
SDOperand N1 = Node->getOperand(1);
- // There are several forms of IMUL that just return the low part and
- // don't have fixed-register operands. If we don't need the high part,
- // use these instead. They can be selected with the generated ISel code.
- if (NVT != MVT::i8 &&
- N.getValue(1).use_empty()) {
- N = CurDAG->getNode(ISD::MUL, NVT, N0, N1);
- break;
- }
-
bool isSigned = Opcode == ISD::SMUL_LOHI;
if (!isSigned)
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
}
else
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
}
unsigned LoReg, HiReg;
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break;
case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break;
bool isSigned = Opcode == ISD::SDIVREM;
if (!isSigned)
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
}
else
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
unsigned LoReg, HiReg;
unsigned ClrOpcode, SExtOpcode;
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8:
LoReg = X86::AL; HiReg = X86::AH;
}
case ISD::ANY_EXTEND: {
+    // Check if the type we are extending to supports subregisters.
+ if (NVT == MVT::i8)
+ break;
+
SDOperand N0 = Node->getOperand(0);
+    // Get the subregister index for the type to extend.
+ MVT N0VT = N0.getValueType();
+ unsigned Idx = (N0VT == MVT::i32) ? X86::SUBREG_32BIT :
+ (N0VT == MVT::i16) ? X86::SUBREG_16BIT :
+ (Subtarget->is64Bit()) ? X86::SUBREG_8BIT : 0;
+
+ // If we don't have a subreg Idx, let generated ISel have a try.
+ if (Idx == 0)
+ break;
+
+ // If we have an index, generate an insert_subreg into undef.
AddToISelQueue(N0);
- if (NVT == MVT::i64 || NVT == MVT::i32 || NVT == MVT::i16) {
- SDOperand SRIdx;
- switch(N0.getValueType()) {
- case MVT::i32:
- SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3
- break;
- case MVT::i16:
- SRIdx = CurDAG->getTargetConstant(2, MVT::i32); // SubRegSet 2
- break;
- case MVT::i8:
- if (Subtarget->is64Bit())
- SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
- break;
- default: assert(0 && "Unknown any_extend!");
- }
- if (SRIdx.Val) {
- SDNode *ResNode = CurDAG->getTargetNode(X86::INSERT_SUBREG,
- NVT, N0, SRIdx);
+ SDOperand Undef =
+ SDOperand(CurDAG->getTargetNode(X86::IMPLICIT_DEF, NVT), 0);
+ SDOperand SRIdx = CurDAG->getTargetConstant(Idx, MVT::i32);
+ SDNode *ResNode = CurDAG->getTargetNode(X86::INSERT_SUBREG,
+ NVT, Undef, N0, SRIdx);
#ifndef NDEBUG
- DOUT << std::string(Indent-2, ' ') << "=> ";
- DEBUG(ResNode->dump(CurDAG));
- DOUT << "\n";
- Indent -= 2;
+ DOUT << std::string(Indent-2, ' ') << "=> ";
+ DEBUG(ResNode->dump(CurDAG));
+ DOUT << "\n";
+ Indent -= 2;
#endif
- return ResNode;
- } // Otherwise let generated ISel handle it.
- }
- break;
+ return ResNode;
}
case ISD::SIGN_EXTEND_INREG: {
SDOperand N0 = Node->getOperand(0);
AddToISelQueue(N0);
- MVT::ValueType SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
+ MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
SDOperand TruncOp = SDOperand(getTruncate(N0, SVT), 0);
unsigned Opc = 0;
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
+ default: assert(0 && "Unknown sign_extend_inreg!");
case MVT::i16:
if (SVT == MVT::i8) Opc = X86::MOVSX16rr8;
else assert(0 && "Unknown sign_extend_inreg!");
break;
case MVT::i32:
- switch (SVT) {
+ switch (SVT.getSimpleVT()) {
+ default: assert(0 && "Unknown sign_extend_inreg!");
case MVT::i8: Opc = X86::MOVSX32rr8; break;
case MVT::i16: Opc = X86::MOVSX32rr16; break;
- default: assert(0 && "Unknown sign_extend_inreg!");
}
break;
case MVT::i64:
- switch (SVT) {
+ switch (SVT.getSimpleVT()) {
+ default: assert(0 && "Unknown sign_extend_inreg!");
case MVT::i8: Opc = X86::MOVSX64rr8; break;
case MVT::i16: Opc = X86::MOVSX64rr16; break;
case MVT::i32: Opc = X86::MOVSX64rr32; break;
- default: assert(0 && "Unknown sign_extend_inreg!");
}
break;
- default: assert(0 && "Unknown sign_extend_inreg!");
}
SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);
return ResNode;
break;
}
+
+ case ISD::DECLARE: {
+ // Handle DECLARE nodes here because the second operand may have been
+ // wrapped in X86ISD::Wrapper.
+ SDOperand Chain = Node->getOperand(0);
+ SDOperand N1 = Node->getOperand(1);
+ SDOperand N2 = Node->getOperand(2);
+ if (!isa<FrameIndexSDNode>(N1))
+ break;
+ int FI = cast<FrameIndexSDNode>(N1)->getIndex();
+ if (N2.getOpcode() == ISD::ADD &&
+ N2.getOperand(0).getOpcode() == X86ISD::GlobalBaseReg)
+ N2 = N2.getOperand(1);
+ if (N2.getOpcode() == X86ISD::Wrapper &&
+ isa<GlobalAddressSDNode>(N2.getOperand(0))) {
+ GlobalValue *GV =
+ cast<GlobalAddressSDNode>(N2.getOperand(0))->getGlobal();
+ SDOperand Tmp1 = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ SDOperand Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
+ AddToISelQueue(Chain);
+ SDOperand Ops[] = { Tmp1, Tmp2, Chain };
+ return CurDAG->getTargetNode(TargetInstrInfo::DECLARE,
+ MVT::Other, Ops, 3);
+ }
+ break;
+ }
}
SDNode *ResNode = SelectCode(N);