#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
-#include "llvm/Intrinsics.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Target/TargetOptions.h"
using namespace llvm;
namespace {
bool X86SelectExtractValue(Instruction *I);
- bool X86VisitIntrinsicCall(CallInst &I, unsigned Intrinsic);
+ bool X86VisitIntrinsicCall(IntrinsicInst &I);
bool X86SelectCall(Instruction *I);
CCAssignFn *CCAssignFnForCall(unsigned CC, bool isTailCall = false);
if (Subtarget->is64Bit()) {
if (Subtarget->isTargetWin64())
return CC_X86_Win64_C;
-    else if (CC == CallingConv::Fast && isTailCall)
- return CC_X86_64_TailCall;
else
return CC_X86_64_C;
}
/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM, bool isCall) {
- User *U;
+ User *U = NULL;
unsigned Opcode = Instruction::UserOp1;
if (Instruction *I = dyn_cast<Instruction>(V)) {
Opcode = I->getOpcode();
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
Disp += SL->getElementOffset(Idx);
} else {
- uint64_t S = TD.getTypePaddedSize(GTI.getIndexedType());
+ uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
// Constant-offset addressing.
Disp += CI->getSExtValue() * S;
} else if (IndexReg == 0 &&
- (!AM.GV ||
- !getTargetMachine()->symbolicAddressesAreRIPRel()) &&
+ (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
(S == 1 || S == 2 || S == 4 || S == 8)) {
// Scaled-index addressing.
Scale = S;
return false;
// RIP-relative addresses can't have additional register operands.
- if (getTargetMachine()->symbolicAddressesAreRIPRel() &&
+ if (Subtarget->isPICStyleRIPRel() &&
(AM.Base.Reg != 0 || AM.IndexReg != 0))
return false;
if (GVar->isThreadLocal())
return false;
- // Set up the basic address.
+ // Okay, we've committed to selecting this global. Set up the basic address.
AM.GV = GV;
+
if (!isCall &&
TM.getRelocationModel() == Reloc::PIC_ &&
- !Subtarget->is64Bit())
+ !Subtarget->is64Bit()) {
+ // FIXME: How do we know Base.Reg is free??
AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(&MF);
+ }
- // Emit an extra load if the ABI requires it.
- if (Subtarget->GVRequiresExtraLoad(GV, TM, isCall)) {
- // Check to see if we've already materialized this
- // value in a register in this block.
- if (unsigned Reg = LocalValueMap[V]) {
- AM.Base.Reg = Reg;
- AM.GV = 0;
- return true;
+ // If the ABI doesn't require an extra load, return a direct reference to
+ // the global.
+ if (!Subtarget->GVRequiresExtraLoad(GV, TM, isCall)) {
+ if (Subtarget->isPICStyleRIPRel()) {
+ // Use rip-relative addressing if we can. Above we verified that the
+ // base and index registers are unused.
+ assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
+ AM.Base.Reg = X86::RIP;
+ } else if (Subtarget->isPICStyleStub() &&
+ TM.getRelocationModel() == Reloc::PIC_) {
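+      // Darwin/PIC: direct references are addressed relative to the picbase.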
+ AM.GVOpFlags = X86II::MO_PIC_BASE_OFFSET;
+ } else if (Subtarget->isPICStyleGOT()) {
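+      // 32-bit ELF PIC: reference the global as an offset from the GOT base.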
+ AM.GVOpFlags = X86II::MO_GOTOFF;
}
- // Issue load from stub if necessary.
+
+ return true;
+ }
+
+  // Check to see if we've already materialized this stub-loaded value into a
+ // register in this block. If so, just reuse it.
+ DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V);
+ unsigned LoadReg;
+ if (I != LocalValueMap.end() && I->second != 0) {
+ LoadReg = I->second;
+ } else {
+ // Issue load from stub.
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
- if (TLI.getPointerTy() == MVT::i32) {
- Opc = X86::MOV32rm;
- RC = X86::GR32RegisterClass;
- } else {
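+    // Form the address of the stub's pointer slot (a GOT or $non_lazy_ptr
+    // entry) that holds the global's real address.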
+ X86AddressMode StubAM;
+ StubAM.Base.Reg = AM.Base.Reg;
+ StubAM.GV = GV;
+
+ if (TLI.getPointerTy() == MVT::i64) {
Opc = X86::MOV64rm;
RC = X86::GR64RegisterClass;
+
+ if (Subtarget->isPICStyleRIPRel()) {
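+        // x86-64 PIC: the slot lives in the GOT and is addressed rip-relative.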
+ StubAM.GVOpFlags = X86II::MO_GOTPCREL;
+ StubAM.Base.Reg = X86::RIP;
+ }
+
+ } else {
+ Opc = X86::MOV32rm;
+ RC = X86::GR32RegisterClass;
+
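+      // 32-bit ELF PIC loads the pointer out of the GOT.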
+ if (Subtarget->isPICStyleGOT())
+ StubAM.GVOpFlags = X86II::MO_GOT;
+ else if (Subtarget->isPICStyleStub()) {
+        // On Darwin, we have multiple different stub types, and we have both
+ // PIC and -mdynamic-no-pic. Determine whether we have a stub
+ // reference and/or whether the reference is relative to the PIC base
+ // or not.
+ bool IsPIC = TM.getRelocationModel() == Reloc::PIC_;
+
+ if (!GV->hasHiddenVisibility()) {
+ // Non-hidden $non_lazy_ptr reference.
+ StubAM.GVOpFlags = IsPIC ? X86II::MO_DARWIN_NONLAZY_PIC_BASE :
+ X86II::MO_DARWIN_NONLAZY;
+ } else {
+ // Hidden $non_lazy_ptr reference.
+ StubAM.GVOpFlags = IsPIC ? X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
+ X86II::MO_DARWIN_HIDDEN_NONLAZY;
+ }
+ }
}
-
- X86AddressMode StubAM;
- StubAM.Base.Reg = AM.Base.Reg;
- StubAM.GV = AM.GV;
- unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), StubAM);
-
- // Now construct the final address. Note that the Disp, Scale,
- // and Index values may already be set here.
- AM.Base.Reg = ResultReg;
- AM.GV = 0;
-
+
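+    // Emit the load of the global's address from the stub slot.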
+ LoadReg = createResultReg(RC);
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM);
+
// Prevent loading GV stub multiple times in same MBB.
- LocalValueMap[V] = AM.Base.Reg;
+ LocalValueMap[V] = LoadReg;
}
+
+ // Now construct the final address. Note that the Disp, Scale,
+ // and Index values may already be set here.
+ AM.Base.Reg = LoadReg;
+ AM.GV = 0;
return true;
}
// If all else fails, try to materialize the value in a register.
- if (!AM.GV || !getTargetMachine()->symbolicAddressesAreRIPRel()) {
+ if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
if (AM.Base.Reg == 0) {
AM.Base.Reg = getRegForValue(V);
return AM.Base.Reg != 0;
// looking for the SETO/SETB instruction. If an instruction modifies the
// EFLAGS register before we reach the SETO/SETB instruction, then we can't
// convert the branch into a JO/JB instruction.
-
- Value *Agg = EI->getAggregateOperand();
-
- if (CallInst *CI = dyn_cast<CallInst>(Agg)) {
- Function *F = CI->getCalledFunction();
-
- if (F && F->isDeclaration()) {
- switch (F->getIntrinsicID()) {
- default: break;
- case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow: {
- const MachineInstr *SetMI = 0;
- unsigned Reg = lookUpRegForValue(EI);
-
- for (MachineBasicBlock::const_reverse_iterator
- RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
- const MachineInstr &MI = *RI;
-
- if (MI.modifiesRegister(Reg)) {
- unsigned Src, Dst, SrcSR, DstSR;
-
- if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) {
- Reg = Src;
- continue;
- }
-
- SetMI = &MI;
- break;
- }
-
- const TargetInstrDesc &TID = MI.getDesc();
- const unsigned *ImpDefs = TID.getImplicitDefs();
-
- if (TID.hasUnmodeledSideEffects()) break;
-
- bool ModifiesEFlags = false;
-
- if (ImpDefs) {
- for (unsigned u = 0; ImpDefs[u]; ++u)
- if (ImpDefs[u] == X86::EFLAGS) {
- ModifiesEFlags = true;
- break;
- }
+ if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(EI->getAggregateOperand())){
+ if (CI->getIntrinsicID() == Intrinsic::sadd_with_overflow ||
+ CI->getIntrinsicID() == Intrinsic::uadd_with_overflow) {
+ const MachineInstr *SetMI = 0;
+ unsigned Reg = lookUpRegForValue(EI);
+
+ for (MachineBasicBlock::const_reverse_iterator
+ RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
+ const MachineInstr &MI = *RI;
+
+ if (MI.modifiesRegister(Reg)) {
+ unsigned Src, Dst, SrcSR, DstSR;
+
+ if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) {
+ Reg = Src;
+ continue;
}
- if (ModifiesEFlags) break;
+ SetMI = &MI;
+ break;
}
- if (SetMI) {
- unsigned OpCode = SetMI->getOpcode();
+ const TargetInstrDesc &TID = MI.getDesc();
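+        // Stop scanning if this instruction may clobber EFLAGS before we
+        // reach the setcc.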
+ if (TID.hasUnmodeledSideEffects() ||
+ TID.hasImplicitDefOfPhysReg(X86::EFLAGS))
+ break;
+ }
+
+ if (SetMI) {
+ unsigned OpCode = SetMI->getOpcode();
- if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
- BuildMI(MBB, DL, TII.get((OpCode == X86::SETOr) ?
- X86::JO : X86::JB)).addMBB(TrueMBB);
- FastEmitBranch(FalseMBB);
- MBB->addSuccessor(TrueMBB);
- return true;
- }
+ if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
+ BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ? X86::JO : X86::JB))
+ .addMBB(TrueMBB);
+ FastEmitBranch(FalseMBB);
+ MBB->addSuccessor(TrueMBB);
+ return true;
}
}
- }
}
}
}
// Unhandled operand. Halt "fast" selection and bail.
return false;
- // First issue a copy to GR16_ or GR32_.
- unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16to16_ : X86::MOV32to32_;
+ // First issue a copy to GR16_ABCD or GR32_ABCD.
+ unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16rr : X86::MOV32rr;
const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
- ? X86::GR16_RegisterClass : X86::GR32_RegisterClass;
+ ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
unsigned CopyReg = createResultReg(CopyRC);
BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);
ExtractValueInst *EI = cast<ExtractValueInst>(I);
Value *Agg = EI->getAggregateOperand();
- if (CallInst *CI = dyn_cast<CallInst>(Agg)) {
- Function *F = CI->getCalledFunction();
-
- if (F && F->isDeclaration()) {
- switch (F->getIntrinsicID()) {
- default: break;
- case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow:
- // Cheat a little. We know that the registers for "add" and "seto" are
- // allocated sequentially. However, we only keep track of the register
- // for "add" in the value map. Use extractvalue's index to get the
- // correct register for "seto".
- UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
- return true;
- }
+ if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Agg)) {
+ switch (CI->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ // Cheat a little. We know that the registers for "add" and "seto" are
+ // allocated sequentially. However, we only keep track of the register
+ // for "add" in the value map. Use extractvalue's index to get the
+ // correct register for "seto".
+ UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
+ return true;
}
}
return false;
}
-bool X86FastISel::X86VisitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
+bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
// FIXME: Handle more intrinsics.
- switch (Intrinsic) {
+ switch (I.getIntrinsicID()) {
default: return false;
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow: {
// instructions are encountered, we use the fact that two registers were
// created sequentially to get the correct registers for the "sum" and the
// "overflow bit".
- MVT VT;
const Function *Callee = I.getCalledFunction();
const Type *RetTy =
cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));
+ MVT VT;
if (!isTypeLegal(RetTy, VT))
return false;
return false;
unsigned OpC = 0;
-
if (VT == MVT::i32)
OpC = X86::ADD32rr;
else if (VT == MVT::i64)
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
- UpdateValueMap(&I, ResultReg);
-
- ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
- BuildMI(MBB, DL, TII.get((Intrinsic == Intrinsic::sadd_with_overflow) ?
- X86::SETOr : X86::SETBr), ResultReg);
+ unsigned DestReg1 = UpdateValueMap(&I, ResultReg);
+
+ // If the add with overflow is an intra-block value then we just want to
+ // create temporaries for it like normal. If it is a cross-block value then
+ // UpdateValueMap will return the cross-block register used. Since we
+ // *really* want the value to be live in the register pair known by
+ // UpdateValueMap, we have to use DestReg1+1 as the destination register in
+ // the cross block case. In the non-cross-block case, we should just make
+ // another register for the value.
+ if (DestReg1 != ResultReg)
+ ResultReg = DestReg1+1;
+ else
+ ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
+
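+  // SETB tests the carry flag (unsigned overflow); SETO tests the overflow
+  // flag (signed overflow).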
+ unsigned Opc = X86::SETBr;
+ if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
+ Opc = X86::SETOr;
+ BuildMI(MBB, DL, TII.get(Opc), ResultReg);
return true;
}
}
return false;
// Handle intrinsic calls.
- if (Function *F = CI->getCalledFunction())
- if (F->isDeclaration())
- if (unsigned IID = F->getIntrinsicID())
- return X86VisitIntrinsicCall(*CI, IID);
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
+ return X86VisitIntrinsicCall(*II);
// Handle only C and fastcc calling conventions for now.
CallSite CS(CI);
CC != CallingConv::X86_FastCall)
return false;
+ // On X86, -tailcallopt changes the fastcc ABI. FastISel doesn't
+ // handle this for now.
+ if (CC == CallingConv::Fast && PerformTailCallOpt)
+ return false;
+
// Let SDISel handle vararg functions.
const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
return false;
unsigned CalleeOp = 0;
GlobalValue *GV = 0;
- if (CalleeAM.Base.Reg != 0) {
- assert(CalleeAM.GV == 0);
- CalleeOp = CalleeAM.Base.Reg;
- } else if (CalleeAM.GV != 0) {
- assert(CalleeAM.GV != 0);
+ if (CalleeAM.GV != 0) {
GV = CalleeAM.GV;
+ } else if (CalleeAM.Base.Reg != 0) {
+ CalleeOp = CalleeAM.Base.Reg;
} else
return false;
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, false, TM, ArgLocs);
+ CCState CCInfo(CC, false, TM, ArgLocs, I->getParent()->getContext());
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));
// Get a count of how many bytes are to be pushed on the stack.
// ELF / PIC requires GOT in the EBX register before function calls via PLT
// GOT pointer.
- if (!Subtarget->is64Bit() &&
- TM.getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT()) {
+ if (Subtarget->isPICStyleGOT()) {
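+    // (isPICStyleGOT() is only set for 32-bit ELF PIC code, so no separate
+    // is64Bit or relocation-model checks are needed.)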
TargetRegisterClass *RC = X86::GR32RegisterClass;
unsigned Base = getInstrInfo()->getGlobalBaseReg(&MF);
bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC);
assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
Emitted = true;
}
-
+
// Issue the call.
- unsigned CallOpc = CalleeOp
- ? (Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r)
- : (Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32);
- MachineInstrBuilder MIB = CalleeOp
- ? BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp)
- : BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV);
+ MachineInstrBuilder MIB;
+ if (CalleeOp) {
+ // Register-indirect call.
+ unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r;
+ MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp);
+
+ } else {
+ // Direct call.
+ assert(GV && "Not a direct call");
+ unsigned CallOpc =
+ Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
+
+ // See if we need any target-specific flags on the GV operand.
+ unsigned char OpFlags = 0;
+
+ // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
+  // external symbols must go through the PLT in PIC mode. If the symbol
+ // has hidden or protected visibility, or if it is static or local, then
+ // we don't need to use the PLT - we can directly call it.
+ if (Subtarget->isTargetELF() &&
+ TM.getRelocationModel() == Reloc::PIC_ &&
+ GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
+ OpFlags = X86II::MO_PLT;
+ } else if (Subtarget->isPICStyleStub() &&
+ (GV->isDeclaration() || GV->isWeakForLinker()) &&
+ Subtarget->getDarwinVers() < 9) {
+ // PC-relative references to external symbols should go through $stub,
+      // unless we're building with the Leopard linker or later, which
+ // automatically synthesizes these stubs.
+ OpFlags = X86II::MO_DARWIN_STUB;
+ }
+
+ MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags);
+ }
// Add an implicit use GOT pointer in EBX.
- if (!Subtarget->is64Bit() &&
- TM.getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT())
+ if (Subtarget->isPICStyleGOT())
MIB.addReg(X86::EBX);
// Add implicit physical register uses to the call.
// Now handle call return value (if any).
if (RetVT.getSimpleVT() != MVT::isVoid) {
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CC, false, TM, RVLocs);
+ CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);
// Copy all of the result registers out of their specified physreg.
return X86SelectFPTrunc(I);
case Instruction::ExtractValue:
return X86SelectExtractValue(I);
+ case Instruction::IntToPtr: // Deliberate fall-through.
+ case Instruction::PtrToInt: {
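+    // Same-size pointer<->integer casts are register no-ops; otherwise they
+    // reduce to a zero-extension or a truncation.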
+ MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
+ MVT DstVT = TLI.getValueType(I->getType());
+ if (DstVT.bitsGT(SrcVT))
+ return X86SelectZExt(I);
+ if (DstVT.bitsLT(SrcVT))
+ return X86SelectTrunc(I);
+ unsigned Reg = getRegForValue(I->getOperand(0));
+ if (Reg == 0) return false;
+ UpdateValueMap(I, Reg);
+ return true;
+ }
}
return false;
else
Opc = X86::LEA64r;
unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+ addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
return ResultReg;
}
return 0;
unsigned Align = TD.getPrefTypeAlignment(C->getType());
if (Align == 0) {
// Alignment of vector types. FIXME!
- Align = TD.getTypePaddedSize(C->getType());
+ Align = TD.getTypeAllocSize(C->getType());
}
// x86-32 PIC requires a PIC base register for constant pools.
unsigned PICBase = 0;
- if (TM.getRelocationModel() == Reloc::PIC_ &&
- !Subtarget->is64Bit())
+ unsigned char OpFlag = 0;
+ if (Subtarget->isPICStyleStub() &&
+ TM.getRelocationModel() == Reloc::PIC_) { // Not dynamic-no-pic
+ OpFlag = X86II::MO_PIC_BASE_OFFSET;
+ PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
+ } else if (Subtarget->isPICStyleGOT()) {
+ OpFlag = X86II::MO_GOTOFF;
PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
+ } else if (Subtarget->isPICStyleRIPRel() &&
+ TM.getCodeModel() == CodeModel::Small) {
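+    // x86-64 small code model: constant-pool entries are rip-relative.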
+ PICBase = X86::RIP;
+ }
// Create the load from the constant pool.
unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
unsigned ResultReg = createResultReg(RC);
- addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), MCPOffset,
- PICBase);
+ addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg),
+ MCPOffset, PICBase, OpFlag);
return ResultReg;
}
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+ addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
return ResultReg;
}