//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for X86.
addRegisterClass(MVT::i16, X86::R16RegisterClass);
addRegisterClass(MVT::i32, X86::R32RegisterClass);
addRegisterClass(MVT::f64, X86::RFPRegisterClass);
-
+
// FIXME: Eliminate these two classes when legalize can handle promotions
// well.
/**/ addRegisterClass(MVT::i1, X86::R8RegisterClass);
// These should be promoted to a larger select which is supported.
/**/ setOperationAction(ISD::SELECT , MVT::i1 , Promote);
setOperationAction(ISD::SELECT , MVT::i8 , Promote);
-
+
computeRegisterProperties();
-
+
addLegalFPImmediate(+0.0); // FLD0
addLegalFPImmediate(+1.0); // FLD1
addLegalFPImmediate(-0.0); // FLD0/FCHS
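// (These are registered as legal immediates because each can be materialized
// with the single x87 instruction noted above, optionally followed by FCHS,
// rather than requiring a constant-pool load.)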
// [ESP] -- return address
// [ESP + 4] -- first argument (leftmost lexically)
// [ESP + 8] -- second argument, if first argument is four bytes in size
- // ...
+ // ...
//
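// As a hypothetical illustration of the layout above: for 'int f(int a, int b)',
// 'a' would be found at [ESP + 4] and 'b' at [ESP + 8] on entry.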
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
-
+
unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
MVT::ValueType ObjectVT = getValueType(I->getType());
}
// Create the frame index object for this incoming parameter...
int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
-
+
// Create the SelectionDAG nodes corresponding to a load from this parameter
SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
}
return std::make_pair(Result, Chain);
}
-
+
std::pair<SDOperand, SDOperand> X86TargetLowering::
LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
MachineFunction &MF = DAG.getMachineFunction();
ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
}
-
+
SDOperand RetAddrFI = DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
if (!isFrameAddress)
RegBase,
FrameIndexBase,
} BaseType;
-
+
struct { // This is really a union, discriminated by BaseType!
SDOperand Reg;
int FrameIndex;
} Base;
-
+
unsigned Scale;
SDOperand IndexReg;
unsigned Disp;
GlobalValue *GV;
-
+
X86ISelAddressMode()
: BaseType(RegBase), Scale(1), IndexReg(), Disp(), GV(0) {
}
break;
}
}
-
+
// Insert FP_REG_KILL instructions into basic blocks that need them. This
// only occurs due to the floating point stackifier not being aggressive
// enough to handle arbitrary global stackification.
BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
++NumFPKill;
}
-
+
// Clear state used for selection.
ExprMap.clear();
RegPressureMap.clear();
} else if (IAM.IndexReg.Val) {
Result.IndexReg = SelectExpr(IAM.IndexReg);
}
-
+
switch (IAM.BaseType) {
case X86ISelAddressMode::RegBase:
Result.BaseType = X86AddressMode::RegBase;
ConstantSDNode *AddVal =
cast<ConstantSDNode>(MulVal.Val->getOperand(1));
AM.Disp += AddVal->getValue() * CN->getValue();
- } else {
+ } else {
Reg = N.Val->getOperand(0);
}
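// The CMOV tables below hold one opcode per condition code, in the order
// e, ne, l, le, g, ge, b, be, a, ae, p, np; the FP table leaves some slots
// empty because the x87 FCMOV family only covers the equality/unsigned
// conditions.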
static const unsigned CMOVTAB16[] = {
X86::CMOVE16rr, X86::CMOVNE16rr, X86::CMOVL16rr, X86::CMOVLE16rr,
X86::CMOVG16rr, X86::CMOVGE16rr, X86::CMOVB16rr, X86::CMOVBE16rr,
- X86::CMOVA16rr, X86::CMOVAE16rr, X86::CMOVP16rr, X86::CMOVNP16rr,
+ X86::CMOVA16rr, X86::CMOVAE16rr, X86::CMOVP16rr, X86::CMOVNP16rr,
};
static const unsigned CMOVTAB32[] = {
X86::CMOVE32rr, X86::CMOVNE32rr, X86::CMOVL32rr, X86::CMOVLE32rr,
X86::CMOVG32rr, X86::CMOVGE32rr, X86::CMOVB32rr, X86::CMOVBE32rr,
- X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
+ X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
};
static const unsigned CMOVTABFP[] = {
X86::FCMOVE , X86::FCMOVNE, /*missing*/0, /*missing*/0,
return true;
}
}
-
+
return false;
}
// Just use the specified register as our input.
return dyn_cast<RegSDNode>(Node)->getReg();
}
-
+
unsigned &Reg = ExprMap[N];
if (Reg) return Reg;
-
+
switch (N.getOpcode()) {
default:
Reg = Result = (N.getValueType() != MVT::Other) ?
ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
break;
}
-
+
switch (N.getOpcode()) {
default:
Node->dump();
X86AddressMode AM;
EmitFoldedLoad(N.getOperand(0), AM);
addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
-
+
return Result;
}
Tmp1 = SelectExpr(N.getOperand(0));
BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
return Result;
- }
+ }
case ISD::SIGN_EXTEND: {
int DestIs16 = N.getValueType() == MVT::i16;
int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
}
Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
-
+
if (PromoteType != MVT::Other) {
Tmp2 = MakeReg(PromoteType);
BuildMI(BB, PromoteOpcode, 1, Tmp2).addReg(Tmp1);
MachineConstantPool *CP = F->getConstantPool();
unsigned Zero = MakeReg(MVT::i32);
Constant *Null = Constant::getNullValue(Type::UIntTy);
- addConstantPoolReference(BuildMI(BB, X86::LEA32r, 5, Zero),
+ addConstantPoolReference(BuildMI(BB, X86::LEA32r, 5, Zero),
CP->getConstantPoolIndex(Null));
unsigned Offset = MakeReg(MVT::i32);
Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
-
+
addConstantPoolReference(BuildMI(BB, X86::LEA32r, 5, Offset),
CP->getConstantPoolIndex(OffsetCst));
unsigned Addr = MakeReg(MVT::i32);
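// (Aside: 0x5f800000 is the raw IEEE-754 single-precision bit pattern for
// 2^64. Together with the 0.0 entry above it presumably acts as a small bias
// table used to correct the result of a signed FILD when the original
// unsigned value had its top bit set.)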
// Reload the modified control word now...
addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
-
+
// Restore the memory image of control word to original value
addFrameReference(BuildMI(BB, X86::MOV8mr, 5),
CWFrameIdx, 1).addReg(HighPartOfCW);
unsigned MovOpc, LowReg, HiReg;
switch (N.getValueType()) {
default: assert(0 && "Unsupported VT!");
- case MVT::i8:
+ case MVT::i8:
MovOpc = X86::MOV8rr;
LowReg = X86::AL;
HiReg = X86::AH;
BuildMI(BB, Opc, 1).addReg(Tmp2);
BuildMI(BB, MovOpc, 1, Result).addReg(HiReg);
return Result;
- }
+ }
case ISD::SUB:
case ISD::MUL:
static const unsigned ANDTab[] = {
X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, 0,
X86::AND8rm, X86::AND16rm, X86::AND32rm, 0, 0,
- X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, 0,
+ X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, 0,
};
static const unsigned ORTab[] = {
X86::OR8ri, X86::OR16ri, X86::OR32ri, 0, 0,
.addReg(ShiftOpLo);
// TmpReg3 = shl inLo, CL
BuildMI(BB, X86::SHL32rCL, 1, TmpReg3).addReg(ShiftOpLo);
-
+
// Set the flags to indicate whether the shift was by more than 32 bits.
BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
-
+
// DestHi = (>32) ? TmpReg3 : TmpReg2;
- BuildMI(BB, X86::CMOVNE32rr, 2,
+ BuildMI(BB, X86::CMOVNE32rr, 2,
Result+1).addReg(TmpReg2).addReg(TmpReg3);
// DestLo = (>32) ? TmpReg : TmpReg3;
BuildMI(BB, X86::CMOVNE32rr, 2,
BuildMI(BB, X86::SHRD32rrCL,2,TmpReg2).addReg(ShiftOpLo)
.addReg(ShiftOpHi);
// TmpReg3 = s[ah]r inHi, CL
- BuildMI(BB, N.getOpcode() == ISD::SRA_PARTS ? X86::SAR32rCL
+ BuildMI(BB, N.getOpcode() == ISD::SRA_PARTS ? X86::SAR32rCL
: X86::SHR32rCL, 1, TmpReg3)
.addReg(ShiftOpHi);
-
+
// Set the flags to indicate whether the shift was by more than 32 bits.
BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
-
+
// DestLo = (>32) ? TmpReg3 : TmpReg2;
- BuildMI(BB, X86::CMOVNE32rr, 2,
+ BuildMI(BB, X86::CMOVNE32rr, 2,
Result).addReg(TmpReg2).addReg(TmpReg3);
-
+
// DestHi = (>32) ? TmpReg : TmpReg3;
- BuildMI(BB, X86::CMOVNE32rr, 2,
+ BuildMI(BB, X86::CMOVNE32rr, 2,
Result+1).addReg(TmpReg3).addReg(TmpReg);
}
return Result+N.ResNo;
BuildMI(BB, SHROpc, 2, TmpReg2).addReg(TmpReg).addImm(32-Log);
unsigned TmpReg3 = MakeReg(N.getValueType());
BuildMI(BB, ADDOpc, 2, TmpReg3).addReg(Tmp1).addReg(TmpReg2);
-
+
unsigned TmpReg4 = isNeg ? MakeReg(N.getValueType()) : Result;
BuildMI(BB, SAROpc, 2, TmpReg4).addReg(TmpReg3).addImm(Log);
if (isNeg)
}
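// (The sequence above is presumably the standard signed divide-by-2^Log
// expansion: TmpReg holds the sign-fill of the dividend, the unsigned shift
// turns it into a bias of 2^Log-1 for negative values, the bias is added in,
// and the arithmetic shift by Log completes the division; the trailing negate
// handles a negative constant divisor.)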
// Emit the DIV/IDIV instruction.
- BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
+ BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
// Get the result of the divide or rem.
BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp1);
return Result;
}
-
+
switch (N.getValueType()) {
default: assert(0 && "Cannot shift this type!");
case MVT::i8: Opc = X86::SHL8ri; break;
<< " the stack alignment yet!";
abort();
}
-
+
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
Select(N.getOperand(0));
BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
X86::SHR8mi, X86::SHR16mi, X86::SHR32mi,
/*Have to put the reg in CL*/0, 0, 0,
};
-
+
const unsigned *TabPtr = 0;
switch (StVal.getOpcode()) {
default:
case ISD::UDIV:
case ISD::SREM:
case ISD::UREM: return false;
-
+
case ISD::ADD: TabPtr = ADDTAB; break;
case ISD::SUB: TabPtr = SUBTAB; break;
case ISD::AND: TabPtr = ANDTAB; break;
case ISD::SRA: TabPtr = SARTAB; break;
case ISD::SRL: TabPtr = SHRTAB; break;
}
-
+
// Handle: [mem] op= CST
SDOperand Op0 = StVal.getOperand(0);
SDOperand Op1 = StVal.getOperand(1);
case MVT::i16: Opc = TabPtr[1]; break;
case MVT::i32: Opc = TabPtr[2]; break;
}
-
+
if (Opc) {
if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
assert(0 && "Already emitted?");
} else {
SelectAddress(TheLoad.getOperand(1), AM);
Select(TheLoad.getOperand(0));
- }
+ }
if (StVal.getOpcode() == ISD::ADD) {
if (CN->getValue() == 1) {
}
}
}
-
+
addFullAddress(BuildMI(BB, Opc, 4+1),AM).addImm(CN->getValue());
return true;
}
}
-
+
// If we have [mem] = V op [mem], try to turn it into:
// [mem] = [mem] op V.
if (Op1 == TheLoad && StVal.getOpcode() != ISD::SUB &&
StVal.getOpcode() != ISD::SHL && StVal.getOpcode() != ISD::SRA &&
StVal.getOpcode() != ISD::SRL)
std::swap(Op0, Op1);
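// (The swap is only attempted for the remaining operators because they are
// commutative; SUB and the shifts, excluded by the check above, would change
// meaning if their operands were exchanged.)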
-
+
if (Op0 != TheLoad) return false;
switch (Op0.getValueType()) {
case ISD::EntryToken: return; // Noop
case ISD::TokenFactor:
if (Node->getNumOperands() == 2) {
- bool OneFirst =
+ bool OneFirst =
getRegPressure(Node->getOperand(1))>getRegPressure(Node->getOperand(0));
Select(Node->getOperand(OneFirst));
Select(Node->getOperand(!OneFirst));
Select(N.getOperand(0));
}
Tmp2 = cast<RegSDNode>(N)->getReg();
-
+
if (Tmp1 != Tmp2) {
switch (N.getOperand(1).getValueType()) {
default: assert(0 && "Invalid type for operation!");
case MVT::i1: Opc = X86::MOV8mr; break;
case MVT::f32: Opc = X86::FST32m; break;
}
-
+
std::vector<std::pair<unsigned, unsigned> > RP;
RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
case MVT::i32: Opc = X86::MOV32mr; break;
case MVT::f64: Opc = X86::FST64m; break;
}
-
+
std::vector<std::pair<unsigned, unsigned> > RP;
RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
case ISD::ADJCALLSTACKUP:
Select(N.getOperand(0));
Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
-
+
Opc = N.getOpcode() == ISD::ADJCALLSTACKDOWN ? X86::ADJCALLSTACKDOWN :
X86::ADJCALLSTACKUP;
BuildMI(BB, Opc, 1).addImm(Tmp1);
/// description file.
///
FunctionPass *llvm::createX86PatternInstructionSelector(TargetMachine &TM) {
- return new ISel(TM);
+ return new ISel(TM);
}
//===-- X86ISelSimple.cpp - A simple instruction selector for x86 ---------===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines a simple peephole instruction selector for the x86 target
MachineBasicBlock *MBB,
MachineBasicBlock::iterator MBBI);
void visitSelectInst(SelectInst &SI);
-
-
+
+
// Memory Instructions
void visitLoadInst(LoadInst &I);
void visitStoreInst(StoreInst &I);
void visitAllocaInst(AllocaInst &I);
void visitMallocInst(MallocInst &I);
void visitFreeInst(FreeInst &I);
-
+
// Other operators
void visitShiftInst(ShiftInst &I);
void visitPHINode(PHINode &I) {} // PHI nodes handled by second pass
void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
unsigned DestReg, const Type *DestTy,
unsigned Op0Reg, unsigned Op1Reg);
- void doMultiplyConst(MachineBasicBlock *MBB,
+ void doMultiplyConst(MachineBasicBlock *MBB,
MachineBasicBlock::iterator MBBI,
unsigned DestReg, const Type *DestTy,
unsigned Op0Reg, unsigned Op1Val);
// Emit code for a 'SHLD DestReg, Op0, Op1, Amt' operation, where Amt is a
// constant.
- void doSHLDConst(MachineBasicBlock *MBB,
+ void doSHLDConst(MachineBasicBlock *MBB,
MachineBasicBlock::iterator MBBI,
unsigned DestReg, unsigned Op0Reg, unsigned Op1Reg,
unsigned Op1Val);
-
+
/// emitSelectOperation - Common code shared between visitSelectInst and the
/// constant expression support.
void emitSelectOperation(MachineBasicBlock *MBB,
} else if (CastInst *CI = dyn_cast<CastInst>(V)) {
// Do not emit noop casts at all, unless it's a double -> float cast.
if (getClassB(CI->getType()) == getClassB(CI->getOperand(0)->getType()) &&
- (CI->getType() != Type::FloatTy ||
+ (CI->getType() != Type::FloatTy ||
CI->getOperand(0)->getType() != Type::DoubleTy))
return getReg(CI->getOperand(0), MBB, IPt);
} else if (AllocaInst *AI = dyn_castFixedAlloca(V)) {
unsigned TySize = TM.getTargetData().getTypeSize(Ty);
TySize *= CUI->getValue(); // Get total allocated size...
unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);
-
+
// Create a new stack object using the frame manager...
int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
AllocaMap.insert(I, std::make_pair(AI, FrameIdx));
else if (CFP->isExactlyValue(-0.0)) {
unsigned Tmp = makeAnotherReg(Type::DoubleTy);
BuildMI(*MBB, IP, X86::FLD0, 0, Tmp);
- BuildMI(*MBB, IP, X86::FCHS, 1, R).addReg(Tmp);
+ BuildMI(*MBB, IP, X86::FCHS, 1, R).addReg(Tmp);
} else if (CFP->isExactlyValue(-1.0)) {
unsigned Tmp = makeAnotherReg(Type::DoubleTy);
BuildMI(*MBB, IP, X86::FLD1, 0, Tmp);
- BuildMI(*MBB, IP, X86::FCHS, 1, R).addReg(Tmp);
+ BuildMI(*MBB, IP, X86::FCHS, 1, R).addReg(Tmp);
} else { // FIXME: PI, other native values
// FIXME: 2*PI -> LDPI + FADD
}
unsigned CPI = CP->getConstantPoolIndex(CFP);
-
+
assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLD32m : X86::FLD64m;
addConstantPoolReference(BuildMI(*MBB, IP, LoadOpcode, 4, R), CPI);
// [ESP] -- return address
// [ESP + 4] -- first argument (leftmost lexically)
// [ESP + 8] -- second argument, if first argument is four bytes in size
- // ...
+ // ...
//
unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
MachineFrameInfo *MFI = F->getFrameInfo();
// Switch the FPU to 64-bit precision mode for better compatibility and speed.
int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
-
+
// Set the high part to be 64-bit precision.
addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
CWFrameIdx, 1).addImm(2);
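// (The immediate 2 lands in the precision-control field, bits 8-9 of the x87
// control word, selecting the 53-bit-significand mode used for 64-bit
// doubles, i.e. the "64-bit precision" referred to above.)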
// predecessor. Recycle it.
ValReg = EntryIt->second;
- } else {
+ } else {
// Get the incoming value into a virtual register.
//
Value *Val = PN->getIncomingValue(i);
// might be arbitrarily complex if it is a constant expression),
// just insert the computation at the top of the basic block.
MachineBasicBlock::iterator PI = PredMBB->begin();
-
+
// Skip over any PHI nodes though!
while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
++PI;
-
+
ValReg = getReg(Val, PredMBB, PI);
}
// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold
// it into the conditional branch or select instruction which is the only user
// of the cc instruction. This is the case if the conditional branch is the
-// only user of the setcc. We also don't handle long arguments below, so we
+// only user of the setcc. We also don't handle long arguments below, so we
// reject them here as well.
//
static SetCondInst *canFoldSetCCIntoBranchOrSelect(Value *V) {
static const unsigned TESTTab[] = {
X86::TEST8ri, X86::TEST16ri, X86::TEST32ri
};
-
+
// Emit test X, i
unsigned LHS = getReg(Op0I->getOperand(0), MBB, IP);
unsigned Imm =
cast<ConstantInt>(Op0I->getOperand(1))->getRawValue();
BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(LHS).addImm(Imm);
-
+
if (OpNum == 2) return 6; // Map jl -> js
if (OpNum == 3) return 7; // Map jg -> jns
return OpNum;
}
/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
-/// register, then move it to wherever the result should be.
+/// register, then move it to wherever the result should be.
///
void X86ISel::visitSetCondInst(SetCondInst &I) {
if (canFoldSetCCIntoBranchOrSelect(&I))
emitSelectOperation(BB, MII, SI.getCondition(), SI.getTrueValue(),
SI.getFalseValue(), DestReg);
}
-
+
/// emitSelect - Common code shared between visitSelectInst and the constant
/// expression support.
void X86ISel::emitSelectOperation(MachineBasicBlock *MBB,
Value *Cond, Value *TrueVal, Value *FalseVal,
unsigned DestReg) {
unsigned SelectClass = getClassB(TrueVal->getType());
-
+
// We don't support 8-bit conditional moves. If we have incoming constants,
// transform them into 16-bit constants to avoid having a run-time conversion.
if (SelectClass == cByte) {
unsigned Opcode;
if (SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(Cond)) {
// We successfully folded the setcc into the select instruction.
-
+
unsigned OpNum = getSetCCNumber(SCI->getOpcode());
OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), MBB,
IP);
const Type *CompTy = SCI->getOperand(0)->getType();
bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;
-
+
// LLVM -> X86 signed X86 unsigned
// ----- ---------- ------------
// seteq -> cmovNE cmovNE
// ----
// cmovNS // Used by comparison with 0 optimization
// cmovS
-
+
switch (SelectClass) {
default: assert(0 && "Unknown value class!");
case cFP: {
// Long comparisons end up in the BL register.
CondReg = X86::BL;
}
-
+
BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
Opcode = X86::FCMOVE;
}
BuildMI(BB, X86::JNE, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
} else {
BuildMI(BB, X86::JE, 1).addMBB(MBBMap[BI.getSuccessor(1)]);
-
+
if (BI.getSuccessor(0) != NextBB)
BuildMI(BB, X86::JMP, 1).addMBB(MBBMap[BI.getSuccessor(0)]);
}
const Type *CompTy = SCI->getOperand(0)->getType();
bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;
-
+
// LLVM -> X86 signed X86 unsigned
// ----- ---------- ------------
{ X86::JE, X86::JNE, X86::JL, X86::JGE, X86::JG, X86::JLE,
X86::JS, X86::JNS },
};
-
+
if (BI.getSuccessor(0) != NextBB) {
BuildMI(BB, OpcodeTab[isSigned][OpNum], 1)
.addMBB(MBBMap[BI.getSuccessor(0)]);
}
ArgOffset += 4; // 8 byte entry, not 4.
break;
-
+
case cFP:
if (ConstantFP *CFP = dyn_cast_or_null<ConstantFP>(Args[i].Val)) {
// Store constant FP values with integer instructions to avoid having
unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0;
doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
-}
+}
/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
/// function, lowering any calls to unknown intrinsic functions into the
BuildMI(BB, Opc[Class], 0);
return;
}
-
+
default: assert(0 && "Error: unknown intrinsics should have been lowered!");
}
}
// Special case: op Reg, load [mem]
if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1) && Class != cLong &&
- Op0->hasOneUse() &&
+ Op0->hasOneUse() &&
isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op0), B))
if (!B.swapOperands())
std::swap(Op0, Op1); // Make sure any loads are in the RHS.
// Arithmetic operators
{ X86::ADD8rm, X86::ADD16rm, X86::ADD32rm }, // ADD
{ X86::SUB8rm, X86::SUB16rm, X86::SUB32rm }, // SUB
-
+
// Bitwise operators
{ X86::AND8rm, X86::AND16rm, X86::AND32rm }, // AND
{ X86:: OR8rm, X86:: OR16rm, X86:: OR32rm }, // OR
} else {
X86AddressMode AM;
getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), AM);
-
+
addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), AM);
}
return;
// If this is a floating point subtract, check to see if we can fold the first
// operand in.
if (Class == cFP && OperatorClass == 1 &&
- isa<LoadInst>(Op0) &&
+ isa<LoadInst>(Op0) &&
isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op0), B)) {
const Type *Ty = Op0->getType();
assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
} else {
X86AddressMode AM;
getAddressingMode(cast<LoadInst>(Op0)->getOperand(0), AM);
-
+
addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), AM);
}
return;
DestReg).addReg(Op0r), CPI);
return;
}
-
+
// Special case: R1 = op <const fp>, R2
if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op0))
if (CFP->isExactlyValue(-0.0) && OperatorClass == 1) {
{ X86::FADD32m, X86::FSUBR32m, X86::FMUL32m, X86::FDIVR32m }, // Float
{ X86::FADD64m, X86::FSUBR64m, X86::FMUL64m, X86::FDIVR64m }, // Double
};
-
+
assert((Ty == Type::FloatTy||Ty == Type::DoubleTy) && "Unknown FP type!");
unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass];
unsigned Op1r = getReg(Op1, BB, IP);
void X86ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
MachineBasicBlock::iterator IP,
Value *Op0, Value *Op1,
- unsigned OperatorClass,
+ unsigned OperatorClass,
unsigned DestReg) {
unsigned Class = getClassB(Op0->getType());
if (CI->isNullValue()) {
unsigned op1Reg = getReg(Op1, MBB, IP);
BuildMI(*MBB, IP, NEGTab[Class], 1, DestReg).addReg(op1Reg);
-
+
if (Class == cLong) {
// We just emitted: Dl = neg Sl
// Now emit : T = addc Sh, 0
// sub C, X -> tmp = neg X; DestReg = add tmp, C. This is better
// than copying C into a temporary register, because of register
// pressure (tmp and destreg can share a register).
- static unsigned const ADDRITab[] = {
+ static unsigned const ADDRITab[] = {
X86::ADD8ri, X86::ADD16ri, X86::ADD32ri, 0, X86::ADD32ri
};
unsigned op1Reg = getReg(Op1, MBB, IP);
BuildMI(*MBB, IP, INCTab[Class], 1, DestReg).addReg(Op0r);
return;
}
-
+
static const unsigned OpcodeTab[][5] = {
// Arithmetic operators
{ X86::ADD8ri, X86::ADD16ri, X86::ADD32ri, 0, X86::ADD32ri }, // ADD
{ X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, X86::SUB32ri }, // SUB
-
+
// Bitwise operators
{ X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, X86::AND32ri }, // AND
{ X86:: OR8ri, X86:: OR16ri, X86:: OR32ri, 0, X86::OR32ri }, // OR
{ X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, X86::XOR32ri }, // XOR
};
-
+
unsigned Opcode = OpcodeTab[OperatorClass][Class];
unsigned Op1l = cast<ConstantInt>(Op1C)->getRawValue();
BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
return;
}
-
+
// If this is a long value and the high or low bits have a special
// property, emit some special cases.
unsigned Op1h = cast<ConstantInt>(Op1C)->getRawValue() >> 32LL;
-
+
// If the constant is zero in the low 32-bits, just copy the low part
// across and apply the normal 32-bit operation to the high parts. There
// will be no carry or borrow into the top.
.addReg(Op0r+1).addImm(Op1h);
return;
}
-
+
// If this is a logical operation and the top 32-bits are zero, just
// operate on the lower 32.
if (Op1h == 0 && OperatorClass > 1) {
BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
return;
}
-
+
// TODO: We could handle lots of other special cases here, such as AND'ing
// with 0xFFFFFFFF00000000 -> noop, etc.
-
+
// Otherwise, code generate the full operation with a constant.
static const unsigned TopTab[] = {
X86::ADC32ri, X86::SBB32ri, X86::AND32ri, X86::OR32ri, X86::XOR32ri
};
-
+
BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
BuildMI(*MBB, IP, TopTab[OperatorClass], 2, DestReg+1)
.addReg(Op0r+1).addImm(Op1h);
// Arithmetic operators
{ X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, 0, X86::ADD32rr }, // ADD
{ X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, 0, X86::SUB32rr }, // SUB
-
+
// Bitwise operators
{ X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, X86::AND32rr }, // AND
{ X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0, X86:: OR32rr }, // OR
{ X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, X86::XOR32rr }, // XOR
};
-
+
unsigned Opcode = OpcodeTab[OperatorClass][Class];
unsigned Op0r = getReg(Op0, MBB, IP);
unsigned Op1r = getReg(Op1, MBB, IP);
BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
-
+
if (Class == cLong) { // Handle the upper 32 bits of long values...
static const unsigned TopTab[] = {
X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
return;
}
}
-
+
if (Class == cShort) {
BuildMI(*MBB, IP, X86::IMUL16rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
return;
// Most general case, emit a normal multiply...
TmpReg = makeAnotherReg(DestTy);
BuildMI(*MBB, IP, MOVriTab[Class], 1, TmpReg).addImm(ConstRHS);
-
+
// Emit a MUL to multiply op0Reg by the constant we just moved into TmpReg,
// putting the result in DestReg.
doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg);
const Type *Ty = Op0->getType();
assert((Ty == Type::FloatTy||Ty == Type::DoubleTy) && "Unknown FP type!");
unsigned Opcode = Ty == Type::FloatTy ? X86::FMUL32m : X86::FMUL64m;
-
+
unsigned Op0r = getReg(Op0);
if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
unsigned FI = getFixedSizedAllocaFI(AI);
} else {
X86AddressMode AM;
getAddressingMode(LI->getOperand(0), AM);
-
+
addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
}
return;
emitMultiply(BB, IP, Op0, Op1, ResultReg);
}
-void X86ISel::emitMultiply(MachineBasicBlock *MBB,
+void X86ISel::emitMultiply(MachineBasicBlock *MBB,
MachineBasicBlock::iterator IP,
Value *Op0, Value *Op1, unsigned DestReg) {
MachineBasicBlock &BB = *MBB;
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
unsigned CLow = CI->getRawValue();
unsigned CHi = CI->getRawValue() >> 32;
-
+
if (CLow == 0) {
// If the low part of the constant is all zeros, things are simple.
BuildMI(BB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
doMultiplyConst(&BB, IP, DestReg+1, Type::UIntTy, Op0Reg, CHi);
return;
}
-
+
// Multiply the two low parts... capturing carry into EDX
unsigned OverflowReg = 0;
if (CLow == 1) {
BuildMI(BB, IP, X86::MOV32ri, 1, Op1RegL).addImm(CLow);
BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1RegL); // AL*BL
-
+
BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX); // AL*BL
BuildMI(BB, IP, X86::MOV32rr, 1,
OverflowReg).addReg(X86::EDX); // AL*BL >> 32
}
-
+
unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
doMultiplyConst(&BB, IP, AHBLReg, Type::UIntTy, Op0Reg+1, CLow);
-
+
unsigned AHBLplusOverflowReg;
if (OverflowReg) {
AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
} else {
AHBLplusOverflowReg = AHBLReg;
}
-
+
if (CHi == 0) {
BuildMI(BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(AHBLplusOverflowReg);
} else {
unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
doMultiplyConst(&BB, IP, ALBHReg, Type::UIntTy, Op0Reg, CHi);
-
+
BuildMI(BB, IP, X86::ADD32rr, 2, // AL*BH + AH*BL + (AL*BL >> 32)
DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
}
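// The register-register expansion below follows the same identity as the
// constant case above (a sketch of the algebra, using the AL/AH, BL/BH names
// from the comments):
//   (AH*2^32 + AL) * (BH*2^32 + BL) == AL*BL + (AH*BL + AL*BH)*2^32 (mod 2^64)
// The MUL32r leaves AL*BL in EDX:EAX, so EDX already holds the carry of the
// low product into the high word.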
// Multiply the two low parts... capturing carry into EDX
BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1Reg); // AL*BL
-
+
unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX); // AL*BL
BuildMI(BB, IP, X86::MOV32rr, 1,
OverflowReg).addReg(X86::EDX); // AL*BL >> 32
-
+
unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
BuildMI(BB, IP, X86::IMUL32rr, 2,
AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);
-
+
unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
BuildMI(BB, IP, X86::ADD32rr, 2, // AH*BL+(AL*BL >> 32)
AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
-
+
unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
BuildMI(BB, IP, X86::IMUL32rr, 2,
ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);
-
+
BuildMI(BB, IP, X86::ADD32rr, 2, // AL*BH + AH*BL + (AL*BL >> 32)
DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
}
const Type *Ty = Op0->getType();
assert((Ty == Type::FloatTy||Ty == Type::DoubleTy) && "Unknown FP type!");
unsigned Opcode = Ty == Type::FloatTy ? X86::FDIV32m : X86::FDIV64m;
-
+
unsigned Op0r = getReg(Op0);
if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
unsigned FI = getFixedSizedAllocaFI(AI);
} else {
X86AddressMode AM;
getAddressingMode(LI->getOperand(0), AM);
-
+
addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
}
return;
const Type *Ty = Op0->getType();
assert((Ty == Type::FloatTy||Ty == Type::DoubleTy) && "Unknown FP type!");
unsigned Opcode = Ty == Type::FloatTy ? X86::FDIVR32m : X86::FDIVR64m;
-
+
unsigned Op1r = getReg(Op1);
if (AllocaInst *AI = dyn_castFixedAlloca(LI->getOperand(0))) {
unsigned FI = getFixedSizedAllocaFI(AI);
unsigned TmpReg1 = makeAnotherReg(Op0->getType());
BuildMI(*BB, IP, ANDOpcode[Class], 2, TmpReg1).addReg(Op0Reg).addImm(1);
-
+
unsigned TmpReg2 = makeAnotherReg(Op0->getType());
BuildMI(*BB, IP, XOROpcode[Class], 2,
TmpReg2).addReg(TmpReg1).addReg(TmpReg0);
// Figure out which register we want to pick the result out of...
unsigned DestReg = isDiv ? Reg : ExtReg;
-
+
// Put the result into the destination register...
BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
}
/// Emit code for a 'SHLD DestReg, Op0, Op1, Amt' operation, where Amt is a
/// constant.
-void X86ISel::doSHLDConst(MachineBasicBlock *MBB,
+void X86ISel::doSHLDConst(MachineBasicBlock *MBB,
MachineBasicBlock::iterator IP,
unsigned DestReg, unsigned Op0Reg, unsigned Op1Reg,
unsigned Amt) {
// NOTE: It is always cheaper on the P4 to emit SHLD as two shifts and an OR
// than it is to emit a real SHLD.
- BuildMI(*MBB, IP, X86::SHLD32rri8, 3,
+ BuildMI(*MBB, IP, X86::SHLD32rri8, 3,
DestReg).addReg(Op0Reg).addReg(Op1Reg).addImm(Amt);
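// (Despite the note above, a real SHLD is emitted here; the cheaper
// two-shift-plus-OR expansion is presumably left as a future improvement.)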
}
}
/// constant expression support.
void X86ISel::emitShiftOperation(MachineBasicBlock *MBB,
MachineBasicBlock::iterator IP,
- Value *Op, Value *ShiftAmount,
- bool isLeftShift, const Type *ResultTy,
+ Value *Op, Value *ShiftAmount,
+ bool isLeftShift, const Type *ResultTy,
unsigned DestReg) {
unsigned SrcReg = getReg (Op, MBB, IP);
bool isSigned = ResultTy->isSigned ();
BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
// DestHi = (>32) ? TmpReg3 : TmpReg2;
- BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
+ BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
// DestLo = (>32) ? TmpReg : TmpReg3;
BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
// DestLo = (>32) ? TmpReg3 : TmpReg2;
- BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
+ BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
DestReg).addReg(TmpReg2).addReg(TmpReg3);
// DestHi = (>32) ? TmpReg : TmpReg3;
- BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
+ BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
DestReg+1).addReg(TmpReg3).addReg(TmpReg);
}
}
bool Swapped = false;
if (!isa<LoadInst>(User->getOperand(1)))
Swapped = !cast<BinaryOperator>(User)->swapOperands();
-
+
// Okay, now that everything is set up, if this load is used by the second
// operand, and if there are no instructions that invalidate the load
// before the binary operator, eliminate the load.
return; // Eliminate the load!
// If we swapped the operands to the instruction, but couldn't fold the
- // load anyway, swap them back. We don't want to break add X, int
+ // load anyway, swap them back. We don't want to break add X, int
// folding.
if (Swapped) cast<BinaryOperator>(User)->swapOperands();
}
} else {
X86AddressMode AM;
getAddressingMode(I.getOperand(0), AM);
-
+
if (Class == cLong) {
addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg), AM);
AM.Disp += 4;
addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(
unsigned(V.I >> 32));
}
-
+
} else if (Class == cLong) {
unsigned ValReg = getReg(I.getOperand(0));
addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg);
// Noop casts are not emitted: getReg will return the source operand as the
// register to use for any uses of the noop cast.
if (DestClass == SrcClass) {
- // The only detail in this plan is that casts from double -> float are
+ // The only detail in this plan is that casts from double -> float are
// truncating operations that we have to codegen through memory (despite
// the fact that the source/dest registers are the same class).
if (CI.getType() != Type::FloatTy || Op->getType() != Type::DoubleTy)
if (!isa<GetElementPtrInst>(*I)) {
AllUsesAreGEPs = false;
break;
- }
+ }
// No need to codegen this cast if all users are getelementptr instrs...
if (AllUsesAreGEPs) return;
{ X86::MOVSX16rr8, X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOV32rr }, // s
{ X86::MOVZX16rr8, X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOV32rr } // u
};
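// The table is indexed as Opc[isUnsigned][SrcClass + DestClass - 1] below:
// the row picks sign- versus zero-extension, and the columns presumably cover
// byte->short, byte->int, short->int, plus a plain 32-bit copy for the widest
// case.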
-
+
bool isUnsigned = SrcTy->isUnsigned() || SrcTy == Type::BoolTy;
BuildMI(*BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
DestReg).addReg(SrcReg);
BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
return;
}
-
+
// Handle cast of LARGER int to SMALLER int using a move to EAX followed by a
// move out of AX or AL.
if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt
default: // No promotion needed...
break;
}
-
+
if (PromoteType) {
unsigned TmpReg = makeAnotherReg(PromoteType);
BuildMI(*BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
MachineConstantPool *CP = F->getConstantPool();
unsigned Zero = makeAnotherReg(Type::IntTy);
Constant *Null = Constant::getNullValue(Type::UIntTy);
- addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Zero),
+ addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Zero),
CP->getConstantPoolIndex(Null));
unsigned Offset = makeAnotherReg(Type::IntTy);
Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
-
+
addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Offset),
CP->getConstantPoolIndex(OffsetCst));
unsigned Addr = makeAnotherReg(Type::IntTy);
// Reload the modified control word now...
addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
-
+
// Restore the memory image of control word to original value
addFrameReference(BuildMI(*BB, IP, X86::MOV8mr, 5),
CWFrameIdx, 1).addReg(HighPartOfCW);
///
/// Note that there is one fewer entry in GEPTypes than there is in GEPOps.
///
-void X86ISel::getGEPIndex(MachineBasicBlock *MBB,
+void X86ISel::getGEPIndex(MachineBasicBlock *MBB,
MachineBasicBlock::iterator IP,
std::vector<Value*> &GEPOps,
std::vector<const Type*> &GEPTypes,
// It's a struct access. CUI is the index into the structure,
// which names the field. This index must have unsigned type.
const ConstantUInt *CUI = cast<ConstantUInt>(GEPOps.back());
-
+
// Use the TargetData structure to pick out what the layout of the
// structure is in memory. Since the structure index must be constant, we
// can get its value and use it to find the right byte offset from the
// If the index reg is already taken, we can't handle this index.
if (AM.IndexReg) return;
- // If this is a size that we can handle, then add the index as
+ // If this is a size that we can handle, then add the index as
switch (TypeSize) {
case 1: case 2: case 4: case 8:
// These are all acceptable scales on X86.
GEPOps.resize(IdxEnd-IdxBegin+1);
GEPOps[0] = Src;
std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);
-
+
std::vector<const Type*>
GEPTypes(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
gep_type_end(Src->getType(), IdxBegin, IdxEnd));
GEPOps.resize(IdxEnd-IdxBegin+1);
GEPOps[0] = Src;
std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);
-
+
std::vector<const Type*> GEPTypes;
GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
gep_type_end(Src->getType(), IdxBegin, IdxEnd));
unsigned OldSize = GEPOps.size();
X86AddressMode AM;
getGEPIndex(MBB, IP, GEPOps, GEPTypes, AM);
-
+
if (GEPOps.size() != OldSize) {
// getGEPIndex consumed some of the input. Build an LEA instruction here.
unsigned NextTarget = 0;
// statically stack allocate the space, so we don't need to do anything here.
//
if (dyn_castFixedAlloca(&I)) return;
-
+
// Find the data size of the alloca inst's getAllocatedType.
const Type *Ty = I.getAllocatedType();
unsigned TySize = TM.getTargetData().getTypeSize(Ty);
// constant by the variable amount.
unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
unsigned SrcReg1 = getReg(I.getArraySize());
-
+
// TotalSizeReg = mul <numelements>, <TypeSize>
MachineBasicBlock::iterator MBBI = BB->end();
doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);
// AlignedSize = and <AddedSize>, ~15
unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
BuildMI(BB, X86::AND32ri, 2, AlignedSize).addReg(AddedSizeReg).addImm(~15);
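// (AddedSizeReg presumably holds the total size plus 15, so masking with ~15
// rounds the allocation up to a multiple of 16 bytes and helps keep ESP
// 16-byte aligned after the subtraction below.)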
-
+
// Subtract size from stack pointer, thereby allocating some space.
BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);
1).addExternalSymbol("free", true);
doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
}
-
+
/// createX86SimpleInstructionSelector - This pass converts an LLVM function
/// into a machine code representation in a very simple peep-hole fashion. The
/// generated code sucks but the implementation is nice and simple.