diff --git a/lib/Target/X86/X86ISelSimple.cpp b/lib/Target/X86/X86ISelSimple.cpp
index ee8318f22d6..c243fa134ef 100644
--- a/lib/Target/X86/X86ISelSimple.cpp
+++ b/lib/Target/X86/X86ISelSimple.cpp
@@ -8,32 +8,26 @@
 #include "X86InstrInfo.h"
 #include "X86InstrBuilder.h"
 #include "llvm/Function.h"
-#include "llvm/iTerminators.h"
-#include "llvm/iOperators.h"
-#include "llvm/iOther.h"
-#include "llvm/iPHINode.h"
-#include "llvm/iMemory.h"
-#include "llvm/Type.h"
+#include "llvm/Instructions.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/Constants.h"
 #include "llvm/Pass.h"
+#include "llvm/Intrinsics.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/SSARegMap.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/InstVisitor.h"
 #include "llvm/Target/MRegisterInfo.h"
-#include <map>
+#include "llvm/Support/InstVisitor.h"
 
 /// BMI - A special BuildMI variant that takes an iterator to insert the
-/// instruction at as well as a basic block.
-/// this is the version for when you have a destination register in mind.
+/// instruction at as well as a basic block.  This is the version for when you
+/// have a destination register in mind.
 inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator &I,
-                                      MachineOpCode Opcode,
-                                      unsigned NumOperands,
+                                      int Opcode, unsigned NumOperands,
                                       unsigned DestReg) {
   assert(I >= MBB->begin() && I <= MBB->end() && "Bad iterator!");
   MachineInstr *MI = new MachineInstr(Opcode, NumOperands+1, true, true);
@@ -45,9 +39,8 @@ inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
 /// instruction at as well as a basic block.
 inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator &I,
-                                      MachineOpCode Opcode,
-                                      unsigned NumOperands) {
-  assert(I > MBB->begin() && I <= MBB->end() && "Bad iterator!");
+                                      int Opcode, unsigned NumOperands) {
+  assert(I >= MBB->begin() && I <= MBB->end() && "Bad iterator!");
   MachineInstr *MI = new MachineInstr(Opcode, NumOperands, true, true);
   I = MBB->insert(I, MI)+1;
   return MachineInstrBuilder(MI);
@@ -57,8 +50,9 @@ inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
 namespace {
   struct ISel : public FunctionPass, InstVisitor<ISel> {
     TargetMachine &TM;
-    MachineFunction *F;                    // The function we are compiling into
-    MachineBasicBlock *BB;                 // The current MBB we are compiling
+    MachineFunction *F;                 // The function we are compiling into
+    MachineBasicBlock *BB;              // The current MBB we are compiling
+    int VarArgsFrameIndex;              // FrameIndex for start of varargs area
 
     std::map<Value*, unsigned> RegMap;  // Mapping between Val's and SSA Regs
 
@@ -78,6 +72,8 @@ namespace {
         F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));
 
       BB = &F->front();
+
+      // Copy incoming arguments off of the stack...
       LoadArgumentsToVirtualRegs(Fn);
 
       // Instruction select everything except PHI nodes
@@ -89,7 +85,8 @@ namespace {
       RegMap.clear();
      MBBMap.clear();
       F = 0;
-      return false;  // We never modify the LLVM itself.
+      // We always build a machine code representation for the function
+      return true;
     }
 
     virtual const char *getPassName() const {
@@ -125,13 +122,16 @@ namespace {
     void visitBranchInst(BranchInst &BI);
 
     struct ValueRecord {
+      Value *Val;
       unsigned Reg;
       const Type *Ty;
-      ValueRecord(unsigned R, const Type *T) : Reg(R), Ty(T) {}
+      ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {}
+      ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {}
     };
     void doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                 const std::vector<ValueRecord> &Args);
     void visitCallInst(CallInst &I);
+    void visitIntrinsicCall(LLVMIntrinsic::ID ID, CallInst &I);
 
     // Arithmetic operators
     void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass);
@@ -140,6 +140,10 @@ namespace {
     void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator &MBBI,
                     unsigned DestReg, const Type *DestTy,
                     unsigned Op0Reg, unsigned Op1Reg);
+    void doMultiplyConst(MachineBasicBlock *MBB,
+                         MachineBasicBlock::iterator &MBBI,
+                         unsigned DestReg, const Type *DestTy,
+                         unsigned Op0Reg, unsigned Op1Val);
     void visitMul(BinaryOperator &B);
 
     void visitDiv(BinaryOperator &B) { visitDivRem(B); }
@@ -151,21 +155,14 @@ namespace {
     void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); }
     void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); }
 
-    // Binary comparison operators
-    void visitSetCCInst(SetCondInst &I, unsigned OpNum);
-    void visitSetEQ(SetCondInst &I) { visitSetCCInst(I, 0); }
-    void visitSetNE(SetCondInst &I) { visitSetCCInst(I, 1); }
-    void visitSetLT(SetCondInst &I) { visitSetCCInst(I, 2); }
-    void visitSetGT(SetCondInst &I) { visitSetCCInst(I, 3); }
-    void visitSetLE(SetCondInst &I) { visitSetCCInst(I, 4); }
-    void visitSetGE(SetCondInst &I) { visitSetCCInst(I, 5); }
-
+    // Comparison operators...
+    void visitSetCondInst(SetCondInst &I);
+    unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
+                            MachineBasicBlock *MBB,
+                            MachineBasicBlock::iterator &MBBI);
+
     // Memory Instructions
-    MachineInstr *doFPLoad(MachineBasicBlock *MBB,
-                           MachineBasicBlock::iterator &MBBI,
-                           const Type *Ty, unsigned DestReg);
     void visitLoadInst(LoadInst &I);
-    void doFPStore(const Type *Ty, unsigned DestAddrReg, unsigned SrcReg);
     void visitStoreInst(StoreInst &I);
     void visitGetElementPtrInst(GetElementPtrInst &I);
     void visitAllocaInst(AllocaInst &I);
@@ -176,6 +173,8 @@ namespace {
     void visitShiftInst(ShiftInst &I);
     void visitPHINode(PHINode &I) {}      // PHI nodes handled by second pass
     void visitCastInst(CastInst &I);
+    void visitVANextInst(VANextInst &I);
+    void visitVAArgInst(VAArgInst &I);
 
     void visitInstruction(Instruction &I) {
       std::cerr << "Cannot instruction select: " << I;
@@ -197,6 +196,26 @@ namespace {
                           Value *Src, User::op_iterator IdxBegin,
                           User::op_iterator IdxEnd, unsigned TargetReg);
 
+    /// emitCastOperation - Common code shared between visitCastInst and
+    /// constant expression cast support.
+    void emitCastOperation(MachineBasicBlock *BB,MachineBasicBlock::iterator&IP,
+                           Value *Src, const Type *DestTy, unsigned TargetReg);
+
+    /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
+    /// and constant expression support.
+    void emitSimpleBinaryOperation(MachineBasicBlock *BB,
+                                   MachineBasicBlock::iterator &IP,
+                                   Value *Op0, Value *Op1,
+                                   unsigned OperatorClass, unsigned TargetReg);
+
+    /// emitSetCCOperation - Common code shared between visitSetCondInst and
+    /// constant expression support.
+    void emitSetCCOperation(MachineBasicBlock *BB,
+                            MachineBasicBlock::iterator &IP,
+                            Value *Op0, Value *Op1, unsigned Opcode,
+                            unsigned TargetReg);
+
+
     /// copyConstantToRegister - Output the instructions required to put the
     /// specified constant into the specified register.
     ///
@@ -213,9 +232,12 @@ namespace {
     /// of the long value.
     ///
     unsigned makeAnotherReg(const Type *Ty) {
+      assert(dynamic_cast<const X86RegisterInfo*>(TM.getRegisterInfo()) &&
+             "Current target doesn't have X86 reg info??");
+      const X86RegisterInfo *MRI =
+        static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
       if (Ty == Type::LongTy || Ty == Type::ULongTy) {
-        const TargetRegisterClass *RC =
-          TM.getRegisterInfo()->getRegClassForType(Type::IntTy);
+        const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy);
         // Create the lower part
         F->getSSARegMap()->createVirtualRegister(RC);
         // Create the upper part.
@@ -223,8 +245,7 @@ namespace {
       }
 
       // Add the mapping of regnumber => reg class to MachineFunction
-      const TargetRegisterClass *RC =
-        TM.getRegisterInfo()->getRegClassForType(Ty);
+      const TargetRegisterClass *RC = MRI->getRegClassForType(Ty);
       return F->getSSARegMap()->createVirtualRegister(RC);
     }
 
@@ -308,14 +329,39 @@ void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator &IP,
                                   Constant *C, unsigned R) {
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
-    if (CE->getOpcode() == Instruction::GetElementPtr) {
+    unsigned Class = 0;
+    switch (CE->getOpcode()) {
+    case Instruction::GetElementPtr:
       emitGEPOperation(MBB, IP, CE->getOperand(0),
                        CE->op_begin()+1, CE->op_end(), R);
       return;
-    }
+    case Instruction::Cast:
+      emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R);
+      return;
 
-    std::cerr << "Offending expr: " << C << "\n";
-    assert(0 && "Constant expressions not yet handled!\n");
+    case Instruction::Xor: ++Class; // FALL THROUGH
+    case Instruction::Or:  ++Class; // FALL THROUGH
+    case Instruction::And: ++Class; // FALL THROUGH
+    case Instruction::Sub: ++Class; // FALL THROUGH
+    case Instruction::Add:
+      emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
+                                Class, R);
+      return;
+
+    case Instruction::SetNE:
+    case Instruction::SetEQ:
+    case Instruction::SetLT:
+    case Instruction::SetGT:
+    case Instruction::SetLE:
+    case Instruction::SetGE:
+      emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
+                         CE->getOpcode(), R);
+      return;
+
+    default:
+      std::cerr << "Offending expr: " << C << "\n";
+      assert(0 && "Constant expression not yet handled!\n");
+    }
   }
 
   if (C->getType()->isIntegral()) {
@@ -323,12 +369,7 @@ void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
 
     if (Class == cLong) {
       // Copy the value into the register pair.
-      uint64_t Val;
-      if (C->getType()->isSigned())
-        Val = cast<ConstantSInt>(C)->getValue();
-      else
-        Val = cast<ConstantUInt>(C)->getValue();
-
+      uint64_t Val = cast<ConstantInt>(C)->getRawValue();
       BMI(MBB, IP, X86::MOVir32, 1, R).addZImm(Val & 0xFFFFFFFF);
       BMI(MBB, IP, X86::MOVir32, 1, R+1).addZImm(Val >> 32);
       return;
@@ -342,12 +383,9 @@ void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
 
     if (C->getType() == Type::BoolTy) {
       BMI(MBB, IP, X86::MOVir8, 1, R).addZImm(C == ConstantBool::True);
-    } else if (C->getType()->isSigned()) {
-      ConstantSInt *CSI = cast<ConstantSInt>(C);
-      BMI(MBB, IP, IntegralOpcodeTab[Class], 1, R).addZImm(CSI->getValue());
     } else {
-      ConstantUInt *CUI = cast<ConstantUInt>(C);
-      BMI(MBB, IP, IntegralOpcodeTab[Class], 1, R).addZImm(CUI->getValue());
+      ConstantInt *CI = cast<ConstantInt>(C);
+      BMI(MBB, IP, IntegralOpcodeTab[Class], 1, R).addZImm(CI->getRawValue());
     }
   } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
     double Value = CFP->getValue();
@@ -359,7 +397,11 @@ void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
       // Otherwise we need to spill the constant to memory...
       MachineConstantPool *CP = F->getConstantPool();
       unsigned CPI = CP->getConstantPoolIndex(CFP);
-      addConstantPoolReference(doFPLoad(MBB, IP, CFP->getType(), R), CPI);
+      const Type *Ty = CFP->getType();
+
+      assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!");
+      unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLDr32 : X86::FLDr64;
+      addConstantPoolReference(BMI(MBB, IP, LoadOpcode, 4, R), CPI);
     }
 
   } else if (isa<ConstantPointerNull>(C)) {
@@ -386,7 +428,7 @@ void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
   // [ESP + 8] -- second argument, if first argument is four bytes in size
   //    ...
   //
-  unsigned ArgOffset = 4;
+  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
   MachineFrameInfo *MFI = F->getFrameInfo();
 
   for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) {
@@ -429,6 +471,12 @@ void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
     }
     ArgOffset += 4;  // Each argument takes at least 4 bytes on the stack...
   }
+
+  // If the function takes variable number of arguments, add a frame offset for
+  // the start of the first vararg value... this is used to expand
+  // llvm.va_start.
+  if (Fn.getFunctionType()->isVarArg())
+    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
 }
 
 
@@ -446,7 +494,7 @@ void ISel::SelectPHINodes() {
     // Loop over all of the PHI nodes in the LLVM basic block...
     unsigned NumPHIs = 0;
     for (BasicBlock::const_iterator I = BB->begin();
-         PHINode *PN = (PHINode*)dyn_cast<PHINode>(&*I); ++I) {
+         PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I)); ++I) {
 
       // Create a new machine instr PHI node, and insert it.
       unsigned PHIReg = getReg(*PN);
@@ -459,18 +507,50 @@ void ISel::SelectPHINodes() {
         MBB->insert(MBB->begin()+NumPHIs++, LongPhiMI);
       }
 
+      // PHIValues - Map of blocks to incoming virtual registers.  We use this
+      // so that we only initialize one incoming value for a particular block,
+      // even if the block has multiple entries in the PHI node.
+      //
+      std::map<MachineBasicBlock*, unsigned> PHIValues;
+
       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
         MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)];
+        unsigned ValReg;
+        std::map<MachineBasicBlock*, unsigned>::iterator EntryIt =
+          PHIValues.lower_bound(PredMBB);
+
+        if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) {
+          // We already inserted an initialization of the register for this
+          // predecessor.  Recycle it.
+          ValReg = EntryIt->second;
+
+        } else {
+          // Get the incoming value into a virtual register.
+          //
+          Value *Val = PN->getIncomingValue(i);
+
+          // If this is a constant or GlobalValue, we may have to insert code
+          // into the basic block to compute it into a virtual register.
+          if (isa<Constant>(Val) || isa<GlobalValue>(Val)) {
+            // Because we don't want to clobber any values which might be in
+            // physical registers with the computation of this constant (which
+            // might be arbitrarily complex if it is a constant expression),
+            // just insert the computation at the top of the basic block.
+            MachineBasicBlock::iterator PI = PredMBB->begin();
+
+            // Skip over any PHI nodes though!
+            while (PI != PredMBB->end() && (*PI)->getOpcode() == X86::PHI)
+              ++PI;
+
+            ValReg = getReg(Val, PredMBB, PI);
+          } else {
+            ValReg = getReg(Val);
+          }
+
+          // Remember that we inserted a value for this PHI for this predecessor
+          PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg));
+        }
 
-        // Get the incoming value into a virtual register.  If it is not already
-        // available in a virtual register, insert the computation code into
-        // PredMBB
-        //
-        MachineBasicBlock::iterator PI = PredMBB->end();
-        while (PI != PredMBB->begin() &&
-               TII.isTerminatorInstr((*(PI-1))->getOpcode()))
-          --PI;
-        unsigned ValReg = getReg(PN->getIncomingValue(i), PredMBB, PI);
         PhiMI->addRegOperand(ValReg);
         PhiMI->addMachineBasicBlockOperand(PredMBB);
         if (LongPhiMI) {
@@ -482,52 +562,116 @@ void ISel::SelectPHINodes() {
   }
 }
 
+// canFoldSetCCIntoBranch - Return the setcc instruction if we can fold it into
+// the conditional branch instruction which is the only user of the cc
+// instruction.  This is the case if the conditional branch is the only user of
+// the setcc, and if the setcc is in the same basic block as the conditional
+// branch.  We also don't handle long arguments below, so we reject them here as
+// well.
+//
+static SetCondInst *canFoldSetCCIntoBranch(Value *V) {
+  if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
+    if (SCI->hasOneUse() && isa<BranchInst>(SCI->use_back()) &&
+        SCI->getParent() == cast<Instruction>(SCI->use_back())->getParent()) {
+      const Type *Ty = SCI->getOperand(0)->getType();
+      if (Ty != Type::LongTy && Ty != Type::ULongTy)
+        return SCI;
+    }
+  return 0;
+}
 
+// Return a fixed numbering for setcc instructions which does not depend on the
+// order of the opcodes.
+//
+static unsigned getSetCCNumber(unsigned Opcode) {
+  switch(Opcode) {
+  default: assert(0 && "Unknown setcc instruction!");
+  case Instruction::SetEQ: return 0;
+  case Instruction::SetNE: return 1;
+  case Instruction::SetLT: return 2;
+  case Instruction::SetGE: return 3;
+  case Instruction::SetGT: return 4;
+  case Instruction::SetLE: return 5;
+  }
+}
 
-/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
-/// register, then move it to wherever the result should be.
-///
-void ISel::visitSetCCInst(SetCondInst &I, unsigned OpNum) {
+// LLVM  -> X86 signed  X86 unsigned
+// -----    ----------  ------------
+// seteq -> sete        sete
+// setne -> setne       setne
+// setlt -> setl        setb
+// setge -> setge       setae
+// setgt -> setg        seta
+// setle -> setle       setbe
+// ----
+//          sets                       // Used by comparison with 0 optimization
+//          setns
+static const unsigned SetCCOpcodeTab[2][8] = {
+  { X86::SETEr, X86::SETNEr, X86::SETBr, X86::SETAEr, X86::SETAr, X86::SETBEr,
+    0, 0 },
+  { X86::SETEr, X86::SETNEr, X86::SETLr, X86::SETGEr, X86::SETGr, X86::SETLEr,
+    X86::SETSr, X86::SETNSr },
+};
+
+// EmitComparison - This function emits a comparison of the two operands,
+// returning the extended setcc code to use.
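+// For example, "setlt int %A, %B" is numbered 2 by getSetCCNumber; after the
+// CMP is emitted here, index 2 selects SETLr from the signed row of
+// SetCCOpcodeTab (or SETBr from the unsigned row).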
+unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
+                              MachineBasicBlock *MBB,
+                              MachineBasicBlock::iterator &IP) {
   // The arguments are already supposed to be of the same type.
-  const Type *CompTy = I.getOperand(0)->getType();
-  bool isSigned = CompTy->isSigned();
-  unsigned reg1 = getReg(I.getOperand(0));
-  unsigned reg2 = getReg(I.getOperand(1));
-  unsigned DestReg = getReg(I);
+  const Type *CompTy = Op0->getType();
+  unsigned Class = getClassB(CompTy);
+  unsigned Op0r = getReg(Op0, MBB, IP);
+
+  // Special case handling of: cmp R, i
+  if (Class == cByte || Class == cShort || Class == cInt)
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
+      uint64_t Op1v = cast<ConstantInt>(CI)->getRawValue();
+
+      // Mask off any upper bits of the constant, if there are any...
+      Op1v &= (1ULL << (8 << Class)) - 1;
+
+      // If this is a comparison against zero, emit more efficient code.  We
+      // can't handle unsigned comparisons against zero unless they are == or
+      // !=.  These should have been strength reduced already anyway.
+      if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {
+        static const unsigned TESTTab[] = {
+          X86::TESTrr8, X86::TESTrr16, X86::TESTrr32
+        };
+        BMI(MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);
+
+        if (OpNum == 2) return 6;   // Map jl -> js
+        if (OpNum == 3) return 7;   // Map jg -> jns
+        return OpNum;
+      }
 
-  // LLVM  -> X86 signed  X86 unsigned
-  // -----    ----------  ------------
-  // seteq -> sete        sete
-  // setne -> setne       setne
-  // setlt -> setl        setb
-  // setgt -> setg        seta
-  // setle -> setle       setbe
-  // setge -> setge       setae
-  static const unsigned OpcodeTab[2][6] = {
-    {X86::SETEr, X86::SETNEr, X86::SETBr, X86::SETAr, X86::SETBEr, X86::SETAEr},
-    {X86::SETEr, X86::SETNEr, X86::SETLr, X86::SETGr, X86::SETLEr, X86::SETGEr},
-  };
+      static const unsigned CMPTab[] = {
+        X86::CMPri8, X86::CMPri16, X86::CMPri32
+      };
 
-  unsigned Class = getClassB(CompTy);
+      BMI(MBB, IP, CMPTab[Class], 2).addReg(Op0r).addZImm(Op1v);
+      return OpNum;
+    }
+
+  unsigned Op1r = getReg(Op1, MBB, IP);
   switch (Class) {
   default: assert(0 && "Unknown type class!");
    // Emit: cmp <var1>, <var2> (do the comparison).  We can
    // compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
    // 32-bit.
case cByte: - BuildMI(BB, X86::CMPrr8, 2).addReg(reg1).addReg(reg2); + BMI(MBB, IP, X86::CMPrr8, 2).addReg(Op0r).addReg(Op1r); break; case cShort: - BuildMI(BB, X86::CMPrr16, 2).addReg(reg1).addReg(reg2); + BMI(MBB, IP, X86::CMPrr16, 2).addReg(Op0r).addReg(Op1r); break; case cInt: - BuildMI(BB, X86::CMPrr32, 2).addReg(reg1).addReg(reg2); + BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r); break; case cFP: - BuildMI(BB, X86::FpUCOM, 2).addReg(reg1).addReg(reg2); - BuildMI(BB, X86::FNSTSWr8, 0); - BuildMI(BB, X86::SAHF, 1); - isSigned = false; // Compare with unsigned operators + BMI(MBB, IP, X86::FpUCOM, 2).addReg(Op0r).addReg(Op1r); + BMI(MBB, IP, X86::FNSTSWr8, 0); + BMI(MBB, IP, X86::SAHF, 1); break; case cLong: @@ -535,9 +679,9 @@ void ISel::visitSetCCInst(SetCondInst &I, unsigned OpNum) { unsigned LoTmp = makeAnotherReg(Type::IntTy); unsigned HiTmp = makeAnotherReg(Type::IntTy); unsigned FinalTmp = makeAnotherReg(Type::IntTy); - BuildMI(BB, X86::XORrr32, 2, LoTmp).addReg(reg1).addReg(reg2); - BuildMI(BB, X86::XORrr32, 2, HiTmp).addReg(reg1+1).addReg(reg2+1); - BuildMI(BB, X86::ORrr32, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp); + BMI(MBB, IP, X86::XORrr32, 2, LoTmp).addReg(Op0r).addReg(Op1r); + BMI(MBB, IP, X86::XORrr32, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1); + BMI(MBB, IP, X86::ORrr32, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp); break; // Allow the sete or setne to be generated from flags set by OR } else { // Emit a sequence of code which compares the high and low parts once @@ -549,45 +693,90 @@ void ISel::visitSetCCInst(SetCondInst &I, unsigned OpNum) { // dest = hi(op1) == hi(op2) ? AL : BL; // - // FIXME: This would be much better if we had heirarchical register + // FIXME: This would be much better if we had hierarchical register // classes! Until then, hardcode registers so that we can deal with their // aliases (because we don't have conditional byte moves). // - BuildMI(BB, X86::CMPrr32, 2).addReg(reg1).addReg(reg2); - BuildMI(BB, OpcodeTab[0][OpNum], 0, X86::AL); - BuildMI(BB, X86::CMPrr32, 2).addReg(reg1+1).addReg(reg2+1); - BuildMI(BB, OpcodeTab[isSigned][OpNum], 0, X86::BL); - BuildMI(BB, X86::CMOVErr16, 2, X86::BX).addReg(X86::BX).addReg(X86::AX); - BuildMI(BB, X86::MOVrr8, 1, DestReg).addReg(X86::BL); - return; + BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r); + BMI(MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL); + BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r+1).addReg(Op1r+1); + BMI(MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL); + BMI(MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH); + BMI(MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH); + BMI(MBB, IP, X86::CMOVErr16, 2, X86::BX).addReg(X86::BX).addReg(X86::AX); + // NOTE: visitSetCondInst knows that the value is dumped into the BL + // register at this point for long values... + return OpNum; } } + return OpNum; +} + - BuildMI(BB, OpcodeTab[isSigned][OpNum], 0, DestReg); +/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized +/// register, then move it to wherever the result should be. +/// +void ISel::visitSetCondInst(SetCondInst &I) { + if (canFoldSetCCIntoBranch(&I)) return; // Fold this into a branch... + + unsigned DestReg = getReg(I); + MachineBasicBlock::iterator MII = BB->end(); + emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(), + DestReg); +} + +/// emitSetCCOperation - Common code shared between visitSetCondInst and +/// constant expression support. 
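+/// For long comparisons other than ==/!=, EmitComparison has already left the
+/// partial result in BL, so this just copies BL into TargetReg instead of
+/// emitting another setcc.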
+void ISel::emitSetCCOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator &IP, + Value *Op0, Value *Op1, unsigned Opcode, + unsigned TargetReg) { + unsigned OpNum = getSetCCNumber(Opcode); + OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP); + + const Type *CompTy = Op0->getType(); + unsigned CompClass = getClassB(CompTy); + bool isSigned = CompTy->isSigned() && CompClass != cFP; + + if (CompClass != cLong || OpNum < 2) { + // Handle normal comparisons with a setcc instruction... + BMI(MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg); + } else { + // Handle long comparisons by copying the value which is already in BL into + // the register we want... + BMI(MBB, IP, X86::MOVrr8, 1, TargetReg).addReg(X86::BL); + } } + + + /// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide /// operand, in the specified target register. void ISel::promote32(unsigned targetReg, const ValueRecord &VR) { bool isUnsigned = VR.Ty->isUnsigned(); + + // Make sure we have the register number for this value... + unsigned Reg = VR.Val ? getReg(VR.Val) : VR.Reg; + switch (getClassB(VR.Ty)) { case cByte: // Extend value into target register (8->32) if (isUnsigned) - BuildMI(BB, X86::MOVZXr32r8, 1, targetReg).addReg(VR.Reg); + BuildMI(BB, X86::MOVZXr32r8, 1, targetReg).addReg(Reg); else - BuildMI(BB, X86::MOVSXr32r8, 1, targetReg).addReg(VR.Reg); + BuildMI(BB, X86::MOVSXr32r8, 1, targetReg).addReg(Reg); break; case cShort: // Extend value into target register (16->32) if (isUnsigned) - BuildMI(BB, X86::MOVZXr32r16, 1, targetReg).addReg(VR.Reg); + BuildMI(BB, X86::MOVZXr32r16, 1, targetReg).addReg(Reg); else - BuildMI(BB, X86::MOVSXr32r16, 1, targetReg).addReg(VR.Reg); + BuildMI(BB, X86::MOVSXr32r16, 1, targetReg).addReg(Reg); break; case cInt: // Move value into target register (32->32) - BuildMI(BB, X86::MOVrr32, 1, targetReg).addReg(VR.Reg); + BuildMI(BB, X86::MOVrr32, 1, targetReg).addReg(Reg); break; default: assert(0 && "Unpromotable operand class in promote32"); @@ -618,13 +807,19 @@ void ISel::visitReturnInst(ReturnInst &I) { case cShort: case cInt: promote32(X86::EAX, ValueRecord(RetReg, RetVal->getType())); + // Declare that EAX is live on exit + BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP); break; case cFP: // Floats & Doubles: Return in ST(0) BuildMI(BB, X86::FpSETRESULT, 1).addReg(RetReg); + // Declare that top-of-stack is live on exit + BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP); break; case cLong: BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(RetReg); BuildMI(BB, X86::MOVrr32, 1, X86::EDX).addReg(RetReg+1); + // Declare that EAX & EDX are live on exit + BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX).addReg(X86::ESP); break; default: visitInstruction(I); @@ -633,18 +828,83 @@ void ISel::visitReturnInst(ReturnInst &I) { BuildMI(BB, X86::RET, 0); } +// getBlockAfter - Return the basic block which occurs lexically after the +// specified one. +static inline BasicBlock *getBlockAfter(BasicBlock *BB) { + Function::iterator I = BB; ++I; // Get iterator to next block + return I != BB->getParent()->end() ? &*I : 0; +} + /// visitBranchInst - Handle conditional and unconditional branches here. Note /// that since code layout is frozen at this point, that if we are trying to /// jump to a block that is the immediate successor of the current block, we can -/// just make a fall-through. (but we don't currently). +/// just make a fall-through (but we don't currently). 
/// void ISel::visitBranchInst(BranchInst &BI) { - if (BI.isConditional()) { + BasicBlock *NextBB = getBlockAfter(BI.getParent()); // BB after current one + + if (!BI.isConditional()) { // Unconditional branch? + if (BI.getSuccessor(0) != NextBB) + BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0)); + return; + } + + // See if we can fold the setcc into the branch itself... + SetCondInst *SCI = canFoldSetCCIntoBranch(BI.getCondition()); + if (SCI == 0) { + // Nope, cannot fold setcc into this branch. Emit a branch on a condition + // computed some other way... unsigned condReg = getReg(BI.getCondition()); BuildMI(BB, X86::CMPri8, 2).addReg(condReg).addZImm(0); - BuildMI(BB, X86::JE, 1).addPCDisp(BI.getSuccessor(1)); + if (BI.getSuccessor(1) == NextBB) { + if (BI.getSuccessor(0) != NextBB) + BuildMI(BB, X86::JNE, 1).addPCDisp(BI.getSuccessor(0)); + } else { + BuildMI(BB, X86::JE, 1).addPCDisp(BI.getSuccessor(1)); + + if (BI.getSuccessor(0) != NextBB) + BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0)); + } + return; + } + + unsigned OpNum = getSetCCNumber(SCI->getOpcode()); + MachineBasicBlock::iterator MII = BB->end(); + OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB, MII); + + const Type *CompTy = SCI->getOperand(0)->getType(); + bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP; + + + // LLVM -> X86 signed X86 unsigned + // ----- ---------- ------------ + // seteq -> je je + // setne -> jne jne + // setlt -> jl jb + // setge -> jge jae + // setgt -> jg ja + // setle -> jle jbe + // ---- + // js // Used by comparison with 0 optimization + // jns + + static const unsigned OpcodeTab[2][8] = { + { X86::JE, X86::JNE, X86::JB, X86::JAE, X86::JA, X86::JBE, 0, 0 }, + { X86::JE, X86::JNE, X86::JL, X86::JGE, X86::JG, X86::JLE, + X86::JS, X86::JNS }, + }; + + if (BI.getSuccessor(0) != NextBB) { + BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(0)); + if (BI.getSuccessor(1) != NextBB) + BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(1)); + } else { + // Change to the inverse condition... + if (BI.getSuccessor(1) != NextBB) { + OpNum ^= 1; + BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(1)); + } } - BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0)); } @@ -677,7 +937,7 @@ void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI, // Arguments go on the stack in reverse order, as specified by the ABI. unsigned ArgOffset = 0; for (unsigned i = 0, e = Args.size(); i != e; ++i) { - unsigned ArgReg = Args[i].Reg; + unsigned ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg; switch (getClassB(Args[i].Ty)) { case cByte: case cShort: { @@ -759,6 +1019,12 @@ void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI, void ISel::visitCallInst(CallInst &CI) { MachineInstr *TheCall; if (Function *F = CI.getCalledFunction()) { + // Is it an intrinsic function call? + if (LLVMIntrinsic::ID ID = (LLVMIntrinsic::ID)F->getIntrinsicID()) { + visitIntrinsicCall(ID, CI); // Special intrinsics are not handled here + return; + } + // Emit a CALL instruction with PC-relative displacement. TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true); } else { // Emit an indirect call... 
@@ -768,60 +1034,169 @@
 
   std::vector<ValueRecord> Args;
   for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
-    Args.push_back(ValueRecord(getReg(CI.getOperand(i)),
-                               CI.getOperand(i)->getType()));
+    Args.push_back(ValueRecord(CI.getOperand(i)));
 
   unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0;
   doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
 }
 
-/// visitSimpleBinary - Implement simple binary operators for integral types...
-/// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or,
-/// 4 for Xor.
-///
-void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
-  unsigned Class = getClassB(B.getType());
+void ISel::visitIntrinsicCall(LLVMIntrinsic::ID ID, CallInst &CI) {
+  unsigned TmpReg1, TmpReg2;
+  switch (ID) {
+  case LLVMIntrinsic::va_start:
+    // Get the address of the first vararg value...
+    TmpReg1 = getReg(CI);
+    addFrameReference(BuildMI(BB, X86::LEAr32, 5, TmpReg1), VarArgsFrameIndex);
+    return;
 
-  static const unsigned OpcodeTab[][4] = {
-    // Arithmetic operators
-    { X86::ADDrr8, X86::ADDrr16, X86::ADDrr32, X86::FpADD },  // ADD
-    { X86::SUBrr8, X86::SUBrr16, X86::SUBrr32, X86::FpSUB },  // SUB
+  case LLVMIntrinsic::va_copy:
+    TmpReg1 = getReg(CI);
+    TmpReg2 = getReg(CI.getOperand(1));
+    BuildMI(BB, X86::MOVrr32, 1, TmpReg1).addReg(TmpReg2);
+    return;
+  case LLVMIntrinsic::va_end: return;   // Noop on X86
 
-    // Bitwise operators
-    { X86::ANDrr8, X86::ANDrr16, X86::ANDrr32, 0 },  // AND
-    { X86:: ORrr8, X86:: ORrr16, X86:: ORrr32, 0 },  // OR
-    { X86::XORrr8, X86::XORrr16, X86::XORrr32, 0 },  // XOR
-  };
+  case LLVMIntrinsic::longjmp:
+  case LLVMIntrinsic::siglongjmp:
+    BuildMI(BB, X86::CALLpcrel32, 1).addExternalSymbol("abort", true);
+    return;
 
-  bool isLong = false;
-  if (Class == cLong) {
-    isLong = true;
-    Class = cInt;          // Bottom 32 bits are handled just like ints
+  case LLVMIntrinsic::setjmp:
+  case LLVMIntrinsic::sigsetjmp:
+    // Setjmp always returns zero...
+    BuildMI(BB, X86::MOVir32, 1, getReg(CI)).addZImm(0);
+    return;
+  default: assert(0 && "Unknown intrinsic for X86!");
   }
-
-  unsigned Opcode = OpcodeTab[OperatorClass][Class];
-  assert(Opcode && "Floating point arguments to logical inst?");
-  unsigned Op0r = getReg(B.getOperand(0));
-  unsigned Op1r = getReg(B.getOperand(1));
+}
+
+
+/// visitSimpleBinary - Implement simple binary operators for integral types...
+/// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
+/// Xor.
+void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
   unsigned DestReg = getReg(B);
-  BuildMI(BB, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
+  MachineBasicBlock::iterator MI = BB->end();
+  emitSimpleBinaryOperation(BB, MI, B.getOperand(0), B.getOperand(1),
+                            OperatorClass, DestReg);
+}
 
-  if (isLong) {        // Handle the upper 32 bits of long values...
-    static const unsigned TopTab[] = {
-      X86::ADCrr32, X86::SBBrr32, X86::ANDrr32, X86::ORrr32, X86::XORrr32
+/// emitSimpleBinaryOperation - Implement simple binary operators for integral
+/// types...  OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for
+/// Or, 4 for Xor.
+///
+/// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
+/// and constant expression support.
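+///
+/// For a constant RHS this selects the immediate forms (e.g. ADDri32) and
+/// strength-reduces a few patterns: "sub 0, X" becomes NEG, "xor X, -1"
+/// becomes NOT, and "add X, 1"/"add X, -1" become INC/DEC.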
+///
+void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
+                                     MachineBasicBlock::iterator &IP,
+                                     Value *Op0, Value *Op1,
+                                     unsigned OperatorClass, unsigned DestReg) {
+  unsigned Class = getClassB(Op0->getType());
+
+  // sub 0, X -> neg X
+  if (OperatorClass == 1 && Class != cLong)
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0))
+      if (CI->isNullValue()) {
+        unsigned op1Reg = getReg(Op1, MBB, IP);
+        switch (Class) {
+        default: assert(0 && "Unknown class for this function!");
+        case cByte:
+          BMI(MBB, IP, X86::NEGr8, 1, DestReg).addReg(op1Reg);
+          return;
+        case cShort:
+          BMI(MBB, IP, X86::NEGr16, 1, DestReg).addReg(op1Reg);
+          return;
+        case cInt:
+          BMI(MBB, IP, X86::NEGr32, 1, DestReg).addReg(op1Reg);
+          return;
+        }
+      }
+
+  if (!isa<ConstantInt>(Op1) || Class == cLong) {
+    static const unsigned OpcodeTab[][4] = {
+      // Arithmetic operators
+      { X86::ADDrr8, X86::ADDrr16, X86::ADDrr32, X86::FpADD },  // ADD
+      { X86::SUBrr8, X86::SUBrr16, X86::SUBrr32, X86::FpSUB },  // SUB
+
+      // Bitwise operators
+      { X86::ANDrr8, X86::ANDrr16, X86::ANDrr32, 0 },  // AND
+      { X86:: ORrr8, X86:: ORrr16, X86:: ORrr32, 0 },  // OR
+      { X86::XORrr8, X86::XORrr16, X86::XORrr32, 0 },  // XOR
+    };
+
+    bool isLong = false;
+    if (Class == cLong) {
+      isLong = true;
+      Class = cInt;          // Bottom 32 bits are handled just like ints
+    }
+
+    unsigned Opcode = OpcodeTab[OperatorClass][Class];
+    assert(Opcode && "Floating point arguments to logical inst?");
+    unsigned Op0r = getReg(Op0, MBB, IP);
+    unsigned Op1r = getReg(Op1, MBB, IP);
+    BMI(MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
+
+    if (isLong) {        // Handle the upper 32 bits of long values...
+      static const unsigned TopTab[] = {
+        X86::ADCrr32, X86::SBBrr32, X86::ANDrr32, X86::ORrr32, X86::XORrr32
+      };
+      BMI(MBB, IP, TopTab[OperatorClass], 2,
+          DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
+    }
+    return;
+  }
+
+  // Special case: op Reg, <const>
+  ConstantInt *Op1C = cast<ConstantInt>(Op1);
+  unsigned Op0r = getReg(Op0, MBB, IP);
+
+  // xor X, -1 -> not X
+  if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
+    static unsigned const NOTTab[] = { X86::NOTr8, X86::NOTr16, X86::NOTr32 };
+    BMI(MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
+    return;
+  }
+
+  // add X, -1 -> dec X
+  if (OperatorClass == 0 && Op1C->isAllOnesValue()) {
+    static unsigned const DECTab[] = { X86::DECr8, X86::DECr16, X86::DECr32 };
+    BMI(MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
+    return;
+  }
+
+  // add X, 1 -> inc X
+  if (OperatorClass == 0 && Op1C->equalsInt(1)) {
+    static unsigned const DECTab[] = { X86::INCr8, X86::INCr16, X86::INCr32 };
+    BMI(MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
+    return;
+  }
+
+  static const unsigned OpcodeTab[][3] = {
+    // Arithmetic operators
+    { X86::ADDri8, X86::ADDri16, X86::ADDri32 },  // ADD
+    { X86::SUBri8, X86::SUBri16, X86::SUBri32 },  // SUB
+
+    // Bitwise operators
+    { X86::ANDri8, X86::ANDri16, X86::ANDri32 },  // AND
+    { X86:: ORri8, X86:: ORri16, X86:: ORri32 },  // OR
+    { X86::XORri8, X86::XORri16, X86::XORri32 },  // XOR
+  };
+
+  assert(Class < 3 && "General code handles 64-bit integer types!");
+  unsigned Opcode = OpcodeTab[OperatorClass][Class];
+  uint64_t Op1v = cast<ConstantInt>(Op1C)->getRawValue();
+
+  // Mask off any upper bits of the constant, if there are any...
+  Op1v &= (1ULL << (8 << Class)) - 1;
+  BMI(MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addZImm(Op1v);
 }
 
 /// doMultiply - Emit appropriate instructions to multiply together the
 /// registers op0Reg and op1Reg, and put the result in DestReg.  The type of the
 /// result should be given as DestTy.
 ///
-/// FIXME: doMultiply should use one of the two address IMUL instructions!
-///
 void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator &MBBI,
                       unsigned DestReg, const Type *DestTy,
                       unsigned op0Reg, unsigned op1Reg) {
@@ -830,28 +1205,76 @@ void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator &MBBI,
   case cFP:              // Floating point multiply
     BMI(BB, MBBI, X86::FpMUL, 2, DestReg).addReg(op0Reg).addReg(op1Reg);
     return;
+  case cInt:
+  case cShort:
+    BMI(BB, MBBI, Class == cInt ? X86::IMULrr32 : X86::IMULrr16, 2, DestReg)
+      .addReg(op0Reg).addReg(op1Reg);
+    return;
+  case cByte:
+    // Must use the MUL instruction, which forces use of AL...
+    BMI(MBB, MBBI, X86::MOVrr8, 1, X86::AL).addReg(op0Reg);
+    BMI(MBB, MBBI, X86::MULr8, 1).addReg(op1Reg);
+    BMI(MBB, MBBI, X86::MOVrr8, 1, DestReg).addReg(X86::AL);
+    return;
   default:
   case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
-  case cByte:
-  case cShort:
-  case cInt:          // Small integerals, handled below...
-    break;
   }
-
-  static const unsigned Regs[]     ={ X86::AL    , X86::AX     , X86::EAX     };
-  static const unsigned MulOpcode[]={ X86::MULr8 , X86::MULr16 , X86::MULr32  };
-  static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
-  unsigned Reg     = Regs[Class];
+}
 
-  // Emit a MOV to put the first operand into the appropriately-sized
-  // subreg of EAX.
-  BMI(MBB, MBBI, MovOpcode[Class], 1, Reg).addReg(op0Reg);
+// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N.  It
+// returns zero when the input is not exactly a power of two.
+static unsigned ExactLog2(unsigned Val) {
+  if (Val == 0) return 0;
+  unsigned Count = 0;
+  while (Val != 1) {
+    if (Val & 1) return 0;
+    Val >>= 1;
+    ++Count;
+  }
+  return Count+1;
+}
+
+void ISel::doMultiplyConst(MachineBasicBlock *MBB,
+                           MachineBasicBlock::iterator &IP,
+                           unsigned DestReg, const Type *DestTy,
+                           unsigned op0Reg, unsigned ConstRHS) {
+  unsigned Class = getClass(DestTy);
+
+  // If the element size is exactly a power of 2, use a shift to get it.
+  if (unsigned Shift = ExactLog2(ConstRHS)) {
+    switch (Class) {
+    default: assert(0 && "Unknown class for this function!");
+    case cByte:
+      BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
+      return;
+    case cShort:
+      BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
+      return;
+    case cInt:
+      BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
+      return;
+    }
+  }
 
-  // Emit the appropriate multiply instruction.
-  BMI(MBB, MBBI, MulOpcode[Class], 1).addReg(op1Reg);
+  if (Class == cShort) {
+    BMI(MBB, IP, X86::IMULri16, 2, DestReg).addReg(op0Reg).addZImm(ConstRHS);
+    return;
+  } else if (Class == cInt) {
+    BMI(MBB, IP, X86::IMULri32, 2, DestReg).addReg(op0Reg).addZImm(ConstRHS);
+    return;
+  }
 
-  // Emit another MOV to put the result into the destination register.
-  BMI(MBB, MBBI, MovOpcode[Class], 1, DestReg).addReg(Reg);
+  // Most general case, emit a normal multiply...
+  static const unsigned MOVirTab[] = {
+    X86::MOVir8, X86::MOVir16, X86::MOVir32
+  };
+
+  unsigned TmpReg = makeAnotherReg(DestTy);
+  BMI(MBB, IP, MOVirTab[Class], 1, TmpReg).addZImm(ConstRHS);
+
+  // Emit a MUL to multiply op0Reg by the constant now held in TmpReg,
+  // putting the result in DestReg.
+  doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg);
 }
 
 /// visitMul - Multiplies are not simple binary operators because they must deal
@@ -859,14 +1282,22 @@ void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator &MBBI,
 ///
 void ISel::visitMul(BinaryOperator &I) {
   unsigned Op0Reg  = getReg(I.getOperand(0));
-  unsigned Op1Reg  = getReg(I.getOperand(1));
   unsigned DestReg = getReg(I);
 
   // Simple scalar multiply?
   if (I.getType() != Type::LongTy && I.getType() != Type::ULongTy) {
-    MachineBasicBlock::iterator MBBI = BB->end();
-    doMultiply(BB, MBBI, DestReg, I.getType(), Op0Reg, Op1Reg);
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1))) {
+      unsigned Val = (unsigned)CI->getRawValue(); // Cannot be 64-bit constant
+      MachineBasicBlock::iterator MBBI = BB->end();
+      doMultiplyConst(BB, MBBI, DestReg, I.getType(), Op0Reg, Val);
+    } else {
+      unsigned Op1Reg  = getReg(I.getOperand(1));
+      MachineBasicBlock::iterator MBBI = BB->end();
+      doMultiply(BB, MBBI, DestReg, I.getType(), Op0Reg, Op1Reg);
+    }
   } else {
+    unsigned Op1Reg  = getReg(I.getOperand(1));
+
     // Long value.  We have to do things the hard way...
     // Multiply the two low parts... capturing carry into EDX
     BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(Op0Reg);
@@ -877,16 +1308,16 @@ void ISel::visitMul(BinaryOperator &I) {
     BuildMI(BB, X86::MOVrr32, 1, OverflowReg).addReg(X86::EDX); // AL*BL >> 32
 
     MachineBasicBlock::iterator MBBI = BB->end();
-    unsigned AHBLReg = makeAnotherReg(Type::UIntTy);
-    doMultiply(BB, MBBI, AHBLReg, Type::UIntTy, Op0Reg+1, Op1Reg); // AH*BL
+    unsigned AHBLReg = makeAnotherReg(Type::UIntTy);   // AH*BL
+    BMI(BB, MBBI, X86::IMULrr32, 2, AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);
 
     unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
     BuildMI(BB, X86::ADDrr32, 2,                         // AH*BL+(AL*BL >> 32)
             AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
 
     MBBI = BB->end();
-    unsigned ALBHReg = makeAnotherReg(Type::UIntTy);
-    doMultiply(BB, MBBI, ALBHReg, Type::UIntTy, Op0Reg, Op1Reg+1); // AL*BH
+    unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
+    BMI(BB, MBBI, X86::IMULrr32, 2, ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);
 
     BuildMI(BB, X86::ADDrr32, 2,               // AL*BH + AH*BL + (AL*BL >> 32)
             DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
@@ -900,21 +1331,21 @@ void ISel::visitMul(BinaryOperator &I) {
 /// instructions work differently for signed and unsigned operands.
 ///
 void ISel::visitDivRem(BinaryOperator &I) {
-  unsigned Class     = getClass(I.getType());
-  unsigned Op0Reg    = getReg(I.getOperand(0));
-  unsigned Op1Reg    = getReg(I.getOperand(1));
-  unsigned ResultReg = getReg(I);
+  unsigned Class = getClass(I.getType());
+  unsigned Op0Reg, Op1Reg, ResultReg = getReg(I);
 
   switch (Class) {
   case cFP:              // Floating point divide
-    if (I.getOpcode() == Instruction::Div)
+    if (I.getOpcode() == Instruction::Div) {
+      Op0Reg = getReg(I.getOperand(0));
+      Op1Reg = getReg(I.getOperand(1));
       BuildMI(BB, X86::FpDIV, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
-    else {               // Floating point remainder...
+    } else {               // Floating point remainder...
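+      // The x87 FPREM instruction is not used here; instead the remainder is
+      // computed by calling the C library's fmod() function.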
       MachineInstr *TheCall =
         BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("fmod", true);
       std::vector<ValueRecord> Args;
-      Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy));
-      Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy));
+      Args.push_back(ValueRecord(I.getOperand(0)));
+      Args.push_back(ValueRecord(I.getOperand(1)));
       doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args);
     }
     return;
@@ -928,19 +1359,19 @@ void ISel::visitDivRem(BinaryOperator &I) {
       BuildMI(X86::CALLpcrel32, 1).addExternalSymbol(FnName[NameIdx], true);
 
     std::vector<ValueRecord> Args;
-    Args.push_back(ValueRecord(Op0Reg, Type::LongTy));
-    Args.push_back(ValueRecord(Op1Reg, Type::LongTy));
+    Args.push_back(ValueRecord(I.getOperand(0)));
+    Args.push_back(ValueRecord(I.getOperand(1)));
    doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args);
     return;
   }
   case cByte: case cShort: case cInt:
-    break;          // Small integerals, handled below...
+    break;          // Small integrals, handled below...
   default: assert(0 && "Unknown class!");
   }
 
   static const unsigned Regs[]     ={ X86::AL    , X86::AX     , X86::EAX     };
   static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
-  static const unsigned ExtOpcode[]={ X86::CBW   , X86::CWD    , X86::CDQ     };
+  static const unsigned SarOpcode[]={ X86::SARir8, X86::SARir16, X86::SARir32 };
   static const unsigned ClrOpcode[]={ X86::XORrr8, X86::XORrr16, X86::XORrr32 };
   static const unsigned ExtRegs[]  ={ X86::AH    , X86::DX     , X86::EDX     };
 
@@ -954,17 +1385,21 @@ void ISel::visitDivRem(BinaryOperator &I) {
   unsigned ExtReg = ExtRegs[Class];
 
   // Put the first operand into one of the A registers...
+  Op0Reg = getReg(I.getOperand(0));
   BuildMI(BB, MovOpcode[Class], 1, Reg).addReg(Op0Reg);
 
   if (isSigned) {
     // Emit a sign extension instruction...
-    BuildMI(BB, ExtOpcode[Class], 0);
+    unsigned ShiftResult = makeAnotherReg(I.getType());
+    BuildMI(BB, SarOpcode[Class], 2, ShiftResult).addReg(Op0Reg).addZImm(31);
+    BuildMI(BB, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
   } else {
     // If unsigned, emit a zeroing instruction... (reg = xor reg, reg)
     BuildMI(BB, ClrOpcode[Class], 2, ExtReg).addReg(ExtReg).addReg(ExtReg);
   }
 
   // Emit the appropriate divide or remainder instruction...
+  Op1Reg = getReg(I.getOperand(1));
   BuildMI(BB, DivOpcode[isSigned][Class], 1).addReg(Op1Reg);
 
   // Figure out which register we want to pick the result out of...
@@ -1031,7 +1466,57 @@ void ISel::visitShiftInst(ShiftInst &I) {
       }
     }
   } else {
-    visitInstruction(I);  // FIXME: Implement long shift by non-constant
+    unsigned TmpReg = makeAnotherReg(Type::IntTy);
+
+    if (!isLeftShift && isSigned) {
+      // If this is a SHR of a Long, then we need to do funny sign extension
+      // stuff.  TmpReg gets the value to use as the high-part if we are
+      // shifting more than 32 bits.
+      BuildMI(BB, X86::SARir32, 2, TmpReg).addReg(SrcReg).addZImm(31);
+    } else {
+      // Other shifts use a fixed zero value if the shift is more than 32
+      // bits.
+      BuildMI(BB, X86::MOVir32, 1, TmpReg).addZImm(0);
+    }
+
+    // Initialize CL with the shift amount...
+    unsigned ShiftAmount = getReg(I.getOperand(1));
+    BuildMI(BB, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmount);
+
+    unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
+    unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
+    if (isLeftShift) {
+      // TmpReg2 = shld inHi, inLo
+      BuildMI(BB, X86::SHLDrr32, 2, TmpReg2).addReg(SrcReg+1).addReg(SrcReg);
+      // TmpReg3 = shl  inLo, CL
+      BuildMI(BB, X86::SHLrr32, 1, TmpReg3).addReg(SrcReg);
+
+      // Set the flags to indicate whether the shift was by more than 32 bits.
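+      // (TEST CL, 32 sets ZF when bit 5 of the count is clear, i.e. when the
+      // shift amount is less than 32; the CMOVNEs below pick the >=32 values.)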
+ BuildMI(BB, X86::TESTri8, 2).addReg(X86::CL).addZImm(32); + + // DestHi = (>32) ? TmpReg3 : TmpReg2; + BuildMI(BB, X86::CMOVNErr32, 2, + DestReg+1).addReg(TmpReg2).addReg(TmpReg3); + // DestLo = (>32) ? TmpReg : TmpReg3; + BuildMI(BB, X86::CMOVNErr32, 2, DestReg).addReg(TmpReg3).addReg(TmpReg); + } else { + // TmpReg2 = shrd inLo, inHi + BuildMI(BB, X86::SHRDrr32, 2, TmpReg2).addReg(SrcReg).addReg(SrcReg+1); + // TmpReg3 = s[ah]r inHi, CL + BuildMI(BB, isSigned ? X86::SARrr32 : X86::SHRrr32, 1, TmpReg3) + .addReg(SrcReg+1); + + // Set the flags to indicate whether the shift was by more than 32 bits. + BuildMI(BB, X86::TESTri8, 2).addReg(X86::CL).addZImm(32); + + // DestLo = (>32) ? TmpReg3 : TmpReg2; + BuildMI(BB, X86::CMOVNErr32, 2, + DestReg).addReg(TmpReg2).addReg(TmpReg3); + + // DestHi = (>32) ? TmpReg : TmpReg3; + BuildMI(BB, X86::CMOVNErr32, 2, + DestReg+1).addReg(TmpReg3).addReg(TmpReg); + } } return; } @@ -1051,59 +1536,13 @@ void ISel::visitShiftInst(ShiftInst &I) { } -/// doFPLoad - This method is used to load an FP value from memory using the -/// current endianness. NOTE: This method returns a partially constructed load -/// instruction which needs to have the memory source filled in still. -/// -MachineInstr *ISel::doFPLoad(MachineBasicBlock *MBB, - MachineBasicBlock::iterator &MBBI, - const Type *Ty, unsigned DestReg) { - assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!"); - unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLDr32 : X86::FLDr64; - - if (TM.getTargetData().isLittleEndian()) // fast path... - return BMI(MBB, MBBI, LoadOpcode, 4, DestReg); - - // If we are big-endian, start by creating an LEA instruction to represent the - // address of the memory location to load from... - // - unsigned SrcAddrReg = makeAnotherReg(Type::UIntTy); - MachineInstr *Result = BMI(MBB, MBBI, X86::LEAr32, 5, SrcAddrReg); - - // Allocate a temporary stack slot to transform the value into... - int FrameIdx = F->getFrameInfo()->CreateStackObject(Ty, TM.getTargetData()); - - // Perform the bswaps 32 bits at a time... - unsigned TmpReg1 = makeAnotherReg(Type::UIntTy); - unsigned TmpReg2 = makeAnotherReg(Type::UIntTy); - addDirectMem(BMI(MBB, MBBI, X86::MOVmr32, 4, TmpReg1), SrcAddrReg); - BMI(MBB, MBBI, X86::BSWAPr32, 1, TmpReg2).addReg(TmpReg1); - unsigned Offset = (Ty == Type::DoubleTy) << 2; - addFrameReference(BMI(MBB, MBBI, X86::MOVrm32, 5), - FrameIdx, Offset).addReg(TmpReg2); - - if (Ty == Type::DoubleTy) { // Swap the other 32 bits of a double value... - TmpReg1 = makeAnotherReg(Type::UIntTy); - TmpReg2 = makeAnotherReg(Type::UIntTy); - - addRegOffset(BMI(MBB, MBBI, X86::MOVmr32, 4, TmpReg1), SrcAddrReg, 4); - BMI(MBB, MBBI, X86::BSWAPr32, 1, TmpReg2).addReg(TmpReg1); - unsigned Offset = (Ty == Type::DoubleTy) << 2; - addFrameReference(BMI(MBB, MBBI, X86::MOVrm32,5), FrameIdx).addReg(TmpReg2); - } - - // Now we can reload the final byteswapped result into the final destination. - addFrameReference(BMI(MBB, MBBI, LoadOpcode, 4, DestReg), FrameIdx); - return Result; -} - /// EmitByteSwap - Byteswap SrcReg into DestReg. /// void ISel::EmitByteSwap(unsigned DestReg, unsigned SrcReg, unsigned Class) { // Emit the byte swap instruction... switch (Class) { case cByte: - // No byteswap neccesary for 8 bit value... + // No byteswap necessary for 8 bit value... 
     BuildMI(BB, X86::MOVrr8, 1, DestReg).addReg(SrcReg);
     break;
   case cInt:
@@ -1113,7 +1552,7 @@ void ISel::EmitByteSwap(unsigned DestReg, unsigned SrcReg, unsigned Class) {
 
   case cShort:
     // For 16 bit we have to use an xchg instruction, because there is no
-    // 16-bit bswap.  XCHG is neccesarily not in SSA form, so we force things
+    // 16-bit bswap.  XCHG is necessarily not in SSA form, so we force things
     // into AX to do the xchg.
     //
     BuildMI(BB, X86::MOVrr16, 1, X86::AX).addReg(SrcReg);
@@ -1131,16 +1570,17 @@ void ISel::EmitByteSwap(unsigned DestReg, unsigned SrcReg, unsigned Class) {
 /// need to worry about the memory layout of the target machine.
 ///
 void ISel::visitLoadInst(LoadInst &I) {
-  bool isLittleEndian  = TM.getTargetData().isLittleEndian();
-  bool hasLongPointers = TM.getTargetData().getPointerSize() == 8;
   unsigned SrcAddrReg = getReg(I.getOperand(0));
   unsigned DestReg = getReg(I);
-  unsigned Class = getClass(I.getType());
+  unsigned Class = getClassB(I.getType());
 
   switch (Class) {
   case cFP: {
     MachineBasicBlock::iterator MBBI = BB->end();
-    addDirectMem(doFPLoad(BB, MBBI, I.getType(), DestReg), SrcAddrReg);
+    assert(I.getType() == Type::FloatTy || I.getType() == Type::DoubleTy &&
+           "Unknown FP type!");
+    unsigned Opc = I.getType() == Type::FloatTy ? X86::FLDr32 : X86::FLDr64;
+    addDirectMem(BMI(BB, MBBI, Opc, 4, DestReg), SrcAddrReg);
     return;
   }
   case cLong: case cInt: case cShort: case cByte:
@@ -1148,20 +1588,7 @@ void ISel::visitLoadInst(LoadInst &I) {
   default: assert(0 && "Unknown memory class!");
   }
 
-  // We need to adjust the input pointer if we are emulating a big-endian
-  // long-pointer target.  On these systems, the pointer that we are interested
-  // in is in the upper part of the eight byte memory image of the pointer.  It
-  // also happens to be byte-swapped, but this will be handled later.
-  //
-  if (!isLittleEndian && hasLongPointers && isa<PointerType>(I.getType())) {
-    unsigned R = makeAnotherReg(Type::UIntTy);
-    BuildMI(BB, X86::ADDri32, 2, R).addReg(SrcAddrReg).addZImm(4);
-    SrcAddrReg = R;
-  }
-
   unsigned IReg = DestReg;
-  if (!isLittleEndian)  // If big endian we need an intermediate stage
-    DestReg = makeAnotherReg(Class != cLong ? I.getType() : Type::UIntTy);
 
   static const unsigned Opcode[] = {
     X86::MOVmr8, X86::MOVmr16, X86::MOVmr32, 0, X86::MOVmr32
@@ -1169,110 +1596,34 @@ void ISel::visitLoadInst(LoadInst &I) {
   addDirectMem(BuildMI(BB, Opcode[Class], 4, DestReg), SrcAddrReg);
 
   // Handle long values now...
-  if (Class == cLong) {
-    if (isLittleEndian) {
-      addRegOffset(BuildMI(BB, X86::MOVmr32, 4, DestReg+1), SrcAddrReg, 4);
-    } else {
-      EmitByteSwap(IReg+1, DestReg, cInt);
-      unsigned TempReg = makeAnotherReg(Type::IntTy);
-      addRegOffset(BuildMI(BB, X86::MOVmr32, 4, TempReg), SrcAddrReg, 4);
-      EmitByteSwap(IReg, TempReg, cInt);
-    }
-    return;
-  }
-
-  if (!isLittleEndian)
-    EmitByteSwap(IReg, DestReg, Class);
+  if (Class == cLong)
+    addRegOffset(BuildMI(BB, X86::MOVmr32, 4, DestReg+1), SrcAddrReg, 4);
 }
 
-
-/// doFPStore - This method is used to store an FP value to memory using the
-/// current endianness.
-///
-void ISel::doFPStore(const Type *Ty, unsigned DestAddrReg, unsigned SrcReg) {
-  assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!");
-  unsigned StoreOpcode = Ty == Type::FloatTy ? X86::FSTr32 : X86::FSTr64;
-
-  if (TM.getTargetData().isLittleEndian()) {  // fast path...
-    addDirectMem(BuildMI(BB, StoreOpcode,5), DestAddrReg).addReg(SrcReg);
-    return;
-  }
-
-  // Allocate a temporary stack slot to transform the value into...
-  int FrameIdx = F->getFrameInfo()->CreateStackObject(Ty, TM.getTargetData());
-  unsigned SrcAddrReg = makeAnotherReg(Type::UIntTy);
-  addFrameReference(BuildMI(BB, X86::LEAr32, 5, SrcAddrReg), FrameIdx);
-
-  // Store the value into a temporary stack slot...
-  addDirectMem(BuildMI(BB, StoreOpcode, 5), SrcAddrReg).addReg(SrcReg);
-
-  // Perform the bswaps 32 bits at a time...
-  unsigned TmpReg1 = makeAnotherReg(Type::UIntTy);
-  unsigned TmpReg2 = makeAnotherReg(Type::UIntTy);
-  addDirectMem(BuildMI(BB, X86::MOVmr32, 4, TmpReg1), SrcAddrReg);
-  BuildMI(BB, X86::BSWAPr32, 1, TmpReg2).addReg(TmpReg1);
-  unsigned Offset = (Ty == Type::DoubleTy) << 2;
-  addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
-               DestAddrReg, Offset).addReg(TmpReg2);
-
-  if (Ty == Type::DoubleTy) {   // Swap the other 32 bits of a double value...
-    TmpReg1 = makeAnotherReg(Type::UIntTy);
-    TmpReg2 = makeAnotherReg(Type::UIntTy);
-
-    addRegOffset(BuildMI(BB, X86::MOVmr32, 4, TmpReg1), SrcAddrReg, 4);
-    BuildMI(BB, X86::BSWAPr32, 1, TmpReg2).addReg(TmpReg1);
-    unsigned Offset = (Ty == Type::DoubleTy) << 2;
-    addDirectMem(BuildMI(BB, X86::MOVrm32, 5), DestAddrReg).addReg(TmpReg2);
-  }
-}
-
-
 /// visitStoreInst - Implement LLVM store instructions in terms of the x86 'mov'
 /// instruction.
 ///
 void ISel::visitStoreInst(StoreInst &I) {
-  bool isLittleEndian  = TM.getTargetData().isLittleEndian();
-  bool hasLongPointers = TM.getTargetData().getPointerSize() == 8;
   unsigned ValReg      = getReg(I.getOperand(0));
   unsigned AddressReg  = getReg(I.getOperand(1));
-
-  unsigned Class = getClass(I.getOperand(0)->getType());
+
+  const Type *ValTy = I.getOperand(0)->getType();
+  unsigned Class = getClassB(ValTy);
+
   switch (Class) {
   case cLong:
-    if (isLittleEndian) {
-      addDirectMem(BuildMI(BB, X86::MOVrm32, 1+4), AddressReg).addReg(ValReg);
-      addRegOffset(BuildMI(BB, X86::MOVrm32, 1+4),
-                   AddressReg, 4).addReg(ValReg+1);
-    } else {
-      unsigned T1 = makeAnotherReg(Type::IntTy);
-      unsigned T2 = makeAnotherReg(Type::IntTy);
-      EmitByteSwap(T1, ValReg  , cInt);
-      EmitByteSwap(T2, ValReg+1, cInt);
-      addDirectMem(BuildMI(BB, X86::MOVrm32, 1+4), AddressReg).addReg(T2);
-      addRegOffset(BuildMI(BB, X86::MOVrm32, 1+4), AddressReg, 4).addReg(T1);
-    }
+    addDirectMem(BuildMI(BB, X86::MOVrm32, 1+4), AddressReg).addReg(ValReg);
+    addRegOffset(BuildMI(BB, X86::MOVrm32, 1+4), AddressReg,4).addReg(ValReg+1);
     return;
-  case cFP:
-    doFPStore(I.getOperand(0)->getType(), AddressReg, ValReg);
+  case cFP: {
+    unsigned StoreOpcode = ValTy == Type::FloatTy ? X86::FSTr32 : X86::FSTr64;
+    addDirectMem(BuildMI(BB, StoreOpcode, 5), AddressReg).addReg(ValReg);
     return;
+  }
   case cInt: case cShort: case cByte:
     break;      // Integers of various sizes handled below
   default: assert(0 && "Unknown memory class!");
   }
 
-  if (!isLittleEndian && hasLongPointers &&
-      isa<PointerType>(I.getOperand(0)->getType())) {
-    unsigned R = makeAnotherReg(Type::UIntTy);
-    BuildMI(BB, X86::ADDri32, 2, R).addReg(AddressReg).addZImm(4);
-    AddressReg = R;
-  }
-
-  if (!isLittleEndian && Class != cByte) {
-    unsigned R = makeAnotherReg(I.getOperand(0)->getType());
-    EmitByteSwap(R, ValReg, Class);
-    ValReg = R;
-  }
-
   static const unsigned Opcode[] = { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32 };
   addDirectMem(BuildMI(BB, Opcode[Class], 1+4), AddressReg).addReg(ValReg);
 }
@@ -1281,22 +1632,65 @@ void ISel::visitStoreInst(StoreInst &I) {
 /// visitCastInst - Here we have various kinds of copying with or without
 /// sign extension going on.
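+/// Casts to bool are lowered to a TEST (or OR for longs) followed by SETNE,
+/// and casts from integers to FP go through a stack temporary so that FILD
+/// can be used.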
 void ISel::visitCastInst(CastInst &CI) {
-  const Type *DestTy = CI.getType();
-  Value *Src = CI.getOperand(0);
-  unsigned SrcReg = getReg(Src);
+  Value *Op = CI.getOperand(0);
+  // If this is a cast from a 32-bit integer to a Long type, and the only uses
+  // of the cast are GEP instructions, then the cast does not need to be
+  // generated explicitly, it will be folded into the GEP.
+  if (CI.getType() == Type::LongTy &&
+      (Op->getType() == Type::IntTy || Op->getType() == Type::UIntTy)) {
+    bool AllUsesAreGEPs = true;
+    for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I)
+      if (!isa<GetElementPtrInst>(*I)) {
+        AllUsesAreGEPs = false;
+        break;
+      }
+
+    // No need to codegen this cast if all users are getelementptr instrs...
+    if (AllUsesAreGEPs) return;
+  }
+
+  unsigned DestReg = getReg(CI);
+  MachineBasicBlock::iterator MI = BB->end();
+  emitCastOperation(BB, MI, Op, CI.getType(), DestReg);
+}
+
+/// emitCastOperation - Common code shared between visitCastInst and
+/// constant expression cast support.
+void ISel::emitCastOperation(MachineBasicBlock *BB,
+                             MachineBasicBlock::iterator &IP,
+                             Value *Src, const Type *DestTy,
+                             unsigned DestReg) {
+  unsigned SrcReg = getReg(Src, BB, IP);
   const Type *SrcTy = Src->getType();
   unsigned SrcClass = getClassB(SrcTy);
-  unsigned DestReg = getReg(CI);
   unsigned DestClass = getClassB(DestTy);
 
   // Implement casts to bool by using compare on the operand followed by set if
   // not zero on the result.
   if (DestTy == Type::BoolTy) {
-    if (SrcClass == cFP || SrcClass == cLong)
-      visitInstruction(CI);
-
-    BuildMI(BB, X86::CMPri8, 2).addReg(SrcReg).addZImm(0);
-    BuildMI(BB, X86::SETNEr, 1, DestReg);
+    switch (SrcClass) {
+    case cByte:
+      BMI(BB, IP, X86::TESTrr8, 2).addReg(SrcReg).addReg(SrcReg);
+      break;
+    case cShort:
+      BMI(BB, IP, X86::TESTrr16, 2).addReg(SrcReg).addReg(SrcReg);
+      break;
+    case cInt:
+      BMI(BB, IP, X86::TESTrr32, 2).addReg(SrcReg).addReg(SrcReg);
+      break;
+    case cLong: {
+      unsigned TmpReg = makeAnotherReg(Type::IntTy);
+      BMI(BB, IP, X86::ORrr32, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
      break;
    }
+    case cFP:
+      assert(0 && "FIXME: implement cast FP to bool");
+      abort();
+    }
+
+    // If the zero flag is not set, then the value is true, set the byte to
+    // true.
+    BMI(BB, IP, X86::SETNEr, 1, DestReg);
     return;
   }
 
@@ -1308,11 +1702,11 @@ void ISel::visitCastInst(CastInst &CI) {
   // getClass) by using a register-to-register move.
   if (SrcClass == DestClass) {
    if (SrcClass <= cInt || (SrcClass == cFP && SrcTy == DestTy)) {
-      BuildMI(BB, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
+      BMI(BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
     } else if (SrcClass == cFP) {
       if (SrcTy == Type::FloatTy) {  // double -> float
         assert(DestTy == Type::DoubleTy && "Unknown cFP member!");
-        BuildMI(BB, X86::FpMOV, 1, DestReg).addReg(SrcReg);
+        BMI(BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
       } else {                       // float -> double
         assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy &&
                "Unknown cFP member!");
         // Truncate from double to float by storing to memory as a float,
         // reading it back.
unsigned FltAlign = TM.getTargetData().getFloatAlignment();
int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
- addFrameReference(BuildMI(BB, X86::FSTr32, 5), FrameIdx).addReg(SrcReg);
- addFrameReference(BuildMI(BB, X86::FLDr32, 5, DestReg), FrameIdx);
+ addFrameReference(BMI(BB, IP, X86::FSTr32, 5), FrameIdx).addReg(SrcReg);
+ addFrameReference(BMI(BB, IP, X86::FLDr32, 5, DestReg), FrameIdx);
}
} else if (SrcClass == cLong) {
- BuildMI(BB, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
- BuildMI(BB, X86::MOVrr32, 1, DestReg+1).addReg(SrcReg+1);
+ BMI(BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
+ BMI(BB, IP, X86::MOVrr32, 1, DestReg+1).addReg(SrcReg+1);
} else {
- visitInstruction(CI);
+ assert(0 && "Cannot handle this type of cast instruction!");
+ abort();
}
return;
}
@@ -1345,21 +1740,21 @@ void ISel::visitCastInst(CastInst &CI) {
};
bool isUnsigned = SrcTy->isUnsigned();
- BuildMI(BB, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
- DestReg).addReg(SrcReg);
+ BMI(BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
+ DestReg).addReg(SrcReg);
if (isLong) { // Handle upper 32 bits as appropriate...
if (isUnsigned) // Zero out top bits...
- BuildMI(BB, X86::MOVir32, 1, DestReg+1).addZImm(0);
+ BMI(BB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
else // Sign extend bottom half...
- BuildMI(BB, X86::SARir32, 2, DestReg+1).addReg(DestReg).addZImm(31);
+ BMI(BB, IP, X86::SARir32, 2, DestReg+1).addReg(DestReg).addZImm(31);
}
return;
}
// Special case long -> int ...
if (SrcClass == cLong && DestClass == cInt) {
- BuildMI(BB, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
+ BMI(BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
return;
}
@@ -1368,26 +1763,57 @@ void ISel::visitCastInst(CastInst &CI) {
if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt
&& SrcClass > DestClass) {
static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX, 0, X86::EAX };
- BuildMI(BB, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
- BuildMI(BB, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
+ BMI(BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
+ BMI(BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
return;
}
// Handle casts from integer to floating point now...
if (DestClass == cFP) {
- // unsigned int -> load as 64 bit int.
- // unsigned long long -> more complex
- if (SrcTy->isUnsigned() && SrcTy != Type::UByteTy)
- visitInstruction(CI); // don't handle unsigned src yet!
-
- // We don't have the facilities for directly loading byte sized data from
- // memory. Promote it to 16 bits.
- if (SrcClass == cByte) {
- unsigned TmpReg = makeAnotherReg(Type::ShortTy);
- BuildMI(BB, SrcTy->isSigned() ? X86::MOVSXr16r8 : X86::MOVZXr16r8,
- 1, TmpReg).addReg(SrcReg);
- SrcTy = Type::ShortTy; // Pretend the short is our input now!
- SrcClass = cShort;
+ // Promote the integer to a type supported by FILD. We do this because
+ // there are no unsigned FILD instructions, so we must promote an unsigned
+ // value to a larger signed value, then use FILD on the larger value.
+ //
+ const Type *PromoteType = 0;
+ unsigned PromoteOpcode = 0;
+ switch (SrcTy->getPrimitiveID()) {
+ case Type::BoolTyID:
+ case Type::SByteTyID:
+ // We don't have the facilities for directly loading byte sized data from
+ // memory (even signed). Promote it to 16 bits.
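+ // [Editor's illustrative note, not part of the original patch: FILD only
+ // exists for 16-, 32-, and 64-bit memory operands, so e.g. "cast sbyte %X
+ // to double" first widens the byte with MOVSXr16r8 and then runs FILDr16
+ // on the promoted value.]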
+ PromoteType = Type::ShortTy;
+ PromoteOpcode = X86::MOVSXr16r8;
+ break;
+ case Type::UByteTyID:
+ PromoteType = Type::ShortTy;
+ PromoteOpcode = X86::MOVZXr16r8;
+ break;
+ case Type::UShortTyID:
+ PromoteType = Type::IntTy;
+ PromoteOpcode = X86::MOVZXr32r16;
+ break;
+ case Type::UIntTyID: {
+ // Make a 64 bit temporary... and zero out the top of it...
+ unsigned TmpReg = makeAnotherReg(Type::LongTy);
+ BMI(BB, IP, X86::MOVrr32, 1, TmpReg).addReg(SrcReg);
+ BMI(BB, IP, X86::MOVir32, 1, TmpReg+1).addZImm(0);
+ SrcTy = Type::LongTy;
+ SrcClass = cLong;
+ SrcReg = TmpReg;
+ break;
+ }
+ case Type::ULongTyID:
+ assert(0 && "FIXME: not implemented: cast ulong X to fp type!");
+ default: // No promotion needed...
+ break;
+ }
+
+ if (PromoteType) {
+ unsigned TmpReg = makeAnotherReg(PromoteType);
+ BMI(BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
+ SrcTy = PromoteType;
+ SrcClass = getClass(PromoteType);
SrcReg = TmpReg;
}
@@ -1396,18 +1822,17 @@ void ISel::visitCastInst(CastInst &CI) {
F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());
if (SrcClass == cLong) {
- if (SrcTy == Type::ULongTy) visitInstruction(CI);
- addFrameReference(BuildMI(BB, X86::MOVrm32, 5), FrameIdx).addReg(SrcReg);
- addFrameReference(BuildMI(BB, X86::MOVrm32, 5),
+ addFrameReference(BMI(BB, IP, X86::MOVrm32, 5), FrameIdx).addReg(SrcReg);
+ addFrameReference(BMI(BB, IP, X86::MOVrm32, 5),
FrameIdx, 4).addReg(SrcReg+1);
} else {
static const unsigned Op1[] = { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32 };
- addFrameReference(BuildMI(BB, Op1[SrcClass], 5), FrameIdx).addReg(SrcReg);
+ addFrameReference(BMI(BB, IP, Op1[SrcClass], 5), FrameIdx).addReg(SrcReg);
}
static const unsigned Op2[] =
- { 0, X86::FILDr16, X86::FILDr32, 0, X86::FILDr64 };
- addFrameReference(BuildMI(BB, Op2[SrcClass], 5, DestReg), FrameIdx);
+ { 0/*byte*/, X86::FILDr16, X86::FILDr32, 0/*FP*/, X86::FILDr64 };
+ addFrameReference(BMI(BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);
return;
}
@@ -1417,20 +1842,20 @@ void ISel::visitCastInst(CastInst &CI) {
// mode when truncating to an integer value.
//
int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
- addFrameReference(BuildMI(BB, X86::FNSTCWm16, 4), CWFrameIdx);
+ addFrameReference(BMI(BB, IP, X86::FNSTCWm16, 4), CWFrameIdx);
// Load the old value of the high byte of the control word...
unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
- addFrameReference(BuildMI(BB, X86::MOVmr8, 4, HighPartOfCW), CWFrameIdx, 1);
+ addFrameReference(BMI(BB, IP, X86::MOVmr8, 4, HighPartOfCW), CWFrameIdx, 1);
// Set the high part to round towards zero...
- addFrameReference(BuildMI(BB, X86::MOVim8, 5), CWFrameIdx, 1).addZImm(12);
+ addFrameReference(BMI(BB, IP, X86::MOVim8, 5), CWFrameIdx, 1).addZImm(12);
// Reload the modified control word now...
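// [Editor's illustrative note, not part of the original patch: C-style
// FP-to-int conversion truncates toward zero, e.g. (int)1.9 == 1 and
// (int)-1.9 == -1, while the x87 default is round-to-nearest; storing 12
// (0x0C) into the high byte of the control word sets rounding-control
// bits 10-11 to "round towards zero" for the FIST stores that follow.]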
- addFrameReference(BuildMI(BB, X86::FLDCWm16, 4), CWFrameIdx);
+ addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
// Restore the memory image of the control word to its original value
- addFrameReference(BuildMI(BB, X86::MOVrm8, 5),
+ addFrameReference(BMI(BB, IP, X86::MOVrm8, 5),
CWFrameIdx, 1).addReg(HighPartOfCW);
// We don't have the facilities for directly storing byte sized data to
@@ -1443,7 +1868,10 @@ void ISel::visitCastInst(CastInst &CI) {
case cByte: StoreTy = Type::ShortTy; StoreClass = cShort; break;
case cShort: StoreTy = Type::IntTy; StoreClass = cInt; break;
case cInt: StoreTy = Type::LongTy; StoreClass = cLong; break;
- case cLong: visitInstruction(CI); // unsigned long long -> more complex
+ // The following treatment of cLong may not be perfectly right,
+ // but it survives chains of casts of the form
+ // double->ulong->double.
+ case cLong: StoreTy = Type::LongTy; StoreClass = cLong; break;
default: assert(0 && "Unknown store class!");
}
@@ -1453,38 +1881,80 @@ void ISel::visitCastInst(CastInst &CI) {
static const unsigned Op1[] =
{ 0, X86::FISTr16, X86::FISTr32, 0, X86::FISTPr64 };
- addFrameReference(BuildMI(BB, Op1[StoreClass], 5), FrameIdx).addReg(SrcReg);
+ addFrameReference(BMI(BB, IP, Op1[StoreClass], 5), FrameIdx).addReg(SrcReg);
if (DestClass == cLong) {
- addFrameReference(BuildMI(BB, X86::MOVmr32, 4, DestReg), FrameIdx);
- addFrameReference(BuildMI(BB, X86::MOVmr32, 4, DestReg+1), FrameIdx, 4);
+ addFrameReference(BMI(BB, IP, X86::MOVmr32, 4, DestReg), FrameIdx);
+ addFrameReference(BMI(BB, IP, X86::MOVmr32, 4, DestReg+1), FrameIdx, 4);
} else {
static const unsigned Op2[] = { X86::MOVmr8, X86::MOVmr16, X86::MOVmr32 };
- addFrameReference(BuildMI(BB, Op2[DestClass], 4, DestReg), FrameIdx);
+ addFrameReference(BMI(BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
}
// Reload the original control word now...
- addFrameReference(BuildMI(BB, X86::FLDCWm16, 4), CWFrameIdx);
+ addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
return;
}
// Anything we haven't handled already, we can't (yet) handle at all.
- visitInstruction (CI);
+ assert(0 && "Unhandled cast instruction!");
+ abort();
}
-// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N. It
-// returns zero when the input is not exactly a power of two.
-static unsigned ExactLog2(unsigned Val) {
- if (Val == 0) return 0;
- unsigned Count = 0;
- while (Val != 1) {
- if (Val & 1) return 0;
- Val >>= 1;
- ++Count;
+/// visitVANextInst - Implement the va_next instruction...
+///
+void ISel::visitVANextInst(VANextInst &I) {
+ unsigned VAList = getReg(I.getOperand(0));
+ unsigned DestReg = getReg(I);
+
+ unsigned Size;
+ switch (I.getArgType()->getPrimitiveID()) {
+ default:
+ std::cerr << I;
+ assert(0 && "Error: bad type for va_next instruction!");
+ return;
+ case Type::PointerTyID:
+ case Type::UIntTyID:
+ case Type::IntTyID:
+ Size = 4;
+ break;
+ case Type::ULongTyID:
+ case Type::LongTyID:
+ case Type::DoubleTyID:
+ Size = 8;
+ break;
}
- return Count+1;
+
+ // Increment the VAList pointer...
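+ // [Editor's illustrative note, not part of the original patch: on x86 a
+ // va_list is simply a pointer into the caller's argument area, so va_next
+ // reduces to "list = list + sizeof(argtype)": 4 bytes past an int or
+ // pointer, 8 past a long or double, matching the Size computed above.]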
+ BuildMI(BB, X86::ADDri32, 2, DestReg).addReg(VAList).addZImm(Size);
}
+void ISel::visitVAArgInst(VAArgInst &I) {
+ unsigned VAList = getReg(I.getOperand(0));
+ unsigned DestReg = getReg(I);
+
+ switch (I.getType()->getPrimitiveID()) {
+ default:
+ std::cerr << I;
+ assert(0 && "Error: bad type for va_arg instruction!");
+ return;
+ case Type::PointerTyID:
+ case Type::UIntTyID:
+ case Type::IntTyID:
+ addDirectMem(BuildMI(BB, X86::MOVmr32, 4, DestReg), VAList);
+ break;
+ case Type::ULongTyID:
+ case Type::LongTyID:
+ addDirectMem(BuildMI(BB, X86::MOVmr32, 4, DestReg), VAList);
+ addRegOffset(BuildMI(BB, X86::MOVmr32, 4, DestReg+1), VAList, 4);
+ break;
+ case Type::DoubleTyID:
+ addDirectMem(BuildMI(BB, X86::FLDr64, 4, DestReg), VAList);
+ break;
+ }
+}
+
+
void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
unsigned outputReg = getReg(I);
MachineBasicBlock::iterator MI = BB->end();
@@ -1535,6 +2005,13 @@ void ISel::emitGEPOperation(MachineBasicBlock *MBB,
// time.
assert(idx->getType() == Type::LongTy && "Bad GEP array index!");
+ // Most GEP instructions use a [cast (int/uint) to LongTy] as their
+ // operand on X86. Handle this case directly now...
+ if (CastInst *CI = dyn_cast<CastInst>(idx))
+ if (CI->getOperand(0)->getType() == Type::IntTy ||
+ CI->getOperand(0)->getType() == Type::UIntTy)
+ idx = CI->getOperand(0);
+
// We want to add BaseReg to (idxReg * sizeof ElementType). First, we
// must find the size of the pointed-to type (Not coincidentally, the next
// type is the type of the elements in the array).
@@ -1556,19 +2033,9 @@ void ISel::emitGEPOperation(MachineBasicBlock *MBB,
} else {
unsigned idxReg = getReg(idx, MBB, IP);
unsigned OffsetReg = makeAnotherReg(Type::UIntTy);
- if (unsigned Shift = ExactLog2(elementSize)) {
- // If the element size is exactly a power of 2, use a shift to get it.
- BMI(MBB, IP, X86::SHLir32, 2,
- OffsetReg).addReg(idxReg).addZImm(Shift-1);
- } else {
- // Most general case, emit a multiply...
- unsigned elementSizeReg = makeAnotherReg(Type::LongTy);
- BMI(MBB, IP, X86::MOVir32, 1, elementSizeReg).addZImm(elementSize);
-
- // Emit a MUL to multiply the register holding the index by
- // elementSize, putting the result in OffsetReg.
- doMultiply(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSizeReg);
- }
+
+ doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize);
+
// Emit an ADD to add OffsetReg to the basePtr.
NextReg = makeAnotherReg(Type::UIntTy);
BMI(MBB, IP, X86::ADDrr32, 2,NextReg).addReg(BaseReg).addReg(OffsetReg);
@@ -1613,12 +2080,10 @@ void ISel::visitAllocaInst(AllocaInst &I) {
// constant by the variable amount.
unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
unsigned SrcReg1 = getReg(I.getArraySize());
- unsigned SizeReg = makeAnotherReg(Type::UIntTy);
- BuildMI(BB, X86::MOVir32, 1, SizeReg).addZImm(TySize);
// TotalSizeReg = mul <numelements>, <TypeSize>
MachineBasicBlock::iterator MBBI = BB->end();
- doMultiply(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, SizeReg);
+ doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);
// AddedSize = add <TotalSizeReg>, 15
unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
@@ -1635,7 +2100,7 @@ void ISel::visitAllocaInst(AllocaInst &I) {
// the stack pointer.
BuildMI(BB, X86::MOVrr32, 1, getReg(I)).addReg(X86::ESP);
- // Inform the Frame Information that we have just allocated a variable sized
+ // Inform the Frame Information that we have just allocated a variable-sized
// object.
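// [Editor's illustrative note, not part of the original patch: the add-15
// step above (presumably followed by a mask with ~15, elided from this
// hunk) rounds the byte count up to a 16-byte multiple, e.g. a 20-byte
// request reserves 32 bytes; recording the variable-sized object below
// tells later phases that fixed ESP offsets are no longer reliable and a
// frame pointer is needed.]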
F->getFrameInfo()->CreateVariableSizedObject();
}
@@ -1651,12 +2116,9 @@ void ISel::visitMallocInst(MallocInst &I) {
Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize));
} else {
Arg = makeAnotherReg(Type::UIntTy);
- unsigned Op0Reg = getReg(ConstantUInt::get(Type::UIntTy, AllocSize));
- unsigned Op1Reg = getReg(I.getOperand(0));
+ unsigned Op0Reg = getReg(I.getOperand(0));
MachineBasicBlock::iterator MBBI = BB->end();
- doMultiply(BB, MBBI, Arg, Type::UIntTy, Op0Reg, Op1Reg);
-
-
+ doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize);
}
std::vector<ValueRecord> Args;
@@ -1672,18 +2134,17 @@
///
void ISel::visitFreeInst(FreeInst &I) {
std::vector<ValueRecord> Args;
- Args.push_back(ValueRecord(getReg(I.getOperand(0)),
- I.getOperand(0)->getType()));
+ Args.push_back(ValueRecord(I.getOperand(0)));
MachineInstr *TheCall =
BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("free", true);
doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
}
-/// createSimpleX86InstructionSelector - This pass converts an LLVM function
+/// createX86SimpleInstructionSelector - This pass converts an LLVM function
/// into a machine code representation in a very simple peep-hole fashion. The
/// generated code sucks but the implementation is nice and simple.
///
-Pass *createSimpleX86InstructionSelector(TargetMachine &TM) {
+FunctionPass *createX86SimpleInstructionSelector(TargetMachine &TM) {
return new ISel(TM);
}
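// [Editor's usage sketch, not part of the original patch: the X86 target
// machine is assumed to add this selector while building its codegen
// pipeline, roughly
//   PM.add(createX86SimpleInstructionSelector(TM));  // hypothetical call site
// after which register allocation and prolog/epilog insertion run on the
// resulting MachineFunction.]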