//===-- InstSelectSimple.cpp - A simple instruction selector for x86 ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a simple peephole instruction selector for the x86 target
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicLowering.h"
#include "llvm/Pass.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/CFG.h"
#include "Support/Statistic.h"

static Statistic<>
NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");

/// BMI - A special BuildMI variant that takes an iterator to insert the
/// instruction at as well as a basic block.  This is the version for when you
/// have a destination register in mind.
inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator I,
                                      int Opcode, unsigned NumOperands,
                                      unsigned DestReg) {
  MachineInstr *MI = new MachineInstr(Opcode, NumOperands+1, true, true);
  MBB->insert(I, MI);
  return MachineInstrBuilder(MI).addReg(DestReg, MachineOperand::Def);
}

/// BMI - A special BuildMI variant that takes an iterator to insert the
/// instruction at as well as a basic block.
inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator I,
                                      int Opcode, unsigned NumOperands) {
  MachineInstr *MI = new MachineInstr(Opcode, NumOperands, true, true);
  MBB->insert(I, MI);
  return MachineInstrBuilder(MI);
}

struct ISel : public FunctionPass, InstVisitor<ISel> {
  TargetMachine &TM;
  MachineFunction *F;                 // The function we are compiling into
  MachineBasicBlock *BB;              // The current MBB we are compiling
  int VarArgsFrameIndex;              // FrameIndex for start of varargs area
  int ReturnAddressIndex;             // FrameIndex for the return address

  std::map<Value*, unsigned> RegMap;  // Mapping between Val's and SSA Regs

  // MBBMap - Mapping between LLVM BB -> Machine BB
  std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;

  ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}

  /// runOnFunction - Top level implementation of instruction selection for
  /// the entire function.
  ///
  bool runOnFunction(Function &Fn) {
    // First pass over the function, lower any unknown intrinsic functions
    // with the IntrinsicLowering class.
    LowerUnknownIntrinsicFunctionCalls(Fn);

    F = &MachineFunction::construct(&Fn, TM);

    // Create all of the machine basic blocks for the function...
    for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
      F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));

    // Set up a frame object for the return address.  This is used by the
    // llvm.returnaddress & llvm.frameaddress intrinsics.
    ReturnAddressIndex = F->getFrameInfo()->CreateFixedObject(4, -4);

    // Copy incoming arguments off of the stack...
    LoadArgumentsToVirtualRegs(Fn);

    // Instruction select everything except PHI nodes
    visit(Fn);

    // Select the PHI nodes
    SelectPHINodes();

    // Insert the FP_REG_KILL instructions into blocks that need them.
    InsertFPRegKills();

    // We always build a machine code representation for the function
    return true;
  }

  virtual const char *getPassName() const {
    return "X86 Simple Instruction Selection";
  }

  /// visitBasicBlock - This method is called when we are visiting a new basic
  /// block.  It simply switches the current MachineBasicBlock to the one that
  /// was created for this block in runOnFunction.  Subsequent visit* calls
  /// will be invoked for all instructions in the basic block.
  ///
  void visitBasicBlock(BasicBlock &LLVM_BB) {
    BB = MBBMap[&LLVM_BB];
  }

  /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
  /// function, lowering any calls to unknown intrinsic functions into the
  /// equivalent LLVM code.
  void LowerUnknownIntrinsicFunctionCalls(Function &F);

  /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
  /// from the stack into virtual registers.
  ///
  void LoadArgumentsToVirtualRegs(Function &F);

  /// SelectPHINodes - Insert machine code to generate phis.  This is tricky
  /// because we have to generate our sources into the source basic blocks,
  /// not the current one.
  ///
  void SelectPHINodes();

  /// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks
  /// that need them.  This only occurs due to the floating point stackifier
  /// not being aggressive enough to handle arbitrary global stackification.
  ///
  void InsertFPRegKills();

  // Visitation methods for various instructions.  These methods simply emit
  // fixed X86 code for each instruction.
  //

  // Control flow operators
  void visitReturnInst(ReturnInst &RI);
  void visitBranchInst(BranchInst &BI);

  struct ValueRecord {
    Value *Val;
    unsigned Reg;
    const Type *Ty;
    ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {}
    ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {}
  };

  void doCall(const ValueRecord &Ret, MachineInstr *CallMI,
              const std::vector<ValueRecord> &Args);
  void visitCallInst(CallInst &I);
  void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I);

  // Arithmetic operators
  void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass);
  void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); }
  void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); }
  void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                  unsigned DestReg, const Type *DestTy,
                  unsigned Op0Reg, unsigned Op1Reg);
  void doMultiplyConst(MachineBasicBlock *MBB,
                       MachineBasicBlock::iterator MBBI,
                       unsigned DestReg, const Type *DestTy,
                       unsigned Op0Reg, unsigned Op1Val);
  void visitMul(BinaryOperator &B);

  void visitDiv(BinaryOperator &B) { visitDivRem(B); }
  void visitRem(BinaryOperator &B) { visitDivRem(B); }
  void visitDivRem(BinaryOperator &B);

  // Bitwise operators
  void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); }
  void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); }
  void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); }

  // Comparison operators...
  void visitSetCondInst(SetCondInst &I);
  unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                          MachineBasicBlock *MBB,
                          MachineBasicBlock::iterator MBBI);

  // Memory Instructions
  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitGetElementPtrInst(GetElementPtrInst &I);
  void visitAllocaInst(AllocaInst &I);
  void visitMallocInst(MallocInst &I);
  void visitFreeInst(FreeInst &I);

  // Other operators
  void visitShiftInst(ShiftInst &I);
  void visitPHINode(PHINode &I) {}      // PHI nodes handled by second pass
  void visitCastInst(CastInst &I);
  void visitVANextInst(VANextInst &I);
  void visitVAArgInst(VAArgInst &I);

  void visitInstruction(Instruction &I) {
    std::cerr << "Cannot instruction select: " << I;
    abort();
  }

  /// promote32 - Make a value 32-bits wide, and put it somewhere.
  ///
  void promote32(unsigned targetReg, const ValueRecord &VR);

  /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
  /// constant expression GEP support.
  ///
  void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                        Value *Src, User::op_iterator IdxBegin,
                        User::op_iterator IdxEnd, unsigned TargetReg);

  /// emitCastOperation - Common code shared between visitCastInst and
  /// constant expression cast support.
  ///
  void emitCastOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                         Value *Src, const Type *DestTy, unsigned TargetReg);

  /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
  /// and constant expression support.
  ///
  void emitSimpleBinaryOperation(MachineBasicBlock *BB,
                                 MachineBasicBlock::iterator IP,
                                 Value *Op0, Value *Op1,
                                 unsigned OperatorClass, unsigned TargetReg);

  void emitDivRemOperation(MachineBasicBlock *BB,
                           MachineBasicBlock::iterator IP,
                           unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
                           const Type *Ty, unsigned TargetReg);

  /// emitSetCCOperation - Common code shared between visitSetCondInst and
  /// constant expression support.
  ///
  void emitSetCCOperation(MachineBasicBlock *BB,
                          MachineBasicBlock::iterator IP,
                          Value *Op0, Value *Op1, unsigned Opcode,
                          unsigned TargetReg);

  /// emitShiftOperation - Common code shared between visitShiftInst and
  /// constant expression support.
  ///
  void emitShiftOperation(MachineBasicBlock *MBB,
                          MachineBasicBlock::iterator IP,
                          Value *Op, Value *ShiftAmount, bool isLeftShift,
                          const Type *ResultTy, unsigned DestReg);

  /// copyConstantToRegister - Output the instructions required to put the
  /// specified constant into the specified register.
  ///
  void copyConstantToRegister(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator MBBI,
                              Constant *C, unsigned Reg);

  /// makeAnotherReg - This method returns the next register number we haven't
  /// yet used.
  ///
  /// Long values are handled somewhat specially.  They are always allocated
  /// as pairs of 32 bit integer values.  The register number returned is the
  /// lower 32 bits of the long value, and the regNum+1 is the upper 32 bits
  /// of the long value.
  ///
  unsigned makeAnotherReg(const Type *Ty) {
    assert(dynamic_cast<const X86RegisterInfo*>(TM.getRegisterInfo()) &&
           "Current target doesn't have X86 reg info??");
    const X86RegisterInfo *MRI =
      static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
    if (Ty == Type::LongTy || Ty == Type::ULongTy) {
      const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy);
      // Create the lower part
      F->getSSARegMap()->createVirtualRegister(RC);
      // Create the upper part.
      return F->getSSARegMap()->createVirtualRegister(RC)-1;
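      // (createVirtualRegister hands out consecutive numbers, so the value
      // returned here names the lower half and +1 names the upper half.)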
    }

    // Add the mapping of regnumber => reg class to MachineFunction
    const TargetRegisterClass *RC = MRI->getRegClassForType(Ty);
    return F->getSSARegMap()->createVirtualRegister(RC);
  }

  /// getReg - This method turns an LLVM value into a register number.  This
  /// is guaranteed to produce the same register number for a particular value
  /// every time it is queried.
  ///
  unsigned getReg(Value &V) { return getReg(&V); }  // Allow references
  unsigned getReg(Value *V) {
    // Just append to the end of the current bb.
    MachineBasicBlock::iterator It = BB->end();
    return getReg(V, BB, It);
  }
  unsigned getReg(Value *V, MachineBasicBlock *MBB,
                  MachineBasicBlock::iterator IPt) {
    unsigned &Reg = RegMap[V];
    if (Reg == 0)
      Reg = makeAnotherReg(V->getType());

    // If this operand is a constant, emit the code to copy the constant into
    // the register here...
    //
    if (Constant *C = dyn_cast<Constant>(V)) {
      copyConstantToRegister(MBB, IPt, C, Reg);
      RegMap.erase(V);  // Assign a new name to this constant if ref'd again
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
      // Move the address of the global into the register
      BMI(MBB, IPt, X86::MOVri32, 1, Reg).addGlobalAddress(GV);
      RegMap.erase(V);  // Assign a new name to this address if ref'd again
    }

    return Reg;
  }
};

/// TypeClass - Used by the X86 backend to group LLVM types by their basic X86
/// register class.
enum TypeClass {
  cByte, cShort, cInt, cFP, cLong
};

/// getClass - Turn a primitive type into a "class" number which is based on the
/// size of the type, and whether or not it is floating point.
///
static inline TypeClass getClass(const Type *Ty) {
  switch (Ty->getPrimitiveID()) {
  case Type::SByteTyID:
  case Type::UByteTyID:   return cByte;      // Byte operands are class #0
  case Type::ShortTyID:
  case Type::UShortTyID:  return cShort;     // Short operands are class #1
  case Type::IntTyID:
  case Type::UIntTyID:
  case Type::PointerTyID: return cInt;       // Int's and pointers are class #2
  case Type::FloatTyID:
  case Type::DoubleTyID:  return cFP;        // Floating Point is #3
  case Type::LongTyID:
  case Type::ULongTyID:   return cLong;      // Longs are class #4
  default:
    assert(0 && "Invalid type to getClass!");
    return cByte;  // not reached
  }
}

// getClassB - Just like getClass, but treat boolean values as bytes.
static inline TypeClass getClassB(const Type *Ty) {
  if (Ty == Type::BoolTy) return cByte;
  return getClass(Ty);
}

/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
///
void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator IP,
                                  Constant *C, unsigned R) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    unsigned Class = 0;
    switch (CE->getOpcode()) {
    case Instruction::GetElementPtr:
      emitGEPOperation(MBB, IP, CE->getOperand(0),
                       CE->op_begin()+1, CE->op_end(), R);
      return;
    case Instruction::Cast:
      emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R);
      return;

    case Instruction::Xor: ++Class;  // FALL THROUGH
    case Instruction::Or:  ++Class;  // FALL THROUGH
    case Instruction::And: ++Class;  // FALL THROUGH
    case Instruction::Sub: ++Class;  // FALL THROUGH
    case Instruction::Add:
      emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                                Class, R);
      return;

    case Instruction::Mul: {
      unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
      unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
      doMultiply(MBB, IP, R, CE->getType(), Op0Reg, Op1Reg);
      return;
    }
    case Instruction::Div:
    case Instruction::Rem: {
      unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
      unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
      emitDivRemOperation(MBB, IP, Op0Reg, Op1Reg,
                          CE->getOpcode() == Instruction::Div,
                          CE->getType(), R);
      return;
    }

    case Instruction::SetNE:
    case Instruction::SetEQ:
    case Instruction::SetLT:
    case Instruction::SetGT:
    case Instruction::SetLE:
    case Instruction::SetGE:
      emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode(), R);
      return;

    case Instruction::Shl:
    case Instruction::Shr:
      emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode() == Instruction::Shl, CE->getType(), R);
      return;

    default:
      std::cerr << "Offending expr: " << *C << "\n";
      assert(0 && "Constant expression not yet handled!\n");
    }
  }

  if (C->getType()->isIntegral()) {
    unsigned Class = getClassB(C->getType());

    if (Class == cLong) {
      // Copy the value into the register pair.
      uint64_t Val = cast<ConstantInt>(C)->getRawValue();
      BMI(MBB, IP, X86::MOVri32, 1, R).addZImm(Val & 0xFFFFFFFF);
      BMI(MBB, IP, X86::MOVri32, 1, R+1).addZImm(Val >> 32);
      return;
    }

    assert(Class <= cInt && "Type not handled yet!");

    static const unsigned IntegralOpcodeTab[] = {
      X86::MOVri8, X86::MOVri16, X86::MOVri32
    };

    if (C->getType() == Type::BoolTy) {
      BMI(MBB, IP, X86::MOVri8, 1, R).addZImm(C == ConstantBool::True);
    } else {
      ConstantInt *CI = cast<ConstantInt>(C);
      BMI(MBB, IP, IntegralOpcodeTab[Class], 1, R).addZImm(CI->getRawValue());
    }
  } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->isExactlyValue(+0.0))
      BMI(MBB, IP, X86::FLD0, 0, R);
    else if (CFP->isExactlyValue(+1.0))
      BMI(MBB, IP, X86::FLD1, 0, R);
    else {
      // Otherwise we need to spill the constant to memory...
      MachineConstantPool *CP = F->getConstantPool();
      unsigned CPI = CP->getConstantPoolIndex(CFP);
      const Type *Ty = CFP->getType();

      assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
             "Unknown FP type!");
      unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLDr32 : X86::FLDr64;
      addConstantPoolReference(BMI(MBB, IP, LoadOpcode, 4, R), CPI);
    }
  } else if (isa<ConstantPointerNull>(C)) {
    // Copy zero (null pointer) to the register.
    BMI(MBB, IP, X86::MOVri32, 1, R).addZImm(0);
  } else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
    BMI(MBB, IP, X86::MOVri32, 1, R).addGlobalAddress(CPR->getValue());
  } else {
    std::cerr << "Offending constant: " << *C << "\n";
    assert(0 && "Type not handled yet!");
  }
}

/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from
/// the stack into virtual registers.
///
void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
  // Emit instructions to load the arguments...  On entry to a function on the
  // X86, the stack frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is four bytes in size
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  MachineFrameInfo *MFI = F->getFrameInfo();

  for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) {
    unsigned Reg = getReg(*I);

    int FI;          // Frame object index
    switch (getClassB(I->getType())) {
    case cByte:
      FI = MFI->CreateFixedObject(1, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOVrm8, 4, Reg), FI);
      break;
    case cShort:
      FI = MFI->CreateFixedObject(2, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOVrm16, 4, Reg), FI);
      break;
    case cInt:
      FI = MFI->CreateFixedObject(4, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg), FI);
      break;
    case cLong:
      FI = MFI->CreateFixedObject(8, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg), FI);
      addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg+1), FI, 4);
      ArgOffset += 4;   // longs require 4 additional bytes
      break;
    case cFP:
      unsigned Opcode;
      if (I->getType() == Type::FloatTy) {
        Opcode = X86::FLDr32;
        FI = MFI->CreateFixedObject(4, ArgOffset);
      } else {
        Opcode = X86::FLDr64;
        FI = MFI->CreateFixedObject(8, ArgOffset);
        ArgOffset += 4;   // doubles require 4 additional bytes
      }
      addFrameReference(BuildMI(BB, Opcode, 4, Reg), FI);
      break;
    default:
      assert(0 && "Unhandled argument type!");
    }
    ArgOffset += 4;  // Each argument takes at least 4 bytes on the stack...
  }

  // If the function takes variable number of arguments, add a frame offset for
  // the start of the first vararg value... this is used to expand
  // llvm.va_start.
  if (Fn.getFunctionType()->isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
}

/// SelectPHINodes - Insert machine code to generate phis.  This is tricky
/// because we have to generate our sources into the source basic blocks, not
/// the current one.
///
void ISel::SelectPHINodes() {
  const TargetInstrInfo &TII = TM.getInstrInfo();
  const Function &LF = *F->getFunction();  // The LLVM function...
  for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
    const BasicBlock *BB = I;
    MachineBasicBlock *MBB = MBBMap[I];

    // Loop over all of the PHI nodes in the LLVM basic block...
    MachineBasicBlock::iterator instr = MBB->begin();
    for (BasicBlock::const_iterator I = BB->begin();
         PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I)); ++I) {

      // Create a new machine instr PHI node, and insert it.
      unsigned PHIReg = getReg(*PN);
      MachineInstr *PhiMI = BuildMI(X86::PHI, PN->getNumOperands(), PHIReg);
      MBB->insert(instr, PhiMI);

      MachineInstr *LongPhiMI = 0;
      if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy) {
        LongPhiMI = BuildMI(X86::PHI, PN->getNumOperands(), PHIReg+1);
        MBB->insert(instr, LongPhiMI);
      }

      // PHIValues - Map of blocks to incoming virtual registers.  We use this
      // so that we only initialize one incoming value for a particular block,
      // even if the block has multiple entries in the PHI node.
      //
      std::map<MachineBasicBlock*, unsigned> PHIValues;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)];
        unsigned ValReg;
        std::map<MachineBasicBlock*, unsigned>::iterator EntryIt =
          PHIValues.lower_bound(PredMBB);

        if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) {
          // We already inserted an initialization of the register for this
          // predecessor.  Recycle it.
          ValReg = EntryIt->second;
        } else {
          // Get the incoming value into a virtual register.
          //
          Value *Val = PN->getIncomingValue(i);

          // If this is a constant or GlobalValue, we may have to insert code
          // into the basic block to compute it into a virtual register.
          if (isa<Constant>(Val) || isa<GlobalValue>(Val)) {
            // Because we don't want to clobber any values which might be in
            // physical registers with the computation of this constant (which
            // might be arbitrarily complex if it is a constant expression),
            // just insert the computation at the top of the basic block.
            MachineBasicBlock::iterator PI = PredMBB->begin();

            // Skip over any PHI nodes though!
            while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
              ++PI;

            ValReg = getReg(Val, PredMBB, PI);
          } else {
            ValReg = getReg(Val);
          }

          // Remember that we inserted a value for this PHI for this predecessor
          PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg));
        }

        PhiMI->addRegOperand(ValReg);
        PhiMI->addMachineBasicBlockOperand(PredMBB);
        if (LongPhiMI) {
          LongPhiMI->addRegOperand(ValReg+1);
          LongPhiMI->addMachineBasicBlockOperand(PredMBB);
        }
      }
    }
  }
}

/// RequiresFPRegKill - The floating point stackifier pass cannot insert
/// compensation code on critical edges.  As such, it requires that we kill all
/// FP registers on the exit from any blocks that either ARE critical edges, or
/// branch to a block that has incoming critical edges.
///
/// Note that this kill instruction will eventually be eliminated when
/// restrictions in the stackifier are relaxed.
///
static bool RequiresFPRegKill(const BasicBlock *BB) {
  for (succ_const_iterator SI = succ_begin(BB), E = succ_end(BB); SI!=E; ++SI) {
    const BasicBlock *Succ = *SI;
    pred_const_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
    ++PI;  // Blocks have at least one predecessor
    if (PI != PE) {  // If it has exactly one, this isn't a critical edge
      // If this block has more than one predecessor, check all of the
      // predecessors to see if they have multiple successors.  If so, then the
      // block we are analyzing needs an FPRegKill.
      for (PI = pred_begin(Succ); PI != PE; ++PI) {
        const BasicBlock *Pred = *PI;
        succ_const_iterator SI2 = succ_begin(Pred);
        ++SI2;  // There must be at least one successor of this block.
        if (SI2 != succ_end(Pred))
          return true;  // Yes, we must insert the kill on this edge.
      }
    }
  }
  // If we got this far, there is no need to insert the kill instruction.
  return false;
}

// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks that
// need them.  This only occurs due to the floating point stackifier not being
// aggressive enough to handle arbitrary global stackification.
//
// Currently we insert an FP_REG_KILL instruction into each block that uses or
// defines a floating point virtual register.
//
// When the global register allocators (like linear scan) finally update live
// variable analysis, we can keep floating point values in registers across
// portions of the CFG that do not involve critical edges.  This will be a big
// win, but we are waiting on the global allocators before we can do this.
//
// With a bit of work, the floating point stackifier pass can be enhanced to
// break critical edges as needed (to make a place to put compensation code),
// but this will require some infrastructure improvements as well.
//
void ISel::InsertFPRegKills() {
  SSARegMap &RegMap = *F->getSSARegMap();
  const TargetInstrInfo &TII = TM.getInstrInfo();

  for (MachineFunction::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
    bool UsesFPReg = false;
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I!=E; ++I)
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
        if (I->getOperand(i).isRegister()) {
          unsigned Reg = I->getOperand(i).getReg();
          if (MRegisterInfo::isVirtualRegister(Reg))
            if (RegMap.getRegClass(Reg)->getSize() == 10)
              UsesFPReg = true;
        }

    // If we haven't found an FP register use or def in this basic block, check
    // to see if any of our successors has an FP PHI node, which will cause a
    // copy to be inserted into this block.
    if (!UsesFPReg)
      for (succ_const_iterator SI = succ_begin(BB->getBasicBlock()),
             E = succ_end(BB->getBasicBlock()); SI != E; ++SI) {
        MachineBasicBlock *SBB = MBBMap[*SI];
        for (MachineBasicBlock::iterator I = SBB->begin();
             I != SBB->end() && I->getOpcode() == X86::PHI; ++I) {
          if (RegMap.getRegClass(I->getOperand(0).getReg())->getSize() == 10)
            UsesFPReg = true;
        }
      }
    if (!UsesFPReg) continue;

    // Okay, this block uses an FP register.  If the block has successors (ie,
    // it's not an unwind/return), insert the FP_REG_KILL instruction.
    if (BB->getBasicBlock()->getTerminator()->getNumSuccessors() &&
        RequiresFPRegKill(BB->getBasicBlock())) {
      // Rewind past any terminator instructions that might exist.
      MachineBasicBlock::iterator I = BB->end();
      while (I != BB->begin() && TII.isTerminatorInstr((--I)->getOpcode()));
      if (I != BB->end()) ++I;
      BMI(BB, I, X86::FP_REG_KILL, 0);
      ++NumFPKill;
    }
  }
}

// canFoldSetCCIntoBranch - Return the setcc instruction if we can fold it into
// the conditional branch instruction which is the only user of the cc
// instruction.  This is the case if the conditional branch is the only user of
// the setcc, and if the setcc is in the same basic block as the conditional
// branch.  We also don't handle long arguments below, so we reject them here as
// well.
//
static SetCondInst *canFoldSetCCIntoBranch(Value *V) {
  if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
    if (SCI->hasOneUse() && isa<BranchInst>(SCI->use_back()) &&
        SCI->getParent() == cast<BranchInst>(SCI->use_back())->getParent()) {
      const Type *Ty = SCI->getOperand(0)->getType();
      if (Ty != Type::LongTy && Ty != Type::ULongTy)
        return SCI;
    }
  return 0;
}

// Return a fixed numbering for setcc instructions which does not depend on the
// order of the opcodes.
//
static unsigned getSetCCNumber(unsigned Opcode) {
  switch (Opcode) {
  default: assert(0 && "Unknown setcc instruction!");
  case Instruction::SetEQ: return 0;
  case Instruction::SetNE: return 1;
  case Instruction::SetLT: return 2;
  case Instruction::SetGE: return 3;
  case Instruction::SetGT: return 4;
  case Instruction::SetLE: return 5;
  }
}
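
// Note that this numbering places each condition next to its logical inverse
// (eq/ne, lt/ge, gt/le), so a condition number can be inverted by XOR-ing it
// with 1 (see visitBranchInst below).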

// LLVM  -> X86 signed  X86 unsigned
// -----    ----------  ------------
// seteq -> sete        sete
// setne -> setne       setne
// setlt -> setl        setb
// setge -> setge       setae
// setgt -> setg        seta
// setle -> setle       setbe
// ----
//           sets       // Used by comparison with 0 optimization
//           setns
static const unsigned SetCCOpcodeTab[2][8] = {
  { X86::SETEr, X86::SETNEr, X86::SETBr, X86::SETAEr, X86::SETAr, X86::SETBEr,
    0, 0 },
  { X86::SETEr, X86::SETNEr, X86::SETLr, X86::SETGEr, X86::SETGr, X86::SETLEr,
    X86::SETSr, X86::SETNSr },
};

// EmitComparison - This function emits a comparison of the two operands,
// returning the extended setcc code to use.
unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                              MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP) {
  // The arguments are already supposed to be of the same type.
  const Type *CompTy = Op0->getType();
  unsigned Class = getClassB(CompTy);
  unsigned Op0r = getReg(Op0, MBB, IP);

  // Special case handling of: cmp R, i
  if (Class == cByte || Class == cShort || Class == cInt)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
      uint64_t Op1v = cast<ConstantInt>(CI)->getRawValue();

      // Mask off any upper bits of the constant, if there are any...
      Op1v &= (1ULL << (8 << Class)) - 1;
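      // ((8 << Class) is the operand width in bits: 8, 16, or 32.)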

      // If this is a comparison against zero, emit more efficient code.  We
      // can't handle unsigned comparisons against zero unless they are == or
      // !=.  These should have been strength reduced already anyway.
      if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {
        static const unsigned TESTTab[] = {
          X86::TESTrr8, X86::TESTrr16, X86::TESTrr32
        };
        BMI(MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);

        if (OpNum == 2) return 6;   // Map jl -> js
        if (OpNum == 3) return 7;   // Map jg -> jns
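        // (The extended numbers 6 and 7 select the sign-flag entries --
        // sets/setns and js/jns -- in the setcc and branch opcode tables.)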
        return OpNum;
      }

      static const unsigned CMPTab[] = {
        X86::CMPri8, X86::CMPri16, X86::CMPri32
      };

      BMI(MBB, IP, CMPTab[Class], 2).addReg(Op0r).addZImm(Op1v);
      return OpNum;
    }

  // Special case handling of comparison against +/- 0.0
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op1))
    if (CFP->isExactlyValue(+0.0) || CFP->isExactlyValue(-0.0)) {
      BMI(MBB, IP, X86::FTST, 1).addReg(Op0r);
      BMI(MBB, IP, X86::FNSTSWr8, 0);
      BMI(MBB, IP, X86::SAHF, 1);
      return OpNum;
    }

  unsigned Op1r = getReg(Op1, MBB, IP);
  switch (Class) {
  default: assert(0 && "Unknown type class!");
    // Emit: cmp <var1>, <var2> (do the comparison).  We can
    // compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
    // 32-bit.
  case cByte:
    BMI(MBB, IP, X86::CMPrr8, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cShort:
    BMI(MBB, IP, X86::CMPrr16, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cInt:
    BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cFP:
    BMI(MBB, IP, X86::FpUCOM, 2).addReg(Op0r).addReg(Op1r);
    BMI(MBB, IP, X86::FNSTSWr8, 0);
    BMI(MBB, IP, X86::SAHF, 1);
    break;

  case cLong:
    if (OpNum < 2) {    // seteq, setne
      unsigned LoTmp = makeAnotherReg(Type::IntTy);
      unsigned HiTmp = makeAnotherReg(Type::IntTy);
      unsigned FinalTmp = makeAnotherReg(Type::IntTy);
      BMI(MBB, IP, X86::XORrr32, 2, LoTmp).addReg(Op0r).addReg(Op1r);
      BMI(MBB, IP, X86::XORrr32, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
      BMI(MBB, IP, X86::ORrr32,  2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
      break;  // Allow the sete or setne to be generated from flags set by OR
    } else {
      // Emit a sequence of code which compares the high and low parts once
      // each, then uses a conditional move to handle the overflow case.  For
      // example, a setlt for long would generate code like this:
      //
      //  AL = lo(op1) < lo(op2)   // Always unsigned comparison
      //  BL = hi(op1) < hi(op2)   // Signedness depends on operands
      //  dest = hi(op1) == hi(op2) ? AL : BL;
      //
      // FIXME: This would be much better if we had hierarchical register
      // classes!  Until then, hardcode registers so that we can deal with their
      // aliases (because we don't have conditional byte moves).
      //
      BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r);
      BMI(MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
      BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r+1).addReg(Op1r+1);
      BMI(MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
      BMI(MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
      BMI(MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
      BMI(MBB, IP, X86::CMOVErr16, 2, X86::BX).addReg(X86::BX).addReg(X86::AX);
      // NOTE: visitSetCondInst knows that the value is dumped into the BL
      // register at this point for long values...
    }
    break;
  }
  return OpNum;
}

/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
/// register, then move it to wherever the result should be.
///
void ISel::visitSetCondInst(SetCondInst &I) {
  if (canFoldSetCCIntoBranch(&I)) return;  // Fold this into a branch...

  unsigned DestReg = getReg(I);
  MachineBasicBlock::iterator MII = BB->end();
  emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(),
                     DestReg);
}

/// emitSetCCOperation - Common code shared between visitSetCondInst and
/// constant expression support.
void ISel::emitSetCCOperation(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP,
                              Value *Op0, Value *Op1, unsigned Opcode,
                              unsigned TargetReg) {
  unsigned OpNum = getSetCCNumber(Opcode);
  OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP);

  const Type *CompTy = Op0->getType();
  unsigned CompClass = getClassB(CompTy);
  bool isSigned = CompTy->isSigned() && CompClass != cFP;
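  // (FP comparisons transfer the FPU flags through FNSTSW/SAHF, which yields
  // the unsigned condition codes, so they always use the unsigned setcc row.)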

  if (CompClass != cLong || OpNum < 2) {
    // Handle normal comparisons with a setcc instruction...
    BMI(MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg);
  } else {
    // Handle long comparisons by copying the value which is already in BL into
    // the register we want...
    BMI(MBB, IP, X86::MOVrr8, 1, TargetReg).addReg(X86::BL);
  }
}

/// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide
/// operand, in the specified target register.
void ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
  bool isUnsigned = VR.Ty->isUnsigned();

  // Make sure we have the register number for this value...
  unsigned Reg = VR.Val ? getReg(VR.Val) : VR.Reg;

  switch (getClassB(VR.Ty)) {
  case cByte:
    // Extend value into target register (8->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZXr32r8, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSXr32r8, 1, targetReg).addReg(Reg);
    break;
  case cShort:
    // Extend value into target register (16->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZXr32r16, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSXr32r16, 1, targetReg).addReg(Reg);
    break;
  case cInt:
    // Move value into target register (32->32)
    BuildMI(BB, X86::MOVrr32, 1, targetReg).addReg(Reg);
    break;
  default:
    assert(0 && "Unpromotable operand class in promote32");
  }
}

/// 'ret' instruction - Here we are interested in meeting the x86 ABI.  As such,
/// we have the following possibilities:
///
///   ret void: No return value, simply emit a 'ret' instruction
///   ret sbyte, ubyte : Extend value into EAX and return
///   ret short, ushort: Extend value into EAX and return
///   ret int, uint    : Move value into EAX and return
///   ret pointer      : Move value into EAX and return
///   ret long, ulong  : Move value into EAX/EDX and return
///   ret float/double : Top of FP stack
///
void ISel::visitReturnInst(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    BuildMI(BB, X86::RET, 0);  // Just emit a 'ret' instruction
    return;
  }

  Value *RetVal = I.getOperand(0);
  unsigned RetReg = getReg(RetVal);
  switch (getClassB(RetVal->getType())) {
  case cByte:   // integral return values: extend or move into EAX and return
  case cShort:
  case cInt:
    promote32(X86::EAX, ValueRecord(RetReg, RetVal->getType()));
    // Declare that EAX is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
    break;
  case cFP:     // Floats & Doubles: Return in ST(0)
    BuildMI(BB, X86::FpSETRESULT, 1).addReg(RetReg);
    // Declare that top-of-stack is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
    break;
  case cLong:
    BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(RetReg);
    BuildMI(BB, X86::MOVrr32, 1, X86::EDX).addReg(RetReg+1);
    // Declare that EAX & EDX are live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
      .addReg(X86::ESP);
    break;
  default:
    visitInstruction(I);
  }
  // Emit a 'ret' instruction
  BuildMI(BB, X86::RET, 0);
}

// getBlockAfter - Return the basic block which occurs lexically after the
// specified one.
static inline BasicBlock *getBlockAfter(BasicBlock *BB) {
  Function::iterator I = BB; ++I;  // Get iterator to next block
  return I != BB->getParent()->end() ? &*I : 0;
}

/// visitBranchInst - Handle conditional and unconditional branches here.  Note
/// that since code layout is frozen at this point, that if we are trying to
/// jump to a block that is the immediate successor of the current block, we can
/// just make a fall-through (but we don't currently).
///
void ISel::visitBranchInst(BranchInst &BI) {
  BasicBlock *NextBB = getBlockAfter(BI.getParent());  // BB after current one

  if (!BI.isConditional()) {  // Unconditional branch?
    if (BI.getSuccessor(0) != NextBB)
      BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
    return;
  }

  // See if we can fold the setcc into the branch itself...
  SetCondInst *SCI = canFoldSetCCIntoBranch(BI.getCondition());
  if (SCI == 0) {
    // Nope, cannot fold setcc into this branch.  Emit a branch on a condition
    // computed some other way...
    unsigned condReg = getReg(BI.getCondition());
    BuildMI(BB, X86::CMPri8, 2).addReg(condReg).addZImm(0);
    if (BI.getSuccessor(1) == NextBB) {
      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JNE, 1).addPCDisp(BI.getSuccessor(0));
    } else {
      BuildMI(BB, X86::JE, 1).addPCDisp(BI.getSuccessor(1));

      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
    }
    return;
  }

  unsigned OpNum = getSetCCNumber(SCI->getOpcode());
  MachineBasicBlock::iterator MII = BB->end();
  OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB,MII);

  const Type *CompTy = SCI->getOperand(0)->getType();
  bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

  // LLVM  -> X86 signed  X86 unsigned
  // -----    ----------  ------------
  // seteq -> je          je
  // setne -> jne         jne
  // setlt -> jl          jb
  // setge -> jge         jae
  // setgt -> jg          ja
  // setle -> jle         jbe
  // ----
  //           js         // Used by comparison with 0 optimization
  //           jns
  static const unsigned OpcodeTab[2][8] = {
    { X86::JE, X86::JNE, X86::JB, X86::JAE, X86::JA, X86::JBE, 0, 0 },
    { X86::JE, X86::JNE, X86::JL, X86::JGE, X86::JG, X86::JLE,
      X86::JS, X86::JNS },
  };

  if (BI.getSuccessor(0) != NextBB) {
    BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(0));
    if (BI.getSuccessor(1) != NextBB)
      BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(1));
  } else {
    // Change to the inverse condition...
    if (BI.getSuccessor(1) != NextBB) {
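      // (The fixed numbering pairs each condition with its inverse, so
      // XOR-ing the condition number with 1 inverts it.)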
      OpNum ^= 1;
      BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(1));
    }
  }
}

/// doCall - This emits an abstract call instruction, setting up the arguments
/// and the return value as appropriate.  For the actual function call itself,
/// it inserts the specified CallMI instruction into the stream.
///
void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                  const std::vector<ValueRecord> &Args) {
  // Count how many bytes are to be pushed on the stack...
  unsigned NumBytes = 0;

  if (!Args.empty()) {
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      switch (getClassB(Args[i].Ty)) {
      case cByte: case cShort: case cInt:
        NumBytes += 4; break;
      case cLong:
        NumBytes += 8; break;
      case cFP:
        NumBytes += Args[i].Ty == Type::FloatTy ? 4 : 8;
        break;
      default: assert(0 && "Unknown class!");
      }

    // Adjust the stack pointer for the new arguments...
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addZImm(NumBytes);

    // Arguments go on the stack in reverse order, as specified by the ABI.
    unsigned ArgOffset = 0;
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      unsigned ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
      switch (getClassB(Args[i].Ty)) {
      case cByte:
      case cShort: {
        // Promote arg to 32 bits wide into a temporary register...
        unsigned R = makeAnotherReg(Type::UIntTy);
        promote32(R, Args[i]);
        addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
                     X86::ESP, ArgOffset).addReg(R);
        break;
      }
      case cInt:
        addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
                     X86::ESP, ArgOffset).addReg(ArgReg);
        break;
      case cLong:
        addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
                     X86::ESP, ArgOffset).addReg(ArgReg);
        addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
                     X86::ESP, ArgOffset+4).addReg(ArgReg+1);
        ArgOffset += 4;      // 8 byte entry, not 4.
        break;
      case cFP:
        if (Args[i].Ty == Type::FloatTy) {
          addRegOffset(BuildMI(BB, X86::FSTr32, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        } else {
          assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!");
          addRegOffset(BuildMI(BB, X86::FSTr64, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
          ArgOffset += 4;    // 8 byte entry, not 4.
        }
        break;
      default: assert(0 && "Unknown class!");
      }
      ArgOffset += 4;   // Each argument takes at least 4 bytes on the stack
    }
  } else {
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addZImm(0);
  }

  BB->push_back(CallMI);

  BuildMI(BB, X86::ADJCALLSTACKUP, 1).addZImm(NumBytes);

  // If there is a return value, scavenge the result from the location the call
  // leaves it in...
  //
  if (Ret.Ty != Type::VoidTy) {
    unsigned DestClass = getClassB(Ret.Ty);
    switch (DestClass) {
    case cByte:
    case cShort:
    case cInt: {
      // Integral results are in %eax, or the appropriate portion
      // of it.
      static const unsigned regRegMove[] = {
        X86::MOVrr8, X86::MOVrr16, X86::MOVrr32
      };
      static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX };
      BuildMI(BB, regRegMove[DestClass], 1, Ret.Reg).addReg(AReg[DestClass]);
      break;
    }
    case cFP:     // Floating-point return values live in %ST(0)
      BuildMI(BB, X86::FpGETRESULT, 1, Ret.Reg);
      break;
    case cLong:   // Long values are left in EDX:EAX
      BuildMI(BB, X86::MOVrr32, 1, Ret.Reg).addReg(X86::EAX);
      BuildMI(BB, X86::MOVrr32, 1, Ret.Reg+1).addReg(X86::EDX);
      break;
    default: assert(0 && "Unknown class!");
    }
  }
}

/// visitCallInst - Push args on stack and do a procedure call instruction.
void ISel::visitCallInst(CallInst &CI) {
  MachineInstr *TheCall;
  if (Function *F = CI.getCalledFunction()) {
    // Is it an intrinsic function call?
    if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) {
      visitIntrinsicCall(ID, CI);   // Special intrinsics are not handled here
      return;
    }

    // Emit a CALL instruction with PC-relative displacement.
    TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true);
  } else {  // Emit an indirect call...
    unsigned Reg = getReg(CI.getCalledValue());
    TheCall = BuildMI(X86::CALLr32, 1).addReg(Reg);
  }

  std::vector<ValueRecord> Args;
  for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
    Args.push_back(ValueRecord(CI.getOperand(i)));

  unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0;
  doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
}

/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
/// function, lowering any calls to unknown intrinsic functions into the
/// equivalent LLVM code.
void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
      if (CallInst *CI = dyn_cast<CallInst>(I++))
        if (Function *F = CI->getCalledFunction())
          switch (F->getIntrinsicID()) {
          case Intrinsic::not_intrinsic:
          case Intrinsic::va_start:
          case Intrinsic::va_copy:
          case Intrinsic::va_end:
          case Intrinsic::returnaddress:
          case Intrinsic::frameaddress:
          case Intrinsic::memcpy:
          case Intrinsic::memset:
            // We directly implement these intrinsics
            break;
          default:
            // All other intrinsic calls we must lower.
            Instruction *Before = CI->getPrev();
            TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
            if (Before) {   // Move iterator to instruction after call
              I = Before; ++I;
            } else {
              I = BB->begin();
            }
          }
}

void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
  unsigned TmpReg1, TmpReg2;
  switch (ID) {
  case Intrinsic::va_start:
    // Get the address of the first vararg value...
    TmpReg1 = getReg(CI);
    addFrameReference(BuildMI(BB, X86::LEAr32, 5, TmpReg1), VarArgsFrameIndex);
    return;

  case Intrinsic::va_copy:
    TmpReg1 = getReg(CI);
    TmpReg2 = getReg(CI.getOperand(1));
    BuildMI(BB, X86::MOVrr32, 1, TmpReg1).addReg(TmpReg2);
    return;
  case Intrinsic::va_end: return;   // Noop on X86

  case Intrinsic::returnaddress:
  case Intrinsic::frameaddress:
    TmpReg1 = getReg(CI);
    if (cast<Constant>(CI.getOperand(1))->isNullValue()) {
      if (ID == Intrinsic::returnaddress) {
        // Just load the return address
        addFrameReference(BuildMI(BB, X86::MOVrm32, 4, TmpReg1),
                          ReturnAddressIndex);
      } else {
        addFrameReference(BuildMI(BB, X86::LEAr32, 4, TmpReg1),
                          ReturnAddressIndex, -4);
      }
    } else {
      // Values other than zero are not implemented yet.
      BuildMI(BB, X86::MOVri32, 1, TmpReg1).addZImm(0);
    }
    return;

  case Intrinsic::memcpy: {
    assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
    unsigned Align = 1;
    if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
      Align = AlignC->getRawValue();
      if (Align == 0) Align = 1;
    }

    // Turn the byte count into # iterations
    unsigned CountReg;
    unsigned Opcode;
    switch (Align & 3) {
    case 2:   // WORD aligned
      if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
        CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
      } else {
        CountReg = makeAnotherReg(Type::IntTy);
        unsigned ByteReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(1);
      }
      Opcode = X86::REP_MOVSW;
      break;
    case 0:   // DWORD aligned
      if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
        CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
      } else {
        CountReg = makeAnotherReg(Type::IntTy);
        unsigned ByteReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(2);
      }
      Opcode = X86::REP_MOVSD;
      break;
    case 1:   // BYTE aligned
    case 3:   // BYTE aligned
      CountReg = getReg(CI.getOperand(3));
      Opcode = X86::REP_MOVSB;
      break;
    }

    // No matter what the alignment is, we put the source in ESI, the
    // destination in EDI, and the count in ECX.
    TmpReg1 = getReg(CI.getOperand(1));
    TmpReg2 = getReg(CI.getOperand(2));
    BuildMI(BB, X86::MOVrr32, 1, X86::ECX).addReg(CountReg);
    BuildMI(BB, X86::MOVrr32, 1, X86::EDI).addReg(TmpReg1);
    BuildMI(BB, X86::MOVrr32, 1, X86::ESI).addReg(TmpReg2);
    BuildMI(BB, Opcode, 0);
    return;
  }
  case Intrinsic::memset: {
    assert(CI.getNumOperands() == 5 && "Illegal llvm.memset call!");
    unsigned Align = 1;
    if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
      Align = AlignC->getRawValue();
      if (Align == 0) Align = 1;
    }

    // Turn the byte count into # iterations
    unsigned CountReg;
    unsigned Opcode;
    if (ConstantInt *ValC = dyn_cast<ConstantInt>(CI.getOperand(2))) {
      unsigned Val = ValC->getRawValue() & 255;

      // If the value is a constant, then we can potentially use larger copies.
      switch (Align & 3) {
      case 2:   // WORD aligned
        if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
          CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
        } else {
          CountReg = makeAnotherReg(Type::IntTy);
          unsigned ByteReg = getReg(CI.getOperand(3));
          BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(1);
        }
        BuildMI(BB, X86::MOVri16, 1, X86::AX).addZImm((Val << 8) | Val);
        Opcode = X86::REP_STOSW;
        break;
      case 0:   // DWORD aligned
        if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
          CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
        } else {
          CountReg = makeAnotherReg(Type::IntTy);
          unsigned ByteReg = getReg(CI.getOperand(3));
          BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(2);
        }
        Val = (Val << 8) | Val;
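        // (e.g. 0xAB becomes 0xABAB above, and 0xABABABAB in EAX below.)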
        BuildMI(BB, X86::MOVri32, 1, X86::EAX).addZImm((Val << 16) | Val);
        Opcode = X86::REP_STOSD;
        break;
      case 1:   // BYTE aligned
      case 3:   // BYTE aligned
        CountReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::MOVri8, 1, X86::AL).addZImm(Val);
        Opcode = X86::REP_STOSB;
        break;
      }
    } else {
      // If it's not a constant value we are storing, just fall back.  We could
      // try to be clever to form 16 bit and 32 bit values, but we don't yet.
      unsigned ValReg = getReg(CI.getOperand(2));
      BuildMI(BB, X86::MOVrr8, 1, X86::AL).addReg(ValReg);
      CountReg = getReg(CI.getOperand(3));
      Opcode = X86::REP_STOSB;
    }

    // No matter what the alignment is, we put the fill value in AL/AX/EAX, the
    // destination in EDI, and the count in ECX.
    TmpReg1 = getReg(CI.getOperand(1));
    BuildMI(BB, X86::MOVrr32, 1, X86::ECX).addReg(CountReg);
    BuildMI(BB, X86::MOVrr32, 1, X86::EDI).addReg(TmpReg1);
    BuildMI(BB, Opcode, 0);
    return;
  }

  default: assert(0 && "Error: unknown intrinsics should have been lowered!");
  }
}

/// visitSimpleBinary - Implement simple binary operators for integral types...
/// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
/// Xor.
void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
  unsigned DestReg = getReg(B);
  MachineBasicBlock::iterator MI = BB->end();
  emitSimpleBinaryOperation(BB, MI, B.getOperand(0), B.getOperand(1),
                            OperatorClass, DestReg);
}

/// emitSimpleBinaryOperation - Implement simple binary operators for integral
/// types...  OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for
/// Or, 4 for Xor.  This is the common code shared between visitSimpleBinary
/// and constant expression support.
void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
                                     MachineBasicBlock::iterator IP,
                                     Value *Op0, Value *Op1,
                                     unsigned OperatorClass, unsigned DestReg) {
  unsigned Class = getClassB(Op0->getType());

  // sub 0, X -> neg X
  if (OperatorClass == 1 && Class != cLong)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0)) {
      if (CI->isNullValue()) {
        unsigned op1Reg = getReg(Op1, MBB, IP);
        switch (Class) {
        default: assert(0 && "Unknown class for this function!");
        case cByte:
          BMI(MBB, IP, X86::NEGr8, 1, DestReg).addReg(op1Reg);
          return;
        case cShort:
          BMI(MBB, IP, X86::NEGr16, 1, DestReg).addReg(op1Reg);
          return;
        case cInt:
          BMI(MBB, IP, X86::NEGr32, 1, DestReg).addReg(op1Reg);
          return;
        }
      }
    } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op0))
      if (CFP->isExactlyValue(-0.0)) {
        // sub -0.0, X -> fchs X
        unsigned op1Reg = getReg(Op1, MBB, IP);
        BMI(MBB, IP, X86::FCHS, 1, DestReg).addReg(op1Reg);
        return;
      }

  if (!isa<ConstantInt>(Op1) || Class == cLong) {
    static const unsigned OpcodeTab[][4] = {
      // Arithmetic operators
      { X86::ADDrr8, X86::ADDrr16, X86::ADDrr32, X86::FpADD },  // ADD
      { X86::SUBrr8, X86::SUBrr16, X86::SUBrr32, X86::FpSUB },  // SUB

      // Bitwise operators
      { X86::ANDrr8, X86::ANDrr16, X86::ANDrr32, 0 },  // AND
      { X86:: ORrr8, X86:: ORrr16, X86:: ORrr32, 0 },  // OR
      { X86::XORrr8, X86::XORrr16, X86::XORrr32, 0 },  // XOR
    };

    bool isLong = false;
    if (Class == cLong) {
      isLong = true;
      Class = cInt;       // Bottom 32 bits are handled just like ints
    }

    unsigned Opcode = OpcodeTab[OperatorClass][Class];
    assert(Opcode && "Floating point arguments to logical inst?");
    unsigned Op0r = getReg(Op0, MBB, IP);
    unsigned Op1r = getReg(Op1, MBB, IP);
    BMI(MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);

    if (isLong) {     // Handle the upper 32 bits of long values...
      static const unsigned TopTab[] = {
        X86::ADCrr32, X86::SBBrr32, X86::ANDrr32, X86::ORrr32, X86::XORrr32
      };
      BMI(MBB, IP, TopTab[OperatorClass], 2,
          DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
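      // (ADC/SBB consume the carry/borrow produced by the low-half ADD/SUB,
      // so the two 32-bit halves form a correct 64-bit add or subtract; the
      // bitwise operators need no carry between halves.)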
    }
    return;
  }

  // Special case: op Reg, <const>
  ConstantInt *Op1C = cast<ConstantInt>(Op1);
  unsigned Op0r = getReg(Op0, MBB, IP);

  // xor X, -1 -> not X
  if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
    static unsigned const NOTTab[] = { X86::NOTr8, X86::NOTr16, X86::NOTr32 };
    BMI(MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
    return;
  }

  // add X, -1 -> dec X
  if (OperatorClass == 0 && Op1C->isAllOnesValue()) {
    static unsigned const DECTab[] = { X86::DECr8, X86::DECr16, X86::DECr32 };
    BMI(MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
    return;
  }

  // add X, 1 -> inc X
  if (OperatorClass == 0 && Op1C->equalsInt(1)) {
    static unsigned const INCTab[] = { X86::INCr8, X86::INCr16, X86::INCr32 };
    BMI(MBB, IP, INCTab[Class], 1, DestReg).addReg(Op0r);
    return;
  }

  static const unsigned OpcodeTab[][3] = {
    // Arithmetic operators
    { X86::ADDri8, X86::ADDri16, X86::ADDri32 },  // ADD
    { X86::SUBri8, X86::SUBri16, X86::SUBri32 },  // SUB

    // Bitwise operators
    { X86::ANDri8, X86::ANDri16, X86::ANDri32 },  // AND
    { X86:: ORri8, X86:: ORri16, X86:: ORri32 },  // OR
    { X86::XORri8, X86::XORri16, X86::XORri32 },  // XOR
  };

  assert(Class < 3 && "General code handles 64-bit integer types!");
  unsigned Opcode = OpcodeTab[OperatorClass][Class];
  uint64_t Op1v = cast<ConstantInt>(Op1C)->getRawValue();

  // Mask off any upper bits of the constant, if there are any...
  Op1v &= (1ULL << (8 << Class)) - 1;
  BMI(MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addZImm(Op1v);
}

/// doMultiply - Emit appropriate instructions to multiply together the
/// registers op0Reg and op1Reg, and put the result in DestReg.  The type of the
/// result should be given as DestTy.
///
void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                      unsigned DestReg, const Type *DestTy,
                      unsigned op0Reg, unsigned op1Reg) {
  unsigned Class = getClass(DestTy);
  switch (Class) {
  case cFP:              // Floating point multiply
    BMI(MBB, MBBI, X86::FpMUL, 2, DestReg).addReg(op0Reg).addReg(op1Reg);
    return;
  case cInt:
  case cShort:
    BMI(MBB, MBBI, Class == cInt ? X86::IMULrr32 : X86::IMULrr16, 2, DestReg)
      .addReg(op0Reg).addReg(op1Reg);
    return;
  case cByte:
    // Must use the MUL instruction, which forces use of AL...
    BMI(MBB, MBBI, X86::MOVrr8, 1, X86::AL).addReg(op0Reg);
    BMI(MBB, MBBI, X86::MULr8, 1).addReg(op1Reg);
    BMI(MBB, MBBI, X86::MOVrr8, 1, DestReg).addReg(X86::AL);
    return;
  default:
  case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
  }
}

// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N.  It
// returns zero when the input is not exactly a power of two.
static unsigned ExactLog2(unsigned Val) {
  if (Val == 0) return 0;
  unsigned Count = 0;
  while (Val != 1) {
    if (Val & 1) return 0;
    Val >>= 1;
    ++Count;
  }
  return Count+1;
}
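
// (For example, ExactLog2(8) == 4, so the shift amount used below is 4-1 = 3.)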

/// doMultiplyConst - Emit appropriate instructions to multiply the value in
/// op0Reg by the constant ConstRHS, putting the result in DestReg.
void ISel::doMultiplyConst(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator IP,
                           unsigned DestReg, const Type *DestTy,
                           unsigned op0Reg, unsigned ConstRHS) {
  unsigned Class = getClass(DestTy);

  // If the element size is exactly a power of 2, use a shift to get it.
  if (unsigned Shift = ExactLog2(ConstRHS)) {
    switch (Class) {
    default: assert(0 && "Unknown class for this function!");
    case cByte:
      BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
      return;
    case cShort:
      BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
      return;
    case cInt:
      BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
      return;
    }
  }

  if (Class == cShort) {
    BMI(MBB, IP, X86::IMULrri16, 2, DestReg).addReg(op0Reg).addZImm(ConstRHS);
    return;
  } else if (Class == cInt) {
    BMI(MBB, IP, X86::IMULrri32, 2, DestReg).addReg(op0Reg).addZImm(ConstRHS);
    return;
  }

  // Most general case, emit a normal multiply...
  static const unsigned MOVriTab[] = {
    X86::MOVri8, X86::MOVri16, X86::MOVri32
  };

  unsigned TmpReg = makeAnotherReg(DestTy);
  BMI(MBB, IP, MOVriTab[Class], 1, TmpReg).addZImm(ConstRHS);

  // Emit a MUL to multiply the register holding the index by
  // elementSize, putting the result in OffsetReg.
  doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg);
}

/// visitMul - Multiplies are not simple binary operators because they must deal
/// with the EAX register explicitly.
///
void ISel::visitMul(BinaryOperator &I) {
  unsigned Op0Reg  = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  // Simple scalar multiply?
  if (I.getType() != Type::LongTy && I.getType() != Type::ULongTy) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1))) {
      unsigned Val = (unsigned)CI->getRawValue();  // Cannot be 64-bit constant
      MachineBasicBlock::iterator MBBI = BB->end();
      doMultiplyConst(BB, MBBI, DestReg, I.getType(), Op0Reg, Val);
    } else {
      unsigned Op1Reg = getReg(I.getOperand(1));
      MachineBasicBlock::iterator MBBI = BB->end();
      doMultiply(BB, MBBI, DestReg, I.getType(), Op0Reg, Op1Reg);
    }
  } else {
    unsigned Op1Reg = getReg(I.getOperand(1));

    // Long value.  We have to do things the hard way...
    // Multiply the two low parts... capturing carry into EDX
    BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(Op0Reg);
    BuildMI(BB, X86::MULr32, 1).addReg(Op1Reg);   // AL*BL

    unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
    BuildMI(BB, X86::MOVrr32, 1, DestReg).addReg(X86::EAX);       // AL*BL
    BuildMI(BB, X86::MOVrr32, 1, OverflowReg).addReg(X86::EDX);   // AL*BL >> 32

    MachineBasicBlock::iterator MBBI = BB->end();
    unsigned AHBLReg = makeAnotherReg(Type::UIntTy);              // AH*BL
    BMI(BB, MBBI, X86::IMULrr32, 2, AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);

    unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
    BuildMI(BB, X86::ADDrr32, 2,               // AH*BL+(AL*BL >> 32)
            AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);

    unsigned ALBHReg = makeAnotherReg(Type::UIntTy);              // AL*BH
    BMI(BB, MBBI, X86::IMULrr32, 2, ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);

    BuildMI(BB, X86::ADDrr32, 2,               // AL*BH + AH*BL + (AL*BL >> 32)
            DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
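
    // (The full product is AL*BL + ((AH*BL + AL*BH) << 32) + ((AH*BH) << 64);
    // the AH*BH term lies entirely above bit 63, so it is never computed for
    // a 64-bit result.)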
  }
}

/// visitDivRem - Handle division and remainder instructions... these
/// instructions both require the same instructions to be generated, they just
/// select the result from a different register.  Note that both of these
/// instructions work differently for signed and unsigned operands.
///
void ISel::visitDivRem(BinaryOperator &I) {
  unsigned Op0Reg = getReg(I.getOperand(0));
  unsigned Op1Reg = getReg(I.getOperand(1));
  unsigned ResultReg = getReg(I);

  MachineBasicBlock::iterator IP = BB->end();
  emitDivRemOperation(BB, IP, Op0Reg, Op1Reg, I.getOpcode() == Instruction::Div,
                      I.getType(), ResultReg);
}
void ISel::emitDivRemOperation(MachineBasicBlock *BB,
                               MachineBasicBlock::iterator IP,
                               unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
                               const Type *Ty, unsigned ResultReg) {
  unsigned Class = getClass(Ty);
  switch (Class) {
  case cFP:              // Floating point divide
    if (isDiv) {
      BMI(BB, IP, X86::FpDIV, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
    } else {             // Floating point remainder...
      MachineInstr *TheCall =
        BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("fmod", true);
      std::vector<ValueRecord> Args;
      Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy));
      Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy));
      doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args);
    }
    return;
  case cLong: {
    static const char *FnName[] =
      { "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" };

    unsigned NameIdx = Ty->isUnsigned()*2 + isDiv;
    MachineInstr *TheCall =
      BuildMI(X86::CALLpcrel32, 1).addExternalSymbol(FnName[NameIdx], true);

    std::vector<ValueRecord> Args;
    Args.push_back(ValueRecord(Op0Reg, Type::LongTy));
    Args.push_back(ValueRecord(Op1Reg, Type::LongTy));
    doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args);
    return;
  }
  case cByte: case cShort: case cInt:
    break;          // Small integrals, handled below...
  default: assert(0 && "Unknown class!");
  }
  static const unsigned Regs[]     ={ X86::AL    , X86::AX     , X86::EAX     };
  static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
  static const unsigned SarOpcode[]={ X86::SARri8, X86::SARri16, X86::SARri32 };
  static const unsigned ClrOpcode[]={ X86::MOVri8, X86::MOVri16, X86::MOVri32 };
  static const unsigned ExtRegs[]  ={ X86::AH    , X86::DX     , X86::EDX     };

  static const unsigned DivOpcode[][4] = {
    { X86::DIVr8 , X86::DIVr16 , X86::DIVr32 , 0 },  // Unsigned division
    { X86::IDIVr8, X86::IDIVr16, X86::IDIVr32, 0 },  // Signed division
  };

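  // x86 DIV/IDIV divide the double-width value {ExtReg:Reg} (AH:AL, DX:AX, or
  // EDX:EAX) by the operand, leaving the quotient in Reg and the remainder in
  // ExtReg.  That is why the dividend must first be sign- or zero-extended
  // into ExtReg below.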
  bool isSigned   = Ty->isSigned();
  unsigned Reg    = Regs[Class];
  unsigned ExtReg = ExtRegs[Class];

  // Put the first operand into one of the A registers...
  BMI(BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);

  if (isSigned) {
    // Emit a sign extension instruction...
    unsigned ShiftResult = makeAnotherReg(Ty);
    BMI(BB, IP, SarOpcode[Class], 2, ShiftResult).addReg(Op0Reg).addZImm(31);
    BMI(BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
  } else {
    // If unsigned, emit a zeroing instruction... (reg = 0)
    BMI(BB, IP, ClrOpcode[Class], 2, ExtReg).addZImm(0);
  }

  // Emit the appropriate divide or remainder instruction...
  BMI(BB, IP, DivOpcode[isSigned][Class], 1).addReg(Op1Reg);

  // Figure out which register we want to pick the result out of...
  unsigned DestReg = isDiv ? Reg : ExtReg;

  // Put the result into the destination register...
  BMI(BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
}
/// Shift instructions: 'shl', 'sar', 'shr' - Some special cases here
/// for constant immediate shift values, and for constant immediate
/// shift values equal to 1.  Even the general case is sort of special,
/// because the shift amount has to be in CL, not just any old register.
///
void ISel::visitShiftInst(ShiftInst &I) {
  MachineBasicBlock::iterator IP = BB->end();
  emitShiftOperation(BB, IP, I.getOperand(0), I.getOperand(1),
                     I.getOpcode() == Instruction::Shl, I.getType(),
                     getReg(I));
}
/// emitShiftOperation - Common code shared between visitShiftInst and
/// constant expression support.
void ISel::emitShiftOperation(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP,
                              Value *Op, Value *ShiftAmount, bool isLeftShift,
                              const Type *ResultTy, unsigned DestReg) {
  unsigned SrcReg = getReg(Op, MBB, IP);
  bool isSigned = ResultTy->isSigned();
  unsigned Class = getClass(ResultTy);

  static const unsigned ConstantOperand[][4] = {
    { X86::SHRri8, X86::SHRri16, X86::SHRri32, X86::SHRDri32 },  // SHR
    { X86::SARri8, X86::SARri16, X86::SARri32, X86::SHRDri32 },  // SAR
    { X86::SHLri8, X86::SHLri16, X86::SHLri32, X86::SHLDri32 },  // SHL
    { X86::SHLri8, X86::SHLri16, X86::SHLri32, X86::SHLDri32 },  // SAL = SHL
  };

  static const unsigned NonConstantOperand[][4] = {
    { X86::SHRrr8, X86::SHRrr16, X86::SHRrr32 },  // SHR
    { X86::SARrr8, X86::SARrr16, X86::SARrr32 },  // SAR
    { X86::SHLrr8, X86::SHLrr16, X86::SHLrr32 },  // SHL
    { X86::SHLrr8, X86::SHLrr16, X86::SHLrr32 },  // SAL = SHL
  };
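  // Rows of both tables are indexed by isLeftShift*2 + isSigned: 0 = SHR
  // (unsigned right), 1 = SAR (signed right), 2 = SHL, 3 = SAL (same as SHL).
  // Column 3 of ConstantOperand holds the double-precision SHLD/SHRD forms
  // used for the long-sized cases below.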

  // Longs, as usual, are handled specially...
  if (Class == cLong) {
    // If we have a constant shift, we can generate much more efficient code
    // than otherwise...
    //
    if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
      unsigned Amount = CUI->getValue();
      if (Amount < 32) {
        const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
        if (isLeftShift) {
          BMI(MBB, IP, Opc[3], 3,
              DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addZImm(Amount);
          BMI(MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addZImm(Amount);
        } else {
          BMI(MBB, IP, Opc[3], 3,
              DestReg).addReg(SrcReg).addReg(SrcReg+1).addZImm(Amount);
          BMI(MBB, IP, Opc[2], 2, DestReg+1).addReg(SrcReg+1).addZImm(Amount);
        }
      } else {                 // Shifting more than 32 bits
        Amount -= 32;
        if (isLeftShift) {
          BMI(MBB, IP, X86::SHLri32, 2,
              DestReg + 1).addReg(SrcReg).addZImm(Amount);
          BMI(MBB, IP, X86::MOVri32, 1,
              DestReg).addZImm(0);
        } else {
          unsigned Opcode = isSigned ? X86::SARri32 : X86::SHRri32;
          BMI(MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addZImm(Amount);
          BMI(MBB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
        }
      }
    } else {
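      // Variable shift amount: compute both the "shifted by less than 32" and
      // the "shifted by 32 or more" results, then select between them with
      // CMOVNE keyed off bit 5 of CL (TEST CL, 32), since the hardware shifts
      // only honor the low five bits of CL.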
      unsigned TmpReg = makeAnotherReg(Type::IntTy);

      if (!isLeftShift && isSigned) {
        // If this is a SHR of a Long, then we need to do funny sign extension
        // stuff.  TmpReg gets the value to use as the high-part if we are
        // shifting more than 32 bits.
        BMI(MBB, IP, X86::SARri32, 2, TmpReg).addReg(SrcReg).addZImm(31);
      } else {
        // Other shifts use a fixed zero value if the shift is more than 32
        // bits.
        BMI(MBB, IP, X86::MOVri32, 1, TmpReg).addZImm(0);
      }

      // Initialize CL with the shift amount...
      unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
      BMI(MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);

      unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
      unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
      if (isLeftShift) {
        // TmpReg2 = shld inHi, inLo
        BMI(MBB, IP, X86::SHLDrr32, 2, TmpReg2).addReg(SrcReg+1).addReg(SrcReg);
        // TmpReg3 = shl  inLo, CL
        BMI(MBB, IP, X86::SHLrr32, 1, TmpReg3).addReg(SrcReg);

        // Set the flags to indicate whether the shift was by 32 bits or more.
        BMI(MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);

        // DestHi = (>=32) ? TmpReg3 : TmpReg2;
        BMI(MBB, IP, X86::CMOVNErr32, 2,
            DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
        // DestLo = (>=32) ? TmpReg : TmpReg3;
        BMI(MBB, IP, X86::CMOVNErr32, 2,
            DestReg).addReg(TmpReg3).addReg(TmpReg);
      } else {
        // TmpReg2 = shrd inLo, inHi
        BMI(MBB, IP, X86::SHRDrr32, 2, TmpReg2).addReg(SrcReg).addReg(SrcReg+1);
        // TmpReg3 = s[ah]r inHi, CL
        BMI(MBB, IP, isSigned ? X86::SARrr32 : X86::SHRrr32, 1, TmpReg3)
            .addReg(SrcReg+1);

        // Set the flags to indicate whether the shift was by 32 bits or more.
        BMI(MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);

        // DestLo = (>=32) ? TmpReg3 : TmpReg2;
        BMI(MBB, IP, X86::CMOVNErr32, 2,
            DestReg).addReg(TmpReg2).addReg(TmpReg3);

        // DestHi = (>=32) ? TmpReg : TmpReg3;
        BMI(MBB, IP, X86::CMOVNErr32, 2,
            DestReg+1).addReg(TmpReg3).addReg(TmpReg);
      }
    }
    return;
  }

  if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
    // The shift amount is constant, guaranteed to be a ubyte. Get its value.
    assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");

    const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
    BMI(MBB, IP, Opc[Class], 2,
        DestReg).addReg(SrcReg).addZImm(CUI->getValue());
  } else {                  // The shift amount is non-constant.
    unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
    BMI(MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);

    const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
    BMI(MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
  }
}
/// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
/// instruction.  The load and store instructions are the only place where we
/// need to worry about the memory layout of the target machine.
///
void ISel::visitLoadInst(LoadInst &I) {
  unsigned SrcAddrReg = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  unsigned Class = getClassB(I.getType());

  if (Class == cLong) {
    addDirectMem(BuildMI(BB, X86::MOVrm32, 4, DestReg), SrcAddrReg);
    addRegOffset(BuildMI(BB, X86::MOVrm32, 4, DestReg+1), SrcAddrReg, 4);
    return;
  }

  static const unsigned Opcodes[] = {
    X86::MOVrm8, X86::MOVrm16, X86::MOVrm32, X86::FLDr32
  };
  unsigned Opcode = Opcodes[Class];
  if (I.getType() == Type::DoubleTy) Opcode = X86::FLDr64;
  addDirectMem(BuildMI(BB, Opcode, 4, DestReg), SrcAddrReg);
}
/// visitStoreInst - Implement LLVM store instructions in terms of the x86
/// 'mov' instruction.
///
void ISel::visitStoreInst(StoreInst &I) {
  unsigned ValReg = getReg(I.getOperand(0));
  unsigned AddressReg = getReg(I.getOperand(1));

  const Type *ValTy = I.getOperand(0)->getType();
  unsigned Class = getClassB(ValTy);

  if (Class == cLong) {
    addDirectMem(BuildMI(BB, X86::MOVmr32, 1+4), AddressReg).addReg(ValReg);
    addRegOffset(BuildMI(BB, X86::MOVmr32, 1+4), AddressReg,4).addReg(ValReg+1);
    return;
  }

  static const unsigned Opcodes[] = {
    X86::MOVmr8, X86::MOVmr16, X86::MOVmr32, X86::FSTr32
  };
  unsigned Opcode = Opcodes[Class];
  if (ValTy == Type::DoubleTy) Opcode = X86::FSTr64;
  addDirectMem(BuildMI(BB, Opcode, 1+4), AddressReg).addReg(ValReg);
}
/// visitCastInst - Here we have various kinds of copying with or without
/// sign extension going on.
void ISel::visitCastInst(CastInst &CI) {
  Value *Op = CI.getOperand(0);
  // If this is a cast from a 32-bit integer to a Long type, and the only uses
  // of the cast are GEP instructions, then the cast does not need to be
  // generated explicitly, it will be folded into the GEP.
  if (CI.getType() == Type::LongTy &&
      (Op->getType() == Type::IntTy || Op->getType() == Type::UIntTy)) {
    bool AllUsesAreGEPs = true;
    for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I)
      if (!isa<GetElementPtrInst>(*I)) {
        AllUsesAreGEPs = false;
        break;
      }

    // No need to codegen this cast if all users are getelementptr instrs...
    if (AllUsesAreGEPs) return;
  }

  unsigned DestReg = getReg(CI);
  MachineBasicBlock::iterator MI = BB->end();
  emitCastOperation(BB, MI, Op, CI.getType(), DestReg);
}
/// emitCastOperation - Common code shared between visitCastInst and
/// constant expression cast support.
void ISel::emitCastOperation(MachineBasicBlock *BB,
                             MachineBasicBlock::iterator IP,
                             Value *Src, const Type *DestTy,
                             unsigned DestReg) {
  unsigned SrcReg = getReg(Src, BB, IP);
  const Type *SrcTy = Src->getType();
  unsigned SrcClass = getClassB(SrcTy);
  unsigned DestClass = getClassB(DestTy);

  // Implement casts to bool by using compare on the operand followed by set
  // if not zero on the result.
  if (DestTy == Type::BoolTy) {
    switch (SrcClass) {
    case cByte:
      BMI(BB, IP, X86::TESTrr8, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cShort:
      BMI(BB, IP, X86::TESTrr16, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cInt:
      BMI(BB, IP, X86::TESTrr32, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cLong: {
      unsigned TmpReg = makeAnotherReg(Type::IntTy);
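      // OR the two halves together: ZF is set iff the entire 64-bit value is
      // zero (the OR result register itself is dead).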
      BMI(BB, IP, X86::ORrr32, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
      break;
    }
    case cFP:
      BMI(BB, IP, X86::FTST, 1).addReg(SrcReg);
      BMI(BB, IP, X86::FNSTSWr8, 0);
      BMI(BB, IP, X86::SAHF, 1);
      break;
    }

    // If the zero flag is not set, then the value is true, set the byte to
    // true.
    BMI(BB, IP, X86::SETNEr, 1, DestReg);
    return;
  }
  static const unsigned RegRegMove[] = {
    X86::MOVrr8, X86::MOVrr16, X86::MOVrr32, X86::FpMOV, X86::MOVrr32
  };

  // Implement casts between values of the same type class (as determined by
  // getClass) by using a register-to-register move.
  if (SrcClass == DestClass) {
    if (SrcClass <= cInt || (SrcClass == cFP && SrcTy == DestTy)) {
      BMI(BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
    } else if (SrcClass == cFP) {
      if (SrcTy == Type::FloatTy) {  // float -> double
        assert(DestTy == Type::DoubleTy && "Unknown cFP member!");
        BMI(BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
      } else {                       // double -> float
        assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy &&
               "Unknown cFP member!");
        // Truncate from double to float by storing to memory as a 32-bit
        // float, then reading it back.
        unsigned FltAlign = TM.getTargetData().getFloatAlignment();
        int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
        addFrameReference(BMI(BB, IP, X86::FSTr32, 5), FrameIdx).addReg(SrcReg);
        addFrameReference(BMI(BB, IP, X86::FLDr32, 5, DestReg), FrameIdx);
      }
    } else if (SrcClass == cLong) {
      BMI(BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
      BMI(BB, IP, X86::MOVrr32, 1, DestReg+1).addReg(SrcReg+1);
    } else {
      assert(0 && "Cannot handle this type of cast instruction!");
      abort();
    }
    return;
  }

  // Handle cast of SMALLER int to LARGER int using a move with sign extension
  // or zero extension, depending on whether the source type was signed.
  if (SrcClass <= cInt && (DestClass <= cInt || DestClass == cLong) &&
      SrcClass < DestClass) {
    bool isLong = DestClass == cLong;
    if (isLong) DestClass = cInt;

    static const unsigned Opc[][4] = {
      { X86::MOVSXr16r8, X86::MOVSXr32r8, X86::MOVSXr32r16, X86::MOVrr32 }, // s
      { X86::MOVZXr16r8, X86::MOVZXr32r8, X86::MOVZXr32r16, X86::MOVrr32 }  // u
    };

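    // The opcode index is SrcClass + DestClass - 1 (cByte=0, cShort=1,
    // cInt=2): byte->short = 0, byte->int = 1, short->int = 2, and int->long
    // (DestClass remapped to cInt above) = 3, a plain 32-bit move.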
    bool isUnsigned = SrcTy->isUnsigned();
    BMI(BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
        DestReg).addReg(SrcReg);

    if (isLong) {  // Handle upper 32 bits as appropriate...
      if (isUnsigned)     // Zero out top bits...
        BMI(BB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
      else                // Sign extend bottom half...
        BMI(BB, IP, X86::SARri32, 2, DestReg+1).addReg(DestReg).addZImm(31);
    }
    return;
  }

  // Special case long -> int ...
  if (SrcClass == cLong && DestClass == cInt) {
    BMI(BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
    return;
  }

  // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
  // a move out of AX or AL.
  if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt
      && SrcClass > DestClass) {
    static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX, 0, X86::EAX };
    BMI(BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
    BMI(BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
    return;
  }

  // Handle casts from integer to floating point now...
  if (DestClass == cFP) {
    // Promote the integer to a type supported by FLD.  We do this because
    // there are no unsigned FLD instructions, so we must promote an unsigned
    // value to a larger signed value, then use FLD on the larger value.
    //
    const Type *PromoteType = 0;
    unsigned PromoteOpcode = 0;
    unsigned RealDestReg = DestReg;
    switch (SrcTy->getPrimitiveID()) {
    case Type::BoolTyID:
    case Type::SByteTyID:
      // We don't have the facilities for directly loading byte sized data
      // from memory (even signed).  Promote it to 16 bits.
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVSXr16r8;
      break;
    case Type::UByteTyID:
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVZXr16r8;
      break;
    case Type::UShortTyID:
      PromoteType = Type::IntTy;
      PromoteOpcode = X86::MOVZXr32r16;
      break;
    case Type::UIntTyID: {
      // Make a 64 bit temporary... and zero out the top of it...
      unsigned TmpReg = makeAnotherReg(Type::LongTy);
      BMI(BB, IP, X86::MOVrr32, 1, TmpReg).addReg(SrcReg);
      BMI(BB, IP, X86::MOVri32, 1, TmpReg+1).addZImm(0);
      SrcTy = Type::LongTy;
      SrcClass = cLong;
      SrcReg = TmpReg;
      break;
    }
    case Type::ULongTyID:
      // Don't fild into the real destination; a ULong source may need the
      // loaded value fixed up below before it lands in RealDestReg.
      DestReg = makeAnotherReg(Type::DoubleTy);
      break;
    default:  // No promotion needed...
      break;
    }

    if (PromoteType) {
      unsigned TmpReg = makeAnotherReg(PromoteType);
      BMI(BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
      SrcTy = PromoteType;
      SrcClass = getClass(PromoteType);
      SrcReg = TmpReg;
    }

    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());

    if (SrcClass == cLong) {
      addFrameReference(BMI(BB, IP, X86::MOVmr32, 5), FrameIdx).addReg(SrcReg);
      addFrameReference(BMI(BB, IP, X86::MOVmr32, 5),
                        FrameIdx, 4).addReg(SrcReg+1);
    } else {
      static const unsigned Op1[] = { X86::MOVmr8, X86::MOVmr16, X86::MOVmr32 };
      addFrameReference(BMI(BB, IP, Op1[SrcClass], 5), FrameIdx).addReg(SrcReg);
    }

    static const unsigned Op2[] =
      { 0/*byte*/, X86::FILDr16, X86::FILDr32, 0/*FP*/, X86::FILDr64 };
    addFrameReference(BMI(BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);

    // We need special handling for unsigned 64-bit integer sources.  If the
    // input number has the "sign bit" set, then we loaded it incorrectly as a
    // negative 64-bit number.  In this case, add an offset value.
    if (SrcTy == Type::ULongTy) {
      // Emit a test instruction to see if the dynamic input value was signed.
      BMI(BB, IP, X86::TESTrr32, 2).addReg(SrcReg+1).addReg(SrcReg+1);

      // If the sign bit is set, get a pointer to an offset, otherwise get a
      // pointer to a zero.
      MachineConstantPool *CP = F->getConstantPool();
      unsigned Zero = makeAnotherReg(Type::IntTy);
      addConstantPoolReference(BMI(BB, IP, X86::LEAr32, 5, Zero),
               CP->getConstantPoolIndex(Constant::getNullValue(Type::UIntTy)));
      unsigned Offset = makeAnotherReg(Type::IntTy);
      // 0x5f800000 is 2^64 as an IEEE single: the correction to add back.
      addConstantPoolReference(BMI(BB, IP, X86::LEAr32, 5, Offset),
               CP->getConstantPoolIndex(ConstantUInt::get(Type::UIntTy,
                                                          0x5f800000)));
      unsigned Addr = makeAnotherReg(Type::IntTy);
      BMI(BB, IP, X86::CMOVSrr32, 2, Addr).addReg(Zero).addReg(Offset);

      // Load the constant for an add.  FIXME: this could make an 'fadd' that
      // reads directly from memory, but we don't support these yet.
      unsigned ConstReg = makeAnotherReg(Type::DoubleTy);
      addDirectMem(BMI(BB, IP, X86::FLDr32, 4, ConstReg), Addr);

      BMI(BB, IP, X86::FpADD, 2, RealDestReg).addReg(ConstReg).addReg(DestReg);
    }

    return;
  }

  // Handle casts from floating point to integer now...
  if (SrcClass == cFP) {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    //
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BMI(BB, IP, X86::FNSTCWm16, 4), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
    addFrameReference(BMI(BB, IP, X86::MOVrm8, 4, HighPartOfCW), CWFrameIdx, 1);

    // Set the high part to be round to zero...
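    // (Writing 12 == 0x0C into the high byte sets CW bits 10-11, the rounding
    // control field, to 11b: truncate toward zero.)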
    addFrameReference(BMI(BB, IP, X86::MOVmi8, 5), CWFrameIdx, 1).addZImm(12);

    // Reload the modified control word now...
    addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BMI(BB, IP, X86::MOVmr8, 5),
                      CWFrameIdx, 1).addReg(HighPartOfCW);

    // We don't have the facilities for directly storing byte sized data to
    // memory.  Promote it to 16 bits.  We also must promote unsigned values
    // to larger classes because we only have signed FP stores.
    unsigned StoreClass  = DestClass;
    const Type *StoreTy  = DestTy;
    if (StoreClass == cByte || DestTy->isUnsigned())
      switch (StoreClass) {
      case cByte:  StoreTy = Type::ShortTy; StoreClass = cShort; break;
      case cShort: StoreTy = Type::IntTy;   StoreClass = cInt;   break;
      case cInt:   StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      // The following treatment of cLong may not be perfectly right,
      // but it survives chains of casts of the form
      // double->ulong->double.
      case cLong:  StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      default: assert(0 && "Unknown store class!");
      }

    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(StoreTy, TM.getTargetData());

    static const unsigned Op1[] =
      { 0, X86::FISTr16, X86::FISTr32, 0, X86::FISTPr64 };
    addFrameReference(BMI(BB, IP, Op1[StoreClass], 5), FrameIdx).addReg(SrcReg);

    if (DestClass == cLong) {
      addFrameReference(BMI(BB, IP, X86::MOVrm32, 4, DestReg), FrameIdx);
      addFrameReference(BMI(BB, IP, X86::MOVrm32, 4, DestReg+1), FrameIdx, 4);
    } else {
      static const unsigned Op2[] = { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32 };
      addFrameReference(BMI(BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
    }

    // Reload the original control word now...
    addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
    return;
  }

  // Anything we haven't handled already, we can't (yet) handle at all.
  assert(0 && "Unhandled cast instruction!");
  abort();
}

/// visitVANextInst - Implement the va_next instruction...
///
void ISel::visitVANextInst(VANextInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  unsigned Size;
  switch (I.getArgType()->getPrimitiveID()) {
  default:
    std::cerr << I;
    assert(0 && "Error: bad type for va_next instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    Size = 4;
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
  case Type::DoubleTyID:
    Size = 8;
    break;
  }

  // Increment the VAList pointer...
  BuildMI(BB, X86::ADDri32, 2, DestReg).addReg(VAList).addZImm(Size);
}
void ISel::visitVAArgInst(VAArgInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  switch (I.getType()->getPrimitiveID()) {
  default:
    std::cerr << I;
    assert(0 && "Error: bad type for va_arg instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    addDirectMem(BuildMI(BB, X86::MOVrm32, 4, DestReg), VAList);
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
    addDirectMem(BuildMI(BB, X86::MOVrm32, 4, DestReg), VAList);
    addRegOffset(BuildMI(BB, X86::MOVrm32, 4, DestReg+1), VAList, 4);
    break;
  case Type::DoubleTyID:
    addDirectMem(BuildMI(BB, X86::FLDr64, 4, DestReg), VAList);
    break;
  }
}

void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
  unsigned outputReg = getReg(I);
  emitGEPOperation(BB, BB->end(), I.getOperand(0),
                   I.op_begin()+1, I.op_end(), outputReg);
}

void ISel::emitGEPOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator IP,
                            Value *Src, User::op_iterator IdxBegin,
                            User::op_iterator IdxEnd, unsigned TargetReg) {
  const TargetData &TD = TM.getTargetData();

  if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
    Src = CPR->getValue();

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*> GEPTypes;
  GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
                  gep_type_end(Src->getType(), IdxBegin, IdxEnd));
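
  // Note that the GEP is codegen'd back to front: each iteration peels one
  // index off the end of GEPOps/GEPTypes and emits an ADD that applies it to
  // a fresh register, then backs IP up so that the code computing that fresh
  // register gets inserted *before* the ADD just emitted.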

  // Keep emitting instructions until we consume the entire GEP instruction.
  while (!GEPOps.empty()) {
    unsigned OldSize = GEPOps.size();

    if (GEPTypes.empty()) {
      // The getGEPIndex operation didn't want to build an LEA.  Check to see
      // if all operands are consumed but the base pointer.  If so, just load
      // it into the register.
      if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps[0])) {
        BMI(MBB, IP, X86::MOVri32, 1, TargetReg).addGlobalAddress(GV);
      } else {
        unsigned BaseReg = getReg(GEPOps[0], MBB, IP);
        BMI(MBB, IP, X86::MOVrr32, 1, TargetReg).addReg(BaseReg);
      }
      break;                // we are now done
    } else if (const StructType *StTy = dyn_cast<StructType>(GEPTypes.back())) {
      // It's a struct access.  CUI is the index into the structure,
      // which names the field.  This index must have unsigned type.
      const ConstantUInt *CUI = cast<ConstantUInt>(GEPOps.back());
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();

      // Use the TargetData structure to pick out what the layout of the
      // structure is in memory.  Since the structure index must be constant,
      // we can get its value and use it to find the right byte offset from
      // the StructLayout class's list of structure member offsets.
      unsigned idxValue = CUI->getValue();
      unsigned FieldOff = TD.getStructLayout(StTy)->MemberOffsets[idxValue];

      unsigned Reg = makeAnotherReg(Type::UIntTy);
      // Emit an ADD to add FieldOff to the basePtr.
      BMI(MBB, IP, X86::ADDri32, 2, TargetReg).addReg(Reg).addZImm(FieldOff);
      --IP;            // Insert the next instruction before this one.
      TargetReg = Reg; // Codegen the rest of the GEP into this
    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();

      // idx is the index into the array.  Unlike with structure
      // indices, we may not know its actual value at code-generation
      // time.
      assert(idx->getType() == Type::LongTy && "Bad GEP array index!");

      // Most GEP instructions use a [cast (int/uint) to LongTy] as their
      // operand on X86.  Handle this case directly now...
      if (CastInst *CI = dyn_cast<CastInst>(idx))
        if (CI->getOperand(0)->getType() == Type::IntTy ||
            CI->getOperand(0)->getType() == Type::UIntTy)
          idx = CI->getOperand(0);

      // We want to add BaseReg to (idxReg * sizeof ElementType).  First, we
      // must find the size of the pointed-to type (not coincidentally, the
      // next type is the type of the elements in the array).
      const Type *ElTy = SqTy->getElementType();
      unsigned elementSize = TD.getTypeSize(ElTy);

      // If idxReg is a constant, we don't need to perform the multiply!
      if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
        if (!CSI->isNullValue()) {
          unsigned Offset = elementSize*CSI->getValue();
          unsigned Reg = makeAnotherReg(Type::UIntTy);
          BMI(MBB, IP, X86::ADDri32, 2, TargetReg).addReg(Reg).addZImm(Offset);
          --IP;            // Insert the next instruction before this one.
          TargetReg = Reg; // Codegen the rest of the GEP into this
        }
      } else if (elementSize == 1) {
        // If the element size is 1, we don't have to multiply, just add
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BMI(MBB, IP, X86::ADDrr32, 2, TargetReg).addReg(Reg).addReg(idxReg);
        --IP;            // Insert the next instruction before this one.
        TargetReg = Reg; // Codegen the rest of the GEP into this
      } else {
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned OffsetReg = makeAnotherReg(Type::UIntTy);

        // Make sure we can back the iterator up to point to the first
        // instruction emitted.
        MachineBasicBlock::iterator BeforeIt = IP;
        if (IP == MBB->begin())
          BeforeIt = MBB->end();
        else
          --BeforeIt;
        doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize);

        // Emit an ADD to add OffsetReg to the basePtr.
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BMI(MBB, IP, X86::ADDrr32, 2, TargetReg).addReg(Reg).addReg(OffsetReg);

        // Step to the first instruction of the multiply.
        if (BeforeIt == MBB->end())
          IP = MBB->begin();
        else
          IP = ++BeforeIt;

        TargetReg = Reg; // Codegen the rest of the GEP into this
      }
    }
  }
}

/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
/// frame manager, otherwise do it the hard way.
///
void ISel::visitAllocaInst(AllocaInst &I) {
  // Find the data size of the alloca inst's getAllocatedType.
  const Type *Ty = I.getAllocatedType();
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);

  // If this is a fixed size alloca in the entry block for the function,
  // statically stack allocate the space.
  //
  if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(I.getArraySize())) {
    if (I.getParent() == I.getParent()->getParent()->begin()) {
      TySize *= CUI->getValue();   // Get total allocated size...
      unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);

      // Create a new stack object using the frame manager...
      int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
      addFrameReference(BuildMI(BB, X86::LEAr32, 5, getReg(I)), FrameIdx);
      return;
    }
  }

  // Create a register to hold the temporary result of multiplying the type
  // size constant by the variable amount.
  unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
  unsigned SrcReg1 = getReg(I.getArraySize());

  // TotalSizeReg = mul <numelements>, <TypeSize>
  MachineBasicBlock::iterator MBBI = BB->end();
  doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);
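
  // Round the byte count up to a multiple of 16 ((Size+15) & ~15) so that
  // ESP stays 16-byte aligned after the subtraction below.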
  // AddedSize = add <TotalSizeReg>, 15
  unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ADDri32, 2, AddedSizeReg).addReg(TotalSizeReg).addZImm(15);

  // AlignedSize = and <AddedSize>, ~15
  unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ANDri32, 2, AlignedSize).addReg(AddedSizeReg).addZImm(~15);

  // Subtract size from stack pointer, thereby allocating some space.
  BuildMI(BB, X86::SUBrr32, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);

  // Put a pointer to the space into the result register, by copying
  // the stack pointer.
  BuildMI(BB, X86::MOVrr32, 1, getReg(I)).addReg(X86::ESP);

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  F->getFrameInfo()->CreateVariableSizedObject();
}

/// visitMallocInst - Malloc instructions are code generated into direct calls
/// to the library malloc.
///
void ISel::visitMallocInst(MallocInst &I) {
  unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType());
  unsigned Arg;

  if (ConstantUInt *C = dyn_cast<ConstantUInt>(I.getOperand(0))) {
    Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize));
  } else {
    Arg = makeAnotherReg(Type::UIntTy);
    unsigned Op0Reg = getReg(I.getOperand(0));
    MachineBasicBlock::iterator MBBI = BB->end();
    doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize);
  }

  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(Arg, Type::UIntTy));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("malloc", true);
  doCall(ValueRecord(getReg(I), I.getType()), TheCall, Args);
}

/// visitFreeInst - Free instructions are code gen'd to call the free libc
/// function.
///
void ISel::visitFreeInst(FreeInst &I) {
  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(I.getOperand(0)));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("free", true);
  doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
}

/// createX86SimpleInstructionSelector - This pass converts an LLVM function
/// into a machine code representation in a very simple peephole fashion.  The
/// generated code sucks but the implementation is nice and simple.
///
FunctionPass *llvm::createX86SimpleInstructionSelector(TargetMachine &TM) {
  return new ISel(TM);
}