//===-- InstSelectSimple.cpp - A simple instruction selector for x86 -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a simple peephole instruction selector for the x86 target
//
//===----------------------------------------------------------------------===//
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicLowering.h"
#include "llvm/Pass.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/CFG.h"
#include "Support/Statistic.h"
using namespace llvm;

namespace {
  Statistic<>
  NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
  struct ISel : public FunctionPass, InstVisitor<ISel> {
    TargetMachine &TM;
    MachineFunction *F;                 // The function we are compiling into
    MachineBasicBlock *BB;              // The current MBB we are compiling
    int VarArgsFrameIndex;              // FrameIndex for start of varargs area
    int ReturnAddressIndex;             // FrameIndex for the return address

    std::map<Value*, unsigned> RegMap;  // Mapping between Val's and SSA Regs

    // MBBMap - Mapping between LLVM BB -> Machine BB
    std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;

    ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}
    /// runOnFunction - Top level implementation of instruction selection for
    /// the entire function.
    ///
    bool runOnFunction(Function &Fn) {
      // First pass over the function, lower any unknown intrinsic functions
      // with the IntrinsicLowering class.
      LowerUnknownIntrinsicFunctionCalls(Fn);

      F = &MachineFunction::construct(&Fn, TM);

      // Create all of the machine basic blocks for the function...
      for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
        F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));

      BB = &F->front();

      // Set up a frame object for the return address.  This is used by the
      // llvm.returnaddress & llvm.frameaddress intrinsics.
      ReturnAddressIndex = F->getFrameInfo()->CreateFixedObject(4, -4);

      // Copy incoming arguments off of the stack...
      LoadArgumentsToVirtualRegs(Fn);

      // Instruction select everything except PHI nodes
      visit(Fn);

      // Select the PHI nodes
      SelectPHINodes();

      // Insert the FP_REG_KILL instructions into blocks that need them.
      InsertFPRegKills();

      RegMap.clear();
      MBBMap.clear();
      F = 0;

      // We always build a machine code representation for the function
      return true;
    }

    virtual const char *getPassName() const {
      return "X86 Simple Instruction Selection";
    }
    /// visitBasicBlock - This method is called when we are visiting a new basic
    /// block.  It switches to the MachineBasicBlock that was created for the
    /// block in runOnFunction.  Subsequent visit* calls for the instructions in
    /// the basic block will emit code into this MachineBasicBlock.
    ///
    void visitBasicBlock(BasicBlock &LLVM_BB) {
      BB = MBBMap[&LLVM_BB];
    }
    /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
    /// function, lowering any calls to unknown intrinsic functions into the
    /// equivalent LLVM code.
    void LowerUnknownIntrinsicFunctionCalls(Function &F);

    /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
    /// from the stack into virtual registers.
    ///
    void LoadArgumentsToVirtualRegs(Function &F);

    /// SelectPHINodes - Insert machine code to generate phis.  This is tricky
    /// because we have to generate our sources into the source basic blocks,
    /// not the current one.
    ///
    void SelectPHINodes();

    /// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks
    /// that need them.  This only occurs due to the floating point stackifier
    /// not being aggressive enough to handle arbitrary global stackification.
    ///
    void InsertFPRegKills();

    // Visitation methods for various instructions.  These methods simply emit
    // fixed X86 code for each instruction.
    //

    // Control flow operators
    void visitReturnInst(ReturnInst &RI);
    void visitBranchInst(BranchInst &BI);

    struct ValueRecord {
      Value *Val;
      unsigned Reg;
      const Type *Ty;
      ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {}
      ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {}
    };

    void doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                const std::vector<ValueRecord> &Args);
    void visitCallInst(CallInst &I);
    void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I);

    // Arithmetic operators
    void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass);
    void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); }
    void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); }
    void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                    unsigned DestReg, const Type *DestTy,
                    unsigned Op0Reg, unsigned Op1Reg);
    void doMultiplyConst(MachineBasicBlock *MBB,
                         MachineBasicBlock::iterator MBBI,
                         unsigned DestReg, const Type *DestTy,
                         unsigned Op0Reg, unsigned Op1Val);
    void visitMul(BinaryOperator &B);

    void visitDiv(BinaryOperator &B) { visitDivRem(B); }
    void visitRem(BinaryOperator &B) { visitDivRem(B); }
    void visitDivRem(BinaryOperator &B);

    // Bitwise operators
    void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); }
    void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); }
    void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); }

    // Comparison operators...
    void visitSetCondInst(SetCondInst &I);
    unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                            MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator MBBI);

    // Memory Instructions
    void visitLoadInst(LoadInst &I);
    void visitStoreInst(StoreInst &I);
    void visitGetElementPtrInst(GetElementPtrInst &I);
    void visitAllocaInst(AllocaInst &I);
    void visitMallocInst(MallocInst &I);
    void visitFreeInst(FreeInst &I);

    // Other operators
    void visitShiftInst(ShiftInst &I);
    void visitPHINode(PHINode &I) {}  // PHI nodes handled by second pass
    void visitCastInst(CastInst &I);
    void visitVANextInst(VANextInst &I);
    void visitVAArgInst(VAArgInst &I);

    void visitInstruction(Instruction &I) {
      std::cerr << "Cannot instruction select: " << I;
      abort();
    }

    /// promote32 - Make a value 32-bits wide, and put it somewhere.
    ///
    void promote32(unsigned targetReg, const ValueRecord &VR);

    // getGEPIndex - This is used to fold GEP instructions into X86 addressing
    // modes.
    void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                     std::vector<Value*> &GEPOps,
                     std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
                     unsigned &Scale, unsigned &IndexReg, unsigned &Disp);
    /// isGEPFoldable - Return true if the specified GEP can be completely
    /// folded into the addressing mode of a load/store or lea instruction.
    bool isGEPFoldable(MachineBasicBlock *MBB,
                       Value *Src, User::op_iterator IdxBegin,
                       User::op_iterator IdxEnd, unsigned &BaseReg,
                       unsigned &Scale, unsigned &IndexReg, unsigned &Disp);

    /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
    /// constant expression GEP support.
    ///
    void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                          Value *Src, User::op_iterator IdxBegin,
                          User::op_iterator IdxEnd, unsigned TargetReg);

    /// emitCastOperation - Common code shared between visitCastInst and
    /// constant expression cast support.
    void emitCastOperation(MachineBasicBlock *BB,MachineBasicBlock::iterator IP,
                           Value *Src, const Type *DestTy, unsigned TargetReg);

    /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
    /// and constant expression support.
    void emitSimpleBinaryOperation(MachineBasicBlock *BB,
                                   MachineBasicBlock::iterator IP,
                                   Value *Op0, Value *Op1,
                                   unsigned OperatorClass, unsigned TargetReg);

    void emitDivRemOperation(MachineBasicBlock *BB,
                             MachineBasicBlock::iterator IP,
                             unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
                             const Type *Ty, unsigned TargetReg);

    /// emitSetCCOperation - Common code shared between visitSetCondInst and
    /// constant expression support.
    void emitSetCCOperation(MachineBasicBlock *BB,
                            MachineBasicBlock::iterator IP,
                            Value *Op0, Value *Op1, unsigned Opcode,
                            unsigned TargetReg);

    /// emitShiftOperation - Common code shared between visitShiftInst and
    /// constant expression support.
    void emitShiftOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator IP,
                            Value *Op, Value *ShiftAmount, bool isLeftShift,
                            const Type *ResultTy, unsigned DestReg);

    /// copyConstantToRegister - Output the instructions required to put the
    /// specified constant into the specified register.
    ///
    void copyConstantToRegister(MachineBasicBlock *MBB,
                                MachineBasicBlock::iterator MBBI,
                                Constant *C, unsigned Reg);
    /// makeAnotherReg - This method returns the next register number we haven't
    /// yet used.
    ///
    /// Long values are handled somewhat specially.  They are always allocated
    /// as pairs of 32 bit integer values.  The register number returned is the
    /// lower 32 bits of the long value, and the regNum+1 is the upper 32 bits
    /// of the long value.
    ///
    unsigned makeAnotherReg(const Type *Ty) {
      assert(dynamic_cast<const X86RegisterInfo*>(TM.getRegisterInfo()) &&
             "Current target doesn't have X86 reg info??");
      const X86RegisterInfo *MRI =
        static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
      if (Ty == Type::LongTy || Ty == Type::ULongTy) {
        const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy);
        // Create the lower part
        F->getSSARegMap()->createVirtualRegister(RC);
        // Create the upper part.
        return F->getSSARegMap()->createVirtualRegister(RC)-1;
      }

      // Add the mapping of regnumber => reg class to MachineFunction
      const TargetRegisterClass *RC = MRI->getRegClassForType(Ty);
      return F->getSSARegMap()->createVirtualRegister(RC);
    }
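
    // Illustrative example (not part of the original source): if the SSARegMap
    // hands out %reg1024 and %reg1025 for a LongTy request, makeAnotherReg
    // returns 1024; the low 32 bits live in %reg1024 and the high 32 bits in
    // %reg1025, matching the Reg/Reg+1 convention used throughout this file.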
    /// getReg - This method turns an LLVM value into a register number.  This
    /// is guaranteed to produce the same register number for a particular value
    /// every time it is queried.
    ///
    unsigned getReg(Value &V) { return getReg(&V); }  // Allow references
    unsigned getReg(Value *V) {
      // Just append to the end of the current bb.
      MachineBasicBlock::iterator It = BB->end();
      return getReg(V, BB, It);
    }
    unsigned getReg(Value *V, MachineBasicBlock *MBB,
                    MachineBasicBlock::iterator IPt) {
      unsigned &Reg = RegMap[V];
      if (Reg == 0)
        Reg = makeAnotherReg(V->getType());

      // If this operand is a constant, emit the code to copy the constant into
      // the register here...
      //
      if (Constant *C = dyn_cast<Constant>(V)) {
        copyConstantToRegister(MBB, IPt, C, Reg);
        RegMap.erase(V);  // Assign a new name to this constant if ref'd again
      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
        // Move the address of the global into the register
        BuildMI(*MBB, IPt, X86::MOV32ri, 1, Reg).addGlobalAddress(GV);
        RegMap.erase(V);  // Assign a new name to this address if ref'd again
      }

      return Reg;
    }
  };
}
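
// Note (editorial observation, not in the original): because getReg erases
// constants and global addresses from RegMap after materializing them, asking
// for the same Constant twice emits a fresh copy into a fresh virtual
// register each time rather than reusing the first one.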
/// TypeClass - Used by the X86 backend to group LLVM types by their basic X86
/// representation.
///
enum TypeClass {
  cByte, cShort, cInt, cFP, cLong
};
/// getClass - Turn a primitive type into a "class" number which is based on the
/// size of the type, and whether or not it is floating point.
///
static inline TypeClass getClass(const Type *Ty) {
  switch (Ty->getPrimitiveID()) {
  case Type::SByteTyID:
  case Type::UByteTyID:   return cByte;      // Byte operands are class #0
  case Type::ShortTyID:
  case Type::UShortTyID:  return cShort;     // Short operands are class #1
  case Type::IntTyID:
  case Type::UIntTyID:
  case Type::PointerTyID: return cInt;       // Int's and pointers are class #2

  case Type::FloatTyID:
  case Type::DoubleTyID:  return cFP;        // Floating Point is #3

  case Type::LongTyID:
  case Type::ULongTyID:   return cLong;      // Longs are class #4
  default:
    assert(0 && "Invalid type to getClass!");
    return cByte;  // not reached
  }
}
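
// For example, getClass(Type::UShortTy) == cShort and getClass(Type::DoubleTy)
// == cFP, so an unsigned short add selects the 16-bit column and a double add
// selects the FP column of the opcode tables used below.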
// getClassB - Just like getClass, but treat boolean values as bytes.
static inline TypeClass getClassB(const Type *Ty) {
  if (Ty == Type::BoolTy) return cByte;
  return getClass(Ty);
}
/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
///
void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator IP,
                                  Constant *C, unsigned R) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    unsigned Class = 0;
    switch (CE->getOpcode()) {
    case Instruction::GetElementPtr:
      emitGEPOperation(MBB, IP, CE->getOperand(0),
                       CE->op_begin()+1, CE->op_end(), R);
      return;
    case Instruction::Cast:
      emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R);
      return;

    case Instruction::Xor: ++Class; // FALL THROUGH
    case Instruction::Or:  ++Class; // FALL THROUGH
    case Instruction::And: ++Class; // FALL THROUGH
    case Instruction::Sub: ++Class; // FALL THROUGH
    case Instruction::Add:
      emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                                Class, R);
      return;

    case Instruction::Mul: {
      unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
      unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
      doMultiply(MBB, IP, R, CE->getType(), Op0Reg, Op1Reg);
      return;
    }
    case Instruction::Div:
    case Instruction::Rem: {
      unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
      unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
      emitDivRemOperation(MBB, IP, Op0Reg, Op1Reg,
                          CE->getOpcode() == Instruction::Div,
                          CE->getType(), R);
      return;
    }

    case Instruction::SetNE:
    case Instruction::SetEQ:
    case Instruction::SetLT:
    case Instruction::SetGT:
    case Instruction::SetLE:
    case Instruction::SetGE:
      emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode(), R);
      return;

    case Instruction::Shl:
    case Instruction::Shr:
      emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode() == Instruction::Shl, CE->getType(), R);
      return;

    default:
      std::cerr << "Offending expr: " << *C << "\n";
      assert(0 && "Constant expression not yet handled!\n");
    }
  }
  if (C->getType()->isIntegral()) {
    unsigned Class = getClassB(C->getType());

    if (Class == cLong) {
      // Copy the value into the register pair.
      uint64_t Val = cast<ConstantInt>(C)->getRawValue();
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(Val & 0xFFFFFFFF);
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R+1).addImm(Val >> 32);
      return;
    }

    assert(Class <= cInt && "Type not handled yet!");

    static const unsigned IntegralOpcodeTab[] = {
      X86::MOV8ri, X86::MOV16ri, X86::MOV32ri
    };

    if (C->getType() == Type::BoolTy) {
      BuildMI(*MBB, IP, X86::MOV8ri, 1, R).addImm(C == ConstantBool::True);
    } else {
      ConstantInt *CI = cast<ConstantInt>(C);
      BuildMI(*MBB, IP, IntegralOpcodeTab[Class],1,R).addImm(CI->getRawValue());
    }
  } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->isExactlyValue(+0.0))
      BuildMI(*MBB, IP, X86::FLD0, 0, R);
    else if (CFP->isExactlyValue(+1.0))
      BuildMI(*MBB, IP, X86::FLD1, 0, R);
    else {
      // Otherwise we need to spill the constant to memory...
      MachineConstantPool *CP = F->getConstantPool();
      unsigned CPI = CP->getConstantPoolIndex(CFP);
      const Type *Ty = CFP->getType();

      assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) &&
             "Unknown FP type!");
      unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLD32m : X86::FLD64m;
      addConstantPoolReference(BuildMI(*MBB, IP, LoadOpcode, 4, R), CPI);
    }
  } else if (isa<ConstantPointerNull>(C)) {
    // Copy zero (null pointer) to the register.
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(0);
  } else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addGlobalAddress(CPR->getValue());
  } else {
    std::cerr << "Offending constant: " << *C << "\n";
    assert(0 && "Type not handled yet!");
  }
}
/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from
/// the stack into virtual registers.
///
void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
  // Emit instructions to load the arguments...  On entry to a function on the
  // X86, the stack frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is four bytes in size
  //    ...
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  MachineFrameInfo *MFI = F->getFrameInfo();

  for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) {
    unsigned Reg = getReg(*I);

    int FI;  // Frame object index
    switch (getClassB(I->getType())) {
    case cByte:
      FI = MFI->CreateFixedObject(1, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Reg), FI);
      break;
    case cShort:
      FI = MFI->CreateFixedObject(2, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Reg), FI);
      break;
    case cInt:
      FI = MFI->CreateFixedObject(4, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
      break;
    case cLong:
      FI = MFI->CreateFixedObject(8, ArgOffset);
      addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
      addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg+1), FI, 4);
      ArgOffset += 4;   // longs require 4 additional bytes
      break;
    case cFP: {
      unsigned Opcode;
      if (I->getType() == Type::FloatTy) {
        Opcode = X86::FLD32m;
        FI = MFI->CreateFixedObject(4, ArgOffset);
      } else {
        Opcode = X86::FLD64m;
        FI = MFI->CreateFixedObject(8, ArgOffset);
        ArgOffset += 4;   // doubles require 4 additional bytes
      }
      addFrameReference(BuildMI(BB, Opcode, 4, Reg), FI);
      break;
    }
    default:
      assert(0 && "Unhandled argument type!");
    }
    ArgOffset += 4;  // Each argument takes at least 4 bytes on the stack...
  }

  // If the function takes variable number of arguments, add a frame offset for
  // the start of the first vararg value... this is used to expand
  // llvm.va_start.
  if (Fn.getFunctionType()->isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
}
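
// Worked example (editorial, not in the original): for a function
// f(int a, long b, double c), the loop above assigns fixed-object offsets
// a -> 0, b -> 4 (b is 8 bytes, so ArgOffset gets the extra +4), and c -> 12,
// which correspond to [ESP+4], [ESP+8] and [ESP+16] at function entry once
// the return-address slot is accounted for.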
/// SelectPHINodes - Insert machine code to generate phis.  This is tricky
/// because we have to generate our sources into the source basic blocks, not
/// the current one.
///
void ISel::SelectPHINodes() {
  const TargetInstrInfo &TII = TM.getInstrInfo();
  const Function &LF = *F->getFunction();  // The LLVM function...
  for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
    const BasicBlock *BB = I;
    MachineBasicBlock &MBB = *MBBMap[I];

    // Loop over all of the PHI nodes in the LLVM basic block...
    MachineBasicBlock::iterator PHIInsertPoint = MBB.begin();
    for (BasicBlock::const_iterator I = BB->begin();
         PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I)); ++I) {

      // Create a new machine instr PHI node, and insert it.
      unsigned PHIReg = getReg(*PN);
      MachineInstr *PhiMI = BuildMI(MBB, PHIInsertPoint,
                                    X86::PHI, PN->getNumOperands(), PHIReg);

      MachineInstr *LongPhiMI = 0;
      if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy)
        LongPhiMI = BuildMI(MBB, PHIInsertPoint,
                            X86::PHI, PN->getNumOperands(), PHIReg+1);

      // PHIValues - Map of blocks to incoming virtual registers.  We use this
      // so that we only initialize one incoming value for a particular block,
      // even if the block has multiple entries in the PHI node.
      //
      std::map<MachineBasicBlock*, unsigned> PHIValues;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)];
        unsigned ValReg;
        std::map<MachineBasicBlock*, unsigned>::iterator EntryIt =
          PHIValues.lower_bound(PredMBB);

        if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) {
          // We already inserted an initialization of the register for this
          // predecessor.  Recycle it.
          ValReg = EntryIt->second;
        } else {
          // Get the incoming value into a virtual register.
          //
          Value *Val = PN->getIncomingValue(i);

          // If this is a constant or GlobalValue, we may have to insert code
          // into the basic block to compute it into a virtual register.
          if (isa<Constant>(Val) || isa<GlobalValue>(Val)) {
            // Because we don't want to clobber any values which might be in
            // physical registers with the computation of this constant (which
            // might be arbitrarily complex if it is a constant expression),
            // just insert the computation at the top of the basic block.
            MachineBasicBlock::iterator PI = PredMBB->begin();

            // Skip over any PHI nodes though!
            while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
              ++PI;

            ValReg = getReg(Val, PredMBB, PI);
          } else {
            ValReg = getReg(Val);
          }

          // Remember that we inserted a value for this PHI for this predecessor
          PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg));
        }

        PhiMI->addRegOperand(ValReg);
        PhiMI->addMachineBasicBlockOperand(PredMBB);
        if (LongPhiMI) {
          LongPhiMI->addRegOperand(ValReg+1);
          LongPhiMI->addMachineBasicBlockOperand(PredMBB);
        }
      }

      // Now that we emitted all of the incoming values for the PHI node, make
      // sure to reposition the InsertPoint after the PHI that we just added.
      // This is needed because we might have inserted a constant into this
      // block, right after the PHI's which is before the old insert point!
      PHIInsertPoint = LongPhiMI ? LongPhiMI : PhiMI;
      ++PHIInsertPoint;
    }
  }
}
/// RequiresFPRegKill - The floating point stackifier pass cannot insert
/// compensation code on critical edges.  As such, it requires that we kill all
/// FP registers on the exit from any blocks that either ARE critical edges, or
/// branch to a block that has incoming critical edges.
///
/// Note that this kill instruction will eventually be eliminated when
/// restrictions in the stackifier are relaxed.
///
static bool RequiresFPRegKill(const BasicBlock *BB) {
  for (succ_const_iterator SI = succ_begin(BB), E = succ_end(BB); SI!=E; ++SI) {
    const BasicBlock *Succ = *SI;
    pred_const_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
    ++PI;  // Every block has at least one predecessor (this one).
    if (PI != PE) {  // If it has exactly one, this isn't a critical edge...
      // If this block has more than one predecessor, check all of the
      // predecessors to see if they have multiple successors.  If so, then the
      // block we are analyzing needs an FPRegKill.
      for (PI = pred_begin(Succ); PI != PE; ++PI) {
        const BasicBlock *Pred = *PI;
        succ_const_iterator SI2 = succ_begin(Pred);
        ++SI2;  // There must be at least one successor of this block.
        if (SI2 != succ_end(Pred))
          return true;   // Yes, we must insert the kill on this edge.
      }
    }
  }
  // If we got this far, there is no need to insert the kill instruction.
  return false;
}
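
// Editorial sketch of the case being detected: in a CFG like
//
//        A       B
//         \     / \
//          \   /   C
//           Succ
//
// the edge B->Succ is critical (B has two successors and Succ has two
// predecessors), so any block branching to Succ must kill its FP registers.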
// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks that
// need them.  This only occurs due to the floating point stackifier not being
// aggressive enough to handle arbitrary global stackification.
//
// Currently we insert an FP_REG_KILL instruction into each block that uses or
// defines a floating point virtual register.
//
// When the global register allocators (like linear scan) finally update live
// variable analysis, we can keep floating point values in registers across
// portions of the CFG that do not involve critical edges.  This will be a big
// win, but we are waiting on the global allocators before we can do this.
//
// With a bit of work, the floating point stackifier pass can be enhanced to
// break critical edges as needed (to make a place to put compensation code),
// but this will require some infrastructure improvements as well.
//
void ISel::InsertFPRegKills() {
  SSARegMap &RegMap = *F->getSSARegMap();

  for (MachineFunction::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I!=E; ++I)
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = I->getOperand(i);
        if (MO.isRegister() && MO.getReg()) {
          unsigned Reg = MO.getReg();
          if (MRegisterInfo::isVirtualRegister(Reg))
            if (RegMap.getRegClass(Reg)->getSize() == 10)
              goto UsesFPReg;
        }
      }
    // If we haven't found an FP register use or def in this basic block, check
    // to see if any of our successors has an FP PHI node, which will cause a
    // copy to be inserted into this block.
    for (succ_const_iterator SI = succ_begin(BB->getBasicBlock()),
         E = succ_end(BB->getBasicBlock()); SI != E; ++SI) {
      MachineBasicBlock *SBB = MBBMap[*SI];
      for (MachineBasicBlock::iterator I = SBB->begin();
           I != SBB->end() && I->getOpcode() == X86::PHI; ++I) {
        if (RegMap.getRegClass(I->getOperand(0).getReg())->getSize() == 10)
          goto UsesFPReg;
      }
    }
    continue;
  UsesFPReg:
    // Okay, this block uses an FP register.  If the block has successors (ie,
    // it's not an unwind/return), insert the FP_REG_KILL instruction.
    if (BB->getBasicBlock()->getTerminator()->getNumSuccessors() &&
        RequiresFPRegKill(BB->getBasicBlock())) {
      BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
      ++NumFPKill;
    }
  }
}
// canFoldSetCCIntoBranch - Return the setcc instruction if we can fold it into
// the conditional branch instruction which is the only user of the cc
// instruction.  This is the case if the conditional branch is the only user of
// the setcc, and if the setcc is in the same basic block as the conditional
// branch.  We also don't handle long arguments below, so we reject them here as
// well.
//
static SetCondInst *canFoldSetCCIntoBranch(Value *V) {
  if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
    if (SCI->hasOneUse() && isa<BranchInst>(SCI->use_back()) &&
        SCI->getParent() == cast<BranchInst>(SCI->use_back())->getParent()) {
      const Type *Ty = SCI->getOperand(0)->getType();
      if (Ty != Type::LongTy && Ty != Type::ULongTy)
        return SCI;
    }
  return 0;
}
// Return a fixed numbering for setcc instructions which does not depend on the
// order of the opcodes.
//
static unsigned getSetCCNumber(unsigned Opcode) {
  switch (Opcode) {
  default: assert(0 && "Unknown setcc instruction!");
  case Instruction::SetEQ: return 0;
  case Instruction::SetNE: return 1;
  case Instruction::SetLT: return 2;
  case Instruction::SetGE: return 3;
  case Instruction::SetGT: return 4;
  case Instruction::SetLE: return 5;
  }
}
// LLVM  -> X86 signed  X86 unsigned
// -----    ----------  ------------
// seteq -> sete        sete
// setne -> setne       setne
// setlt -> setl        setb
// setge -> setge       setae
// setgt -> setg        seta
// setle -> setle       setbe
// ----
//          sets        // Used by comparison with 0 optimization
//          setns
static const unsigned SetCCOpcodeTab[2][8] = {
  { X86::SETEr, X86::SETNEr, X86::SETBr, X86::SETAEr, X86::SETAr, X86::SETBEr,
    0, 0 },
  { X86::SETEr, X86::SETNEr, X86::SETLr, X86::SETGEr, X86::SETGr, X86::SETLEr,
    X86::SETSr, X86::SETNSr },
};
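
// For instance, a setlt indexes column 2: SetCCOpcodeTab[1][2] is SETLr for a
// signed comparison, while SetCCOpcodeTab[0][2] is SETBr for an unsigned one.
// The two trailing entries exist only in the signed row because the
// compare-with-zero optimization below rewrites setlt/setge into sign tests.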
// EmitComparison - This function emits a comparison of the two operands,
// returning the extended setcc code to use.
unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                              MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP) {
  // The arguments are already supposed to be of the same type.
  const Type *CompTy = Op0->getType();
  unsigned Class = getClassB(CompTy);
  unsigned Op0r = getReg(Op0, MBB, IP);

  // Special case handling of: cmp R, i
  if (Class == cByte || Class == cShort || Class == cInt)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
      uint64_t Op1v = CI->getRawValue();

      // Mask off any upper bits of the constant, if there are any...
      Op1v &= (1ULL << (8 << Class)) - 1;
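      // Editorial note: (8 << Class) is the operand width in bits, so the mask
      // is 0xFF for cByte (Class 0), 0xFFFF for cShort (Class 1), and
      // 0xFFFFFFFF for cInt (Class 2).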

      // If this is a comparison against zero, emit more efficient code.  We
      // can't handle unsigned comparisons against zero unless they are == or
      // !=.  These should have been strength reduced already anyway.
      if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {
        static const unsigned TESTTab[] = {
          X86::TEST8rr, X86::TEST16rr, X86::TEST32rr
        };
        BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);

        if (OpNum == 2) return 6;   // Map jl -> js
        if (OpNum == 3) return 7;   // Map jge -> jns
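        // Editorial note: after TEST reg,reg the sign flag holds the sign of
        // the register, so "X < 0" reduces to js (extended code 6) and
        // "X >= 0" to jns (extended code 7); this is why the opcode tables
        // above have eight columns instead of six.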
        return OpNum;
      }

      static const unsigned CMPTab[] = {
        X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
      };

      BuildMI(*MBB, IP, CMPTab[Class], 2).addReg(Op0r).addImm(Op1v);
      return OpNum;
    }

  // Special case handling of comparison against +/- 0.0
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op1))
    if (CFP->isExactlyValue(+0.0) || CFP->isExactlyValue(-0.0)) {
      BuildMI(*MBB, IP, X86::FTST, 1).addReg(Op0r);
      BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
      BuildMI(*MBB, IP, X86::SAHF, 1);
      return OpNum;
    }

  unsigned Op1r = getReg(Op1, MBB, IP);
  switch (Class) {
  default: assert(0 && "Unknown type class!");
    // Emit: cmp <var1>, <var2> (do the comparison).  We can
    // compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
    // 32-bit.
  case cByte:
    BuildMI(*MBB, IP, X86::CMP8rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cShort:
    BuildMI(*MBB, IP, X86::CMP16rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cInt:
    BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cFP:
    BuildMI(*MBB, IP, X86::FpUCOM, 2).addReg(Op0r).addReg(Op1r);
    BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
    BuildMI(*MBB, IP, X86::SAHF, 1);
    break;

  case cLong:
    if (OpNum < 2) {    // seteq, setne
      unsigned LoTmp = makeAnotherReg(Type::IntTy);
      unsigned HiTmp = makeAnotherReg(Type::IntTy);
      unsigned FinalTmp = makeAnotherReg(Type::IntTy);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, LoTmp).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
      BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
      break;  // Allow the sete or setne to be generated from flags set by OR
    }
    // Emit a sequence of code which compares the high and low parts once
    // each, then uses a conditional move to handle the overflow case.  For
    // example, a setlt for long would generate code like this:
    //
    //  AL = lo(op1) < lo(op2)   // Always unsigned comparison
    //  BL = hi(op1) < hi(op2)   // Signedness depends on operands
    //  dest = hi(op1) == hi(op2) ? AL : BL;
    //
    // FIXME: This would be much better if we had hierarchical register
    // classes!  Until then, hardcode registers so that we can deal with their
    // aliases (because we don't have conditional byte moves).
    //
    BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
    BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
    BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r+1).addReg(Op1r+1);
    BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
    BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
    BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
    BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
                                                 .addReg(X86::AX);
    // NOTE: visitSetCondInst knows that the value is dumped into the BL
    // register at this point for long values...
    break;
  }
  return OpNum;
}
/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
/// register, then move it to wherever the result should be.
///
void ISel::visitSetCondInst(SetCondInst &I) {
  if (canFoldSetCCIntoBranch(&I)) return;  // Fold this into a branch...

  unsigned DestReg = getReg(I);
  MachineBasicBlock::iterator MII = BB->end();
  emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(),
                     DestReg);
}
/// emitSetCCOperation - Common code shared between visitSetCondInst and
/// constant expression support.
void ISel::emitSetCCOperation(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP,
                              Value *Op0, Value *Op1, unsigned Opcode,
                              unsigned TargetReg) {
  unsigned OpNum = getSetCCNumber(Opcode);
  OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP);

  const Type *CompTy = Op0->getType();
  unsigned CompClass = getClassB(CompTy);
  bool isSigned = CompTy->isSigned() && CompClass != cFP;

  if (CompClass != cLong || OpNum < 2) {
    // Handle normal comparisons with a setcc instruction...
    BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg);
  } else {
    // Handle long comparisons by copying the value which is already in BL into
    // the register we want...
    BuildMI(*MBB, IP, X86::MOV8rr, 1, TargetReg).addReg(X86::BL);
  }
}
/// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide
/// operand, in the specified target register.
void ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
  bool isUnsigned = VR.Ty->isUnsigned();

  // Make sure we have the register number for this value...
  unsigned Reg = VR.Val ? getReg(VR.Val) : VR.Reg;

  switch (getClassB(VR.Ty)) {
  case cByte:
    // Extend value into target register (8->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr8, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr8, 1, targetReg).addReg(Reg);
    break;
  case cShort:
    // Extend value into target register (16->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr16, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr16, 1, targetReg).addReg(Reg);
    break;
  case cInt:
    // Move value into target register (32->32)
    BuildMI(BB, X86::MOV32rr, 1, targetReg).addReg(Reg);
    break;
  default:
    assert(0 && "Unpromotable operand class in promote32");
  }
}
/// 'ret' instruction - Here we are interested in meeting the x86 ABI.  As such,
/// we have the following possibilities:
///
///   ret void: No return value, simply emit a 'ret' instruction
///   ret sbyte, ubyte : Extend value into EAX and return
///   ret short, ushort: Extend value into EAX and return
///   ret int, uint    : Move value into EAX and return
///   ret pointer      : Move value into EAX and return
///   ret long, ulong  : Move value into EAX/EDX and return
///   ret float/double : Top of FP stack
///
void ISel::visitReturnInst(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
    return;
  }

  Value *RetVal = I.getOperand(0);
  unsigned RetReg = getReg(RetVal);
  switch (getClassB(RetVal->getType())) {
  case cByte:   // integral return values: extend or move into EAX and return
  case cShort:
  case cInt:
    promote32(X86::EAX, ValueRecord(RetReg, RetVal->getType()));
    // Declare that EAX is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
    break;
  case cFP:     // Floats & Doubles: Return in ST(0)
    BuildMI(BB, X86::FpSETRESULT, 1).addReg(RetReg);
    // Declare that top-of-stack is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
    break;
  case cLong:
    BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(RetReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RetReg+1);
    // Declare that EAX & EDX are live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
      .addReg(X86::ESP);
    break;
  default:
    visitInstruction(I);
  }
  // Emit a 'ret' instruction
  BuildMI(BB, X86::RET, 0);
}
// getBlockAfter - Return the basic block which occurs lexically after the
// specified one.
static inline BasicBlock *getBlockAfter(BasicBlock *BB) {
  Function::iterator I = BB; ++I;  // Get iterator to next block
  return I != BB->getParent()->end() ? &*I : 0;
}
/// visitBranchInst - Handle conditional and unconditional branches here.  Note
/// that since code layout is frozen at this point, that if we are trying to
/// jump to a block that is the immediate successor of the current block, we can
/// just make a fall-through (but we don't currently).
///
void ISel::visitBranchInst(BranchInst &BI) {
  BasicBlock *NextBB = getBlockAfter(BI.getParent());  // BB after current one

  if (!BI.isConditional()) {  // Unconditional branch?
    if (BI.getSuccessor(0) != NextBB)
      BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
    return;
  }

  // See if we can fold the setcc into the branch itself...
  SetCondInst *SCI = canFoldSetCCIntoBranch(BI.getCondition());
  if (SCI == 0) {
    // Nope, cannot fold setcc into this branch.  Emit a branch on a condition
    // computed some other way...
    unsigned condReg = getReg(BI.getCondition());
    BuildMI(BB, X86::CMP8ri, 2).addReg(condReg).addImm(0);
    if (BI.getSuccessor(1) == NextBB) {
      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JNE, 1).addPCDisp(BI.getSuccessor(0));
    } else {
      BuildMI(BB, X86::JE, 1).addPCDisp(BI.getSuccessor(1));

      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
    }
    return;
  }

  unsigned OpNum = getSetCCNumber(SCI->getOpcode());
  MachineBasicBlock::iterator MII = BB->end();
  OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB,MII);

  const Type *CompTy = SCI->getOperand(0)->getType();
  bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

  // LLVM  -> X86 signed  X86 unsigned
  // -----    ----------  ------------
  // seteq -> je          je
  // setne -> jne         jne
  // setlt -> jl          jb
  // setge -> jge         jae
  // setgt -> jg          ja
  // setle -> jle         jbe
  // ----
  //          js          // Used by comparison with 0 optimization
  //          jns
  static const unsigned OpcodeTab[2][8] = {
    { X86::JE, X86::JNE, X86::JB, X86::JAE, X86::JA, X86::JBE, 0, 0 },
    { X86::JE, X86::JNE, X86::JL, X86::JGE, X86::JG, X86::JLE,
      X86::JS, X86::JNS },
  };

  if (BI.getSuccessor(0) != NextBB) {
    BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(0));
    if (BI.getSuccessor(1) != NextBB)
      BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(1));
  } else {
    // Change to the inverse condition...
    if (BI.getSuccessor(1) != NextBB) {
      OpNum ^= 1;  // The table pairs each condition with its inverse.
      BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(1));
    }
  }
}
/// doCall - This emits an abstract call instruction, setting up the arguments
/// and the return value as appropriate.  For the actual function call itself,
/// it inserts the specified CallMI instruction into the stream.
///
void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                  const std::vector<ValueRecord> &Args) {
  // Count how many bytes are to be pushed on the stack...
  unsigned NumBytes = 0;

  if (!Args.empty()) {
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      switch (getClassB(Args[i].Ty)) {
      case cByte: case cShort: case cInt:
        NumBytes += 4; break;
      case cLong:
        NumBytes += 8; break;
      case cFP:
        NumBytes += Args[i].Ty == Type::FloatTy ? 4 : 8;
        break;
      default: assert(0 && "Unknown class!");
      }

    // Adjust the stack pointer for the new arguments...
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(NumBytes);

    // Arguments go on the stack in reverse order, as specified by the ABI.
    unsigned ArgOffset = 0;
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      unsigned ArgReg;
      switch (getClassB(Args[i].Ty)) {
      case cByte:
      case cShort: {
        // Promote arg to 32 bits wide into a temporary register...
        unsigned R = makeAnotherReg(Type::UIntTy);
        promote32(R, Args[i]);
        addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                     X86::ESP, ArgOffset).addReg(R);
        break;
      }
      case cInt:
        ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
        addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                     X86::ESP, ArgOffset).addReg(ArgReg);
        break;
      case cLong:
        ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
        addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                     X86::ESP, ArgOffset).addReg(ArgReg);
        addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                     X86::ESP, ArgOffset+4).addReg(ArgReg+1);
        ArgOffset += 4;   // 8 byte entry, not 4.
        break;
      case cFP:
        ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
        if (Args[i].Ty == Type::FloatTy) {
          addRegOffset(BuildMI(BB, X86::FST32m, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        } else {
          assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!");
          addRegOffset(BuildMI(BB, X86::FST64m, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
          ArgOffset += 4;   // 8 byte entry, not 4.
        }
        break;
      default: assert(0 && "Unknown class!");
      }
      ArgOffset += 4;
    }
  } else {
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(0);
  }

  BB->push_back(CallMI);

  BuildMI(BB, X86::ADJCALLSTACKUP, 1).addImm(NumBytes);

  // If there is a return value, scavenge the result from the location the call
  // leaves it in...
  //
  if (Ret.Ty != Type::VoidTy) {
    unsigned DestClass = getClassB(Ret.Ty);
    switch (DestClass) {
    case cByte:
    case cShort:
    case cInt: {
      // Integral results are in %eax, or the appropriate portion
      // thereof.
      static const unsigned regRegMove[] = {
        X86::MOV8rr, X86::MOV16rr, X86::MOV32rr
      };
      static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX };
      BuildMI(BB, regRegMove[DestClass], 1, Ret.Reg).addReg(AReg[DestClass]);
      break;
    }
    case cFP:     // Floating-point return values live in %ST(0)
      BuildMI(BB, X86::FpGETRESULT, 1, Ret.Reg);
      break;
    case cLong:   // Long values are left in EDX:EAX
      BuildMI(BB, X86::MOV32rr, 1, Ret.Reg).addReg(X86::EAX);
      BuildMI(BB, X86::MOV32rr, 1, Ret.Reg+1).addReg(X86::EDX);
      break;
    default: assert(0 && "Unknown class!");
    }
  }
}
/// visitCallInst - Push args on stack and do a procedure call instruction.
void ISel::visitCallInst(CallInst &CI) {
  MachineInstr *TheCall;
  if (Function *F = CI.getCalledFunction()) {
    // Is it an intrinsic function call?
    if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) {
      visitIntrinsicCall(ID, CI);   // Special intrinsics are not handled here
      return;
    }

    // Emit a CALL instruction with PC-relative displacement.
    TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true);
  } else {  // Emit an indirect call...
    unsigned Reg = getReg(CI.getCalledValue());
    TheCall = BuildMI(X86::CALL32r, 1).addReg(Reg);
  }

  std::vector<ValueRecord> Args;
  for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
    Args.push_back(ValueRecord(CI.getOperand(i)));

  unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0;
  doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
}
/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
/// function, lowering any calls to unknown intrinsic functions into the
/// equivalent LLVM code.
void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
      if (CallInst *CI = dyn_cast<CallInst>(I++))
        if (Function *F = CI->getCalledFunction())
          switch (F->getIntrinsicID()) {
          case Intrinsic::not_intrinsic:
          case Intrinsic::va_start:
          case Intrinsic::va_copy:
          case Intrinsic::va_end:
          case Intrinsic::returnaddress:
          case Intrinsic::frameaddress:
          case Intrinsic::memcpy:
          case Intrinsic::memset:
            // We directly implement these intrinsics
            break;
          default: {
            // All other intrinsic calls we must lower.
            Instruction *Before = CI->getPrev();
            TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
            if (Before) {   // Move iterator to instruction after call
              I = Before; ++I;
            } else {
              I = BB->begin();
            }
          }
          }
}
void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
  unsigned TmpReg1, TmpReg2;
  switch (ID) {
  case Intrinsic::va_start:
    // Get the address of the first vararg value...
    TmpReg1 = getReg(CI);
    addFrameReference(BuildMI(BB, X86::LEA32r, 5, TmpReg1), VarArgsFrameIndex);
    return;

  case Intrinsic::va_copy:
    TmpReg1 = getReg(CI);
    TmpReg2 = getReg(CI.getOperand(1));
    BuildMI(BB, X86::MOV32rr, 1, TmpReg1).addReg(TmpReg2);
    return;
  case Intrinsic::va_end: return;   // Noop on X86

  case Intrinsic::returnaddress:
  case Intrinsic::frameaddress:
    TmpReg1 = getReg(CI);
    if (cast<Constant>(CI.getOperand(1))->isNullValue()) {
      if (ID == Intrinsic::returnaddress) {
        // Just load the return address
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, TmpReg1),
                          ReturnAddressIndex);
      } else {
        addFrameReference(BuildMI(BB, X86::LEA32r, 4, TmpReg1),
                          ReturnAddressIndex, -4);
      }
    } else {
      // Values other than zero are not implemented yet.
      BuildMI(BB, X86::MOV32ri, 1, TmpReg1).addImm(0);
    }
    return;

  case Intrinsic::memcpy: {
    assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
    unsigned Align = 1;
    if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
      Align = AlignC->getRawValue();
      if (Align == 0) Align = 1;
    }

    // Turn the byte count into # iterations
    unsigned CountReg;
    unsigned Opcode;
    switch (Align & 3) {
    case 2:   // WORD aligned
      if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
        CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
      } else {
        CountReg = makeAnotherReg(Type::IntTy);
        unsigned ByteReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
      }
      Opcode = X86::REP_MOVSW;
      break;
    case 0:   // DWORD aligned
      if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
        CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
      } else {
        CountReg = makeAnotherReg(Type::IntTy);
        unsigned ByteReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
      }
      Opcode = X86::REP_MOVSD;
      break;
    default:  // BYTE aligned
      CountReg = getReg(CI.getOperand(3));
      Opcode = X86::REP_MOVSB;
      break;
    }

    // No matter what the alignment is, we put the source in ESI, the
    // destination in EDI, and the count in ECX.
    TmpReg1 = getReg(CI.getOperand(1));
    TmpReg2 = getReg(CI.getOperand(2));
    BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
    BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
    BuildMI(BB, Opcode, 0);
    return;
  }
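
  // Editorial example: an llvm.memcpy of 16 bytes with 4-byte alignment takes
  // the DWORD path above, so ECX is loaded with 16/4 = 4 and a single
  // REP_MOVSD copies four dwords from [ESI] to [EDI].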
  case Intrinsic::memset: {
    assert(CI.getNumOperands() == 5 && "Illegal llvm.memset call!");
    unsigned Align = 1;
    if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
      Align = AlignC->getRawValue();
      if (Align == 0) Align = 1;
    }

    // Turn the byte count into # iterations
    unsigned CountReg;
    unsigned Opcode;
    if (ConstantInt *ValC = dyn_cast<ConstantInt>(CI.getOperand(2))) {
      unsigned Val = ValC->getRawValue() & 255;

      // If the value is a constant, then we can potentially use larger copies.
      switch (Align & 3) {
      case 2:   // WORD aligned
        if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
          CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
        } else {
          CountReg = makeAnotherReg(Type::IntTy);
          unsigned ByteReg = getReg(CI.getOperand(3));
          BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
        }
        BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
        Opcode = X86::REP_STOSW;
        break;
      case 0:   // DWORD aligned
        if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
          CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
        } else {
          CountReg = makeAnotherReg(Type::IntTy);
          unsigned ByteReg = getReg(CI.getOperand(3));
          BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
        }
        Val = (Val << 8) | Val;
        BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
        Opcode = X86::REP_STOSD;
        break;
      default:  // BYTE aligned
        CountReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
        Opcode = X86::REP_STOSB;
        break;
      }
    } else {
      // If it's not a constant value we are storing, just fall back.  We could
      // try to be clever to form 16 bit and 32 bit values, but we don't yet.
      unsigned ValReg = getReg(CI.getOperand(2));
      BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
      CountReg = getReg(CI.getOperand(3));
      Opcode = X86::REP_STOSB;
    }

    // No matter what the alignment is, we put the fill value in AL/AX/EAX,
    // the destination in EDI, and the count in ECX.
    TmpReg1 = getReg(CI.getOperand(1));
    BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
    BuildMI(BB, Opcode, 0);
    return;
  }
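
  // Editorial example: an llvm.memset of 8 bytes to the constant 0xAB with
  // 4-byte alignment replicates the byte into EAX (0xABABABAB), loads ECX
  // with 8/4 = 2, and emits a single REP_STOSD.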

  default: assert(0 && "Error: unknown intrinsics should have been lowered!");
  }
}
/// visitSimpleBinary - Implement simple binary operators for integral types...
/// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
/// Xor.
void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
  unsigned DestReg = getReg(B);
  MachineBasicBlock::iterator MI = BB->end();
  emitSimpleBinaryOperation(BB, MI, B.getOperand(0), B.getOperand(1),
                            OperatorClass, DestReg);
}

/// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
/// and constant expression support.  OperatorClass is one of: 0 for Add,
/// 1 for Sub, 2 for And, 3 for Or, 4 for Xor.
///
void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
                                     MachineBasicBlock::iterator IP,
                                     Value *Op0, Value *Op1,
                                     unsigned OperatorClass, unsigned DestReg) {
  unsigned Class = getClassB(Op0->getType());

  // sub 0, X -> neg X
  if (OperatorClass == 1 && Class != cLong) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0)) {
      if (CI->isNullValue()) {
        unsigned op1Reg = getReg(Op1, MBB, IP);
        switch (Class) {
        default: assert(0 && "Unknown class for this function!");
        case cByte:
          BuildMI(*MBB, IP, X86::NEG8r, 1, DestReg).addReg(op1Reg);
          return;
        case cShort:
          BuildMI(*MBB, IP, X86::NEG16r, 1, DestReg).addReg(op1Reg);
          return;
        case cInt:
          BuildMI(*MBB, IP, X86::NEG32r, 1, DestReg).addReg(op1Reg);
          return;
        }
      }
    } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op0)) {
      if (CFP->isExactlyValue(-0.0)) {
        // -0.0 - X  ===  -X
        unsigned op1Reg = getReg(Op1, MBB, IP);
        BuildMI(*MBB, IP, X86::FCHS, 1, DestReg).addReg(op1Reg);
        return;
      }
    }
  }

  if (!isa<ConstantInt>(Op1) || Class == cLong) {
    static const unsigned OpcodeTab[][4] = {
      // Arithmetic operators
      { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, X86::FpADD },  // ADD
      { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB },  // SUB

      // Bitwise operators
      { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0 },  // AND
      { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0 },  // OR
      { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0 },  // XOR
    };

    bool isLong = false;
    if (Class == cLong) {
      isLong = true;
      Class = cInt;   // Bottom 32 bits are handled just like ints
    }

    unsigned Opcode = OpcodeTab[OperatorClass][Class];
    assert(Opcode && "Floating point arguments to logical inst?");
    unsigned Op0r = getReg(Op0, MBB, IP);
    unsigned Op1r = getReg(Op1, MBB, IP);
    BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);

    if (isLong) {   // Handle the upper 32 bits of long values...
      static const unsigned TopTab[] = {
        X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
      };
      BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
              DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
    }
    return;
  }

  // Special case: op Reg, <const>
  ConstantInt *Op1C = cast<ConstantInt>(Op1);
  unsigned Op0r = getReg(Op0, MBB, IP);

  // xor X, -1 -> not X
  if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
    static unsigned const NOTTab[] = { X86::NOT8r, X86::NOT16r, X86::NOT32r };
    BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
    return;
  }

  // add X, -1 -> dec X
  if (OperatorClass == 0 && Op1C->isAllOnesValue()) {
    static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
    BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
    return;
  }

  // add X, 1 -> inc X
  if (OperatorClass == 0 && Op1C->equalsInt(1)) {
    static unsigned const INCTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
    BuildMI(*MBB, IP, INCTab[Class], 1, DestReg).addReg(Op0r);
    return;
  }

  static const unsigned OpcodeTab[][3] = {
    // Arithmetic operators
    { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri },  // ADD
    { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri },  // SUB

    // Bitwise operators
    { X86::AND8ri, X86::AND16ri, X86::AND32ri },  // AND
    { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri },  // OR
    { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri },  // XOR
  };

  assert(Class < 3 && "General code handles 64-bit integer types!");
  unsigned Opcode = OpcodeTab[OperatorClass][Class];
  uint64_t Op1v = Op1C->getRawValue();

  // Mask off any upper bits of the constant, if there are any...
  Op1v &= (1ULL << (8 << Class)) - 1;
  BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1v);
}
/// doMultiply - Emit appropriate instructions to multiply together the
/// registers op0Reg and op1Reg, and put the result in DestReg.  The type of the
/// result should be given as DestTy.
///
void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                      unsigned DestReg, const Type *DestTy,
                      unsigned op0Reg, unsigned op1Reg) {
  unsigned Class = getClass(DestTy);
  switch (Class) {
  case cFP:   // Floating point multiply
    BuildMI(*MBB, MBBI, X86::FpMUL, 2, DestReg).addReg(op0Reg).addReg(op1Reg);
    return;
  case cInt:
  case cShort:
    BuildMI(*MBB, MBBI, Class == cInt ? X86::IMUL32rr:X86::IMUL16rr, 2, DestReg)
      .addReg(op0Reg).addReg(op1Reg);
    return;
  case cByte:
    // Must use the MUL instruction, which forces use of AL...
    BuildMI(*MBB, MBBI, X86::MOV8rr, 1, X86::AL).addReg(op0Reg);
    BuildMI(*MBB, MBBI, X86::MUL8r, 1).addReg(op1Reg);
    BuildMI(*MBB, MBBI, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
    return;
  default:
  case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
  }
}
// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N.  It
// returns zero when the input is not exactly a power of two.
static unsigned ExactLog2(unsigned Val) {
  if (Val == 0) return 0;
  unsigned Count = 1;
  while (Val != 1) {
    if (Val & 1) return 0;
    Val >>= 1;
    ++Count;
  }
  return Count;
}
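
// For example, ExactLog2(1) == 1, ExactLog2(8) == 4 (since 8 == 1 << 3), and
// ExactLog2(6) == 0 because 6 is not a power of two.  Callers therefore shift
// by (N-1) when a nonzero value comes back.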
/// doMultiplyConst - This method is specialized to efficiently codegen an 8,
/// 16, or 32-bit integer multiply by a constant.
void ISel::doMultiplyConst(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator IP,
                           unsigned DestReg, const Type *DestTy,
                           unsigned op0Reg, unsigned ConstRHS) {
  unsigned Class = getClass(DestTy);

  // If the element size is exactly a power of 2, use a shift to get it.
  if (unsigned Shift = ExactLog2(ConstRHS)) {
    switch (Class) {
    default: assert(0 && "Unknown class for this function!");
    case cByte:
      BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
      return;
    case cShort:
      BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
      return;
    case cInt:
      BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
      return;
    }
  }

  if (Class == cShort) {
    BuildMI(*MBB, IP, X86::IMUL16rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
    return;
  } else if (Class == cInt) {
    BuildMI(*MBB, IP, X86::IMUL32rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
    return;
  }

  // Most general case, emit a normal multiply...
  static const unsigned MOVriTab[] = {
    X86::MOV8ri, X86::MOV16ri, X86::MOV32ri
  };

  unsigned TmpReg = makeAnotherReg(DestTy);
  BuildMI(*MBB, IP, MOVriTab[Class], 1, TmpReg).addImm(ConstRHS);

  // Emit a MUL to multiply op0Reg by the constant now held in TmpReg.
  doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg);
}
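
// Editorial example: multiplying a 32-bit value by 8 takes the shift path
// above (ExactLog2(8) == 4, so SHL by 3), while multiplying by 10 falls
// through to a single IMUL32rri with immediate 10.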
/// visitMul - Multiplies are not simple binary operators because they must deal
/// with the EAX register explicitly.
///
void ISel::visitMul(BinaryOperator &I) {
  unsigned Op0Reg  = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  // Simple scalar multiply?
  if (I.getType() != Type::LongTy && I.getType() != Type::ULongTy) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1))) {
      unsigned Val = (unsigned)CI->getRawValue();  // Cannot be 64-bit constant
      MachineBasicBlock::iterator MBBI = BB->end();
      doMultiplyConst(BB, MBBI, DestReg, I.getType(), Op0Reg, Val);
    } else {
      unsigned Op1Reg  = getReg(I.getOperand(1));
      MachineBasicBlock::iterator MBBI = BB->end();
      doMultiply(BB, MBBI, DestReg, I.getType(), Op0Reg, Op1Reg);
    }
  } else {
    unsigned Op1Reg  = getReg(I.getOperand(1));

    // Long value.  We have to do things the hard way...
    // Multiply the two low parts... capturing carry into EDX
    BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
    BuildMI(BB, X86::MUL32r, 1).addReg(Op1Reg);  // AL*BL

    unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
    BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);      // AL*BL
    BuildMI(BB, X86::MOV32rr, 1, OverflowReg).addReg(X86::EDX);  // AL*BL >> 32

    MachineBasicBlock::iterator MBBI = BB->end();
    unsigned AHBLReg = makeAnotherReg(Type::UIntTy);   // AH*BL
    BuildMI(*BB, MBBI, X86::IMUL32rr,2,AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);

    unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
    BuildMI(*BB, MBBI, X86::ADD32rr, 2,                // AH*BL+(AL*BL >> 32)
            AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);

    unsigned ALBHReg = makeAnotherReg(Type::UIntTy);   // AL*BH
    BuildMI(*BB, MBBI, X86::IMUL32rr,2,ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);

    BuildMI(*BB, MBBI, X86::ADD32rr, 2,        // AL*BH + AH*BL + (AL*BL >> 32)
            DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
  }
}
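
// Editorial note on the decomposition above: writing each long as
// A = AH*2^32 + AL and B = BH*2^32 + BL,
//
//   A*B mod 2^64 = AL*BL + 2^32*((AL*BL >> 32) + AH*BL + AL*BH)
//
// so the low dword of AL*BL becomes the result's low half, and the three
// remaining terms (which only need their low 32 bits) are summed into the
// high half.  AH*BH is dropped entirely because it only affects bits >= 64.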
/// visitDivRem - Handle division and remainder instructions... these
/// instructions both require the same instructions to be generated, they just
/// select the result from a different register.  Note that both of these
/// instructions work differently for signed and unsigned operands.
///
void ISel::visitDivRem(BinaryOperator &I) {
  unsigned Op0Reg = getReg(I.getOperand(0));
  unsigned Op1Reg = getReg(I.getOperand(1));
  unsigned ResultReg = getReg(I);

  MachineBasicBlock::iterator IP = BB->end();
  emitDivRemOperation(BB, IP, Op0Reg, Op1Reg, I.getOpcode() == Instruction::Div,
                      I.getType(), ResultReg);
}
void ISel::emitDivRemOperation(MachineBasicBlock *BB,
                               MachineBasicBlock::iterator IP,
                               unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
                               const Type *Ty, unsigned ResultReg) {
  unsigned Class = getClass(Ty);
  switch (Class) {
  case cFP:   // Floating point divide
    if (isDiv) {
      BuildMI(*BB, IP, X86::FpDIV, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
    } else {  // Floating point remainder...
      MachineInstr *TheCall =
        BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("fmod", true);
      std::vector<ValueRecord> Args;
      Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy));
      Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy));
      doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args);
    }
    return;
  case cLong: {
    static const char *FnName[] =
      { "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" };

    unsigned NameIdx = Ty->isUnsigned()*2 + isDiv;
    MachineInstr *TheCall =
      BuildMI(X86::CALLpcrel32, 1).addExternalSymbol(FnName[NameIdx], true);

    std::vector<ValueRecord> Args;
    Args.push_back(ValueRecord(Op0Reg, Type::LongTy));
    Args.push_back(ValueRecord(Op1Reg, Type::LongTy));
    doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args);
    return;
  }
  case cByte: case cShort: case cInt:
    break;   // Small integrals, handled below...
  default: assert(0 && "Unknown class!");
  }
1703 static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
1704 static const unsigned MovOpcode[]={ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr };
1705 static const unsigned SarOpcode[]={ X86::SAR8ri, X86::SAR16ri, X86::SAR32ri };
1706 static const unsigned ClrOpcode[]={ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri };
1707 static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
1709 static const unsigned DivOpcode[][4] = {
1710 { X86::DIV8r , X86::DIV16r , X86::DIV32r , 0 }, // Unsigned division
1711 { X86::IDIV8r, X86::IDIV16r, X86::IDIV32r, 0 }, // Signed division
1714 bool isSigned = Ty->isSigned();
1715 unsigned Reg = Regs[Class];
1716 unsigned ExtReg = ExtRegs[Class];
1718 // Put the first operand into one of the A registers...
1719 BuildMI(*BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);
1722 // Emit a sign extension instruction...
1723 unsigned ShiftResult = makeAnotherReg(Ty);
1724 BuildMI(*BB, IP, SarOpcode[Class], 2,ShiftResult).addReg(Op0Reg).addImm(31);
1725 BuildMI(*BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
1727 // If unsigned, emit a zeroing instruction... (reg = 0)
1728 BuildMI(*BB, IP, ClrOpcode[Class], 2, ExtReg).addImm(0);
1731 // Emit the appropriate divide or remainder instruction...
1732 BuildMI(*BB, IP, DivOpcode[isSigned][Class], 1).addReg(Op1Reg);
1734 // Figure out which register we want to pick the result out of...
1735 unsigned DestReg = isDiv ? Reg : ExtReg;
1737 // Put the result into the destination register...
1738 BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
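
// For reference, a signed 32-bit divide therefore comes out roughly as the
// sequence below; 'rem' differs only in reading EDX instead of EAX at the
// end (registers shown concretely for illustration):
//   mov  eax, op0
//   sar  tmp, op0, 31       ; sign-extend the dividend (no CDQ used here)
//   mov  edx, tmp
//   idiv op1                ; EAX = quotient, EDX = remainder
//   mov  result, eax        ; (or edx for 'rem')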

/// Shift instructions: 'shl', 'sar', 'shr' - Some special cases here
/// for constant immediate shift values, and for constant immediate
/// shift values equal to 1. Even the general case is sort of special,
/// because the shift amount has to be in CL, not just any old register.
///
void ISel::visitShiftInst(ShiftInst &I) {
  MachineBasicBlock::iterator IP = BB->end ();
  emitShiftOperation (BB, IP, I.getOperand (0), I.getOperand (1),
                      I.getOpcode () == Instruction::Shl, I.getType (),
                      getReg (I));
}

/// emitShiftOperation - Common code shared between visitShiftInst and
/// constant expression support.
void ISel::emitShiftOperation(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP,
                              Value *Op, Value *ShiftAmount, bool isLeftShift,
                              const Type *ResultTy, unsigned DestReg) {
  unsigned SrcReg = getReg (Op, MBB, IP);
  bool isSigned = ResultTy->isSigned ();
  unsigned Class = getClass (ResultTy);

  static const unsigned ConstantOperand[][4] = {
    { X86::SHR8ri, X86::SHR16ri, X86::SHR32ri, X86::SHRD32rri8 },  // SHR
    { X86::SAR8ri, X86::SAR16ri, X86::SAR32ri, X86::SHRD32rri8 },  // SAR
    { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 },  // SHL
    { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 },  // SAL = SHL
  };

  static const unsigned NonConstantOperand[][4] = {
    { X86::SHR8rCL, X86::SHR16rCL, X86::SHR32rCL },  // SHR
    { X86::SAR8rCL, X86::SAR16rCL, X86::SAR32rCL },  // SAR
    { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL },  // SHL
    { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL },  // SAL = SHL
  };

  // Longs, as usual, are handled specially...
  if (Class == cLong) {
    // If we have a constant shift, we can generate much more efficient code
    // than otherwise...
    //
    if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
      unsigned Amount = CUI->getValue();
      if (Amount < 32) {
        const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
        if (isLeftShift) {
          BuildMI(*MBB, IP, Opc[3], 3,
              DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addImm(Amount);
          BuildMI(*MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addImm(Amount);
        } else {
          BuildMI(*MBB, IP, Opc[3], 3,
              DestReg).addReg(SrcReg  ).addReg(SrcReg+1).addImm(Amount);
          BuildMI(*MBB, IP, Opc[2],2,DestReg+1).addReg(SrcReg+1).addImm(Amount);
        }
      } else {                 // Shifting more than 32 bits
        Amount -= 32;
        if (isLeftShift) {
          BuildMI(*MBB, IP, X86::SHL32ri, 2,
                  DestReg + 1).addReg(SrcReg).addImm(Amount);
          BuildMI(*MBB, IP, X86::MOV32ri, 1,
                  DestReg).addImm(0);
        } else {
          unsigned Opcode = isSigned ? X86::SAR32ri : X86::SHR32ri;
          BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addImm(Amount);
          BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
        }
      }
    } else {
      unsigned TmpReg = makeAnotherReg(Type::IntTy);

      if (!isLeftShift && isSigned) {
        // If this is a SHR of a Long, then we need to do funny sign extension
        // stuff.  TmpReg gets the value to use as the high-part if we are
        // shifting more than 32 bits.
        BuildMI(*MBB, IP, X86::SAR32ri, 2, TmpReg).addReg(SrcReg).addImm(31);
      } else {
        // Other shifts use a fixed zero value if the shift is more than 32
        // bits.
        BuildMI(*MBB, IP, X86::MOV32ri, 1, TmpReg).addImm(0);
      }

      // Initialize CL with the shift amount...
      unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);

      unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
      unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
      if (isLeftShift) {
        // TmpReg2 = shld inHi, inLo
        BuildMI(*MBB, IP, X86::SHLD32rrCL,2,TmpReg2).addReg(SrcReg+1)
                                                    .addReg(SrcReg);
        // TmpReg3 = shl  inLo, CL
        BuildMI(*MBB, IP, X86::SHL32rCL, 1, TmpReg3).addReg(SrcReg);

        // Set the flags to indicate whether the shift was by 32 bits or more.
        BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);

        // DestHi = (>=32) ? TmpReg3 : TmpReg2;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
        // DestLo = (>=32) ? TmpReg : TmpReg3;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg).addReg(TmpReg3).addReg(TmpReg);
      } else {
        // TmpReg2 = shrd inLo, inHi
        BuildMI(*MBB, IP, X86::SHRD32rrCL,2,TmpReg2).addReg(SrcReg)
                                                    .addReg(SrcReg+1);
        // TmpReg3 = s[ah]r  inHi, CL
        BuildMI(*MBB, IP, isSigned ? X86::SAR32rCL : X86::SHR32rCL, 1, TmpReg3)
                       .addReg(SrcReg+1);

        // Set the flags to indicate whether the shift was by 32 bits or more.
        BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);

        // DestLo = (>=32) ? TmpReg3 : TmpReg2;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg).addReg(TmpReg2).addReg(TmpReg3);

        // DestHi = (>=32) ? TmpReg : TmpReg3;
        BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
                DestReg+1).addReg(TmpReg3).addReg(TmpReg);
      }
    }
    return;
  }

  if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
    // The shift amount is constant, guaranteed to be a ubyte. Get its value.
    assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");

    const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
    BuildMI(*MBB, IP, Opc[Class], 2,
        DestReg).addReg(SrcReg).addImm(CUI->getValue());
  } else {                  // The shift amount is non-constant.
    unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP);
    BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);

    const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
    BuildMI(*MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
  }
}
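
// The non-constant long-shift sequence above is branch-free: SHxD merges the
// two halves for amounts below 32, and since the hardware masks CL to 5 bits
// for the 32-bit shifts, TEST CL,32 plus CMOVNE patches up amounts of 32..63.
// For 'shl long %x, ub %n' this is, illustratively:
//   mov    cl, n
//   shld   tmp2, hi, lo      ; candidate high word for n < 32
//   shl    tmp3, lo          ; candidate low word (and high word if n >= 32)
//   test   cl, 32
//   cmovne hiResult, tmp3    ; n >= 32: high word is lo << (n-32)
//   cmovne loResult, zero    ; n >= 32: low word becomes 0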

/// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
/// instruction.  The load and store instructions are the only place where we
/// need to worry about the memory layout of the target machine.
///
void ISel::visitLoadInst(LoadInst &I) {
  unsigned DestReg = getReg(I);
  unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
  Value *Addr = I.getOperand(0);
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
    if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
                      BaseReg, Scale, IndexReg, Disp))
      Addr = 0;  // Address is consumed!
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (CE->getOpcode() == Instruction::GetElementPtr)
      if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
                        BaseReg, Scale, IndexReg, Disp))
        Addr = 0;  // Address is consumed!
  }

  if (Addr) {
    // If it's not foldable, reset addr mode.
    BaseReg = getReg(Addr);
    Scale = 1; IndexReg = 0; Disp = 0;
  }

  unsigned Class = getClassB(I.getType());
  if (Class == cLong) {
    addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg),
                   BaseReg, Scale, IndexReg, Disp);
    addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1),
                   BaseReg, Scale, IndexReg, Disp+4);
    return;
  }

  static const unsigned Opcodes[] = {
    X86::MOV8rm, X86::MOV16rm, X86::MOV32rm, X86::FLD32m
  };
  unsigned Opcode = Opcodes[Class];
  if (I.getType() == Type::DoubleTy) Opcode = X86::FLD64m;
  addFullAddress(BuildMI(BB, Opcode, 4, DestReg),
                 BaseReg, Scale, IndexReg, Disp);
}
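
// When the GEP folding above succeeds, a load through an address like
// 'getelementptr [10 x int]* %arr, long 0, long %i' collapses into a single
// instruction using the full x86 addressing mode, roughly:
//   mov eax, [arrBase + i*4]
// instead of a separate lea/add chain followed by a load through a plain
// register.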

/// visitStoreInst - Implement LLVM store instructions in terms of the x86 'mov'
/// instruction.
///
void ISel::visitStoreInst(StoreInst &I) {
  unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
  Value *Addr = I.getOperand(1);
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
    if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
                      BaseReg, Scale, IndexReg, Disp))
      Addr = 0;  // Address is consumed!
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (CE->getOpcode() == Instruction::GetElementPtr)
      if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
                        BaseReg, Scale, IndexReg, Disp))
        Addr = 0;  // Address is consumed!
  }

  if (Addr) {
    // If it's not foldable, reset addr mode.
    BaseReg = getReg(Addr);
    Scale = 1; IndexReg = 0; Disp = 0;
  }

  const Type *ValTy = I.getOperand(0)->getType();
  unsigned Class = getClassB(ValTy);

  if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(0))) {
    uint64_t Val = CI->getRawValue();
    if (Class == cLong) {
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
                     BaseReg, Scale, IndexReg, Disp).addImm(Val & ~0U);
      addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
                     BaseReg, Scale, IndexReg, Disp+4).addImm(Val>>32);
    } else {
      static const unsigned Opcodes[] = {
        X86::MOV8mi, X86::MOV16mi, X86::MOV32mi
      };
      unsigned Opcode = Opcodes[Class];
      addFullAddress(BuildMI(BB, Opcode, 5),
                     BaseReg, Scale, IndexReg, Disp).addImm(Val);
    }
  } else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
    addFullAddress(BuildMI(BB, X86::MOV8mi, 5),
                   BaseReg, Scale, IndexReg, Disp).addImm(CB->getValue());
  } else {
    if (Class == cLong) {
      unsigned ValReg = getReg(I.getOperand(0));
      addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
                     BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
      addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
                     BaseReg, Scale, IndexReg, Disp+4).addReg(ValReg+1);
    } else {
      unsigned ValReg = getReg(I.getOperand(0));
      static const unsigned Opcodes[] = {
        X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
      };
      unsigned Opcode = Opcodes[Class];
      if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;
      addFullAddress(BuildMI(BB, Opcode, 1+4),
                     BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
    }
  }
}
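
// Storing a constant long never materializes the value in registers; e.g.
// 'store long 5, long* %p' becomes two immediate stores, roughly:
//   mov dword ptr [p],   5     ; low word:  Val & ~0U
//   mov dword ptr [p+4], 0     ; high word: Val >> 32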

/// visitCastInst - Here we have various kinds of copying with or without
/// sign extension going on.
///
void ISel::visitCastInst(CastInst &CI) {
  Value *Op = CI.getOperand(0);
  // If this is a cast from a 32-bit integer to a Long type, and the only uses
  // of the cast are GEP instructions, then the cast does not need to be
  // generated explicitly, it will be folded into the GEP.
  if (CI.getType() == Type::LongTy &&
      (Op->getType() == Type::IntTy || Op->getType() == Type::UIntTy)) {
    bool AllUsesAreGEPs = true;
    for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I)
      if (!isa<GetElementPtrInst>(*I)) {
        AllUsesAreGEPs = false;
        break;
      }

    // No need to codegen this cast if all users are getelementptr instrs...
    if (AllUsesAreGEPs) return;
  }

  unsigned DestReg = getReg(CI);
  MachineBasicBlock::iterator MI = BB->end();
  emitCastOperation(BB, MI, Op, CI.getType(), DestReg);
}

/// emitCastOperation - Common code shared between visitCastInst and
/// constant expression cast support.
void ISel::emitCastOperation(MachineBasicBlock *BB,
                             MachineBasicBlock::iterator IP,
                             Value *Src, const Type *DestTy,
                             unsigned DestReg) {
  unsigned SrcReg = getReg(Src, BB, IP);
  const Type *SrcTy = Src->getType();
  unsigned SrcClass = getClassB(SrcTy);
  unsigned DestClass = getClassB(DestTy);

  // Implement casts to bool by using compare on the operand followed by set if
  // not zero on the result.
  if (DestTy == Type::BoolTy) {
    switch (SrcClass) {
    case cByte:
      BuildMI(*BB, IP, X86::TEST8rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cShort:
      BuildMI(*BB, IP, X86::TEST16rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cInt:
      BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cLong: {
      unsigned TmpReg = makeAnotherReg(Type::IntTy);
      BuildMI(*BB, IP, X86::OR32rr, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
      break;
    }
    case cFP:
      BuildMI(*BB, IP, X86::FTST, 1).addReg(SrcReg);
      BuildMI(*BB, IP, X86::FNSTSW8r, 0);
      BuildMI(*BB, IP, X86::SAHF, 1);
      break;
    }

    // If the zero flag is not set, then the value is true, set the byte to
    // true.
    BuildMI(*BB, IP, X86::SETNEr, 1, DestReg);
    return;
  }

  static const unsigned RegRegMove[] = {
    X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
  };

  // Implement casts between values of the same type class (as determined by
  // getClass) by using a register-to-register move.
  if (SrcClass == DestClass) {
    if (SrcClass <= cInt || (SrcClass == cFP && SrcTy == DestTy)) {
      BuildMI(*BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
    } else if (SrcClass == cFP) {
      if (SrcTy == Type::FloatTy) {  // float -> double
        assert(DestTy == Type::DoubleTy && "Unknown cFP member!");
        BuildMI(*BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
      } else {                       // double -> float
        assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy &&
               "Unknown cFP member!");
        // Truncate from double to float by storing to memory as a 32-bit
        // float, then reading it back.
        unsigned FltAlign = TM.getTargetData().getFloatAlignment();
        int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
        addFrameReference(BuildMI(*BB, IP, X86::FST32m, 5),
                          FrameIdx).addReg(SrcReg);
        addFrameReference(BuildMI(*BB, IP, X86::FLD32m, 5, DestReg), FrameIdx);
      }
    } else if (SrcClass == cLong) {
      BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
      BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg+1);
    } else {
      assert(0 && "Cannot handle this type of cast instruction!");
    }
    return;
  }

  // Handle cast of SMALLER int to LARGER int using a move with sign extension
  // or zero extension, depending on whether the source type was signed.
  if (SrcClass <= cInt && (DestClass <= cInt || DestClass == cLong) &&
      SrcClass < DestClass) {
    bool isLong = DestClass == cLong;
    if (isLong) DestClass = cInt;

    static const unsigned Opc[][4] = {
      { X86::MOVSX16rr8, X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOV32rr }, // s
      { X86::MOVZX16rr8, X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOV32rr }  // u
    };

    bool isUnsigned = SrcTy->isUnsigned();
    BuildMI(*BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
            DestReg).addReg(SrcReg);

    if (isLong) {  // Handle upper 32 bits as appropriate...
      if (isUnsigned)     // Zero out top bits...
        BuildMI(*BB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
      else                // Sign extend bottom half...
        BuildMI(*BB, IP, X86::SAR32ri, 2, DestReg+1).addReg(DestReg).addImm(31);
    }
    return;
  }

  // Special case long -> int ...
  if (SrcClass == cLong && DestClass == cInt) {
    BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
    return;
  }

  // Handle cast of LARGER int to SMALLER int using a move to EAX followed by a
  // move out of AX or AL.
  if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt
      && SrcClass > DestClass) {
    static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX, 0, X86::EAX };
    BuildMI(*BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
    BuildMI(*BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
    return;
  }
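
  // A quick key for the extension table above: it is indexed by
  // Opc[isUnsigned][SrcClass+DestClass-1], with cByte=0, cShort=1, cInt=2.
  // For example, sbyte -> int hits entry 0+2-1 == 1, i.e. MOVSX32rr8;
  // ushort -> uint hits 1+2-1 == 2, i.e. MOVZX32rr16; and int -> long uses
  // the plain MOV32rr column, with the high word synthesized just after.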

  // Handle casts from integer to floating point now...
  if (DestClass == cFP) {
    // Promote the integer to a type supported by FLD.  We do this because
    // there are no unsigned FLD instructions, so we must promote an unsigned
    // value to a larger signed value, then use FLD on the larger value.
    //
    const Type *PromoteType = 0;
    unsigned PromoteOpcode = 0;
    unsigned RealDestReg = DestReg;
    switch (SrcTy->getPrimitiveID()) {
    case Type::BoolTyID:
    case Type::SByteTyID:
      // We don't have the facilities for directly loading byte sized data from
      // memory (even signed).  Promote it to 16 bits.
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVSX16rr8;
      break;
    case Type::UByteTyID:
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVZX16rr8;
      break;
    case Type::UShortTyID:
      PromoteType = Type::IntTy;
      PromoteOpcode = X86::MOVZX32rr16;
      break;
    case Type::UIntTyID: {
      // Make a 64 bit temporary... and zero out the top of it...
      unsigned TmpReg = makeAnotherReg(Type::LongTy);
      BuildMI(*BB, IP, X86::MOV32rr, 1, TmpReg).addReg(SrcReg);
      BuildMI(*BB, IP, X86::MOV32ri, 1, TmpReg+1).addImm(0);
      SrcTy = Type::LongTy;
      SrcClass = cLong;
      SrcReg = TmpReg;
      break;
    }
    case Type::ULongTyID:
      // Don't fild into the real destination.
      DestReg = makeAnotherReg(Type::DoubleTy);
      break;
    default:  // No promotion needed...
      break;
    }

    if (PromoteType) {
      unsigned TmpReg = makeAnotherReg(PromoteType);
      BuildMI(*BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
      SrcTy = PromoteType;
      SrcClass = getClass(PromoteType);
      SrcReg = TmpReg;
    }

    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());

    if (SrcClass == cLong) {
      addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
                        FrameIdx).addReg(SrcReg);
      addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
                        FrameIdx, 4).addReg(SrcReg+1);
    } else {
      static const unsigned Op1[] = { X86::MOV8mr, X86::MOV16mr, X86::MOV32mr };
      addFrameReference(BuildMI(*BB, IP, Op1[SrcClass], 5),
                        FrameIdx).addReg(SrcReg);
    }

    static const unsigned Op2[] =
      { 0/*byte*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m };
    addFrameReference(BuildMI(*BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);

    // We need special handling for unsigned 64-bit integer sources.  If the
    // input number has the "sign bit" set, then we loaded it incorrectly as a
    // negative 64-bit number.  In this case, add an offset value.
    if (SrcTy == Type::ULongTy) {
      // Emit a test instruction to see if the dynamic input value was signed.
      BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg+1).addReg(SrcReg+1);

      // If the sign bit is set, get a pointer to an offset, otherwise get a
      // pointer to a zero.
      MachineConstantPool *CP = F->getConstantPool();
      unsigned Zero = makeAnotherReg(Type::IntTy);
      Constant *Null = Constant::getNullValue(Type::UIntTy);
      addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Zero),
                               CP->getConstantPoolIndex(Null));
      unsigned Offset = makeAnotherReg(Type::IntTy);
      Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
      addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Offset),
                               CP->getConstantPoolIndex(OffsetCst));
      unsigned Addr = makeAnotherReg(Type::IntTy);
      BuildMI(*BB, IP, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);

      // Load the constant for an add.  FIXME: this could make an 'fadd' that
      // reads directly from memory, but we don't support these yet.
      unsigned ConstReg = makeAnotherReg(Type::DoubleTy);
      addDirectMem(BuildMI(*BB, IP, X86::FLD32m, 4, ConstReg), Addr);

      BuildMI(*BB, IP, X86::FpADD, 2, RealDestReg)
        .addReg(ConstReg).addReg(DestReg);
    }

    return;
  }
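
  // A note on the magic number above: 0x5f800000 is the IEEE-754 single with
  // sign 0, exponent field 191, and mantissa 0, i.e. exactly 2^64.  FILD64m
  // interpreted the unsigned source as signed, so when the top bit was set
  // the loaded result is exactly 2^64 too small; conditionally adding 2^64
  // (via CMOVS selecting the constant's address) corrects it.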

  // Handle casts from floating point to integer now...
  if (SrcClass == cFP) {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    //
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(*BB, IP, X86::FNSTCW16m, 4), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
    addFrameReference(BuildMI(*BB, IP, X86::MOV8rm, 4, HighPartOfCW),
                      CWFrameIdx, 1);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, IP, X86::MOV8mi, 5),
                      CWFrameIdx, 1).addImm(12);

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, IP, X86::MOV8mr, 5),
                      CWFrameIdx, 1).addReg(HighPartOfCW);

    // We don't have the facilities for directly storing byte sized data to
    // memory.  Promote it to 16 bits.  We also must promote unsigned values to
    // larger classes because we only have signed FP stores.
    unsigned StoreClass  = DestClass;
    const Type *StoreTy  = DestTy;
    if (StoreClass == cByte || DestTy->isUnsigned())
      switch (StoreClass) {
      case cByte:  StoreTy = Type::ShortTy; StoreClass = cShort; break;
      case cShort: StoreTy = Type::IntTy;   StoreClass = cInt;   break;
      case cInt:   StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      // The following treatment of cLong may not be perfectly right,
      // but it survives chains of casts of the form
      // double->ulong->double.
      case cLong:  StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      default: assert(0 && "Unknown store class!");
      }

    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(StoreTy, TM.getTargetData());

    static const unsigned Op1[] =
      { 0, X86::FIST16m, X86::FIST32m, 0, X86::FISTP64m };
    addFrameReference(BuildMI(*BB, IP, Op1[StoreClass], 5),
                      FrameIdx).addReg(SrcReg);

    if (DestClass == cLong) {
      addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg), FrameIdx);
      addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg+1),
                        FrameIdx, 4);
    } else {
      static const unsigned Op2[] = { X86::MOV8rm, X86::MOV16rm, X86::MOV32rm };
      addFrameReference(BuildMI(*BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
    }

    // Reload the original control word now...
    addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
    return;
  }
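
  // About the addImm(12) above: the rounding-control field occupies bits
  // 10-11 of the FPU control word, i.e. bits 2-3 of its high byte.  Writing
  // 12 (0x0C) there sets RC = 11b, "round toward zero", which is what C-style
  // float-to-int truncation requires; the default RC = 00b rounds to nearest
  // and would turn a value like 1.7 into 2 instead of 1.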

  // Anything we haven't handled already, we can't (yet) handle at all.
  assert(0 && "Unhandled cast instruction!");
}

/// visitVANextInst - Implement the va_next instruction...
///
void ISel::visitVANextInst(VANextInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  unsigned Size;
  switch (I.getArgType()->getPrimitiveID()) {
  default:
    std::cerr << I;
    assert(0 && "Error: bad type for va_next instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    Size = 4;
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
  case Type::DoubleTyID:
    Size = 8;
    break;
  }

  // Increment the VAList pointer...
  BuildMI(BB, X86::ADD32ri, 2, DestReg).addReg(VAList).addImm(Size);
}

void ISel::visitVAArgInst(VAArgInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  switch (I.getType()->getPrimitiveID()) {
  default:
    std::cerr << I;
    assert(0 && "Error: bad type for va_arg instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
    addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
    addRegOffset(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), VAList, 4);
    break;
  case Type::DoubleTyID:
    addDirectMem(BuildMI(BB, X86::FLD64m, 4, DestReg), VAList);
    break;
  }
}
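
// On x86 the va_list is just a pointer into the caller's argument area, so
// va_arg is a load through it and va_next bumps it by the argument's size.
// Reading an int and then advancing is, illustratively:
//   mov eax, [valist]        ; va_arg int
//   add vanext, valist, 4    ; va_next int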

void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
  // If this GEP instruction will be folded into all of its users, we don't
  // need to explicitly calculate it!
  unsigned A, B, C, D;
  if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), A,B,C,D)) {
    // Check all of the users of the instruction to see if they are loads and
    // stores.
    bool AllWillFold = true;
    for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI)
      if (cast<Instruction>(*UI)->getOpcode() != Instruction::Load)
        if (cast<Instruction>(*UI)->getOpcode() != Instruction::Store ||
            cast<Instruction>(*UI)->getOperand(0) == &I) {
          AllWillFold = false;
          break;
        }

    // If the instruction is foldable, and will be folded into all users, don't
    // emit it!
    if (AllWillFold) return;
  }

  unsigned outputReg = getReg(I);
  emitGEPOperation(BB, BB->end(), I.getOperand(0),
                   I.op_begin()+1, I.op_end(), outputReg);
}

/// getGEPIndex - Inspect the getelementptr operands specified with GEPOps and
/// GEPTypes (the derived types being stepped through at each level).  On
/// return from this function, if some indexes of the instruction are
/// representable as an X86 lea instruction, the machine operands are put into
/// Ops and the consumed indexes are popped from the GEPOps/GEPTypes lists.
/// If this computes an addressing mode that only partially consumes the
/// input, the BaseReg input of the addressing mode must be left free so the
/// caller can chain further address computation through it.
///
/// Note that there is one fewer entry in GEPTypes than there is in GEPOps.
///
void ISel::getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                       std::vector<Value*> &GEPOps,
                       std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
                       unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
  const TargetData &TD = TM.getTargetData();

  // Clear out the state we are working with...
  BaseReg = 0;    // No base register
  Scale = 1;      // Unit scale
  IndexReg = 0;   // No index register
  Disp = 0;       // No displacement

  // While there are GEP indexes that can be folded into the current address,
  // keep processing them.
  while (!GEPTypes.empty()) {
    if (const StructType *StTy = dyn_cast<StructType>(GEPTypes.back())) {
      // It's a struct access.  CUI is the index into the structure,
      // which names the field. This index must have unsigned type.
      const ConstantUInt *CUI = cast<ConstantUInt>(GEPOps.back());

      // Use the TargetData structure to pick out what the layout of the
      // structure is in memory.  Since the structure index must be constant, we
      // can get its value and use it to find the right byte offset from the
      // StructLayout class's list of structure member offsets.
      Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();
    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();

      // idx is the index into the array.  Unlike with structure
      // indices, we may not know its actual value at code-generation
      // time.
      assert(idx->getType() == Type::LongTy && "Bad GEP array index!");

      // If idx is a constant, fold it into the offset.
      unsigned TypeSize = TD.getTypeSize(SqTy->getElementType());
      if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
        Disp += TypeSize*CSI->getValue();
      } else {
        // If the index reg is already taken, we can't handle this index.
        if (IndexReg) return;

        // If this is a size that we can handle, then add the index as the
        // scale amount of the addressing mode.
        switch (TypeSize) {
        case 1: case 2: case 4: case 8:
          // These are all acceptable scales on X86.
          Scale = TypeSize;
          break;
        default:
          // Otherwise, we can't handle this scale
          return;
        }

        if (CastInst *CI = dyn_cast<CastInst>(idx))
          if (CI->getOperand(0)->getType() == Type::IntTy ||
              CI->getOperand(0)->getType() == Type::UIntTy)
            idx = CI->getOperand(0);

        IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
      }

      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();
    }
  }

  // GEPTypes is empty, which means we have a single operand left.  See if we
  // can set it as the base register.
  //
  // FIXME: When addressing modes are more powerful/correct, we could load
  // global addresses directly as 32-bit immediates.
  assert(BaseReg == 0);
  BaseReg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
  GEPOps.pop_back();        // Consume the last GEP operand
}
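
// Concretely, getGEPIndex maps as much of the index list as it can onto the
// single x86 addressing mode [BaseReg + IndexReg*Scale + Disp].  A constant
// struct field index only bumps Disp; one variable array index can occupy
// IndexReg (when the element size is a legal 1/2/4/8 scale); anything else
// stops the walk so the caller can materialize a partial address and
// re-enter.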

/// isGEPFoldable - Return true if the specified GEP can be completely
/// folded into the addressing mode of a load/store or lea instruction.
bool ISel::isGEPFoldable(MachineBasicBlock *MBB,
                         Value *Src, User::op_iterator IdxBegin,
                         User::op_iterator IdxEnd, unsigned &BaseReg,
                         unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
  if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
    Src = CPR->getValue();

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*> GEPTypes;
  GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
                  gep_type_end(Src->getType(), IdxBegin, IdxEnd));

  MachineBasicBlock::iterator IP;
  if (MBB) IP = MBB->end();
  getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);

  // We can fold it away iff the getGEPIndex call eliminated all operands.
  return GEPOps.empty();
}

void ISel::emitGEPOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator IP,
                            Value *Src, User::op_iterator IdxBegin,
                            User::op_iterator IdxEnd, unsigned TargetReg) {
  const TargetData &TD = TM.getTargetData();
  if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
    Src = CPR->getValue();

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*> GEPTypes;
  GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
                  gep_type_end(Src->getType(), IdxBegin, IdxEnd));

  // Keep emitting instructions until we consume the entire GEP instruction.
  while (!GEPOps.empty()) {
    unsigned OldSize = GEPOps.size();
    unsigned BaseReg, Scale, IndexReg, Disp;
    getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);

    if (GEPOps.size() != OldSize) {
      // getGEPIndex consumed some of the input.  Build an LEA instruction here.
      unsigned NextTarget = 0;
      if (!GEPOps.empty()) {
        assert(BaseReg == 0 &&
           "getGEPIndex should have left the base register open for chaining!");
        NextTarget = BaseReg = makeAnotherReg(Type::UIntTy);
      }

      if (IndexReg == 0 && Disp == 0)
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
      else
        addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg),
                       BaseReg, Scale, IndexReg, Disp);
      --IP;                     // Insert the next instruction before this one.
      TargetReg = NextTarget;
    } else if (GEPTypes.empty()) {
      // The getGEPIndex operation didn't want to build an LEA.  Check to see if
      // all operands are consumed but the base pointer.  If so, just load it
      // into the register.
      if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps[0])) {
        BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(GV);
      } else {
        unsigned BaseReg = getReg(GEPOps[0], MBB, IP);
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
      }
      break;                    // we are now done
    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();

      // idx is the index into the array.  Unlike with structure
      // indices, we may not know its actual value at code-generation
      // time.
      assert(idx->getType() == Type::LongTy && "Bad GEP array index!");

      // Most GEP instructions use a [cast (int/uint) to LongTy] as their
      // operand on X86.  Handle this case directly now...
      if (CastInst *CI = dyn_cast<CastInst>(idx))
        if (CI->getOperand(0)->getType() == Type::IntTy ||
            CI->getOperand(0)->getType() == Type::UIntTy)
          idx = CI->getOperand(0);

      // We want to add BaseReg to (idxReg * sizeof ElementType).  First, we
      // must find the size of the pointed-to type (Not coincidentally, the next
      // type is the type of the elements in the array).
      const Type *ElTy = SqTy->getElementType();
      unsigned elementSize = TD.getTypeSize(ElTy);

      // If idxReg is a constant, we don't need to perform the multiply!
      if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
        if (!CSI->isNullValue()) {
          unsigned Offset = elementSize*CSI->getValue();
          unsigned Reg = makeAnotherReg(Type::UIntTy);
          BuildMI(*MBB, IP, X86::ADD32ri, 2, TargetReg)
                    .addReg(Reg).addImm(Offset);
          --IP;                 // Insert the next instruction before this one.
          TargetReg = Reg;      // Codegen the rest of the GEP into this
        }
      } else if (elementSize == 1) {
        // If the element size is 1, we don't have to multiply, just add
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2,TargetReg).addReg(Reg).addReg(idxReg);
        --IP;                   // Insert the next instruction before this one.
        TargetReg = Reg;        // Codegen the rest of the GEP into this
      } else {
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned OffsetReg = makeAnotherReg(Type::UIntTy);

        // Make sure we can back the iterator up to point to the first
        // instruction emitted.
        MachineBasicBlock::iterator BeforeIt = IP;
        if (IP == MBB->begin())
          BeforeIt = MBB->end();
        else
          --BeforeIt;
        doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize);

        // Emit an ADD to add OffsetReg to the basePtr.
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2, TargetReg)
                  .addReg(Reg).addReg(OffsetReg);

        // Step to the first instruction of the multiply.
        if (BeforeIt == MBB->end())
          IP = MBB->begin();
        else
          IP = ++BeforeIt;

        TargetReg = Reg;        // Codegen the rest of the GEP into this
      }
    }
  }
}
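
// When getGEPIndex cannot fold an index (e.g. an element size that is not a
// legal scale), the loop above peels that level off and multiplies by hand,
// emitting the address computation in reverse via the --IP iterator dance.
// Indexing an array of 12-byte elements would come out roughly as:
//   imul off, i, 12           ; from doMultiplyConst
//   add  result, base, off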

/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
/// frame manager, otherwise do it the hard way.
///
void ISel::visitAllocaInst(AllocaInst &I) {
  // Find the data size of the alloca inst's getAllocatedType.
  const Type *Ty = I.getAllocatedType();
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);

  // If this is a fixed size alloca in the entry block for the function,
  // statically stack allocate the space.
  //
  if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(I.getArraySize())) {
    if (I.getParent() == I.getParent()->getParent()->begin()) {
      TySize *= CUI->getValue();   // Get total allocated size...
      unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);

      // Create a new stack object using the frame manager...
      int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
      addFrameReference(BuildMI(BB, X86::LEA32r, 5, getReg(I)), FrameIdx);
      return;
    }
  }

  // Create a register to hold the temporary result of multiplying the type size
  // constant by the variable amount.
  unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
  unsigned SrcReg1 = getReg(I.getArraySize());

  // TotalSizeReg = mul <numelements>, <TypeSize>
  MachineBasicBlock::iterator MBBI = BB->end();
  doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);

  // AddedSize = add <TotalSizeReg>, 15
  unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ADD32ri, 2, AddedSizeReg).addReg(TotalSizeReg).addImm(15);

  // AlignedSize = and <AddedSize>, ~15
  unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::AND32ri, 2, AlignedSize).addReg(AddedSizeReg).addImm(~15);

  // Subtract size from stack pointer, thereby allocating some space.
  BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);

  // Put a pointer to the space into the result register, by copying
  // the stack pointer.
  BuildMI(BB, X86::MOV32rr, 1, getReg(I)).addReg(X86::ESP);

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  F->getFrameInfo()->CreateVariableSizedObject();
}
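
// The (size + 15) & ~15 pair above is the usual round-up-to-a-multiple idiom:
// for a dynamic 'alloca int, uint %n' it keeps ESP 16-byte aligned.  For
// example, n = 3 requests 12 bytes, and (12 + 15) & ~15 == 16 bytes are
// actually reserved.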

/// visitMallocInst - Malloc instructions are code generated into direct calls
/// to the library malloc.
///
void ISel::visitMallocInst(MallocInst &I) {
  unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType());
  unsigned Arg;

  if (ConstantUInt *C = dyn_cast<ConstantUInt>(I.getOperand(0))) {
    Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize));
  } else {
    Arg = makeAnotherReg(Type::UIntTy);
    unsigned Op0Reg = getReg(I.getOperand(0));
    MachineBasicBlock::iterator MBBI = BB->end();
    doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize);
  }

  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(Arg, Type::UIntTy));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("malloc", true);
  doCall(ValueRecord(getReg(I), I.getType()), TheCall, Args);
}
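
// So 'malloc int, uint %n' simply becomes a call to malloc(n * 4): the
// element count is scaled by the allocation size (folded at compile time
// when the count is constant) and passed as the single argument to the
// libc call.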

/// visitFreeInst - Free instructions are code gen'd to call the free libc
/// function.
///
void ISel::visitFreeInst(FreeInst &I) {
  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(I.getOperand(0)));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("free", true);
  doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
}

/// createX86SimpleInstructionSelector - This pass converts an LLVM function
/// into a machine code representation in a very simple peephole fashion.  The
/// generated code sucks but the implementation is nice and simple.
///
FunctionPass *llvm::createX86SimpleInstructionSelector(TargetMachine &TM) {
  return new ISel(TM);
}