//===-- InstSelectSimple.cpp - A simple instruction selector for x86 -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a simple peephole instruction selector for the x86 target.
//
//===----------------------------------------------------------------------===//
15 #include "X86InstrBuilder.h"
16 #include "X86InstrInfo.h"
17 #include "llvm/Constants.h"
18 #include "llvm/DerivedTypes.h"
19 #include "llvm/Function.h"
20 #include "llvm/Instructions.h"
21 #include "llvm/IntrinsicLowering.h"
22 #include "llvm/Pass.h"
23 #include "llvm/CodeGen/MachineConstantPool.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/SSARegMap.h"
27 #include "llvm/Target/MRegisterInfo.h"
28 #include "llvm/Target/TargetMachine.h"
29 #include "llvm/Support/GetElementPtrTypeIterator.h"
30 #include "llvm/Support/InstVisitor.h"
31 #include "llvm/Support/CFG.h"
32 #include "Support/Statistic.h"
namespace {
  Statistic<>
  NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");

  /// TypeClass - Used by the X86 backend to group LLVM types by their basic X86
  /// register class.
  enum TypeClass {
    cByte, cShort, cInt, cFP, cLong
  };
}
/// getClass - Turn a primitive type into a "class" number which is based on the
/// size of the type, and whether or not it is floating point.
///
static inline TypeClass getClass(const Type *Ty) {
  switch (Ty->getPrimitiveID()) {
  case Type::SByteTyID:
  case Type::UByteTyID:   return cByte;      // Byte operands are class #0
  case Type::ShortTyID:
  case Type::UShortTyID:  return cShort;     // Short operands are class #1
  case Type::IntTyID:
  case Type::UIntTyID:
  case Type::PointerTyID: return cInt;       // Ints and pointers are class #2

  case Type::FloatTyID:
  case Type::DoubleTyID:  return cFP;        // Floating Point is #3

  case Type::LongTyID:
  case Type::ULongTyID:   return cLong;      // Longs are class #4
  default:
    assert(0 && "Invalid type to getClass!");
    return cByte;                            // not reached
  }
}
// getClassB - Just like getClass, but treat boolean values as bytes.
static inline TypeClass getClassB(const Type *Ty) {
  if (Ty == Type::BoolTy) return cByte;
  return getClass(Ty);
}
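// For example (illustrative): getClassB(Type::BoolTy) and getClassB(Type::SByteTy)
// both return cByte, while getClass(Type::BoolTy) would hit the assert above,
// since bool is not one of the primitive IDs the switch knows about.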
namespace {
  struct ISel : public FunctionPass, InstVisitor<ISel> {
    TargetMachine &TM;
    MachineFunction *F;                 // The function we are compiling into
    MachineBasicBlock *BB;              // The current MBB we are compiling
    int VarArgsFrameIndex;              // FrameIndex for start of varargs area
    int ReturnAddressIndex;             // FrameIndex for the return address

    std::map<Value*, unsigned> RegMap;  // Mapping between Vals and SSA Regs

    // MBBMap - Mapping between LLVM BB -> Machine BB
    std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;

    ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}
    /// runOnFunction - Top level implementation of instruction selection for
    /// the entire function.
    ///
    bool runOnFunction(Function &Fn) {
      // First pass over the function, lower any unknown intrinsic functions
      // with the IntrinsicLowering class.
      LowerUnknownIntrinsicFunctionCalls(Fn);

      F = &MachineFunction::construct(&Fn, TM);

      // Create all of the machine basic blocks for the function...
      for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
        F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));

      BB = &F->front();

      // Set up a frame object for the return address.  This is used by the
      // llvm.returnaddress & llvm.frameaddress intrinsics.
      ReturnAddressIndex = F->getFrameInfo()->CreateFixedObject(4, -4);

      // Copy incoming arguments off of the stack...
      LoadArgumentsToVirtualRegs(Fn);

      // Instruction select everything except PHI nodes
      visit(Fn);

      // Select the PHI nodes
      SelectPHINodes();

      // Insert the FP_REG_KILL instructions into blocks that need them.
      InsertFPRegKills();

      RegMap.clear();
      MBBMap.clear();
      F = 0;
      // We always build a machine code representation for the function
      return true;
    }

    virtual const char *getPassName() const {
      return "X86 Simple Instruction Selection";
    }
    /// visitBasicBlock - This method is called when we are visiting a new basic
    /// block.  This simply switches the current emission point to the
    /// MachineBasicBlock created for it in runOnFunction.  Subsequent visit*
    /// calls for instructions will be invoked for all instructions in the
    /// basic block.
    ///
    void visitBasicBlock(BasicBlock &LLVM_BB) {
      BB = MBBMap[&LLVM_BB];
    }
    /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
    /// function, lowering any calls to unknown intrinsic functions into the
    /// equivalent LLVM code.
    ///
    void LowerUnknownIntrinsicFunctionCalls(Function &F);

    /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
    /// from the stack into virtual registers.
    ///
    void LoadArgumentsToVirtualRegs(Function &F);

    /// SelectPHINodes - Insert machine code to generate phis.  This is tricky
    /// because we have to generate our sources into the source basic blocks,
    /// not the current one.
    ///
    void SelectPHINodes();

    /// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks
    /// that need them.  This only occurs due to the floating point stackifier
    /// not being aggressive enough to handle arbitrary global stackification.
    ///
    void InsertFPRegKills();

    // Visitation methods for various instructions.  These methods simply emit
    // fixed X86 code for each instruction.
    //

    // Control flow operators
    void visitReturnInst(ReturnInst &RI);
    void visitBranchInst(BranchInst &BI);

    struct ValueRecord {
      Value *Val;
      unsigned Reg;
      const Type *Ty;
      ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {}
      ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {}
    };
    void doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                const std::vector<ValueRecord> &Args);
    void visitCallInst(CallInst &I);
    void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I);

    // Arithmetic operators
    void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass);
    void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); }
    void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); }
    void visitMul(BinaryOperator &B);

    void visitDiv(BinaryOperator &B) { visitDivRem(B); }
    void visitRem(BinaryOperator &B) { visitDivRem(B); }
    void visitDivRem(BinaryOperator &B);

    void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); }
    void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); }
    void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); }

    // Comparison operators...
    void visitSetCondInst(SetCondInst &I);
    unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                            MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator MBBI);
    void visitSelectInst(SelectInst &SI);

    // Memory Instructions
    void visitLoadInst(LoadInst &I);
    void visitStoreInst(StoreInst &I);
    void visitGetElementPtrInst(GetElementPtrInst &I);
    void visitAllocaInst(AllocaInst &I);
    void visitMallocInst(MallocInst &I);
    void visitFreeInst(FreeInst &I);

    void visitShiftInst(ShiftInst &I);
    void visitPHINode(PHINode &I) {}      // PHI nodes handled by second pass
    void visitCastInst(CastInst &I);
    void visitVANextInst(VANextInst &I);
    void visitVAArgInst(VAArgInst &I);

    void visitInstruction(Instruction &I) {
      std::cerr << "Cannot instruction select: " << I;
      abort();
    }
    /// promote32 - Make a value 32-bits wide, and put it somewhere.
    ///
    void promote32(unsigned targetReg, const ValueRecord &VR);

    /// getAddressingMode - Get the addressing mode to use to address the
    /// specified value.  The returned value should be used with addFullAddress.
    void getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
                           unsigned &IndexReg, unsigned &Disp);

    /// getGEPIndex - This is used to fold GEP instructions into X86 addressing
    /// modes.
    void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                     std::vector<Value*> &GEPOps,
                     std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
                     unsigned &Scale, unsigned &IndexReg, unsigned &Disp);

    /// isGEPFoldable - Return true if the specified GEP can be completely
    /// folded into the addressing mode of a load/store or lea instruction.
    bool isGEPFoldable(MachineBasicBlock *MBB,
                       Value *Src, User::op_iterator IdxBegin,
                       User::op_iterator IdxEnd, unsigned &BaseReg,
                       unsigned &Scale, unsigned &IndexReg, unsigned &Disp);

    /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
    /// constant expression GEP support.
    ///
    void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                          Value *Src, User::op_iterator IdxBegin,
                          User::op_iterator IdxEnd, unsigned TargetReg);

    /// emitCastOperation - Common code shared between visitCastInst and
    /// constant expression cast support.
    ///
    void emitCastOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                           Value *Src, const Type *DestTy, unsigned TargetReg);

    /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
    /// and constant expression support.
    ///
    void emitSimpleBinaryOperation(MachineBasicBlock *BB,
                                   MachineBasicBlock::iterator IP,
                                   Value *Op0, Value *Op1,
                                   unsigned OperatorClass, unsigned TargetReg);

    /// emitBinaryFPOperation - This method handles emission of floating point
    /// Add (0), Sub (1), Mul (2), and Div (3) operations.
    void emitBinaryFPOperation(MachineBasicBlock *BB,
                               MachineBasicBlock::iterator IP,
                               Value *Op0, Value *Op1,
                               unsigned OperatorClass, unsigned TargetReg);

    void emitMultiply(MachineBasicBlock *BB, MachineBasicBlock::iterator IP,
                      Value *Op0, Value *Op1, unsigned TargetReg);

    void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
                    unsigned DestReg, const Type *DestTy,
                    unsigned Op0Reg, unsigned Op1Reg);
    void doMultiplyConst(MachineBasicBlock *MBB,
                         MachineBasicBlock::iterator MBBI,
                         unsigned DestReg, const Type *DestTy,
                         unsigned Op0Reg, unsigned Op1Val);

    void emitDivRemOperation(MachineBasicBlock *BB,
                             MachineBasicBlock::iterator IP,
                             Value *Op0, Value *Op1, bool isDiv,
                             unsigned TargetReg);

    /// emitSetCCOperation - Common code shared between visitSetCondInst and
    /// constant expression support.
    ///
    void emitSetCCOperation(MachineBasicBlock *BB,
                            MachineBasicBlock::iterator IP,
                            Value *Op0, Value *Op1, unsigned Opcode,
                            unsigned TargetReg);

    /// emitShiftOperation - Common code shared between visitShiftInst and
    /// constant expression support.
    ///
    void emitShiftOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator IP,
                            Value *Op, Value *ShiftAmount, bool isLeftShift,
                            const Type *ResultTy, unsigned DestReg);

    /// emitSelectOperation - Common code shared between visitSelectInst and the
    /// constant expression support.
    ///
    void emitSelectOperation(MachineBasicBlock *MBB,
                             MachineBasicBlock::iterator IP,
                             Value *Cond, Value *TrueVal, Value *FalseVal,
                             unsigned DestReg);

    /// copyConstantToRegister - Output the instructions required to put the
    /// specified constant into the specified register.
    ///
    void copyConstantToRegister(MachineBasicBlock *MBB,
                                MachineBasicBlock::iterator MBBI,
                                Constant *C, unsigned Reg);
    /// makeAnotherReg - This method returns the next register number we haven't
    /// yet used.
    ///
    /// Long values are handled somewhat specially.  They are always allocated
    /// as pairs of 32 bit integer values.  The register number returned is the
    /// lower 32 bits of the long value, and regNum+1 is the upper 32 bits of
    /// the long value.
    ///
    unsigned makeAnotherReg(const Type *Ty) {
      assert(dynamic_cast<const X86RegisterInfo*>(TM.getRegisterInfo()) &&
             "Current target doesn't have X86 reg info??");
      const X86RegisterInfo *MRI =
        static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
      if (Ty == Type::LongTy || Ty == Type::ULongTy) {
        const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy);
        // Create the lower part
        F->getSSARegMap()->createVirtualRegister(RC);
        // Create the upper part.
        return F->getSSARegMap()->createVirtualRegister(RC)-1;
      }

      // Add the mapping of regnumber => reg class to MachineFunction
      const TargetRegisterClass *RC = MRI->getRegClassForType(Ty);
      return F->getSSARegMap()->createVirtualRegister(RC);
    }
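    // Illustrative sketch of the cLong convention above: a caller needing a
    // 64-bit temporary does something like
    //   unsigned LoReg = makeAnotherReg(Type::LongTy);  // low 32 bits
    //   // ... the high 32 bits are implicitly LoReg+1 ...
    // which is why code below addresses long halves as Reg and Reg+1.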
    /// getReg - This method turns an LLVM value into a register number.  This
    /// is guaranteed to produce the same register number for a particular value
    /// every time it is queried.
    ///
    unsigned getReg(Value &V) { return getReg(&V); }  // Allow references
    unsigned getReg(Value *V) {
      // Just append to the end of the current bb.
      MachineBasicBlock::iterator It = BB->end();
      return getReg(V, BB, It);
    }
    unsigned getReg(Value *V, MachineBasicBlock *MBB,
                    MachineBasicBlock::iterator IPt) {
      // If this operand is a constant, emit the code to copy the constant into
      // the register here...
      //
      if (Constant *C = dyn_cast<Constant>(V)) {
        unsigned Reg = makeAnotherReg(V->getType());
        copyConstantToRegister(MBB, IPt, C, Reg);
        return Reg;
      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
        unsigned Reg = makeAnotherReg(V->getType());
        // Move the address of the global into the register
        BuildMI(*MBB, IPt, X86::MOV32ri, 1, Reg).addGlobalAddress(GV);
        return Reg;
      } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
        // Do not emit noop casts at all.
        if (getClassB(CI->getType()) == getClassB(CI->getOperand(0)->getType()))
          return getReg(CI->getOperand(0), MBB, IPt);
      }

      unsigned &Reg = RegMap[V];
      if (Reg == 0)
        Reg = makeAnotherReg(V->getType());
      return Reg;
    }
  };
}
/// copyConstantToRegister - Output the instructions required to put the
/// specified constant into the specified register.
///
void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator IP,
                                  Constant *C, unsigned R) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    unsigned Class = 0;
    switch (CE->getOpcode()) {
    case Instruction::GetElementPtr:
      emitGEPOperation(MBB, IP, CE->getOperand(0),
                       CE->op_begin()+1, CE->op_end(), R);
      return;
    case Instruction::Cast:
      emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R);
      return;

    case Instruction::Xor: ++Class;  // FALL THROUGH
    case Instruction::Or:  ++Class;  // FALL THROUGH
    case Instruction::And: ++Class;  // FALL THROUGH
    case Instruction::Sub: ++Class;  // FALL THROUGH
    case Instruction::Add:
      emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                                Class, R);
      return;

    case Instruction::Mul:
      emitMultiply(MBB, IP, CE->getOperand(0), CE->getOperand(1), R);
      return;

    case Instruction::Div:
    case Instruction::Rem:
      emitDivRemOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                          CE->getOpcode() == Instruction::Div, R);
      return;

    case Instruction::SetNE:
    case Instruction::SetEQ:
    case Instruction::SetLT:
    case Instruction::SetGT:
    case Instruction::SetLE:
    case Instruction::SetGE:
      emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode(), R);
      return;

    case Instruction::Shl:
    case Instruction::Shr:
      emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                         CE->getOpcode() == Instruction::Shl, CE->getType(), R);
      return;

    case Instruction::Select:
      emitSelectOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
                          CE->getOperand(2), R);
      return;

    default:
      std::cerr << "Offending expr: " << *C << "\n";
      assert(0 && "Constant expression not yet handled!\n");
    }
  }
  if (C->getType()->isIntegral()) {
    unsigned Class = getClassB(C->getType());

    if (Class == cLong) {
      // Copy the value into the register pair.
      uint64_t Val = cast<ConstantInt>(C)->getRawValue();
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(Val & 0xFFFFFFFF);
      BuildMI(*MBB, IP, X86::MOV32ri, 1, R+1).addImm(Val >> 32);
      return;
    }

    assert(Class <= cInt && "Type not handled yet!");

    static const unsigned IntegralOpcodeTab[] = {
      X86::MOV8ri, X86::MOV16ri, X86::MOV32ri
    };

    if (C->getType() == Type::BoolTy) {
      BuildMI(*MBB, IP, X86::MOV8ri, 1, R).addImm(C == ConstantBool::True);
    } else {
      ConstantInt *CI = cast<ConstantInt>(C);
      BuildMI(*MBB, IP, IntegralOpcodeTab[Class], 1, R).addImm(CI->getRawValue());
    }
  } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->isExactlyValue(+0.0))
      BuildMI(*MBB, IP, X86::FLD0, 0, R);
    else if (CFP->isExactlyValue(+1.0))
      BuildMI(*MBB, IP, X86::FLD1, 0, R);
    else {
      // Otherwise we need to spill the constant to memory...
      MachineConstantPool *CP = F->getConstantPool();
      unsigned CPI = CP->getConstantPoolIndex(CFP);
      const Type *Ty = CFP->getType();

      assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
      unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLD32m : X86::FLD64m;
      addConstantPoolReference(BuildMI(*MBB, IP, LoadOpcode, 4, R), CPI);
    }

  } else if (isa<ConstantPointerNull>(C)) {
    // Copy zero (null pointer) to the register.
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(0);
  } else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
    BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addGlobalAddress(CPR->getValue());
  } else {
    std::cerr << "Offending constant: " << *C << "\n";
    assert(0 && "Type not handled yet!");
  }
}
/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from
/// the stack into virtual registers.
///
void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
  // Emit instructions to load the arguments...  On entry to a function on the
  // X86, the stack frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is four bytes in size
  //    ...
  //
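  // For example (illustrative), for 'void f(int A, long B)' the frame on entry
  // would be:
  //   [ESP + 4]      -- A
  //   [ESP + 8..15]  -- B (low dword at [ESP + 8], high dword at [ESP + 12])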
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  MachineFrameInfo *MFI = F->getFrameInfo();

  for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) {
    bool ArgLive = !I->use_empty();
    unsigned Reg = ArgLive ? getReg(*I) : 0;
    int FI;                 // Frame object index

    switch (getClassB(I->getType())) {
    case cByte:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(1, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Reg), FI);
      }
      break;
    case cShort:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(2, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Reg), FI);
      }
      break;
    case cInt:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(4, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
      }
      break;
    case cLong:
      if (ArgLive) {
        FI = MFI->CreateFixedObject(8, ArgOffset);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg+1), FI, 4);
      }
      ArgOffset += 4;       // longs require 4 additional bytes
      break;
    case cFP:
      if (ArgLive) {
        unsigned Opcode;
        if (I->getType() == Type::FloatTy) {
          Opcode = X86::FLD32m;
          FI = MFI->CreateFixedObject(4, ArgOffset);
        } else {
          Opcode = X86::FLD64m;
          FI = MFI->CreateFixedObject(8, ArgOffset);
        }
        addFrameReference(BuildMI(BB, Opcode, 4, Reg), FI);
      }
      if (I->getType() == Type::DoubleTy)
        ArgOffset += 4;     // doubles require 4 additional bytes
      break;
    default:
      assert(0 && "Unhandled argument type!");
    }
    ArgOffset += 4;  // Each argument takes at least 4 bytes on the stack...
  }

  // If the function takes a variable number of arguments, add a frame offset
  // for the start of the first vararg value... this is used to expand
  // llvm.va_start.
  if (Fn.getFunctionType()->isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
}
/// SelectPHINodes - Insert machine code to generate phis.  This is tricky
/// because we have to generate our sources into the source basic blocks, not
/// the current one.
///
void ISel::SelectPHINodes() {
  const TargetInstrInfo &TII = TM.getInstrInfo();
  const Function &LF = *F->getFunction();  // The LLVM function...
  for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
    const BasicBlock *BB = I;
    MachineBasicBlock &MBB = *MBBMap[I];

    // Loop over all of the PHI nodes in the LLVM basic block...
    MachineBasicBlock::iterator PHIInsertPoint = MBB.begin();
    for (BasicBlock::const_iterator I = BB->begin();
         PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I)); ++I) {

      // Create a new machine instr PHI node, and insert it.
      unsigned PHIReg = getReg(*PN);
      MachineInstr *PhiMI = BuildMI(MBB, PHIInsertPoint,
                                    X86::PHI, PN->getNumOperands(), PHIReg);

      MachineInstr *LongPhiMI = 0;
      if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy)
        LongPhiMI = BuildMI(MBB, PHIInsertPoint,
                            X86::PHI, PN->getNumOperands(), PHIReg+1);

      // PHIValues - Map of blocks to incoming virtual registers.  We use this
      // so that we only initialize one incoming value for a particular block,
      // even if the block has multiple entries in the PHI node.
      //
      std::map<MachineBasicBlock*, unsigned> PHIValues;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)];
        unsigned ValReg;
        std::map<MachineBasicBlock*, unsigned>::iterator EntryIt =
          PHIValues.lower_bound(PredMBB);

        if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) {
          // We already inserted an initialization of the register for this
          // predecessor.  Recycle it.
          ValReg = EntryIt->second;
        } else {
          // Get the incoming value into a virtual register.
          //
          Value *Val = PN->getIncomingValue(i);

          // If this is a constant or GlobalValue, we may have to insert code
          // into the basic block to compute it into a virtual register.
          if (isa<Constant>(Val) || isa<GlobalValue>(Val)) {
            if (isa<ConstantExpr>(Val)) {
              // Because we don't want to clobber any values which might be in
              // physical registers with the computation of this constant (which
              // might be arbitrarily complex if it is a constant expression),
              // just insert the computation at the top of the basic block.
              MachineBasicBlock::iterator PI = PredMBB->begin();

              // Skip over any PHI nodes though!
              while (PI != PredMBB->end() && PI->getOpcode() == X86::PHI)
                ++PI;
              ValReg = getReg(Val, PredMBB, PI);
            } else {
              // Simple constants get emitted at the end of the basic block,
              // before any terminator instructions.  We "know" that the code to
              // move a constant into a register will never clobber any flags.
              ValReg = getReg(Val, PredMBB, PredMBB->getFirstTerminator());
            }
          } else {
            ValReg = getReg(Val);
          }

          // Remember that we inserted a value for this PHI for this predecessor
          PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg));
        }

        PhiMI->addRegOperand(ValReg);
        PhiMI->addMachineBasicBlockOperand(PredMBB);
        if (LongPhiMI) {
          LongPhiMI->addRegOperand(ValReg+1);
          LongPhiMI->addMachineBasicBlockOperand(PredMBB);
        }
      }

      // Now that we emitted all of the incoming values for the PHI node, make
      // sure to reposition the InsertPoint after the PHI that we just added.
      // This is needed because we might have inserted a constant into this
      // block, right after the PHI's which is before the old insert point!
      PHIInsertPoint = LongPhiMI ? LongPhiMI : PhiMI;
      ++PHIInsertPoint;
    }
  }
}
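// Illustrative example of the machine PHIs built above: for an LLVM PHI
//   %x = phi long [ %a, %bb1 ], [ %b, %bb2 ]
// two machine PHIs are created, roughly:
//   %xLo = PHI %aLo, mbb1, %bLo, mbb2
//   %xHi = PHI %aHi, mbb1, %bHi, mbb2
// where each Hi register number is the corresponding Lo register plus one.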
/// RequiresFPRegKill - The floating point stackifier pass cannot insert
/// compensation code on critical edges.  As such, it requires that we kill all
/// FP registers on the exit from any blocks that either ARE critical edges, or
/// branch to a block that has incoming critical edges.
///
/// Note that this kill instruction will eventually be eliminated when
/// restrictions in the stackifier are relaxed.
///
static bool RequiresFPRegKill(const BasicBlock *BB) {
  for (succ_const_iterator SI = succ_begin(BB), E = succ_end(BB); SI!=E; ++SI) {
    const BasicBlock *Succ = *SI;
    pred_const_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
    ++PI;  // Blocks have at least one predecessor.
    if (PI != PE) {  // If it has exactly one, this isn't a critical edge...
      // If this block has more than one predecessor, check all of the
      // predecessors to see if they have multiple successors.  If so, then the
      // block we are analyzing needs an FPRegKill.
      for (PI = pred_begin(Succ); PI != PE; ++PI) {
        const BasicBlock *Pred = *PI;
        succ_const_iterator SI2 = succ_begin(Pred);
        ++SI2;  // There must be at least one successor of this block.
        if (SI2 != succ_end(Pred))
          return true;   // Yes, we must insert the kill on this edge.
      }
    }
  }
  // If we got this far, there is no need to insert the kill instruction.
  return false;
}
// InsertFPRegKills - Insert FP_REG_KILL instructions into basic blocks that
// need them.  This only occurs due to the floating point stackifier not being
// aggressive enough to handle arbitrary global stackification.
//
// Currently we insert an FP_REG_KILL instruction into each block that uses or
// defines a floating point virtual register.
//
// When the global register allocators (like linear scan) finally update live
// variable analysis, we can keep floating point values in registers across
// portions of the CFG that do not involve critical edges.  This will be a big
// win, but we are waiting on the global allocators before we can do this.
//
// With a bit of work, the floating point stackifier pass can be enhanced to
// break critical edges as needed (to make a place to put compensation code),
// but this will require some infrastructure improvements as well.
//
void ISel::InsertFPRegKills() {
  SSARegMap &RegMap = *F->getSSARegMap();

  for (MachineFunction::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I!=E; ++I)
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = I->getOperand(i);
        if (MO.isRegister() && MO.getReg()) {
          unsigned Reg = MO.getReg();
          if (MRegisterInfo::isVirtualRegister(Reg))
            if (RegMap.getRegClass(Reg)->getSize() == 10)
              goto UsesFPReg;
        }
      }
    // If we haven't found an FP register use or def in this basic block, check
    // to see if any of our successors has an FP PHI node, which will cause a
    // copy to be inserted into this block.
    for (succ_const_iterator SI = succ_begin(BB->getBasicBlock()),
           E = succ_end(BB->getBasicBlock()); SI != E; ++SI) {
      MachineBasicBlock *SBB = MBBMap[*SI];
      for (MachineBasicBlock::iterator I = SBB->begin();
           I != SBB->end() && I->getOpcode() == X86::PHI; ++I) {
        if (RegMap.getRegClass(I->getOperand(0).getReg())->getSize() == 10)
          goto UsesFPReg;
      }
    }
    continue;
  UsesFPReg:
    // Okay, this block uses an FP register.  If the block has successors (ie,
    // it's not an unwind/return), insert the FP_REG_KILL instruction.
    if (BB->getBasicBlock()->getTerminator()->getNumSuccessors() &&
        RequiresFPRegKill(BB->getBasicBlock())) {
      BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
      ++NumFPKill;
    }
  }
}
// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold
// it into the conditional branch or select instruction which is the only user
// of the cc instruction.  This is the case if the conditional branch is the
// only user of the setcc, and if the setcc is in the same basic block as the
// conditional branch.  We also don't handle long arguments below, so we reject
// them here as well.
//
static SetCondInst *canFoldSetCCIntoBranchOrSelect(Value *V) {
  if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
    if (SCI->hasOneUse()) {
      Instruction *User = cast<Instruction>(SCI->use_back());
      if ((isa<BranchInst>(User) || isa<SelectInst>(User)) &&
          SCI->getParent() == User->getParent() &&
          (getClassB(SCI->getOperand(0)->getType()) != cLong ||
           SCI->getOpcode() == Instruction::SetEQ ||
           SCI->getOpcode() == Instruction::SetNE))
        return SCI;
    }
  return 0;
}
// Return a fixed numbering for setcc instructions which does not depend on the
// order of the opcodes.
//
static unsigned getSetCCNumber(unsigned Opcode) {
  switch (Opcode) {
  default: assert(0 && "Unknown setcc instruction!");
  case Instruction::SetEQ: return 0;
  case Instruction::SetNE: return 1;
  case Instruction::SetLT: return 2;
  case Instruction::SetGE: return 3;
  case Instruction::SetGT: return 4;
  case Instruction::SetLE: return 5;
  }
}
// LLVM  -> X86 signed  X86 unsigned
// -----    ----------  ------------
// seteq -> sete        sete
// setne -> setne       setne
// setlt -> setl        setb
// setge -> setge       setae
// setgt -> setg        seta
// setle -> setle       setbe
// ----
// sets                 // Used by comparison with 0 optimization
// setns

static const unsigned SetCCOpcodeTab[2][8] = {
  { X86::SETEr, X86::SETNEr, X86::SETBr, X86::SETAEr, X86::SETAr, X86::SETBEr,
    0, 0 },
  { X86::SETEr, X86::SETNEr, X86::SETLr, X86::SETGEr, X86::SETGr, X86::SETLEr,
    X86::SETSr, X86::SETNSr },
};
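// The table is indexed as SetCCOpcodeTab[isSigned][OpNum].  For example
// (illustrative): 'setlt' gives OpNum 2 via getSetCCNumber, so a signed
// comparison selects SetCCOpcodeTab[1][2] == X86::SETLr, while an unsigned one
// selects SetCCOpcodeTab[0][2] == X86::SETBr.  Slots 6 and 7 (sets/setns) are
// only reachable through the compare-with-zero remapping in EmitComparison.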
// EmitComparison - This function emits a comparison of the two operands,
// returning the extended setcc code to use.
unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
                              MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP) {
  // The arguments are already supposed to be of the same type.
  const Type *CompTy = Op0->getType();
  unsigned Class = getClassB(CompTy);
  unsigned Op0r = getReg(Op0, MBB, IP);

  // Special case handling of: cmp R, i
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    if (Class == cByte || Class == cShort || Class == cInt) {
      unsigned Op1v = CI->getRawValue();

      // Mask off any upper bits of the constant, if there are any...
      Op1v &= (1ULL << (8 << Class)) - 1;

      // If this is a comparison against zero, emit more efficient code.  We
      // can't handle unsigned comparisons against zero unless they are == or
      // !=.  These should have been strength reduced already anyway.
      if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {
        static const unsigned TESTTab[] = {
          X86::TEST8rr, X86::TEST16rr, X86::TEST32rr
        };
        BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);

        if (OpNum == 2) return 6;   // Map jl -> js
        if (OpNum == 3) return 7;   // Map jg -> jns
        return OpNum;
      }

      static const unsigned CMPTab[] = {
        X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
      };

      BuildMI(*MBB, IP, CMPTab[Class], 2).addReg(Op0r).addImm(Op1v);
      return OpNum;
    } else {
      assert(Class == cLong && "Unknown integer class!");
      unsigned LowCst = CI->getRawValue();
      unsigned HiCst = CI->getRawValue() >> 32;
      if (OpNum < 2) {    // seteq, setne
        unsigned LoTmp = Op0r;
        if (LowCst != 0) {
          LoTmp = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::XOR32ri, 2, LoTmp).addReg(Op0r).addImm(LowCst);
        }
        unsigned HiTmp = Op0r+1;
        if (HiCst != 0) {
          HiTmp = makeAnotherReg(Type::IntTy);
          BuildMI(*MBB, IP, X86::XOR32ri, 2, HiTmp).addReg(Op0r+1).addImm(HiCst);
        }
        unsigned FinalTmp = makeAnotherReg(Type::IntTy);
        BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
        return OpNum;
      } else {
        // Emit a sequence of code which compares the high and low parts once
        // each, then uses a conditional move to handle the overflow case.  For
        // example, a setlt for long would generate code like this:
        //
        //   AL = lo(op1) < lo(op2)   // Signedness depends on operands
        //   BL = hi(op1) < hi(op2)   // Always unsigned comparison
        //   dest = hi(op1) == hi(op2) ? AL : BL;
        //
        // FIXME: This would be much better if we had hierarchical register
        // classes!  Until then, hardcode registers so that we can deal with
        // their aliases (because we don't have conditional byte moves).
        //
        BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r).addImm(LowCst);
        BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
        BuildMI(*MBB, IP, X86::CMP32ri, 2).addReg(Op0r+1).addImm(HiCst);
        BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
        BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
        BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
        BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
          .addReg(X86::AX);
        // NOTE: visitSetCondInst knows that the value is dumped into the BL
        // register at this point for long values...
        return OpNum;
      }
    }
  }
  // Special case handling of comparison against +/- 0.0
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op1))
    if (CFP->isExactlyValue(+0.0) || CFP->isExactlyValue(-0.0)) {
      BuildMI(*MBB, IP, X86::FTST, 1).addReg(Op0r);
      BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
      BuildMI(*MBB, IP, X86::SAHF, 1);
      return OpNum;
    }

  unsigned Op1r = getReg(Op1, MBB, IP);
  switch (Class) {
  default: assert(0 && "Unknown type class!");
    // Emit: cmp <var1>, <var2> (do the comparison).  We can
    // compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
    // 32-bit.
  case cByte:
    BuildMI(*MBB, IP, X86::CMP8rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cShort:
    BuildMI(*MBB, IP, X86::CMP16rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cInt:
    BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
    break;
  case cFP:
    if (0) { // for processors prior to the P6
      BuildMI(*MBB, IP, X86::FpUCOM, 2).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
      BuildMI(*MBB, IP, X86::SAHF, 1);
    } else {
      BuildMI(*MBB, IP, X86::FpUCOMI, 2).addReg(Op0r).addReg(Op1r);
    }
    break;

  case cLong:
    if (OpNum < 2) {    // seteq, setne
      unsigned LoTmp = makeAnotherReg(Type::IntTy);
      unsigned HiTmp = makeAnotherReg(Type::IntTy);
      unsigned FinalTmp = makeAnotherReg(Type::IntTy);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, LoTmp).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, X86::XOR32rr, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
      BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
      break;  // Allow the sete or setne to be generated from flags set by OR
    } else {
      // Emit a sequence of code which compares the high and low parts once
      // each, then uses a conditional move to handle the overflow case.  For
      // example, a setlt for long would generate code like this:
      //
      //   AL = lo(op1) < lo(op2)   // Signedness depends on operands
      //   BL = hi(op1) < hi(op2)   // Always unsigned comparison
      //   dest = hi(op1) == hi(op2) ? AL : BL;
      //
      // FIXME: This would be much better if we had hierarchical register
      // classes!  Until then, hardcode registers so that we can deal with their
      // aliases (because we don't have conditional byte moves).
      //
      BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
      BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
      BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r+1).addReg(Op1r+1);
      BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
      BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
      BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
        .addReg(X86::AX);
      // NOTE: visitSetCondInst knows that the value is dumped into the BL
      // register at this point for long values...
      return OpNum;
    }
  }
  return OpNum;
}
/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
/// register, then move it to wherever the result should be.
///
void ISel::visitSetCondInst(SetCondInst &I) {
  if (canFoldSetCCIntoBranchOrSelect(&I))
    return;   // Fold this into a branch or select.

  unsigned DestReg = getReg(I);
  MachineBasicBlock::iterator MII = BB->end();
  emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(),
                     DestReg);
}
/// emitSetCCOperation - Common code shared between visitSetCondInst and
/// constant expression support.
///
void ISel::emitSetCCOperation(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator IP,
                              Value *Op0, Value *Op1, unsigned Opcode,
                              unsigned TargetReg) {
  unsigned OpNum = getSetCCNumber(Opcode);
  OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP);

  const Type *CompTy = Op0->getType();
  unsigned CompClass = getClassB(CompTy);
  bool isSigned = CompTy->isSigned() && CompClass != cFP;

  if (CompClass != cLong || OpNum < 2) {
    // Handle normal comparisons with a setcc instruction...
    BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg);
  } else {
    // Handle long comparisons by copying the value which is already in BL into
    // the register we want...
    BuildMI(*MBB, IP, X86::MOV8rr, 1, TargetReg).addReg(X86::BL);
  }
}
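// For example (illustrative), 'setlt int %a, %b' lowers to roughly:
//   cmp  <reg-a>, <reg-b>
//   setl <dest>            ; SETBr instead if the operands are unsigned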
void ISel::visitSelectInst(SelectInst &SI) {
  unsigned DestReg = getReg(SI);
  MachineBasicBlock::iterator MII = BB->end();
  emitSelectOperation(BB, MII, SI.getCondition(), SI.getTrueValue(),
                      SI.getFalseValue(), DestReg);
}
/// emitSelectOperation - Common code shared between visitSelectInst and the
/// constant expression support.
void ISel::emitSelectOperation(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator IP,
                               Value *Cond, Value *TrueVal, Value *FalseVal,
                               unsigned DestReg) {
  unsigned SelectClass = getClassB(TrueVal->getType());

  // We don't support 8-bit conditional moves.  If we have incoming constants,
  // transform them into 16-bit constants to avoid having a run-time conversion.
  if (SelectClass == cByte) {
    if (Constant *T = dyn_cast<Constant>(TrueVal))
      TrueVal = ConstantExpr::getCast(T, Type::ShortTy);
    if (Constant *F = dyn_cast<Constant>(FalseVal))
      FalseVal = ConstantExpr::getCast(F, Type::ShortTy);
  }

  unsigned TrueReg  = getReg(TrueVal, MBB, IP);
  unsigned FalseReg = getReg(FalseVal, MBB, IP);
  if (TrueReg == FalseReg) {
    static const unsigned Opcode[] = {
      X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
    };
    BuildMI(*MBB, IP, Opcode[SelectClass], 1, DestReg).addReg(TrueReg);
    if (SelectClass == cLong)
      BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(TrueReg+1);
    return;
  }

  unsigned Opcode;
  if (SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(Cond)) {
    // We successfully folded the setcc into the select instruction.

    unsigned OpNum = getSetCCNumber(SCI->getOpcode());
    OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), MBB,
                           IP);

    const Type *CompTy = SCI->getOperand(0)->getType();
    bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

    // LLVM  -> X86 signed  X86 unsigned
    // -----    ----------  ------------
    // seteq -> cmovNE      cmovNE
    // setne -> cmovE       cmovE
    // setlt -> cmovGE      cmovAE
    // setge -> cmovL       cmovB
    // setgt -> cmovLE      cmovBE
    // setle -> cmovG       cmovA
    // ----
    // cmovNS               // Used by comparison with 0 optimization
    // cmovS

    switch (SelectClass) {
    default: assert(0 && "Unknown value class!");
    case cFP: {
      // Annoyingly, we don't have a full set of floating point conditional
      // moves.  :(
      static const unsigned OpcodeTab[2][8] = {
        { X86::FCMOVNE, X86::FCMOVE, X86::FCMOVAE, X86::FCMOVB,
          X86::FCMOVBE, X86::FCMOVA, 0, 0 },
        { X86::FCMOVNE, X86::FCMOVE, 0, 0, 0, 0, 0, 0 },
      };
      Opcode = OpcodeTab[isSigned][OpNum];

      // If opcode == 0, we hit a case that we don't support.  Output a setcc
      // and compare the result against zero.
      if (Opcode == 0) {
        unsigned CompClass = getClassB(CompTy);
        unsigned CondReg;
        if (CompClass != cLong || OpNum < 2) {
          CondReg = makeAnotherReg(Type::BoolTy);
          // Handle normal comparisons with a setcc instruction...
          BuildMI(*MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, CondReg);
        } else {
          // Long comparisons end up in the BL register.
          CondReg = X86::BL;
        }

        BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
        Opcode = X86::FCMOVE;
      }
      break;
    }
    case cByte:
    case cShort: {
      static const unsigned OpcodeTab[2][8] = {
        { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVAE16rr, X86::CMOVB16rr,
          X86::CMOVBE16rr, X86::CMOVA16rr, 0, 0 },
        { X86::CMOVNE16rr, X86::CMOVE16rr, X86::CMOVGE16rr, X86::CMOVL16rr,
          X86::CMOVLE16rr, X86::CMOVG16rr, X86::CMOVNS16rr, X86::CMOVS16rr },
      };
      Opcode = OpcodeTab[isSigned][OpNum];
      break;
    }
    case cInt:
    case cLong: {
      static const unsigned OpcodeTab[2][8] = {
        { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVAE32rr, X86::CMOVB32rr,
          X86::CMOVBE32rr, X86::CMOVA32rr, 0, 0 },
        { X86::CMOVNE32rr, X86::CMOVE32rr, X86::CMOVGE32rr, X86::CMOVL32rr,
          X86::CMOVLE32rr, X86::CMOVG32rr, X86::CMOVNS32rr, X86::CMOVS32rr },
      };
      Opcode = OpcodeTab[isSigned][OpNum];
      break;
    }
    }
  } else {
    // Get the value being branched on, and use it to set the condition codes.
    unsigned CondReg = getReg(Cond, MBB, IP);
    BuildMI(*MBB, IP, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
    switch (SelectClass) {
    default: assert(0 && "Unknown value class!");
    case cFP:    Opcode = X86::FCMOVE;    break;
    case cByte:
    case cShort: Opcode = X86::CMOVE16rr; break;
    case cInt:
    case cLong:  Opcode = X86::CMOVE32rr; break;
    }
  }

  unsigned RealDestReg = DestReg;

  // Annoyingly enough, X86 doesn't HAVE 8-bit conditional moves.  Because of
  // this, we have to promote the incoming values to 16 bits, perform a 16-bit
  // cmove, then truncate the result.
  if (SelectClass == cByte) {
    DestReg = makeAnotherReg(Type::ShortTy);
    if (getClassB(TrueVal->getType()) == cByte) {
      // Promote the true value, by storing it into AL, and reading from AX.
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::AL).addReg(TrueReg);
      BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::AH).addImm(0);
      TrueReg = makeAnotherReg(Type::ShortTy);
      BuildMI(*MBB, IP, X86::MOV16rr, 1, TrueReg).addReg(X86::AX);
    }
    if (getClassB(FalseVal->getType()) == cByte) {
      // Promote the false value, by storing it into CL, and reading from CX.
      BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(FalseReg);
      BuildMI(*MBB, IP, X86::MOV8ri, 1, X86::CH).addImm(0);
      FalseReg = makeAnotherReg(Type::ShortTy);
      BuildMI(*MBB, IP, X86::MOV16rr, 1, FalseReg).addReg(X86::CX);
    }
  }

  BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(TrueReg).addReg(FalseReg);

  switch (SelectClass) {
  case cByte:
    // We did the computation with 16-bit registers.  Truncate back to our
    // result by copying into AX then copying out AL.
    BuildMI(*MBB, IP, X86::MOV16rr, 1, X86::AX).addReg(DestReg);
    BuildMI(*MBB, IP, X86::MOV8rr, 1, RealDestReg).addReg(X86::AL);
    break;
  case cLong:
    // Move the upper half of the value as well.
    BuildMI(*MBB, IP, Opcode, 2, DestReg+1).addReg(TrueReg+1).addReg(FalseReg+1);
    break;
  }
}
/// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide
/// operand, in the specified target register.
///
void ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
  bool isUnsigned = VR.Ty->isUnsigned();

  Value *Val = VR.Val;
  const Type *Ty = VR.Ty;
  if (Val) {
    if (Constant *C = dyn_cast<Constant>(Val)) {
      Val = ConstantExpr::getCast(C, Type::IntTy);
      Ty = Type::IntTy;
    }

    // If this is a simple constant, just emit a MOVri directly to avoid the
    // copy...
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
      int TheVal = CI->getRawValue() & 0xFFFFFFFF;
      BuildMI(BB, X86::MOV32ri, 1, targetReg).addImm(TheVal);
      return;
    }
  }

  // Make sure we have the register number for this value...
  unsigned Reg = Val ? getReg(Val) : VR.Reg;

  switch (getClassB(Ty)) {
  case cByte:
    // Extend value into target register (8->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr8, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr8, 1, targetReg).addReg(Reg);
    break;
  case cShort:
    // Extend value into target register (16->32)
    if (isUnsigned)
      BuildMI(BB, X86::MOVZX32rr16, 1, targetReg).addReg(Reg);
    else
      BuildMI(BB, X86::MOVSX32rr16, 1, targetReg).addReg(Reg);
    break;
  case cInt:
    // Move value into target register (32->32)
    BuildMI(BB, X86::MOV32rr, 1, targetReg).addReg(Reg);
    break;
  default:
    assert(0 && "Unpromotable operand class in promote32");
  }
}
/// 'ret' instruction - Here we are interested in meeting the x86 ABI.  As such,
/// we have the following possibilities:
///
///   ret void: No return value, simply emit a 'ret' instruction
///   ret sbyte, ubyte : Extend value into EAX and return
///   ret short, ushort: Extend value into EAX and return
///   ret int, uint    : Move value into EAX and return
///   ret pointer      : Move value into EAX and return
///   ret long, ulong  : Move value into EAX/EDX and return
///   ret float/double : Top of FP stack
///
void ISel::visitReturnInst(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    BuildMI(BB, X86::RET, 0);  // Just emit a 'ret' instruction
    return;
  }

  Value *RetVal = I.getOperand(0);
  switch (getClassB(RetVal->getType())) {
  case cByte:   // integral return values: extend or move into EAX and return
  case cShort:
  case cInt:
    promote32(X86::EAX, ValueRecord(RetVal));
    // Declare that EAX is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
    break;
  case cFP: {                  // Floats & Doubles: Return in ST(0)
    unsigned RetReg = getReg(RetVal);
    BuildMI(BB, X86::FpSETRESULT, 1).addReg(RetReg);
    // Declare that top-of-stack is live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
    break;
  }
  case cLong: {
    unsigned RetReg = getReg(RetVal);
    BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(RetReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RetReg+1);
    // Declare that EAX & EDX are live on exit
    BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
      .addReg(X86::ESP);
    break;
  }
  default:
    visitInstruction(I);
  }
  // Emit a 'ret' instruction
  BuildMI(BB, X86::RET, 0);
}
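// For example (illustrative), 'ret int %x' becomes roughly:
//   mov EAX, <reg-x>     ; via promote32
//   ret
// and 'ret long %y' moves the register pair into EAX/EDX before the ret.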
// getBlockAfter - Return the basic block which occurs lexically after the
// specified one.
static inline BasicBlock *getBlockAfter(BasicBlock *BB) {
  Function::iterator I = BB; ++I;  // Get iterator to next block
  return I != BB->getParent()->end() ? &*I : 0;
}

/// visitBranchInst - Handle conditional and unconditional branches here.  Note
/// that since code layout is frozen at this point, that if we are trying to
/// jump to a block that is the immediate successor of the current block, we can
/// just make a fall-through (but we don't currently).
///
void ISel::visitBranchInst(BranchInst &BI) {
  BasicBlock *NextBB = getBlockAfter(BI.getParent());  // BB after current one

  if (!BI.isConditional()) {  // Unconditional branch?
    if (BI.getSuccessor(0) != NextBB)
      BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
    return;
  }

  // See if we can fold the setcc into the branch itself...
  SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(BI.getCondition());
  if (SCI == 0) {
    // Nope, cannot fold setcc into this branch.  Emit a branch on a condition
    // computed some other way...
    unsigned condReg = getReg(BI.getCondition());
    BuildMI(BB, X86::TEST8rr, 2).addReg(condReg).addReg(condReg);
    if (BI.getSuccessor(1) == NextBB) {
      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JNE, 1).addPCDisp(BI.getSuccessor(0));
    } else {
      BuildMI(BB, X86::JE, 1).addPCDisp(BI.getSuccessor(1));

      if (BI.getSuccessor(0) != NextBB)
        BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
    }
    return;
  }

  unsigned OpNum = getSetCCNumber(SCI->getOpcode());
  MachineBasicBlock::iterator MII = BB->end();
  OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB, MII);

  const Type *CompTy = SCI->getOperand(0)->getType();
  bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;

  // LLVM  -> X86 signed  X86 unsigned
  // -----    ----------  ------------
  // seteq -> je          je
  // setne -> jne         jne
  // setlt -> jl          jb
  // setge -> jge         jae
  // setgt -> jg          ja
  // setle -> jle         jbe
  // ----
  // js                   // Used by comparison with 0 optimization
  // jns

  static const unsigned OpcodeTab[2][8] = {
    { X86::JE, X86::JNE, X86::JB, X86::JAE, X86::JA, X86::JBE, 0, 0 },
    { X86::JE, X86::JNE, X86::JL, X86::JGE, X86::JG, X86::JLE,
      X86::JS, X86::JNS },
  };

  if (BI.getSuccessor(0) != NextBB) {
    BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(0));
    if (BI.getSuccessor(1) != NextBB)
      BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(1));
  } else {
    // Change to the inverse condition...
    if (BI.getSuccessor(1) != NextBB) {
      OpNum ^= 1;
      BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(1));
    }
  }
}
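// Note on the inverse-condition trick above: the numbering from getSetCCNumber
// pairs each condition with its negation (0 eq / 1 ne, 2 lt / 3 ge, 4 gt /
// 5 le, 6 s / 7 ns), so 'OpNum ^= 1' flips the condition.  For example
// (illustrative), when the true target falls through, 'br (setlt ...), %T, %F'
// is emitted as a single 'jge %F' instead of 'jl %T; jmp %F'.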
/// doCall - This emits an abstract call instruction, setting up the arguments
/// and the return value as appropriate.  For the actual function call itself,
/// it inserts the specified CallMI instruction into the stream.
///
void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
                  const std::vector<ValueRecord> &Args) {
  // Count how many bytes are to be pushed on the stack...
  unsigned NumBytes = 0;

  if (!Args.empty()) {
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      switch (getClassB(Args[i].Ty)) {
      case cByte: case cShort: case cInt:
        NumBytes += 4; break;
      case cLong:
        NumBytes += 8; break;
      case cFP:
        NumBytes += Args[i].Ty == Type::FloatTy ? 4 : 8;
        break;
      default: assert(0 && "Unknown class!");
      }

    // Adjust the stack pointer for the new arguments...
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(NumBytes);

    // Arguments go on the stack in reverse order, as specified by the ABI.
    unsigned ArgOffset = 0;
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      unsigned ArgReg;
      switch (getClassB(Args[i].Ty)) {
      case cByte:
      case cShort:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          // Zero/Sign extend constant, then stuff into memory.
          ConstantInt *Val = cast<ConstantInt>(Args[i].Val);
          Val = cast<ConstantInt>(ConstantExpr::getCast(Val, Type::IntTy));
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5), X86::ESP, ArgOffset)
            .addImm(Val->getRawValue() & 0xFFFFFFFF);
        } else {
          // Promote arg to 32 bits wide into a temporary register...
          ArgReg = makeAnotherReg(Type::UIntTy);
          promote32(ArgReg, Args[i]);
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        }
        break;
      case cInt:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          unsigned Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(Val);
        } else {
          ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        }
        break;
      case cLong:
        if (Args[i].Val && isa<ConstantInt>(Args[i].Val)) {
          uint64_t Val = cast<ConstantInt>(Args[i].Val)->getRawValue();
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset).addImm(Val & ~0U);
          addRegOffset(BuildMI(BB, X86::MOV32mi, 5),
                       X86::ESP, ArgOffset+4).addImm(Val >> 32ULL);
        } else {
          ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
          addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
                       X86::ESP, ArgOffset+4).addReg(ArgReg+1);
        }
        ArgOffset += 4;      // 8 byte entry, not 4.
        break;
      case cFP:
        ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
        if (Args[i].Ty == Type::FloatTy) {
          addRegOffset(BuildMI(BB, X86::FST32m, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
        } else {
          assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!");
          addRegOffset(BuildMI(BB, X86::FST64m, 5),
                       X86::ESP, ArgOffset).addReg(ArgReg);
          ArgOffset += 4;    // 8 byte entry, not 4.
        }
        break;
      default: assert(0 && "Unknown class!");
      }
      ArgOffset += 4;
    }
  } else {
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(0);
  }

  BB->push_back(CallMI);

  BuildMI(BB, X86::ADJCALLSTACKUP, 1).addImm(NumBytes);

  // If there is a return value, scavenge the result from the location the call
  // leaves it in...
  //
  if (Ret.Ty != Type::VoidTy) {
    unsigned DestClass = getClassB(Ret.Ty);
    switch (DestClass) {
    case cByte:
    case cShort:
    case cInt: {
      // Integral results are in %eax, or the appropriate portion
      // thereof.
      static const unsigned regRegMove[] = {
        X86::MOV8rr, X86::MOV16rr, X86::MOV32rr
      };
      static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX };
      BuildMI(BB, regRegMove[DestClass], 1, Ret.Reg).addReg(AReg[DestClass]);
      break;
    }
    case cFP:     // Floating-point return values live in %ST(0)
      BuildMI(BB, X86::FpGETRESULT, 1, Ret.Reg);
      break;
    case cLong:   // Long values are left in EDX:EAX
      BuildMI(BB, X86::MOV32rr, 1, Ret.Reg).addReg(X86::EAX);
      BuildMI(BB, X86::MOV32rr, 1, Ret.Reg+1).addReg(X86::EDX);
      break;
    default: assert(0 && "Unknown class!");
    }
  }
}
/// visitCallInst - Push args on stack and do a procedure call instruction.
void ISel::visitCallInst(CallInst &CI) {
  MachineInstr *TheCall;
  if (Function *F = CI.getCalledFunction()) {
    // Is it an intrinsic function call?
    if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) {
      visitIntrinsicCall(ID, CI);   // Special intrinsics are not handled here
      return;
    }

    // Emit a CALL instruction with PC-relative displacement.
    TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true);
  } else {  // Emit an indirect call...
    unsigned Reg = getReg(CI.getCalledValue());
    TheCall = BuildMI(X86::CALL32r, 1).addReg(Reg);
  }

  std::vector<ValueRecord> Args;
  for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
    Args.push_back(ValueRecord(CI.getOperand(i)));

  unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0;
  doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
}
/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the
/// function, lowering any calls to unknown intrinsic functions into the
/// equivalent LLVM code.
///
void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
      if (CallInst *CI = dyn_cast<CallInst>(I++))
        if (Function *F = CI->getCalledFunction())
          switch (F->getIntrinsicID()) {
          case Intrinsic::not_intrinsic:
          case Intrinsic::vastart:
          case Intrinsic::vacopy:
          case Intrinsic::vaend:
          case Intrinsic::returnaddress:
          case Intrinsic::frameaddress:
          case Intrinsic::memcpy:
          case Intrinsic::memset:
          case Intrinsic::readport:
          case Intrinsic::writeport:
            // We directly implement these intrinsics
            break;
          case Intrinsic::readio: {
            // On X86, memory operations are in-order.  Lower this intrinsic
            // into a volatile load.
            Instruction *Before = CI->getPrev();
            LoadInst *LI = new LoadInst(CI->getOperand(1), "", true, CI);
            CI->replaceAllUsesWith(LI);
            BB->getInstList().erase(CI);
            if (Before) {       // Move iterator to instruction after call
              I = Before;
              ++I;
            } else {
              I = BB->begin();
            }
            break;
          }
          case Intrinsic::writeio: {
            // On X86, memory operations are in-order.  Lower this intrinsic
            // into a volatile store.
            Instruction *Before = CI->getPrev();
            StoreInst *SI = new StoreInst(CI->getOperand(1),
                                          CI->getOperand(2), true, CI);
            CI->replaceAllUsesWith(SI);
            BB->getInstList().erase(CI);
            if (Before) {       // Move iterator to instruction after call
              I = Before;
              ++I;
            } else {
              I = BB->begin();
            }
            break;
          }
          default:
            // All other intrinsic calls we must lower.
            Instruction *Before = CI->getPrev();
            TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
            if (Before) {       // Move iterator to instruction after call
              I = Before;
              ++I;
            } else {
              I = BB->begin();
            }
          }
}
void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
  unsigned TmpReg1, TmpReg2;
  switch (ID) {
  case Intrinsic::vastart:
    // Get the address of the first vararg value...
    TmpReg1 = getReg(CI);
    addFrameReference(BuildMI(BB, X86::LEA32r, 5, TmpReg1), VarArgsFrameIndex);
    return;

  case Intrinsic::vacopy:
    TmpReg1 = getReg(CI);
    TmpReg2 = getReg(CI.getOperand(1));
    BuildMI(BB, X86::MOV32rr, 1, TmpReg1).addReg(TmpReg2);
    return;
  case Intrinsic::vaend: return;   // Noop on X86

  case Intrinsic::returnaddress:
  case Intrinsic::frameaddress:
    TmpReg1 = getReg(CI);
    if (cast<Constant>(CI.getOperand(1))->isNullValue()) {
      if (ID == Intrinsic::returnaddress) {
        // Just load the return address
        addFrameReference(BuildMI(BB, X86::MOV32rm, 4, TmpReg1),
                          ReturnAddressIndex);
      } else {
        addFrameReference(BuildMI(BB, X86::LEA32r, 4, TmpReg1),
                          ReturnAddressIndex, -4);
      }
    } else {
      // Values other than zero are not implemented yet.
      BuildMI(BB, X86::MOV32ri, 1, TmpReg1).addImm(0);
    }
    return;

  case Intrinsic::memcpy: {
    assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
    unsigned Align = 1;
    if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
      Align = AlignC->getRawValue();
      if (Align == 0) Align = 1;
    }

    // Turn the byte count into # iterations
    unsigned CountReg;
    unsigned Opcode;
    switch (Align & 3) {
    case 2:   // WORD aligned
      if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
        CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
      } else {
        CountReg = makeAnotherReg(Type::IntTy);
        unsigned ByteReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
      }
      Opcode = X86::REP_MOVSW;
      break;
    case 0:   // DWORD aligned
      if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
        CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
      } else {
        CountReg = makeAnotherReg(Type::IntTy);
        unsigned ByteReg = getReg(CI.getOperand(3));
        BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
      }
      Opcode = X86::REP_MOVSD;
      break;
    default:  // BYTE aligned
      CountReg = getReg(CI.getOperand(3));
      Opcode = X86::REP_MOVSB;
      break;
    }

    // No matter what the alignment is, we put the source in ESI, the
    // destination in EDI, and the count in ECX.
    TmpReg1 = getReg(CI.getOperand(1));
    TmpReg2 = getReg(CI.getOperand(2));
    BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
    BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
    BuildMI(BB, Opcode, 0);
    return;
  }
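  // Illustrative expansion: llvm.memcpy(%dst, %src, 32, 4) is DWORD aligned,
  // so it becomes roughly:
  //   mov ECX, 8          ; 32 bytes / 4
  //   mov EDI, <dst>
  //   mov ESI, <src>
  //   rep movsd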
1668 case Intrinsic::memset: {
1669 assert(CI.getNumOperands() == 5 && "Illegal llvm.memset call!");
1670 unsigned Align = 1;
1671 if (ConstantInt *AlignC = dyn_cast<ConstantInt>(CI.getOperand(4))) {
1672 Align = AlignC->getRawValue();
1673 if (Align == 0) Align = 1;
1674 }
1676 // Turn the byte count into # iterations
1677 unsigned CountReg;
1678 unsigned Opcode;
1679 if (ConstantInt *ValC = dyn_cast<ConstantInt>(CI.getOperand(2))) {
1680 unsigned Val = ValC->getRawValue() & 255;
1682 // If the value is a constant, then we can potentially use larger copies.
1683 switch (Align & 3) {
1684 case 2: // WORD aligned
1685 if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
1686 CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
1687 } else {
1688 CountReg = makeAnotherReg(Type::IntTy);
1689 unsigned ByteReg = getReg(CI.getOperand(3));
1690 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
1691 }
1692 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
1693 Opcode = X86::REP_STOSW;
1694 break;
1695 case 0: // DWORD aligned
1696 if (ConstantInt *I = dyn_cast<ConstantInt>(CI.getOperand(3))) {
1697 CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
1698 } else {
1699 CountReg = makeAnotherReg(Type::IntTy);
1700 unsigned ByteReg = getReg(CI.getOperand(3));
1701 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
1702 }
1703 Val = (Val << 8) | Val;
1704 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
1705 Opcode = X86::REP_STOSD;
1706 break;
1707 default: // BYTE aligned
1708 CountReg = getReg(CI.getOperand(3));
1709 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
1710 Opcode = X86::REP_STOSB;
1711 break;
1712 }
1713 } else {
1714 // If it's not a constant value we are storing, just fall back. We could
1715 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
1716 unsigned ValReg = getReg(CI.getOperand(2));
1717 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
1718 CountReg = getReg(CI.getOperand(3));
1719 Opcode = X86::REP_STOSB;
1720 }
1722 // No matter what the alignment is, we put the fill value in AL/AX/EAX,
1723 // the destination in EDI, and the count in ECX.
1724 TmpReg1 = getReg(CI.getOperand(1));
1725 //TmpReg2 = getReg(CI.getOperand(2));
1726 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
1727 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
1728 BuildMI(BB, Opcode, 0);
1729 return;
1730 }
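// Note: REP STOS stores AL/AX/EAX to [EDI] and decrements ECX until it hits
// zero; the constant-value cases above replicate the byte into AX or EAX so
// each iteration stores 2 or 4 bytes instead of 1.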
1732 case Intrinsic::readport: {
1733 // First, determine that the size of the operand falls within the acceptable
1734 // range for this architecture.
1736 if (getClassB(CI.getOperand(1)->getType()) != cShort) {
1737 std::cerr << "llvm.readport: Address size is not 16 bits\n";
1738 exit(1);
1739 }
1741 // Now, move the I/O port address into the DX register and use the IN
1742 // instruction to get the input data.
1744 unsigned Class = getClass(CI.getCalledFunction()->getReturnType());
1745 unsigned DestReg = getReg(CI);
1747 // If the port is a single-byte constant, use the immediate form.
1748 if (ConstantInt *C = dyn_cast<ConstantInt>(CI.getOperand(1)))
1749 if ((C->getRawValue() & 255) == C->getRawValue()) {
1750 switch (Class) {
1751 case cByte:
1752 BuildMI(BB, X86::IN8ri, 1).addImm((unsigned char)C->getRawValue());
1753 BuildMI(BB, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
1754 return;
1755 case cShort:
1756 BuildMI(BB, X86::IN16ri, 1).addImm((unsigned char)C->getRawValue());
1757 BuildMI(BB, X86::MOV16rr, 1, DestReg).addReg(X86::AX);
1758 return;
1759 case cInt:
1760 BuildMI(BB, X86::IN32ri, 1).addImm((unsigned char)C->getRawValue());
1761 BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);
1762 return;
1763 }
1764 }
1766 unsigned Reg = getReg(CI.getOperand(1));
1767 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
1768 switch (Class) {
1769 case cByte:
1770 BuildMI(BB, X86::IN8rr, 0);
1771 BuildMI(BB, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
1772 return;
1773 case cShort:
1774 BuildMI(BB, X86::IN16rr, 0);
1775 BuildMI(BB, X86::MOV16rr, 1, DestReg).addReg(X86::AX);
1776 return;
1777 case cInt:
1778 BuildMI(BB, X86::IN32rr, 0);
1779 BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX);
1780 return;
1781 default:
1782 std::cerr << "Cannot do input on this data type";
1783 exit(1);
1784 }
1785 }
1788 case Intrinsic::writeport: {
1789 // First, determine that the size of the operand falls within the
1790 // acceptable range for this architecture.
1791 if (getClass(CI.getOperand(2)->getType()) != cShort) {
1792 std::cerr << "llvm.writeport: Address size is not 16 bits\n";
1793 exit(1);
1794 }
1796 unsigned Class = getClassB(CI.getOperand(1)->getType());
1797 unsigned ValReg = getReg(CI.getOperand(1));
1798 switch (Class) {
1799 case cByte:
1800 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
1801 break;
1802 case cShort:
1803 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(ValReg);
1804 break;
1805 case cInt:
1806 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(ValReg);
1807 break;
1808 default:
1809 std::cerr << "llvm.writeport: invalid data type for X86 target";
1810 exit(1);
1811 }
1814 // If the port is a single-byte constant, use the immediate form.
1815 if (ConstantInt *C = dyn_cast<ConstantInt>(CI.getOperand(2)))
1816 if ((C->getRawValue() & 255) == C->getRawValue()) {
1817 static const unsigned O[] = { X86::OUT8ir, X86::OUT16ir, X86::OUT32ir };
1818 BuildMI(BB, O[Class], 1).addImm((unsigned char)C->getRawValue());
1819 return;
1820 }
1822 // Otherwise, move the I/O port address into the DX register and the value
1823 // to write into the AL/AX/EAX register.
1824 static const unsigned Opc[] = { X86::OUT8rr, X86::OUT16rr, X86::OUT32rr };
1825 unsigned Reg = getReg(CI.getOperand(2));
1826 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
1827 BuildMI(BB, Opc[Class], 0);
1828 return;
1829 }
1831 default: assert(0 && "Error: unknown intrinsics should have been lowered!");
1832 }
1833 }
1835 static bool isSafeToFoldLoadIntoInstruction(LoadInst &LI, Instruction &User) {
1836 if (LI.getParent() != User.getParent())
1837 return false;
1838 BasicBlock::iterator It = &LI;
1839 // Check all of the instructions between the load and the user. We should
1840 // really use alias analysis here, but for now we just do something simple.
1841 for (++It; It != BasicBlock::iterator(&User); ++It) {
1842 switch (It->getOpcode()) {
1843 case Instruction::Free:
1844 case Instruction::Store:
1845 case Instruction::Call:
1846 case Instruction::Invoke:
1847 return false; // Cannot fold across a store, call, or free.
1848 case Instruction::Load:
1849 if (cast<LoadInst>(It)->isVolatile() && LI.isVolatile())
1850 return false;
1851 }
1852 }
1853 return true;
1854 }
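// Note: this test is deliberately conservative. Any intervening store, call,
// invoke, or free might change the loaded memory, so the fold is simply
// refused instead of consulting alias analysis.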
1857 /// visitSimpleBinary - Implement simple binary operators for integral types...
1858 /// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
1859 /// Xor.
1861 void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
1862 unsigned DestReg = getReg(B);
1863 MachineBasicBlock::iterator MI = BB->end();
1864 Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1);
1866 // Special case: op Reg, load [mem]
1867 if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1))
1868 if (!B.swapOperands())
1869 std::swap(Op0, Op1); // Make sure any loads are in the RHS.
1871 unsigned Class = getClassB(B.getType());
1872 if (isa<LoadInst>(Op1) && Class != cLong &&
1873 isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op1), B)) {
1875 unsigned Opcode;
1876 if (Class != cFP) {
1877 static const unsigned OpcodeTab[][3] = {
1878 // Arithmetic operators
1879 { X86::ADD8rm, X86::ADD16rm, X86::ADD32rm }, // ADD
1880 { X86::SUB8rm, X86::SUB16rm, X86::SUB32rm }, // SUB
1882 // Bitwise operators
1883 { X86::AND8rm, X86::AND16rm, X86::AND32rm }, // AND
1884 { X86:: OR8rm, X86:: OR16rm, X86:: OR32rm }, // OR
1885 { X86::XOR8rm, X86::XOR16rm, X86::XOR32rm }, // XOR
1886 };
1887 Opcode = OpcodeTab[OperatorClass][Class];
1888 } else {
1889 static const unsigned OpcodeTab[][2] = {
1890 { X86::FADD32m, X86::FADD64m }, // ADD
1891 { X86::FSUB32m, X86::FSUB64m }, // SUB
1892 };
1893 const Type *Ty = Op0->getType();
1894 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
1895 Opcode = OpcodeTab[OperatorClass][Ty == Type::DoubleTy];
1896 }
1898 unsigned BaseReg, Scale, IndexReg, Disp;
1899 getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), BaseReg,
1900 Scale, IndexReg, Disp);
1902 unsigned Op0r = getReg(Op0);
1903 addFullAddress(BuildMI(BB, Opcode, 2, DestReg).addReg(Op0r),
1904 BaseReg, Scale, IndexReg, Disp);
1905 return;
1906 }
1908 // If this is a floating point subtract, check to see if we can fold the first
1909 // operand in.
1910 if (Class == cFP && OperatorClass == 1 &&
1911 isa<LoadInst>(Op0) &&
1912 isSafeToFoldLoadIntoInstruction(*cast<LoadInst>(Op0), B)) {
1913 const Type *Ty = Op0->getType();
1914 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
1915 unsigned Opcode = Ty == Type::FloatTy ? X86::FSUBR32m : X86::FSUBR64m;
1917 unsigned BaseReg, Scale, IndexReg, Disp;
1918 getAddressingMode(cast<LoadInst>(Op0)->getOperand(0), BaseReg,
1919 Scale, IndexReg, Disp);
1921 unsigned Op1r = getReg(Op1);
1922 addFullAddress(BuildMI(BB, Opcode, 2, DestReg).addReg(Op1r),
1923 BaseReg, Scale, IndexReg, Disp);
1924 return;
1925 }
1927 emitSimpleBinaryOperation(BB, MI, Op0, Op1, OperatorClass, DestReg);
1928 }
1931 /// emitBinaryFPOperation - This method handles emission of floating point
1932 /// Add (0), Sub (1), Mul (2), and Div (3) operations.
1933 void ISel::emitBinaryFPOperation(MachineBasicBlock *BB,
1934 MachineBasicBlock::iterator IP,
1935 Value *Op0, Value *Op1,
1936 unsigned OperatorClass, unsigned DestReg) {
1938 // Special case: op Reg, <const fp>
1939 if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1))
1940 if (!Op1C->isExactlyValue(+0.0) && !Op1C->isExactlyValue(+1.0)) {
1941 // Create a constant pool entry for this constant.
1942 MachineConstantPool *CP = F->getConstantPool();
1943 unsigned CPI = CP->getConstantPoolIndex(Op1C);
1944 const Type *Ty = Op1->getType();
1946 static const unsigned OpcodeTab[][4] = {
1947 { X86::FADD32m, X86::FSUB32m, X86::FMUL32m, X86::FDIV32m }, // Float
1948 { X86::FADD64m, X86::FSUB64m, X86::FMUL64m, X86::FDIV64m }, // Double
1949 };
1951 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
1952 unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass];
1953 unsigned Op0r = getReg(Op0, BB, IP);
1954 addConstantPoolReference(BuildMI(*BB, IP, Opcode, 5,
1955 DestReg).addReg(Op0r), CPI);
1956 return;
1957 }
1959 // Special case: R1 = op <const fp>, R2
1960 if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op0))
1961 if (CFP->isExactlyValue(-0.0) && OperatorClass == 1) {
1962 // -0.0 - X === -X
1963 unsigned op1Reg = getReg(Op1, BB, IP);
1964 BuildMI(*BB, IP, X86::FCHS, 1, DestReg).addReg(op1Reg);
1965 return;
1966 } else if (!CFP->isExactlyValue(+0.0) && !CFP->isExactlyValue(+1.0)) {
1967 // R1 = op CST, R2 --> R1 = opr R2, CST
1969 // Create a constant pool entry for this constant.
1970 MachineConstantPool *CP = F->getConstantPool();
1971 unsigned CPI = CP->getConstantPoolIndex(CFP);
1972 const Type *Ty = CFP->getType();
1974 static const unsigned OpcodeTab[][4] = {
1975 { X86::FADD32m, X86::FSUBR32m, X86::FMUL32m, X86::FDIVR32m }, // Float
1976 { X86::FADD64m, X86::FSUBR64m, X86::FMUL64m, X86::FDIVR64m }, // Double
1977 };
1979 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
1980 unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass];
1981 unsigned Op1r = getReg(Op1, BB, IP);
1982 addConstantPoolReference(BuildMI(*BB, IP, Opcode, 5,
1983 DestReg).addReg(Op1r), CPI);
1984 return;
1985 }
1988 static const unsigned OpcodeTab[4] = {
1989 X86::FpADD, X86::FpSUB, X86::FpMUL, X86::FpDIV
1990 };
1992 unsigned Opcode = OpcodeTab[OperatorClass];
1993 unsigned Op0r = getReg(Op0, BB, IP);
1994 unsigned Op1r = getReg(Op1, BB, IP);
1995 BuildMI(*BB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
1996 }
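// Note: FpADD/FpSUB/FpMUL/FpDIV are pseudo instructions on virtual FP
// registers; a later pass in this backend (the FP stackifier) is expected to
// map them onto the real x87 register stack.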
1998 /// emitSimpleBinaryOperation - Implement simple binary operators for integral
1999 /// types... OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for
2000 /// Or, 4 for Xor. This is the common code shared between visitSimpleBinary
2001 /// and constant expression support.
2005 void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
2006 MachineBasicBlock::iterator IP,
2007 Value *Op0, Value *Op1,
2008 unsigned OperatorClass, unsigned DestReg) {
2009 unsigned Class = getClassB(Op0->getType());
2011 if (Class == cFP) {
2012 assert(OperatorClass < 2 && "No logical ops for FP!");
2013 emitBinaryFPOperation(MBB, IP, Op0, Op1, OperatorClass, DestReg);
2014 return;
2015 }
2017 // sub 0, X -> neg X
2018 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0))
2019 if (OperatorClass == 1 && CI->isNullValue()) {
2020 unsigned op1Reg = getReg(Op1, MBB, IP);
2021 static unsigned const NEGTab[] = {
2022 X86::NEG8r, X86::NEG16r, X86::NEG32r, 0, X86::NEG32r
2023 };
2024 BuildMI(*MBB, IP, NEGTab[Class], 1, DestReg).addReg(op1Reg);
2026 if (Class == cLong) {
2027 // We just emitted: Dl = neg Sl
2028 // Now emit : T = addc Sh, 0
2029 // : Dh = neg T
2030 unsigned T = makeAnotherReg(Type::IntTy);
2031 BuildMI(*MBB, IP, X86::ADC32ri, 2, T).addReg(op1Reg+1).addImm(0);
2032 BuildMI(*MBB, IP, X86::NEG32r, 1, DestReg+1).addReg(T);
2033 }
2034 return;
2035 }
2037 // Special case: op Reg, <const int>
2038 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
2039 unsigned Op0r = getReg(Op0, MBB, IP);
2041 // xor X, -1 -> not X
2042 if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
2043 static unsigned const NOTTab[] = {
2044 X86::NOT8r, X86::NOT16r, X86::NOT32r, 0, X86::NOT32r
2045 };
2046 BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
2047 if (Class == cLong) // Invert the top part too
2048 BuildMI(*MBB, IP, X86::NOT32r, 1, DestReg+1).addReg(Op0r+1);
2049 return;
2050 }
2052 // add X, -1 -> dec X
2053 if (OperatorClass == 0 && Op1C->isAllOnesValue() && Class != cLong) {
2054 // Note that we can't use dec for 64-bit decrements, because it does not
2055 // set the carry flag!
2056 static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
2057 BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
2058 return;
2059 }
2061 // add X, 1 -> inc X
2062 if (OperatorClass == 0 && Op1C->equalsInt(1) && Class != cLong) {
2063 // Note that we can't use inc for 64-bit increments, because it does not
2064 // set the carry flag!
2065 static unsigned const INCTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
2066 BuildMI(*MBB, IP, INCTab[Class], 1, DestReg).addReg(Op0r);
2067 return;
2068 }
2070 static const unsigned OpcodeTab[][5] = {
2071 // Arithmetic operators
2072 { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri, 0, X86::ADD32ri }, // ADD
2073 { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, X86::SUB32ri }, // SUB
2075 // Bitwise operators
2076 { X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, X86::AND32ri }, // AND
2077 { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri, 0, X86:: OR32ri }, // OR
2078 { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, X86::XOR32ri }, // XOR
2079 };
2081 unsigned Opcode = OpcodeTab[OperatorClass][Class];
2082 unsigned Op1l = cast<ConstantInt>(Op1C)->getRawValue();
2084 if (Class != cLong) {
2085 BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
2086 return;
2087 }
2089 // If this is a long value and the high or low bits have a special
2090 // property, emit some special cases.
2091 unsigned Op1h = cast<ConstantInt>(Op1C)->getRawValue() >> 32LL;
2093 // If the constant is zero in the low 32-bits, just copy the low part
2094 // across and apply the normal 32-bit operation to the high parts. There
2095 // will be no carry or borrow into the top.
2096 if (Op1l == 0) {
2097 if (OperatorClass != 2) // All but and...
2098 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0r);
2099 else
2100 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
2101 BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg+1)
2102 .addReg(Op0r+1).addImm(Op1h);
2103 return;
2104 }
2106 // If this is a logical operation and the top 32-bits are zero, just
2107 // operate on the lower 32.
2108 if (Op1h == 0 && OperatorClass > 1) {
2109 BuildMI(*MBB, IP, OpcodeTab[OperatorClass][cLong], 2, DestReg)
2110 .addReg(Op0r).addImm(Op1l);
2111 if (OperatorClass != 2) // All but and
2112 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(Op0r+1);
2113 else
2114 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
2115 return;
2116 }
2118 // TODO: We could handle lots of other special cases here, such as AND'ing
2119 // with 0xFFFFFFFF00000000 -> noop, etc.
2121 // Otherwise, code generate the full operation with a constant.
2122 static const unsigned TopTab[] = {
2123 X86::ADC32ri, X86::SBB32ri, X86::AND32ri, X86::OR32ri, X86::XOR32ri
2124 };
2126 BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1l);
2127 BuildMI(*MBB, IP, TopTab[OperatorClass], 2, DestReg+1)
2128 .addReg(Op0r+1).addImm(Op1h);
2129 return;
2130 }
2132 // Finally, handle the general case now.
2133 static const unsigned OpcodeTab[][5] = {
2134 // Arithmetic operators
2135 { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, 0, X86::ADD32rr }, // ADD
2136 { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, 0, X86::SUB32rr }, // SUB
2138 // Bitwise operators
2139 { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, X86::AND32rr }, // AND
2140 { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0, X86:: OR32rr }, // OR
2141 { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, X86::XOR32rr }, // XOR
2142 };
2144 unsigned Opcode = OpcodeTab[OperatorClass][Class];
2145 unsigned Op0r = getReg(Op0, MBB, IP);
2146 unsigned Op1r = getReg(Op1, MBB, IP);
2147 BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
2149 if (Class == cLong) { // Handle the upper 32 bits of long values...
2150 static const unsigned TopTab[] = {
2151 X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
2152 };
2153 BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
2154 DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
2155 }
2156 }
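// Note: for cLong the low halves use plain ADD/SUB/AND/OR/XOR, while TopTab
// supplies ADC/SBB for the high halves of add and sub so that the carry or
// borrow generated by the low-half operation is propagated upward.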
2158 /// doMultiply - Emit appropriate instructions to multiply together the
2159 /// registers op0Reg and op1Reg, and put the result in DestReg. The type of the
2160 /// result should be given as DestTy.
2162 void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI,
2163 unsigned DestReg, const Type *DestTy,
2164 unsigned op0Reg, unsigned op1Reg) {
2165 unsigned Class = getClass(DestTy);
2166 switch (Class) {
2167 case cInt:
2168 case cShort:
2169 BuildMI(*MBB, MBBI, Class == cInt ? X86::IMUL32rr:X86::IMUL16rr, 2, DestReg)
2170 .addReg(op0Reg).addReg(op1Reg);
2171 return;
2172 case cByte:
2173 // Must use the MUL instruction, which forces use of AL...
2174 BuildMI(*MBB, MBBI, X86::MOV8rr, 1, X86::AL).addReg(op0Reg);
2175 BuildMI(*MBB, MBBI, X86::MUL8r, 1).addReg(op1Reg);
2176 BuildMI(*MBB, MBBI, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
2177 return;
2178 default:
2179 case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
2180 }
2181 }
2183 // ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N. It
2184 // returns zero when the input is not exactly a power of two.
2185 static unsigned ExactLog2(unsigned Val) {
2186 if (Val == 0) return 0;
2187 unsigned Count = 0;
2188 while (Val != 1) {
2189 if (Val & 1) return 0;
2190 Val >>= 1;
2191 ++Count;
2192 }
2193 return Count+1;
2194 }
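// For example, ExactLog2(1) == 1, ExactLog2(8) == 4, and ExactLog2(12) == 0,
// so callers below shift by ExactLog2(C)-1 to multiply by C.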
2197 /// doMultiplyConst - This function is specialized to efficiently codegen an 8,
2198 /// 16, or 32-bit integer multiply by a constant.
2199 void ISel::doMultiplyConst(MachineBasicBlock *MBB,
2200 MachineBasicBlock::iterator IP,
2201 unsigned DestReg, const Type *DestTy,
2202 unsigned op0Reg, unsigned ConstRHS) {
2203 static const unsigned MOVrrTab[] = {X86::MOV8rr, X86::MOV16rr, X86::MOV32rr};
2204 static const unsigned MOVriTab[] = {X86::MOV8ri, X86::MOV16ri, X86::MOV32ri};
2206 unsigned Class = getClass(DestTy);
2208 if (ConstRHS == 0) {
2209 BuildMI(*MBB, IP, MOVriTab[Class], 1, DestReg).addImm(0);
2210 return;
2211 } else if (ConstRHS == 1) {
2212 BuildMI(*MBB, IP, MOVrrTab[Class], 1, DestReg).addReg(op0Reg);
2213 return;
2214 }
2216 // If the element size is exactly a power of 2, use a shift to get it.
2217 if (unsigned Shift = ExactLog2(ConstRHS)) {
2218 switch (Class) {
2219 default: assert(0 && "Unknown class for this function!");
2220 case cByte:
2221 BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
2222 return;
2223 case cShort:
2224 BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
2225 return;
2226 case cInt:
2227 BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
2228 return;
2229 }
2230 }
2232 if (Class == cShort) {
2233 BuildMI(*MBB, IP, X86::IMUL16rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
2234 return;
2235 } else if (Class == cInt) {
2236 BuildMI(*MBB, IP, X86::IMUL32rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
2237 return;
2238 }
2240 // Most general case, emit a normal multiply...
2241 unsigned TmpReg = makeAnotherReg(DestTy);
2242 BuildMI(*MBB, IP, MOVriTab[Class], 1, TmpReg).addImm(ConstRHS);
2244 // Emit a MUL to multiply the register holding the index by
2245 // elementSize, putting the result in OffsetReg.
2246 doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg);
2247 }
2249 /// visitMul - Multiplies are not simple binary operators because they must deal
2250 /// with the EAX register explicitly.
2252 void ISel::visitMul(BinaryOperator &I) {
2253 unsigned ResultReg = getReg(I);
2255 Value *Op0 = I.getOperand(0);
2256 Value *Op1 = I.getOperand(1);
2258 // Fold loads into floating point multiplies.
2259 if (getClass(Op0->getType()) == cFP) {
2260 if (isa<LoadInst>(Op0) && !isa<LoadInst>(Op1))
2261 if (!I.swapOperands())
2262 std::swap(Op0, Op1); // Make sure any loads are in the RHS.
2263 if (LoadInst *LI = dyn_cast<LoadInst>(Op1))
2264 if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
2265 const Type *Ty = Op0->getType();
2266 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2267 unsigned Opcode = Ty == Type::FloatTy ? X86::FMUL32m : X86::FMUL64m;
2269 unsigned BaseReg, Scale, IndexReg, Disp;
2270 getAddressingMode(LI->getOperand(0), BaseReg,
2271 Scale, IndexReg, Disp);
2273 unsigned Op0r = getReg(Op0);
2274 addFullAddress(BuildMI(BB, Opcode, 2, ResultReg).addReg(Op0r),
2275 BaseReg, Scale, IndexReg, Disp);
2276 return;
2277 }
2278 }
2280 MachineBasicBlock::iterator IP = BB->end();
2281 emitMultiply(BB, IP, Op0, Op1, ResultReg);
2282 }
2284 void ISel::emitMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
2285 Value *Op0, Value *Op1, unsigned DestReg) {
2286 MachineBasicBlock &BB = *MBB;
2287 TypeClass Class = getClass(Op0->getType());
2289 // Simple scalar multiply?
2290 unsigned Op0Reg = getReg(Op0, &BB, IP);
2291 switch (Class) {
2292 case cByte:
2293 case cShort:
2294 case cInt:
2295 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2296 unsigned Val = (unsigned)CI->getRawValue(); // Isn't a 64-bit constant
2297 doMultiplyConst(&BB, IP, DestReg, Op0->getType(), Op0Reg, Val);
2298 } else {
2299 unsigned Op1Reg = getReg(Op1, &BB, IP);
2300 doMultiply(&BB, IP, DestReg, Op1->getType(), Op0Reg, Op1Reg);
2301 }
2302 return;
2303 case cFP:
2304 emitBinaryFPOperation(MBB, IP, Op0, Op1, 2, DestReg);
2305 return;
2306 case cLong:
2307 break;
2308 }
2310 // Long value. We have to do things the hard way...
2311 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2312 unsigned CLow = CI->getRawValue();
2313 unsigned CHi = CI->getRawValue() >> 32;
2315 if (CLow == 0) {
2316 // If the low part of the constant is all zeros, things are simple.
2317 BuildMI(BB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
2318 doMultiplyConst(&BB, IP, DestReg+1, Type::UIntTy, Op0Reg, CHi);
2319 return;
2320 }
2322 // Multiply the two low parts... capturing carry into EDX
2323 unsigned OverflowReg = 0;
2324 if (CLow == 1) {
2325 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(Op0Reg);
2326 } else {
2327 unsigned Op1RegL = makeAnotherReg(Type::UIntTy);
2328 OverflowReg = makeAnotherReg(Type::UIntTy);
2329 BuildMI(BB, IP, X86::MOV32ri, 1, Op1RegL).addImm(CLow);
2330 BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
2331 BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1RegL); // AL*BL
2333 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX); // AL*BL
2334 BuildMI(BB, IP, X86::MOV32rr, 1,
2335 OverflowReg).addReg(X86::EDX); // AL*BL >> 32
2336 }
2338 unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
2339 doMultiplyConst(&BB, IP, AHBLReg, Type::UIntTy, Op0Reg+1, CLow);
2341 unsigned AHBLplusOverflowReg;
2342 if (OverflowReg) {
2343 AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
2344 BuildMI(BB, IP, X86::ADD32rr, 2, // AH*BL+(AL*BL >> 32)
2345 AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
2346 } else {
2347 AHBLplusOverflowReg = AHBLReg;
2348 }
2350 if (CHi == 0) {
2351 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(AHBLplusOverflowReg);
2352 } else {
2353 unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
2354 doMultiplyConst(&BB, IP, ALBHReg, Type::UIntTy, Op0Reg, CHi);
2356 BuildMI(BB, IP, X86::ADD32rr, 2, // AL*BH + AH*BL + (AL*BL >> 32)
2357 DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
2358 }
2359 return;
2360 }
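// Note: a 64x64->64 multiply only needs three 32-bit multiplies: lo*lo (whose
// high half arrives in EDX), hi*lo, and lo*hi. The hi*hi product would only
// affect bits 64 and up, which are discarded.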
2362 // General 64x64 multiply
2364 unsigned Op1Reg = getReg(Op1, &BB, IP);
2365 // Multiply the two low parts... capturing carry into EDX
2366 BuildMI(BB, IP, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
2367 BuildMI(BB, IP, X86::MUL32r, 1).addReg(Op1Reg); // AL*BL
2369 unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
2370 BuildMI(BB, IP, X86::MOV32rr, 1, DestReg).addReg(X86::EAX); // AL*BL
2371 BuildMI(BB, IP, X86::MOV32rr, 1,
2372 OverflowReg).addReg(X86::EDX); // AL*BL >> 32
2374 unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
2375 BuildMI(BB, IP, X86::IMUL32rr, 2,
2376 AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);
2378 unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
2379 BuildMI(BB, IP, X86::ADD32rr, 2, // AH*BL+(AL*BL >> 32)
2380 AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
2382 unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
2383 BuildMI(BB, IP, X86::IMUL32rr, 2,
2384 ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);
2386 BuildMI(BB, IP, X86::ADD32rr, 2, // AL*BH + AH*BL + (AL*BL >> 32)
2387 DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
2388 }
2391 /// visitDivRem - Handle division and remainder instructions... these
2392 /// instructions both require the same instructions to be generated, they just
2393 /// select the result from a different register. Note that both of these
2394 /// instructions work differently for signed and unsigned operands.
2396 void ISel::visitDivRem(BinaryOperator &I) {
2397 unsigned ResultReg = getReg(I);
2398 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2400 // Fold loads into floating point divides.
2401 if (getClass(Op0->getType()) == cFP) {
2402 if (LoadInst *LI = dyn_cast<LoadInst>(Op1))
2403 if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
2404 const Type *Ty = Op0->getType();
2405 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2406 unsigned Opcode = Ty == Type::FloatTy ? X86::FDIV32m : X86::FDIV64m;
2408 unsigned BaseReg, Scale, IndexReg, Disp;
2409 getAddressingMode(LI->getOperand(0), BaseReg,
2410 Scale, IndexReg, Disp);
2412 unsigned Op0r = getReg(Op0);
2413 addFullAddress(BuildMI(BB, Opcode, 2, ResultReg).addReg(Op0r),
2414 BaseReg, Scale, IndexReg, Disp);
2415 return;
2416 }
2418 if (LoadInst *LI = dyn_cast<LoadInst>(Op0))
2419 if (isSafeToFoldLoadIntoInstruction(*LI, I)) {
2420 const Type *Ty = Op0->getType();
2421 assert((Ty == Type::FloatTy || Ty == Type::DoubleTy) && "Unknown FP type!");
2422 unsigned Opcode = Ty == Type::FloatTy ? X86::FDIVR32m : X86::FDIVR64m;
2424 unsigned BaseReg, Scale, IndexReg, Disp;
2425 getAddressingMode(LI->getOperand(0), BaseReg,
2426 Scale, IndexReg, Disp);
2428 unsigned Op1r = getReg(Op1);
2429 addFullAddress(BuildMI(BB, Opcode, 2, ResultReg).addReg(Op1r),
2430 BaseReg, Scale, IndexReg, Disp);
2431 return;
2432 }
2433 }
2436 MachineBasicBlock::iterator IP = BB->end();
2437 emitDivRemOperation(BB, IP, Op0, Op1,
2438 I.getOpcode() == Instruction::Div, ResultReg);
2439 }
2441 void ISel::emitDivRemOperation(MachineBasicBlock *BB,
2442 MachineBasicBlock::iterator IP,
2443 Value *Op0, Value *Op1, bool isDiv,
2444 unsigned ResultReg) {
2445 const Type *Ty = Op0->getType();
2446 unsigned Class = getClass(Ty);
2447 switch (Class) {
2448 case cFP: // Floating point divide
2449 if (isDiv) {
2450 emitBinaryFPOperation(BB, IP, Op0, Op1, 3, ResultReg);
2451 return;
2452 } else { // Floating point remainder...
2453 unsigned Op0Reg = getReg(Op0, BB, IP);
2454 unsigned Op1Reg = getReg(Op1, BB, IP);
2455 MachineInstr *TheCall =
2456 BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("fmod", true);
2457 std::vector<ValueRecord> Args;
2458 Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy));
2459 Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy));
2460 doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args);
2461 }
2462 return;
2463 case cLong: {
2464 static const char *FnName[] =
2465 { "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" };
2466 unsigned Op0Reg = getReg(Op0, BB, IP);
2467 unsigned Op1Reg = getReg(Op1, BB, IP);
2468 unsigned NameIdx = Ty->isUnsigned()*2 + isDiv;
2469 MachineInstr *TheCall =
2470 BuildMI(X86::CALLpcrel32, 1).addExternalSymbol(FnName[NameIdx], true);
2472 std::vector<ValueRecord> Args;
2473 Args.push_back(ValueRecord(Op0Reg, Type::LongTy));
2474 Args.push_back(ValueRecord(Op1Reg, Type::LongTy));
2475 doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args);
2476 return;
2477 }
2478 case cByte: case cShort: case cInt:
2479 break; // Small integrals, handled below...
2480 default: assert(0 && "Unknown class!");
2481 }
2483 static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
2484 static const unsigned MovOpcode[]={ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr };
2485 static const unsigned SarOpcode[]={ X86::SAR8ri, X86::SAR16ri, X86::SAR32ri };
2486 static const unsigned ClrOpcode[]={ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri };
2487 static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
2489 static const unsigned DivOpcode[][4] = {
2490 { X86::DIV8r , X86::DIV16r , X86::DIV32r , 0 }, // Unsigned division
2491 { X86::IDIV8r, X86::IDIV16r, X86::IDIV32r, 0 }, // Signed division
2492 };
2494 bool isSigned = Ty->isSigned();
2495 unsigned Reg = Regs[Class];
2496 unsigned ExtReg = ExtRegs[Class];
2498 // Put the first operand into one of the A registers...
2499 unsigned Op0Reg = getReg(Op0, BB, IP);
2500 unsigned Op1Reg = getReg(Op1, BB, IP);
2501 BuildMI(*BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);
2503 if (isSigned) {
2504 // Emit a sign extension instruction...
2505 unsigned ShiftResult = makeAnotherReg(Op0->getType());
2506 BuildMI(*BB, IP, SarOpcode[Class], 2,ShiftResult).addReg(Op0Reg).addImm(31);
2507 BuildMI(*BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
2508 } else {
2509 // If unsigned, emit a zeroing instruction... (reg = 0)
2510 BuildMI(*BB, IP, ClrOpcode[Class], 2, ExtReg).addImm(0);
2511 }
2513 // Emit the appropriate divide or remainder instruction...
2514 BuildMI(*BB, IP, DivOpcode[isSigned][Class], 1).addReg(Op1Reg);
2516 // Figure out which register we want to pick the result out of...
2517 unsigned DestReg = isDiv ? Reg : ExtReg;
2519 // Put the result into the destination register...
2520 BuildMI(*BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
2521 }
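// Note: x86 DIV/IDIV take the dividend in AX, DX:AX, or EDX:EAX and leave the
// quotient in AL/AX/EAX and the remainder in AH/DX/EDX; that is why the result
// is copied out of Reg for division and out of ExtReg for remainder.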
2524 /// Shift instructions: 'shl', 'sar', 'shr' - Some special cases here
2525 /// for constant immediate shift values, and for constant immediate
2526 /// shift values equal to 1. Even the general case is sort of special,
2527 /// because the shift amount has to be in CL, not just any old register.
2529 void ISel::visitShiftInst(ShiftInst &I) {
2530 MachineBasicBlock::iterator IP = BB->end ();
2531 emitShiftOperation (BB, IP, I.getOperand (0), I.getOperand (1),
2532 I.getOpcode () == Instruction::Shl, I.getType (),
2533 getReg (I));
2534 }
2536 /// emitShiftOperation - Common code shared between visitShiftInst and
2537 /// constant expression support.
2538 void ISel::emitShiftOperation(MachineBasicBlock *MBB,
2539 MachineBasicBlock::iterator IP,
2540 Value *Op, Value *ShiftAmount, bool isLeftShift,
2541 const Type *ResultTy, unsigned DestReg) {
2542 unsigned SrcReg = getReg (Op, MBB, IP);
2543 bool isSigned = ResultTy->isSigned ();
2544 unsigned Class = getClass (ResultTy);
2546 static const unsigned ConstantOperand[][4] = {
2547 { X86::SHR8ri, X86::SHR16ri, X86::SHR32ri, X86::SHRD32rri8 }, // SHR
2548 { X86::SAR8ri, X86::SAR16ri, X86::SAR32ri, X86::SHRD32rri8 }, // SAR
2549 { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SHL
2550 { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SAL = SHL
2551 };
2553 static const unsigned NonConstantOperand[][4] = {
2554 { X86::SHR8rCL, X86::SHR16rCL, X86::SHR32rCL }, // SHR
2555 { X86::SAR8rCL, X86::SAR16rCL, X86::SAR32rCL }, // SAR
2556 { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SHL
2557 { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SAL = SHL
2558 };
2560 // Longs, as usual, are handled specially...
2561 if (Class == cLong) {
2562 // If we have a constant shift, we can generate much more efficient code
2563 // than otherwise...
2565 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
2566 unsigned Amount = CUI->getValue();
2567 if (Amount < 32) {
2568 const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
2569 if (isLeftShift) {
2570 BuildMI(*MBB, IP, Opc[3], 3,
2571 DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addImm(Amount);
2572 BuildMI(*MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addImm(Amount);
2573 } else {
2574 BuildMI(*MBB, IP, Opc[3], 3,
2575 DestReg).addReg(SrcReg ).addReg(SrcReg+1).addImm(Amount);
2576 BuildMI(*MBB, IP, Opc[2],2,DestReg+1).addReg(SrcReg+1).addImm(Amount);
2577 }
2578 } else { // Shifting more than 32 bits
2579 Amount -= 32;
2580 if (isLeftShift) {
2581 if (Amount != 0) {
2582 BuildMI(*MBB, IP, X86::SHL32ri, 2,
2583 DestReg + 1).addReg(SrcReg).addImm(Amount);
2584 } else {
2585 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg);
2586 }
2587 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
2588 } else {
2589 if (Amount != 0) {
2590 BuildMI(*MBB, IP, isSigned ? X86::SAR32ri : X86::SHR32ri, 2,
2591 DestReg).addReg(SrcReg+1).addImm(Amount);
2592 } else {
2593 BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg+1);
2594 }
2595 BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
2596 }
2597 }
2598 } else {
2599 unsigned TmpReg = makeAnotherReg(Type::IntTy);
2601 if (!isLeftShift && isSigned) {
2602 // If this is a SHR of a Long, then we need to do funny sign extension
2603 // stuff. TmpReg gets the value to use as the high-part if we are
2604 // shifting more than 32 bits.
2605 BuildMI(*MBB, IP, X86::SAR32ri, 2, TmpReg).addReg(SrcReg).addImm(31);
2606 } else {
2607 // Other shifts use a fixed zero value if the shift is more than 32
2608 // bits.
2609 BuildMI(*MBB, IP, X86::MOV32ri, 1, TmpReg).addImm(0);
2610 }
2612 // Initialize CL with the shift amount...
2613 unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
2614 BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
2616 unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
2617 unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
2618 if (isLeftShift) {
2619 // TmpReg2 = shld inHi, inLo
2620 BuildMI(*MBB, IP, X86::SHLD32rrCL,2,TmpReg2).addReg(SrcReg+1)
2621 .addReg(SrcReg);
2622 // TmpReg3 = shl inLo, CL
2623 BuildMI(*MBB, IP, X86::SHL32rCL, 1, TmpReg3).addReg(SrcReg);
2625 // Set the flags to indicate whether the shift was by more than 32 bits.
2626 BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
2628 // DestHi = (>32) ? TmpReg3 : TmpReg2;
2629 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
2630 DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
2631 // DestLo = (>32) ? TmpReg : TmpReg3;
2632 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
2633 DestReg).addReg(TmpReg3).addReg(TmpReg);
2634 } else {
2635 // TmpReg2 = shrd inLo, inHi
2636 BuildMI(*MBB, IP, X86::SHRD32rrCL,2,TmpReg2).addReg(SrcReg)
2637 .addReg(SrcReg+1);
2638 // TmpReg3 = s[ah]r inHi, CL
2639 BuildMI(*MBB, IP, isSigned ? X86::SAR32rCL : X86::SHR32rCL, 1, TmpReg3)
2640 .addReg(SrcReg+1);
2642 // Set the flags to indicate whether the shift was by more than 32 bits.
2643 BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
2645 // DestLo = (>32) ? TmpReg3 : TmpReg2;
2646 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
2647 DestReg).addReg(TmpReg2).addReg(TmpReg3);
2649 // DestHi = (>32) ? TmpReg : TmpReg3;
2650 BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
2651 DestReg+1).addReg(TmpReg3).addReg(TmpReg);
2652 }
2653 }
2654 return;
2655 }
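// Note: the rCL shift and SHxD instructions only honor CL modulo 32, so the
// TEST of bit 5 of CL plus the CMOVNE pairs above select the corrected halves
// whenever the dynamic shift amount is in the range 32-63.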
2657 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
2658 // The shift amount is constant, guaranteed to be a ubyte. Get its value.
2659 assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");
2661 const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
2662 BuildMI(*MBB, IP, Opc[Class], 2,
2663 DestReg).addReg(SrcReg).addImm(CUI->getValue());
2664 } else { // The shift amount is non-constant.
2665 unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP);
2666 BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
2668 const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
2669 BuildMI(*MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
2670 }
2671 }
2674 void ISel::getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
2675 unsigned &IndexReg, unsigned &Disp) {
2676 BaseReg = 0; Scale = 1; IndexReg = 0; Disp = 0;
2677 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
2678 if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
2679 BaseReg, Scale, IndexReg, Disp))
2680 return;
2681 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
2682 if (CE->getOpcode() == Instruction::GetElementPtr)
2683 if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
2684 BaseReg, Scale, IndexReg, Disp))
2685 return;
2686 }
2688 // If it's not foldable, reset addr mode.
2689 BaseReg = getReg(Addr);
2690 Scale = 1; IndexReg = 0; Disp = 0;
2691 }
2694 /// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
2695 /// instruction. The load and store instructions are the only place where we
2696 /// need to worry about the memory layout of the target machine.
2698 void ISel::visitLoadInst(LoadInst &I) {
2699 // Check to see if this load instruction is going to be folded into a binary
2700 // instruction, like add. If so, we don't want to emit it. Wouldn't a real
2701 // pattern matching instruction selector be nice?
2702 unsigned Class = getClassB(I.getType());
2703 if (I.hasOneUse()) {
2704 Instruction *User = cast<Instruction>(I.use_back());
2705 switch (User->getOpcode()) {
2706 case Instruction::Cast:
2707 // If this is a cast from a signed-integer type to a floating point type,
2708 // fold the cast here.
2709 if (getClass(User->getType()) == cFP &&
2710 (I.getType() == Type::ShortTy || I.getType() == Type::IntTy ||
2711 I.getType() == Type::LongTy)) {
2712 unsigned DestReg = getReg(User);
2713 static const unsigned Opcode[] = {
2714 0/*BYTE*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m
2715 };
2716 unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
2717 getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
2718 addFullAddress(BuildMI(BB, Opcode[Class], 5, DestReg),
2719 BaseReg, Scale, IndexReg, Disp);
2720 return;
2721 } else {
2722 User = 0;
2723 }
2724 break;
2726 case Instruction::Add:
2727 case Instruction::Sub:
2728 case Instruction::And:
2729 case Instruction::Or:
2730 case Instruction::Xor:
2731 if (Class == cLong) User = 0;
2732 break;
2733 case Instruction::Mul:
2734 case Instruction::Div:
2735 if (Class != cFP) User = 0;
2736 break; // Folding only implemented for floating point.
2737 default: User = 0; break;
2738 }
2740 if (User) {
2741 // Okay, we found a user. If the load is the first operand and there is
2742 // no second operand load, reverse the operand ordering. Note that this
2743 // can fail for a subtract (ie, no change will be made).
2744 if (!isa<LoadInst>(User->getOperand(1)))
2745 cast<BinaryOperator>(User)->swapOperands();
2747 // Okay, now that everything is set up, if this load is used by the second
2748 // operand, and if there are no instructions that invalidate the load
2749 // before the binary operator, eliminate the load.
2750 if (User->getOperand(1) == &I &&
2751 isSafeToFoldLoadIntoInstruction(I, *User))
2752 return; // Eliminate the load!
2754 // If this is a floating point sub or div, we won't be able to swap the
2755 // operands, but we will still be able to eliminate the load.
2756 if (Class == cFP && User->getOperand(0) == &I &&
2757 !isa<LoadInst>(User->getOperand(1)) &&
2758 (User->getOpcode() == Instruction::Sub ||
2759 User->getOpcode() == Instruction::Div) &&
2760 isSafeToFoldLoadIntoInstruction(I, *User))
2761 return; // Eliminate the load!
2762 }
2763 }
2765 unsigned DestReg = getReg(I);
2766 unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
2767 getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
2769 if (Class == cLong) {
2770 addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg),
2771 BaseReg, Scale, IndexReg, Disp);
2772 addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1),
2773 BaseReg, Scale, IndexReg, Disp+4);
2774 return;
2775 }
2777 static const unsigned Opcodes[] = {
2778 X86::MOV8rm, X86::MOV16rm, X86::MOV32rm, X86::FLD32m
2779 };
2780 unsigned Opcode = Opcodes[Class];
2781 if (I.getType() == Type::DoubleTy) Opcode = X86::FLD64m;
2782 addFullAddress(BuildMI(BB, Opcode, 4, DestReg),
2783 BaseReg, Scale, IndexReg, Disp);
2784 }
2786 /// visitStoreInst - Implement LLVM store instructions in terms of the x86 'mov'
2787 /// instruction.
2789 void ISel::visitStoreInst(StoreInst &I) {
2790 unsigned BaseReg, Scale, IndexReg, Disp;
2791 getAddressingMode(I.getOperand(1), BaseReg, Scale, IndexReg, Disp);
2793 const Type *ValTy = I.getOperand(0)->getType();
2794 unsigned Class = getClassB(ValTy);
2796 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(0))) {
2797 uint64_t Val = CI->getRawValue();
2798 if (Class == cLong) {
2799 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
2800 BaseReg, Scale, IndexReg, Disp).addImm(Val & ~0U);
2801 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
2802 BaseReg, Scale, IndexReg, Disp+4).addImm(Val>>32);
2803 } else {
2804 static const unsigned Opcodes[] = {
2805 X86::MOV8mi, X86::MOV16mi, X86::MOV32mi
2806 };
2807 unsigned Opcode = Opcodes[Class];
2808 addFullAddress(BuildMI(BB, Opcode, 5),
2809 BaseReg, Scale, IndexReg, Disp).addImm(Val);
2810 }
2811 } else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
2812 addFullAddress(BuildMI(BB, X86::MOV8mi, 5),
2813 BaseReg, Scale, IndexReg, Disp).addImm(CB->getValue());
2814 } else {
2815 if (Class == cLong) {
2816 unsigned ValReg = getReg(I.getOperand(0));
2817 addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
2818 BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
2819 addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
2820 BaseReg, Scale, IndexReg, Disp+4).addReg(ValReg+1);
2821 } else {
2822 unsigned ValReg = getReg(I.getOperand(0));
2823 static const unsigned Opcodes[] = {
2824 X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
2825 };
2826 unsigned Opcode = Opcodes[Class];
2827 if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;
2828 addFullAddress(BuildMI(BB, Opcode, 1+4),
2829 BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
2830 }
2831 }
2832 }
2835 /// visitCastInst - Here we have various kinds of copying with or without sign
2836 /// extension going on.
2838 void ISel::visitCastInst(CastInst &CI) {
2839 Value *Op = CI.getOperand(0);
2841 unsigned SrcClass = getClassB(Op->getType());
2842 unsigned DestClass = getClassB(CI.getType());
2843 // Noop casts are not emitted: getReg will return the source operand as the
2844 // register to use for any uses of the noop cast.
2845 if (DestClass == SrcClass)
2846 return;
2848 // If this is a cast from a 32-bit integer to a Long type, and the only uses
2849 // of the cast are GEP instructions, then the cast does not need to be
2850 // generated explicitly, it will be folded into the GEP.
2851 if (DestClass == cLong && SrcClass == cInt) {
2852 bool AllUsesAreGEPs = true;
2853 for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I)
2854 if (!isa<GetElementPtrInst>(*I)) {
2855 AllUsesAreGEPs = false;
2856 break;
2857 }
2859 // No need to codegen this cast if all users are getelementptr instrs...
2860 if (AllUsesAreGEPs) return;
2861 }
2863 // If this cast converts a load from a short, int, or long integer to a FP
2864 // value, we will have folded this cast away.
2865 if (DestClass == cFP && isa<LoadInst>(Op) && Op->hasOneUse() &&
2866 (Op->getType() == Type::ShortTy || Op->getType() == Type::IntTy ||
2867 Op->getType() == Type::LongTy))
2868 return;
2871 unsigned DestReg = getReg(CI);
2872 MachineBasicBlock::iterator MI = BB->end();
2873 emitCastOperation(BB, MI, Op, CI.getType(), DestReg);
2874 }
2876 /// emitCastOperation - Common code shared between visitCastInst and constant
2877 /// expression cast support.
2879 void ISel::emitCastOperation(MachineBasicBlock *BB,
2880 MachineBasicBlock::iterator IP,
2881 Value *Src, const Type *DestTy,
2882 unsigned DestReg) {
2883 const Type *SrcTy = Src->getType();
2884 unsigned SrcClass = getClassB(SrcTy);
2885 unsigned DestClass = getClassB(DestTy);
2886 unsigned SrcReg = getReg(Src, BB, IP);
2888 // Implement casts to bool by using compare on the operand followed by set if
2889 // not zero on the result.
2890 if (DestTy == Type::BoolTy) {
2891 switch (SrcClass) {
2892 case cByte:
2893 BuildMI(*BB, IP, X86::TEST8rr, 2).addReg(SrcReg).addReg(SrcReg);
2894 break;
2895 case cShort:
2896 BuildMI(*BB, IP, X86::TEST16rr, 2).addReg(SrcReg).addReg(SrcReg);
2897 break;
2898 case cInt:
2899 BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg).addReg(SrcReg);
2900 break;
2901 case cLong: {
2902 unsigned TmpReg = makeAnotherReg(Type::IntTy);
2903 BuildMI(*BB, IP, X86::OR32rr, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
2904 break;
2905 }
2906 case cFP:
2907 BuildMI(*BB, IP, X86::FTST, 1).addReg(SrcReg);
2908 BuildMI(*BB, IP, X86::FNSTSW8r, 0);
2909 BuildMI(*BB, IP, X86::SAHF, 1);
2910 break;
2911 }
2913 // If the zero flag is not set, then the value is true, set the byte to
2914 // true.
2915 BuildMI(*BB, IP, X86::SETNEr, 1, DestReg);
2916 return;
2917 }
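// Note: this lowers 'cast X to bool' as X != 0. The TEST/OR/FTST sequences
// above only set the flags, and SETNE then materializes a 1 for any non-zero
// source value.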
2919 static const unsigned RegRegMove[] = {
2920 X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
2921 };
2923 // Implement casts between values of the same type class (as determined by
2924 // getClass) by using a register-to-register move.
2925 if (SrcClass == DestClass) {
2926 if (SrcClass <= cInt || (SrcClass == cFP && SrcTy == DestTy)) {
2927 BuildMI(*BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
2928 } else if (SrcClass == cFP) {
2929 if (SrcTy == Type::FloatTy) { // float -> double
2930 assert(DestTy == Type::DoubleTy && "Unknown cFP member!");
2931 BuildMI(*BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
2932 } else { // double -> float
2933 assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy &&
2934 "Unknown cFP member!");
2935 // Truncate from double to float by storing to memory as a 32-bit float,
2936 // then reading it back.
2937 unsigned FltAlign = TM.getTargetData().getFloatAlignment();
2938 int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
2939 addFrameReference(BuildMI(*BB, IP, X86::FST32m, 5), FrameIdx).addReg(SrcReg);
2940 addFrameReference(BuildMI(*BB, IP, X86::FLD32m, 5, DestReg), FrameIdx);
2941 }
2942 } else if (SrcClass == cLong) {
2943 BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
2944 BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg+1);
2945 } else {
2946 assert(0 && "Cannot handle this type of cast instruction!");
2947 }
2948 return;
2949 }
2952 // Handle cast of SMALLER int to LARGER int using a move with sign extension
2953 // or zero extension, depending on whether the source type was signed.
2954 if (SrcClass <= cInt && (DestClass <= cInt || DestClass == cLong) &&
2955 SrcClass < DestClass) {
2956 bool isLong = DestClass == cLong;
2957 if (isLong) DestClass = cInt;
2959 static const unsigned Opc[][4] = {
2960 { X86::MOVSX16rr8, X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOV32rr }, // s
2961 { X86::MOVZX16rr8, X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOV32rr } // u
2962 };
2964 bool isUnsigned = SrcTy->isUnsigned();
2965 BuildMI(*BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
2966 DestReg).addReg(SrcReg);
2968 if (isLong) { // Handle upper 32 bits as appropriate...
2969 if (isUnsigned) // Zero out top bits...
2970 BuildMI(*BB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
2971 else // Sign extend bottom half...
2972 BuildMI(*BB, IP, X86::SAR32ri, 2, DestReg+1).addReg(DestReg).addImm(31);
2973 }
2974 return;
2975 }
2977 // Special case long -> int ...
2978 if (SrcClass == cLong && DestClass == cInt) {
2979 BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
2980 return;
2981 }
2983 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by a
2984 // move out of AX or AL.
2985 if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt
2986 && SrcClass > DestClass) {
2987 static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX, 0, X86::EAX };
2988 BuildMI(*BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
2989 BuildMI(*BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
2990 return;
2991 }
2993 // Handle casts from integer to floating point now...
2994 if (DestClass == cFP) {
2995 // Promote the integer to a type supported by FLD. We do this because there
2996 // are no unsigned FLD instructions, so we must promote an unsigned value to
2997 // a larger signed value, then use FLD on the larger value.
2999 const Type *PromoteType = 0;
3000 unsigned PromoteOpcode = 0;
3001 unsigned RealDestReg = DestReg;
3002 switch (SrcTy->getPrimitiveID()) {
3003 case Type::BoolTyID:
3004 case Type::SByteTyID:
3005 // We don't have the facilities for directly loading byte sized data from
3006 // memory (even signed). Promote it to 16 bits.
3007 PromoteType = Type::ShortTy;
3008 PromoteOpcode = X86::MOVSX16rr8;
3009 break;
3010 case Type::UByteTyID:
3011 PromoteType = Type::ShortTy;
3012 PromoteOpcode = X86::MOVZX16rr8;
3013 break;
3014 case Type::UShortTyID:
3015 PromoteType = Type::IntTy;
3016 PromoteOpcode = X86::MOVZX32rr16;
3017 break;
3018 case Type::UIntTyID: {
3019 // Make a 64 bit temporary... and zero out the top of it...
3020 unsigned TmpReg = makeAnotherReg(Type::LongTy);
3021 BuildMI(*BB, IP, X86::MOV32rr, 1, TmpReg).addReg(SrcReg);
3022 BuildMI(*BB, IP, X86::MOV32ri, 1, TmpReg+1).addImm(0);
3023 SrcTy = Type::LongTy;
3024 SrcClass = cLong;
3025 SrcReg = TmpReg;
3026 break;
3027 }
3028 case Type::ULongTyID:
3029 // Don't fild into the real destination.
3030 DestReg = makeAnotherReg(Type::DoubleTy);
3031 break;
3032 default: // No promotion needed...
3033 break;
3034 }
3036 if (PromoteType) {
3037 unsigned TmpReg = makeAnotherReg(PromoteType);
3038 BuildMI(*BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
3039 SrcTy = PromoteType;
3040 SrcClass = getClass(PromoteType);
3041 SrcReg = TmpReg;
3042 }
3044 // Spill the integer to memory and reload it from there...
3045 int FrameIdx =
3046 F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());
3048 if (SrcClass == cLong) {
3049 addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
3050 FrameIdx).addReg(SrcReg);
3051 addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
3052 FrameIdx, 4).addReg(SrcReg+1);
3053 } else {
3054 static const unsigned Op1[] = { X86::MOV8mr, X86::MOV16mr, X86::MOV32mr };
3055 addFrameReference(BuildMI(*BB, IP, Op1[SrcClass], 5),
3056 FrameIdx).addReg(SrcReg);
3057 }
3059 static const unsigned Op2[] =
3060 { 0/*byte*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m };
3061 addFrameReference(BuildMI(*BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);
3063 // We need special handling for unsigned 64-bit integer sources. If the
3064 // input number has the "sign bit" set, then we loaded it incorrectly as a
3065 // negative 64-bit number. In this case, add an offset value.
3066 if (SrcTy == Type::ULongTy) {
3067 // Emit a test instruction to see if the dynamic input value was signed.
3068 BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg+1).addReg(SrcReg+1);
3070 // If the sign bit is set, get a pointer to an offset, otherwise get a
3071 // pointer to a zero.
3072 MachineConstantPool *CP = F->getConstantPool();
3073 unsigned Zero = makeAnotherReg(Type::IntTy);
3074 Constant *Null = Constant::getNullValue(Type::UIntTy);
3075 addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Zero),
3076 CP->getConstantPoolIndex(Null));
3077 unsigned Offset = makeAnotherReg(Type::IntTy);
3078 Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
3080 addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Offset),
3081 CP->getConstantPoolIndex(OffsetCst));
3082 unsigned Addr = makeAnotherReg(Type::IntTy);
3083 BuildMI(*BB, IP, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);
3085 // Load the constant for an add. FIXME: this could make an 'fadd' that
3086 // reads directly from memory, but we don't support these yet.
3087 unsigned ConstReg = makeAnotherReg(Type::DoubleTy);
3088 addDirectMem(BuildMI(*BB, IP, X86::FLD32m, 4, ConstReg), Addr);
3090 BuildMI(*BB, IP, X86::FpADD, 2, RealDestReg)
3091 .addReg(ConstReg).addReg(DestReg);
3092 }
3094 return;
3095 }
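// Note: 0x5f800000 is the IEEE-754 single-precision bit pattern for 2^64.
// If the unsigned source had its top bit set, FILD loaded it as that value
// minus 2^64, so conditionally adding 2^64 (selected via CMOVS) restores it.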
3097 // Handle casts from floating point to integer now...
3098 if (SrcClass == cFP) {
3099 // Change the floating point control register to use "round towards zero"
3100 // mode when truncating to an integer value.
3102 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
3103 addFrameReference(BuildMI(*BB, IP, X86::FNSTCW16m, 4), CWFrameIdx);
3105 // Load the old value of the high byte of the control word...
3106 unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
3107 addFrameReference(BuildMI(*BB, IP, X86::MOV8rm, 4, HighPartOfCW),
3108 CWFrameIdx, 1);
3110 // Set the high part to be round to zero...
3111 addFrameReference(BuildMI(*BB, IP, X86::MOV8mi, 5),
3112 CWFrameIdx, 1).addImm(12);
3114 // Reload the modified control word now...
3115 addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
3117 // Restore the memory image of control word to original value
3118 addFrameReference(BuildMI(*BB, IP, X86::MOV8mr, 5),
3119 CWFrameIdx, 1).addReg(HighPartOfCW);
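// Note: the x87 rounding-control field lives in bits 10-11 of the control
// word; writing 12 (binary 1100) into its high byte selects round-toward-zero,
// matching C's truncating FP-to-integer semantics.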
3121 // We don't have the facilities for directly storing byte sized data to
3122 // memory. Promote it to 16 bits. We also must promote unsigned values to
3123 // larger classes because we only have signed FP stores.
3124 unsigned StoreClass = DestClass;
3125 const Type *StoreTy = DestTy;
3126 if (StoreClass == cByte || DestTy->isUnsigned())
3127 switch (StoreClass) {
3128 case cByte: StoreTy = Type::ShortTy; StoreClass = cShort; break;
3129 case cShort: StoreTy = Type::IntTy; StoreClass = cInt; break;
3130 case cInt: StoreTy = Type::LongTy; StoreClass = cLong; break;
3131 // The following treatment of cLong may not be perfectly right,
3132 // but it survives chains of casts of the form
3133 // double->ulong->double.
3134 case cLong: StoreTy = Type::LongTy; StoreClass = cLong; break;
3135 default: assert(0 && "Unknown store class!");
3136 }
3138 // Spill the integer to memory and reload it from there...
3139 int FrameIdx =
3140 F->getFrameInfo()->CreateStackObject(StoreTy, TM.getTargetData());
3142 static const unsigned Op1[] =
3143 { 0, X86::FIST16m, X86::FIST32m, 0, X86::FISTP64m };
3144 addFrameReference(BuildMI(*BB, IP, Op1[StoreClass], 5),
3145 FrameIdx).addReg(SrcReg);
3147 if (DestClass == cLong) {
3148 addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg), FrameIdx);
3149 addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg+1),
3150 FrameIdx, 4);
3151 } else {
3152 static const unsigned Op2[] = { X86::MOV8rm, X86::MOV16rm, X86::MOV32rm };
3153 addFrameReference(BuildMI(*BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
3154 }
3156 // Reload the original control word now...
3157 addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
3158 return;
3159 }
3161 // Anything we haven't handled already, we can't (yet) handle at all.
3162 assert(0 && "Unhandled cast instruction!");
3163 }
3166 /// visitVANextInst - Implement the va_next instruction...
3168 void ISel::visitVANextInst(VANextInst &I) {
3169 unsigned VAList = getReg(I.getOperand(0));
3170 unsigned DestReg = getReg(I);
3172 unsigned Size;
3173 switch (I.getArgType()->getPrimitiveID()) {
3174 default:
3175 std::cerr << I;
3176 assert(0 && "Error: bad type for va_next instruction!");
3177 return;
3178 case Type::PointerTyID:
3179 case Type::UIntTyID:
3180 case Type::IntTyID:
3181 Size = 4;
3182 break;
3183 case Type::ULongTyID:
3184 case Type::LongTyID:
3185 case Type::DoubleTyID:
3186 Size = 8;
3187 break;
3188 }
3190 // Increment the VAList pointer...
3191 BuildMI(BB, X86::ADD32ri, 2, DestReg).addReg(VAList).addImm(Size);
3192 }
3194 void ISel::visitVAArgInst(VAArgInst &I) {
3195 unsigned VAList = getReg(I.getOperand(0));
3196 unsigned DestReg = getReg(I);
3198 switch (I.getType()->getPrimitiveID()) {
3199 default:
3200 std::cerr << I;
3201 assert(0 && "Error: bad type for va_arg instruction!");
3202 return;
3203 case Type::PointerTyID:
3204 case Type::UIntTyID:
3205 case Type::IntTyID:
3206 addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
3207 break;
3208 case Type::ULongTyID:
3209 case Type::LongTyID:
3210 addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
3211 addRegOffset(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), VAList, 4);
3212 break;
3213 case Type::DoubleTyID:
3214 addDirectMem(BuildMI(BB, X86::FLD64m, 4, DestReg), VAList);
3215 break;
3216 }
3217 }
3219 /// visitGetElementPtrInst - instruction-select GEP instructions
3221 void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
3222 // If this GEP instruction will be folded into all of its users, we don't need
3223 // to explicitly calculate it!
3224 unsigned A, B, C, D;
3225 if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), A,B,C,D)) {
3226 // Check all of the users of the instruction to see if they are loads and
3227 // stores.
3228 bool AllWillFold = true;
3229 for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI)
3230 if (cast<Instruction>(*UI)->getOpcode() != Instruction::Load)
3231 if (cast<Instruction>(*UI)->getOpcode() != Instruction::Store ||
3232 cast<Instruction>(*UI)->getOperand(0) == &I) {
3233 AllWillFold = false;
3234 break;
3235 }
3237 // If the instruction is foldable, and will be folded into all users, don't
3238 // emit it!
3239 if (AllWillFold) return;
3240 }
3242 unsigned outputReg = getReg(I);
3243 emitGEPOperation(BB, BB->end(), I.getOperand(0),
3244 I.op_begin()+1, I.op_end(), outputReg);
3245 }
3247 /// getGEPIndex - Inspect the getelementptr operands specified with GEPOps and
3248 /// GEPTypes (the derived types being stepped through at each level). On return
3249 /// from this function, if some indexes of the instruction are representable as
3250 /// an X86 lea instruction, the machine operands are put into the Ops
3251 /// instruction and the consumed indexes are popped from the GEPOps/GEPTypes
3252 /// lists. Otherwise, GEPOps.size() is returned. If this returns an
3253 /// addressing mode that only partially consumes the input, the BaseReg input of
3254 /// the addressing mode must be left free.
3256 /// Note that there is one fewer entry in GEPTypes than there is in GEPOps.
3258 void ISel::getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
3259 std::vector<Value*> &GEPOps,
3260 std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
3261 unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
3262 const TargetData &TD = TM.getTargetData();
3264 // Clear out the state we are working with...
3265 BaseReg = 0; // No base register
3266 Scale = 1; // Unit scale
3267 IndexReg = 0; // No index register
3268 Disp = 0; // No displacement
3270 // While there are GEP indexes that can be folded into the current address,
3271 // keep processing them.
3272 while (!GEPTypes.empty()) {
3273 if (const StructType *StTy = dyn_cast<StructType>(GEPTypes.back())) {
3274 // It's a struct access. CUI is the index into the structure,
3275 // which names the field. This index must have unsigned type.
3276 const ConstantUInt *CUI = cast<ConstantUInt>(GEPOps.back());
3278 // Use the TargetData structure to pick out what the layout of the
3279 // structure is in memory. Since the structure index must be constant, we
3280 // can get its value and use it to find the right byte offset from the
3281 // StructLayout class's list of structure member offsets.
3282 Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
3283 GEPOps.pop_back(); // Consume a GEP operand
3284 GEPTypes.pop_back();
3286 // It's an array or pointer access: [ArraySize x ElementType].
3287 const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
3288 Value *idx = GEPOps.back();
3290 // idx is the index into the array. Unlike with structure
3291 // indices, we may not know its actual value at code-generation
3294 // If idx is a constant, fold it into the offset.
3295 unsigned TypeSize = TD.getTypeSize(SqTy->getElementType());
3296 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
3297 Disp += TypeSize*CSI->getValue();
3298 } else if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(idx)) {
3299 Disp += TypeSize*CUI->getValue();
3301 // If the index reg is already taken, we can't handle this index.
3302 if (IndexReg) return;

        // If this is a size that we can handle, then add the index register
        // scaled by the element size.
        switch (TypeSize) {
        case 1: case 2: case 4: case 8:
          // These are all acceptable scales on X86.
          Scale = TypeSize;
          break;
        default:
          // Otherwise, we can't handle this scale.
          return;
        }

        if (CastInst *CI = dyn_cast<CastInst>(idx))
          if (CI->getOperand(0)->getType() == Type::IntTy ||
              CI->getOperand(0)->getType() == Type::UIntTy)
            idx = CI->getOperand(0);
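
        // When MBB is null the caller (isGEPFoldable) is only asking whether
        // the fold is possible, so no code may be emitted; the dummy value 1
        // below just marks the index slot as taken.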
        IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
      }

      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();
    }
  }

  // GEPTypes is empty, which means we have a single operand left.  See if we
  // can set it as the base register.
  //
  // FIXME: When addressing modes are more powerful/correct, we could load
  // global addresses directly as 32-bit immediates.
  assert(BaseReg == 0);
  BaseReg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;  // 1 again flags a dry run.
  GEPOps.pop_back();        // Consume the last GEP operand
}

/// isGEPFoldable - Return true if the specified GEP can be completely
/// folded into the addressing mode of a load/store or lea instruction.
bool ISel::isGEPFoldable(MachineBasicBlock *MBB,
                         Value *Src, User::op_iterator IdxBegin,
                         User::op_iterator IdxEnd, unsigned &BaseReg,
                         unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
  if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
    Src = CPR->getValue();

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;                        // Operand 0 is the base pointer.
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*> GEPTypes;
  GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
                  gep_type_end(Src->getType(), IdxBegin, IdxEnd));

  MachineBasicBlock::iterator IP;
  if (MBB) IP = MBB->end();
  getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);

  // We can fold it away iff the getGEPIndex call eliminated all operands.
  return GEPOps.empty();
}
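
/// emitGEPOperation - Emit the address computation for a getelementptr that
/// was not folded into its users.  In outline: getGEPIndex repeatedly
/// consumes as many trailing indexes as fit into one x86 addressing mode,
/// each batch becomes an LEA (or a plain register copy when only a base
/// remains), and the chain continues through the BaseReg slot that
/// getGEPIndex leaves open.
///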
void ISel::emitGEPOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator IP,
                            Value *Src, User::op_iterator IdxBegin,
                            User::op_iterator IdxEnd, unsigned TargetReg) {
  const TargetData &TD = TM.getTargetData();
  if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
    Src = CPR->getValue();

  std::vector<Value*> GEPOps;
  GEPOps.resize(IdxEnd-IdxBegin+1);
  GEPOps[0] = Src;
  std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1);

  std::vector<const Type*> GEPTypes;
  GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd),
                  gep_type_end(Src->getType(), IdxBegin, IdxEnd));

  // Keep emitting instructions until we consume the entire GEP instruction.
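  // The operands are consumed from the back of GEPOps, so the address is
  // built from the last index toward the base; after each instruction is
  // emitted, IP is backed up (--IP) so the code computing the earlier
  // operands is inserted *before* it.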
  while (!GEPOps.empty()) {
    unsigned OldSize = GEPOps.size();
    unsigned BaseReg, Scale, IndexReg, Disp;
    getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);

    if (GEPOps.size() != OldSize) {
      // getGEPIndex consumed some of the input.  Build an LEA instruction here.
      unsigned NextTarget = 0;
      if (!GEPOps.empty()) {
        assert(BaseReg == 0 &&
           "getGEPIndex should have left the base register open for chaining!");
        NextTarget = BaseReg = makeAnotherReg(Type::UIntTy);
      }

      if (IndexReg == 0 && Disp == 0)  // Only a base: a plain copy suffices.
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
      else
        addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg),
                       BaseReg, Scale, IndexReg, Disp);
      --IP;                   // Insert the next instruction before this one.
      TargetReg = NextTarget;
    } else if (GEPTypes.empty()) {
      // The getGEPIndex operation didn't want to build an LEA.  Check to see
      // if all operands are consumed but the base pointer.  If so, just load
      // it into the register.
      if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps[0])) {
        BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(GV);
      } else {
        unsigned BaseReg = getReg(GEPOps[0], MBB, IP);
        BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
      }
      break;                // we are now done
    } else {
      // It's an array or pointer access: [ArraySize x ElementType].
      const SequentialType *SqTy = cast<SequentialType>(GEPTypes.back());
      Value *idx = GEPOps.back();
      GEPOps.pop_back();        // Consume a GEP operand
      GEPTypes.pop_back();

      // Many GEP instructions use a [cast (int/uint) to LongTy] as their
      // operand on X86.  Handle this case directly now...
      if (CastInst *CI = dyn_cast<CastInst>(idx))
        if (CI->getOperand(0)->getType() == Type::IntTy ||
            CI->getOperand(0)->getType() == Type::UIntTy)
          idx = CI->getOperand(0);

      // We want to add BaseReg to (idxReg * sizeof ElementType).  First, we
      // must find the size of the pointed-to type (not coincidentally, the
      // next type is the type of the elements in the array).
      const Type *ElTy = SqTy->getElementType();
      unsigned elementSize = TD.getTypeSize(ElTy);

      // If idx is a constant, we don't need to perform the multiply!
      if (ConstantInt *CSI = dyn_cast<ConstantInt>(idx)) {
        if (!CSI->isNullValue()) {
          unsigned Offset = elementSize*CSI->getRawValue();
          unsigned Reg = makeAnotherReg(Type::UIntTy);
          BuildMI(*MBB, IP, X86::ADD32ri, 2, TargetReg)
                .addReg(Reg).addImm(Offset);
          --IP;            // Insert the next instruction before this one.
          TargetReg = Reg; // Codegen the rest of the GEP into this
        }
      } else if (elementSize == 1) {
        // If the element size is 1, we don't have to multiply, just add.
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2,TargetReg).addReg(Reg).addReg(idxReg);
        --IP;            // Insert the next instruction before this one.
        TargetReg = Reg; // Codegen the rest of the GEP into this
      } else {
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned OffsetReg = makeAnotherReg(Type::UIntTy);

        // Make sure we can back the iterator up to point to the first
        // instruction emitted.
        MachineBasicBlock::iterator BeforeIt = IP;
        if (IP == MBB->begin())
          BeforeIt = MBB->end();
        else
          --BeforeIt;
        doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize);

        // Emit an ADD to add OffsetReg to the basePtr.
        unsigned Reg = makeAnotherReg(Type::UIntTy);
        BuildMI(*MBB, IP, X86::ADD32rr, 2, TargetReg)
                .addReg(Reg).addReg(OffsetReg);

        // Step back to the first instruction of the multiply so the rest of
        // the GEP is emitted before it.
        if (BeforeIt == MBB->end())
          IP = MBB->begin();
        else
          IP = ++BeforeIt;

        TargetReg = Reg; // Codegen the rest of the GEP into this
      }
    }
  }
}

/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
/// frame manager, otherwise do it the hard way.
///
void ISel::visitAllocaInst(AllocaInst &I) {
  // Find the data size of the alloca inst's getAllocatedType.
  const Type *Ty = I.getAllocatedType();
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);

  // If this is a fixed size alloca in the entry block for the function,
  // statically stack allocate the space.
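  // For instance, "%X = alloca int, uint 10" in the entry block becomes one
  // fixed 40-byte frame object (10 x 4-byte ints) addressed by a single LEA,
  // with no runtime stack-pointer adjustment.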
  if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(I.getArraySize())) {
    if (I.getParent() == I.getParent()->getParent()->begin()) {
      TySize *= CUI->getValue();   // Get total allocated size...
      unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);

      // Create a new stack object using the frame manager...
      int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
      addFrameReference(BuildMI(BB, X86::LEA32r, 5, getReg(I)), FrameIdx);
      return;
    }
  }

  // Otherwise this is a dynamically sized alloca.  Create a register to hold
  // the temporary result of multiplying the type size constant by the
  // variable amount.
  unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
  unsigned SrcReg1 = getReg(I.getArraySize());

  // TotalSizeReg = mul <numelements>, <TypeSize>
  MachineBasicBlock::iterator MBBI = BB->end();
  doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);

  // AddedSize = add <TotalSizeReg>, 15
  unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ADD32ri, 2, AddedSizeReg).addReg(TotalSizeReg).addImm(15);

  // AlignedSize = and <AddedSize>, ~15
  unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::AND32ri, 2, AlignedSize).addReg(AddedSizeReg).addImm(~15);
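
  // Together the ADD and AND round the byte count up to a multiple of 16
  // (e.g. 20 -> (20+15) & ~15 = 32), presumably to keep ESP 16-byte aligned.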

  // Subtract size from stack pointer, thereby allocating some space.
  BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);

  // Put a pointer to the space into the result register, by copying
  // the stack pointer.
  BuildMI(BB, X86::MOV32rr, 1, getReg(I)).addReg(X86::ESP);

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  F->getFrameInfo()->CreateVariableSizedObject();
}

/// visitMallocInst - Malloc instructions are code generated into direct calls
/// to the library malloc.
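///
/// For example, "%X = malloc int, uint %N" becomes, in effect, a call to
/// malloc(%N * 4); the multiply is folded away when the element count is a
/// constant, and strength-reduced by doMultiplyConst otherwise.
///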
void ISel::visitMallocInst(MallocInst &I) {
  unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType());
  unsigned Arg;

  if (ConstantUInt *C = dyn_cast<ConstantUInt>(I.getOperand(0))) {
    Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize));
  } else {
    Arg = makeAnotherReg(Type::UIntTy);
    unsigned Op0Reg = getReg(I.getOperand(0));
    MachineBasicBlock::iterator MBBI = BB->end();
    doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize);
  }

  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(Arg, Type::UIntTy));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("malloc", true);
  doCall(ValueRecord(getReg(I), I.getType()), TheCall, Args);
}

/// visitFreeInst - Free instructions are code gen'd to call the free libc
/// function.
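///
/// For example, "free sbyte* %P" becomes, in effect, a call to free(%P),
/// emitted as a CALLpcrel32 against the external "free" symbol.
///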
void ISel::visitFreeInst(FreeInst &I) {
  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(I.getOperand(0)));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("free", true);
  doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
}

/// createX86SimpleInstructionSelector - This pass converts an LLVM function
/// into a machine code representation in a very simple peephole fashion.  The
/// generated code sucks but the implementation is nice and simple.
///
FunctionPass *llvm::createX86SimpleInstructionSelector(TargetMachine &TM) {
  return new ISel(TM);
}