1 //===-- InstSelectSimple.cpp - A simple instruction selector for x86 ------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a simple peephole instruction selector for the x86 target
12 //===----------------------------------------------------------------------===//
15 #include "X86InstrBuilder.h"
16 #include "X86InstrInfo.h"
17 #include "llvm/Constants.h"
18 #include "llvm/DerivedTypes.h"
19 #include "llvm/Function.h"
20 #include "llvm/Instructions.h"
21 #include "llvm/Intrinsics.h"
22 #include "llvm/Pass.h"
23 #include "llvm/CodeGen/MachineConstantPool.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/SSARegMap.h"
28 #include "llvm/Target/MRegisterInfo.h"
29 #include "llvm/Target/TargetMachine.h"
30 #include "llvm/Support/InstVisitor.h"
34 /// BMI - A special BuildMI variant that takes an iterator to insert the
35 /// instruction at as well as a basic block. This is the version for when you
36 /// have a destination register in mind.
37 inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
38 MachineBasicBlock::iterator &I,
39 int Opcode, unsigned NumOperands,
41 assert(I >= MBB->begin() && I <= MBB->end() && "Bad iterator!");
42 MachineInstr *MI = new MachineInstr(Opcode, NumOperands+1, true, true);
43 I = MBB->insert(I, MI)+1;
44 return MachineInstrBuilder(MI).addReg(DestReg, MOTy::Def);
47 /// BMI - A special BuildMI variant that takes an iterator to insert the
48 /// instruction at as well as a basic block.
49 inline static MachineInstrBuilder BMI(MachineBasicBlock *MBB,
50 MachineBasicBlock::iterator &I,
51 int Opcode, unsigned NumOperands) {
52 assert(I >= MBB->begin() && I <= MBB->end() && "Bad iterator!");
53 MachineInstr *MI = new MachineInstr(Opcode, NumOperands, true, true);
54 I = MBB->insert(I, MI)+1;
55 return MachineInstrBuilder(MI);
60 struct ISel : public FunctionPass, InstVisitor<ISel> {
62 MachineFunction *F; // The function we are compiling into
63 MachineBasicBlock *BB; // The current MBB we are compiling
64 int VarArgsFrameIndex; // FrameIndex for start of varargs area
66 std::map<Value*, unsigned> RegMap; // Mapping between Val's and SSA Regs
68 // MBBMap - Mapping between LLVM BB -> Machine BB
69 std::map<const BasicBlock*, MachineBasicBlock*> MBBMap;
71 ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {}
73 /// runOnFunction - Top level implementation of instruction selection for
74 /// the entire function.
76 bool runOnFunction(Function &Fn) {
77 F = &MachineFunction::construct(&Fn, TM);
79 // Create all of the machine basic blocks for the function...
80 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
81 F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I));
85 // Copy incoming arguments off of the stack...
86 LoadArgumentsToVirtualRegs(Fn);
88 // Instruction select everything except PHI nodes
91 // Select the PHI nodes
97 // We always build a machine code representation for the function
101 virtual const char *getPassName() const {
102 return "X86 Simple Instruction Selection";
105 /// visitBasicBlock - This method is called when we are visiting a new basic
106 /// block. This simply creates a new MachineBasicBlock to emit code into
107 /// and adds it to the current MachineFunction. Subsequent visit* for
108 /// instructions will be invoked for all instructions in the basic block.
110 void visitBasicBlock(BasicBlock &LLVM_BB) {
111 BB = MBBMap[&LLVM_BB];
114 /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function
115 /// from the stack into virtual registers.
117 void LoadArgumentsToVirtualRegs(Function &F);
119 /// SelectPHINodes - Insert machine code to generate phis. This is tricky
120 /// because we have to generate our sources into the source basic blocks,
121 /// not the current one.
123 void SelectPHINodes();
125 // Visitation methods for various instructions. These methods simply emit
126 // fixed X86 code for each instruction.
129 // Control flow operators
130 void visitReturnInst(ReturnInst &RI);
131 void visitBranchInst(BranchInst &BI);
137 ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {}
138 ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {}
140 void doCall(const ValueRecord &Ret, MachineInstr *CallMI,
141 const std::vector<ValueRecord> &Args);
142 void visitCallInst(CallInst &I);
143 void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I);
145 // Arithmetic operators
146 void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass);
147 void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); }
148 void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); }
149 void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator &MBBI,
150 unsigned DestReg, const Type *DestTy,
151 unsigned Op0Reg, unsigned Op1Reg);
152 void doMultiplyConst(MachineBasicBlock *MBB,
153 MachineBasicBlock::iterator &MBBI,
154 unsigned DestReg, const Type *DestTy,
155 unsigned Op0Reg, unsigned Op1Val);
156 void visitMul(BinaryOperator &B);
158 void visitDiv(BinaryOperator &B) { visitDivRem(B); }
159 void visitRem(BinaryOperator &B) { visitDivRem(B); }
160 void visitDivRem(BinaryOperator &B);
163 void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); }
164 void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); }
165 void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); }
167 // Comparison operators...
168 void visitSetCondInst(SetCondInst &I);
169 unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
170 MachineBasicBlock *MBB,
171 MachineBasicBlock::iterator &MBBI);
173 // Memory Instructions
174 void visitLoadInst(LoadInst &I);
175 void visitStoreInst(StoreInst &I);
176 void visitGetElementPtrInst(GetElementPtrInst &I);
177 void visitAllocaInst(AllocaInst &I);
178 void visitMallocInst(MallocInst &I);
179 void visitFreeInst(FreeInst &I);
182 void visitShiftInst(ShiftInst &I);
183 void visitPHINode(PHINode &I) {} // PHI nodes handled by second pass
184 void visitCastInst(CastInst &I);
185 void visitVANextInst(VANextInst &I);
186 void visitVAArgInst(VAArgInst &I);
188 void visitInstruction(Instruction &I) {
189 std::cerr << "Cannot instruction select: " << I;
193 /// promote32 - Make a value 32-bits wide, and put it somewhere.
195 void promote32(unsigned targetReg, const ValueRecord &VR);
197 /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
198 /// constant expression GEP support.
200 void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator&IP,
201 Value *Src, User::op_iterator IdxBegin,
202 User::op_iterator IdxEnd, unsigned TargetReg);
204 /// emitCastOperation - Common code shared between visitCastInst and
205 /// constant expression cast support.
206 void emitCastOperation(MachineBasicBlock *BB,MachineBasicBlock::iterator&IP,
207 Value *Src, const Type *DestTy, unsigned TargetReg);
209 /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
210 /// and constant expression support.
211 void emitSimpleBinaryOperation(MachineBasicBlock *BB,
212 MachineBasicBlock::iterator &IP,
213 Value *Op0, Value *Op1,
214 unsigned OperatorClass, unsigned TargetReg);
216 void emitDivRemOperation(MachineBasicBlock *BB,
217 MachineBasicBlock::iterator &IP,
218 unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
219 const Type *Ty, unsigned TargetReg);
221 /// emitSetCCOperation - Common code shared between visitSetCondInst and
222 /// constant expression support.
223 void emitSetCCOperation(MachineBasicBlock *BB,
224 MachineBasicBlock::iterator &IP,
225 Value *Op0, Value *Op1, unsigned Opcode,
228 /// emitShiftOperation - Common code shared between visitShiftInst and
229 /// constant expression support.
230 void emitShiftOperation(MachineBasicBlock *MBB,
231 MachineBasicBlock::iterator &IP,
232 Value *Op, Value *ShiftAmount, bool isLeftShift,
233 const Type *ResultTy, unsigned DestReg);
236 /// copyConstantToRegister - Output the instructions required to put the
237 /// specified constant into the specified register.
239 void copyConstantToRegister(MachineBasicBlock *MBB,
240 MachineBasicBlock::iterator &MBBI,
241 Constant *C, unsigned Reg);
243 /// makeAnotherReg - This method returns the next register number we haven't
246 /// Long values are handled somewhat specially. They are always allocated
247 /// as pairs of 32 bit integer values. The register number returned is the
248 /// lower 32 bits of the long value, and the regNum+1 is the upper 32 bits
249 /// of the long value.
251 unsigned makeAnotherReg(const Type *Ty) {
252 assert(dynamic_cast<const X86RegisterInfo*>(TM.getRegisterInfo()) &&
253 "Current target doesn't have X86 reg info??");
254 const X86RegisterInfo *MRI =
255 static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
256 if (Ty == Type::LongTy || Ty == Type::ULongTy) {
257 const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy);
258 // Create the lower part
259 F->getSSARegMap()->createVirtualRegister(RC);
260 // Create the upper part.
261 return F->getSSARegMap()->createVirtualRegister(RC)-1;
264 // Add the mapping of regnumber => reg class to MachineFunction
265 const TargetRegisterClass *RC = MRI->getRegClassForType(Ty);
266 return F->getSSARegMap()->createVirtualRegister(RC);
269 /// getReg - This method turns an LLVM value into a register number. This
270 /// is guaranteed to produce the same register number for a particular value
271 /// every time it is queried.
273 unsigned getReg(Value &V) { return getReg(&V); } // Allow references
274 unsigned getReg(Value *V) {
275 // Just append to the end of the current bb.
276 MachineBasicBlock::iterator It = BB->end();
277 return getReg(V, BB, It);
279 unsigned getReg(Value *V, MachineBasicBlock *MBB,
280 MachineBasicBlock::iterator &IPt) {
281 unsigned &Reg = RegMap[V];
283 Reg = makeAnotherReg(V->getType());
287 // If this operand is a constant, emit the code to copy the constant into
288 // the register here...
290 if (Constant *C = dyn_cast<Constant>(V)) {
291 copyConstantToRegister(MBB, IPt, C, Reg);
292 RegMap.erase(V); // Assign a new name to this constant if ref'd again
293 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
294 // Move the address of the global into the register
295 BMI(MBB, IPt, X86::MOVir32, 1, Reg).addGlobalAddress(GV);
296 RegMap.erase(V); // Assign a new name to this address if ref'd again
304 /// TypeClass - Used by the X86 backend to group LLVM types by their basic X86
308 cByte, cShort, cInt, cFP, cLong
311 /// getClass - Turn a primitive type into a "class" number which is based on the
312 /// size of the type, and whether or not it is floating point.
314 static inline TypeClass getClass(const Type *Ty) {
315 switch (Ty->getPrimitiveID()) {
316 case Type::SByteTyID:
317 case Type::UByteTyID: return cByte; // Byte operands are class #0
318 case Type::ShortTyID:
319 case Type::UShortTyID: return cShort; // Short operands are class #1
322 case Type::PointerTyID: return cInt; // Int's and pointers are class #2
324 case Type::FloatTyID:
325 case Type::DoubleTyID: return cFP; // Floating Point is #3
328 case Type::ULongTyID: return cLong; // Longs are class #4
330 assert(0 && "Invalid type to getClass!");
331 return cByte; // not reached
335 // getClassB - Just like getClass, but treat boolean values as bytes.
336 static inline TypeClass getClassB(const Type *Ty) {
337 if (Ty == Type::BoolTy) return cByte;
342 /// copyConstantToRegister - Output the instructions required to put the
343 /// specified constant into the specified register.
345 void ISel::copyConstantToRegister(MachineBasicBlock *MBB,
346 MachineBasicBlock::iterator &IP,
347 Constant *C, unsigned R) {
348 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
350 switch (CE->getOpcode()) {
351 case Instruction::GetElementPtr:
352 emitGEPOperation(MBB, IP, CE->getOperand(0),
353 CE->op_begin()+1, CE->op_end(), R);
355 case Instruction::Cast:
356 emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R);
359 case Instruction::Xor: ++Class; // FALL THROUGH
360 case Instruction::Or: ++Class; // FALL THROUGH
361 case Instruction::And: ++Class; // FALL THROUGH
362 case Instruction::Sub: ++Class; // FALL THROUGH
363 case Instruction::Add:
364 emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
368 case Instruction::Mul: {
369 unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
370 unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
371 doMultiply(MBB, IP, R, CE->getType(), Op0Reg, Op1Reg);
374 case Instruction::Div:
375 case Instruction::Rem: {
376 unsigned Op0Reg = getReg(CE->getOperand(0), MBB, IP);
377 unsigned Op1Reg = getReg(CE->getOperand(1), MBB, IP);
378 emitDivRemOperation(MBB, IP, Op0Reg, Op1Reg,
379 CE->getOpcode() == Instruction::Div,
384 case Instruction::SetNE:
385 case Instruction::SetEQ:
386 case Instruction::SetLT:
387 case Instruction::SetGT:
388 case Instruction::SetLE:
389 case Instruction::SetGE:
390 emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
394 case Instruction::Shl:
395 case Instruction::Shr:
396 emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1),
397 CE->getOpcode() == Instruction::Shl, CE->getType(), R);
401 std::cerr << "Offending expr: " << C << "\n";
402 assert(0 && "Constant expression not yet handled!\n");
406 if (C->getType()->isIntegral()) {
407 unsigned Class = getClassB(C->getType());
409 if (Class == cLong) {
410 // Copy the value into the register pair.
411 uint64_t Val = cast<ConstantInt>(C)->getRawValue();
412 BMI(MBB, IP, X86::MOVir32, 1, R).addZImm(Val & 0xFFFFFFFF);
413 BMI(MBB, IP, X86::MOVir32, 1, R+1).addZImm(Val >> 32);
417 assert(Class <= cInt && "Type not handled yet!");
419 static const unsigned IntegralOpcodeTab[] = {
420 X86::MOVir8, X86::MOVir16, X86::MOVir32
423 if (C->getType() == Type::BoolTy) {
424 BMI(MBB, IP, X86::MOVir8, 1, R).addZImm(C == ConstantBool::True);
426 ConstantInt *CI = cast<ConstantInt>(C);
427 BMI(MBB, IP, IntegralOpcodeTab[Class], 1, R).addZImm(CI->getRawValue());
429 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
430 double Value = CFP->getValue();
432 BMI(MBB, IP, X86::FLD0, 0, R);
433 else if (Value == +1.0)
434 BMI(MBB, IP, X86::FLD1, 0, R);
436 // Otherwise we need to spill the constant to memory...
437 MachineConstantPool *CP = F->getConstantPool();
438 unsigned CPI = CP->getConstantPoolIndex(CFP);
439 const Type *Ty = CFP->getType();
441 assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!");
442 unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLDr32 : X86::FLDr64;
443 addConstantPoolReference(BMI(MBB, IP, LoadOpcode, 4, R), CPI);
446 } else if (isa<ConstantPointerNull>(C)) {
447 // Copy zero (null pointer) to the register.
448 BMI(MBB, IP, X86::MOVir32, 1, R).addZImm(0);
449 } else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
450 unsigned SrcReg = getReg(CPR->getValue(), MBB, IP);
451 BMI(MBB, IP, X86::MOVrr32, 1, R).addReg(SrcReg);
453 std::cerr << "Offending constant: " << C << "\n";
454 assert(0 && "Type not handled yet!");
458 /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from
459 /// the stack into virtual registers.
461 void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
462 // Emit instructions to load the arguments... On entry to a function on the
463 // X86, the stack frame looks like this:
465 // [ESP] -- return address
466 // [ESP + 4] -- first argument (leftmost lexically)
467 // [ESP + 8] -- second argument, if first argument is four bytes in size
470 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
471 MachineFrameInfo *MFI = F->getFrameInfo();
473 for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) {
474 unsigned Reg = getReg(*I);
476 int FI; // Frame object index
477 switch (getClassB(I->getType())) {
479 FI = MFI->CreateFixedObject(1, ArgOffset);
480 addFrameReference(BuildMI(BB, X86::MOVmr8, 4, Reg), FI);
483 FI = MFI->CreateFixedObject(2, ArgOffset);
484 addFrameReference(BuildMI(BB, X86::MOVmr16, 4, Reg), FI);
487 FI = MFI->CreateFixedObject(4, ArgOffset);
488 addFrameReference(BuildMI(BB, X86::MOVmr32, 4, Reg), FI);
491 FI = MFI->CreateFixedObject(8, ArgOffset);
492 addFrameReference(BuildMI(BB, X86::MOVmr32, 4, Reg), FI);
493 addFrameReference(BuildMI(BB, X86::MOVmr32, 4, Reg+1), FI, 4);
494 ArgOffset += 4; // longs require 4 additional bytes
498 if (I->getType() == Type::FloatTy) {
499 Opcode = X86::FLDr32;
500 FI = MFI->CreateFixedObject(4, ArgOffset);
502 Opcode = X86::FLDr64;
503 FI = MFI->CreateFixedObject(8, ArgOffset);
504 ArgOffset += 4; // doubles require 4 additional bytes
506 addFrameReference(BuildMI(BB, Opcode, 4, Reg), FI);
509 assert(0 && "Unhandled argument type!");
511 ArgOffset += 4; // Each argument takes at least 4 bytes on the stack...
514 // If the function takes variable number of arguments, add a frame offset for
515 // the start of the first vararg value... this is used to expand
517 if (Fn.getFunctionType()->isVarArg())
518 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
522 /// SelectPHINodes - Insert machine code to generate phis. This is tricky
523 /// because we have to generate our sources into the source basic blocks, not
526 void ISel::SelectPHINodes() {
527 const TargetInstrInfo &TII = TM.getInstrInfo();
528 const Function &LF = *F->getFunction(); // The LLVM function...
529 for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) {
530 const BasicBlock *BB = I;
531 MachineBasicBlock *MBB = MBBMap[I];
533 // Loop over all of the PHI nodes in the LLVM basic block...
534 unsigned NumPHIs = 0;
535 for (BasicBlock::const_iterator I = BB->begin();
536 PHINode *PN = const_cast<PHINode*>(dyn_cast<PHINode>(I)); ++I) {
538 // Create a new machine instr PHI node, and insert it.
539 unsigned PHIReg = getReg(*PN);
540 MachineInstr *PhiMI = BuildMI(X86::PHI, PN->getNumOperands(), PHIReg);
541 MBB->insert(MBB->begin()+NumPHIs++, PhiMI);
543 MachineInstr *LongPhiMI = 0;
544 if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy) {
545 LongPhiMI = BuildMI(X86::PHI, PN->getNumOperands(), PHIReg+1);
546 MBB->insert(MBB->begin()+NumPHIs++, LongPhiMI);
549 // PHIValues - Map of blocks to incoming virtual registers. We use this
550 // so that we only initialize one incoming value for a particular block,
551 // even if the block has multiple entries in the PHI node.
553 std::map<MachineBasicBlock*, unsigned> PHIValues;
555 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
556 MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)];
558 std::map<MachineBasicBlock*, unsigned>::iterator EntryIt =
559 PHIValues.lower_bound(PredMBB);
561 if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) {
562 // We already inserted an initialization of the register for this
563 // predecessor. Recycle it.
564 ValReg = EntryIt->second;
567 // Get the incoming value into a virtual register.
569 Value *Val = PN->getIncomingValue(i);
571 // If this is a constant or GlobalValue, we may have to insert code
572 // into the basic block to compute it into a virtual register.
573 if (isa<Constant>(Val) || isa<GlobalValue>(Val)) {
574 // Because we don't want to clobber any values which might be in
575 // physical registers with the computation of this constant (which
576 // might be arbitrarily complex if it is a constant expression),
577 // just insert the computation at the top of the basic block.
578 MachineBasicBlock::iterator PI = PredMBB->begin();
580 // Skip over any PHI nodes though!
581 while (PI != PredMBB->end() && (*PI)->getOpcode() == X86::PHI)
584 ValReg = getReg(Val, PredMBB, PI);
586 ValReg = getReg(Val);
589 // Remember that we inserted a value for this PHI for this predecessor
590 PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg));
593 PhiMI->addRegOperand(ValReg);
594 PhiMI->addMachineBasicBlockOperand(PredMBB);
596 LongPhiMI->addRegOperand(ValReg+1);
597 LongPhiMI->addMachineBasicBlockOperand(PredMBB);
604 // canFoldSetCCIntoBranch - Return the setcc instruction if we can fold it into
605 // the conditional branch instruction which is the only user of the cc
606 // instruction. This is the case if the conditional branch is the only user of
607 // the setcc, and if the setcc is in the same basic block as the conditional
608 // branch. We also don't handle long arguments below, so we reject them here as
611 static SetCondInst *canFoldSetCCIntoBranch(Value *V) {
612 if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
613 if (SCI->hasOneUse() && isa<BranchInst>(SCI->use_back()) &&
614 SCI->getParent() == cast<BranchInst>(SCI->use_back())->getParent()) {
615 const Type *Ty = SCI->getOperand(0)->getType();
616 if (Ty != Type::LongTy && Ty != Type::ULongTy)
622 // Return a fixed numbering for setcc instructions which does not depend on the
623 // order of the opcodes.
625 static unsigned getSetCCNumber(unsigned Opcode) {
627 default: assert(0 && "Unknown setcc instruction!");
628 case Instruction::SetEQ: return 0;
629 case Instruction::SetNE: return 1;
630 case Instruction::SetLT: return 2;
631 case Instruction::SetGE: return 3;
632 case Instruction::SetGT: return 4;
633 case Instruction::SetLE: return 5;
637 // LLVM -> X86 signed X86 unsigned
638 // ----- ---------- ------------
639 // seteq -> sete sete
640 // setne -> setne setne
641 // setlt -> setl setb
642 // setge -> setge setae
643 // setgt -> setg seta
644 // setle -> setle setbe
646 // sets // Used by comparison with 0 optimization
648 static const unsigned SetCCOpcodeTab[2][8] = {
649 { X86::SETEr, X86::SETNEr, X86::SETBr, X86::SETAEr, X86::SETAr, X86::SETBEr,
651 { X86::SETEr, X86::SETNEr, X86::SETLr, X86::SETGEr, X86::SETGr, X86::SETLEr,
652 X86::SETSr, X86::SETNSr },
655 // EmitComparison - This function emits a comparison of the two operands,
656 // returning the extended setcc code to use.
657 unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1,
658 MachineBasicBlock *MBB,
659 MachineBasicBlock::iterator &IP) {
660 // The arguments are already supposed to be of the same type.
661 const Type *CompTy = Op0->getType();
662 unsigned Class = getClassB(CompTy);
663 unsigned Op0r = getReg(Op0, MBB, IP);
665 // Special case handling of: cmp R, i
666 if (Class == cByte || Class == cShort || Class == cInt)
667 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
668 uint64_t Op1v = cast<ConstantInt>(CI)->getRawValue();
670 // Mask off any upper bits of the constant, if there are any...
671 Op1v &= (1ULL << (8 << Class)) - 1;
673 // If this is a comparison against zero, emit more efficient code. We
674 // can't handle unsigned comparisons against zero unless they are == or
675 // !=. These should have been strength reduced already anyway.
676 if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {
677 static const unsigned TESTTab[] = {
678 X86::TESTrr8, X86::TESTrr16, X86::TESTrr32
680 BMI(MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);
682 if (OpNum == 2) return 6; // Map jl -> js
683 if (OpNum == 3) return 7; // Map jg -> jns
687 static const unsigned CMPTab[] = {
688 X86::CMPri8, X86::CMPri16, X86::CMPri32
691 BMI(MBB, IP, CMPTab[Class], 2).addReg(Op0r).addZImm(Op1v);
695 unsigned Op1r = getReg(Op1, MBB, IP);
697 default: assert(0 && "Unknown type class!");
698 // Emit: cmp <var1>, <var2> (do the comparison). We can
699 // compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
702 BMI(MBB, IP, X86::CMPrr8, 2).addReg(Op0r).addReg(Op1r);
705 BMI(MBB, IP, X86::CMPrr16, 2).addReg(Op0r).addReg(Op1r);
708 BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r);
711 BMI(MBB, IP, X86::FpUCOM, 2).addReg(Op0r).addReg(Op1r);
712 BMI(MBB, IP, X86::FNSTSWr8, 0);
713 BMI(MBB, IP, X86::SAHF, 1);
717 if (OpNum < 2) { // seteq, setne
718 unsigned LoTmp = makeAnotherReg(Type::IntTy);
719 unsigned HiTmp = makeAnotherReg(Type::IntTy);
720 unsigned FinalTmp = makeAnotherReg(Type::IntTy);
721 BMI(MBB, IP, X86::XORrr32, 2, LoTmp).addReg(Op0r).addReg(Op1r);
722 BMI(MBB, IP, X86::XORrr32, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
723 BMI(MBB, IP, X86::ORrr32, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
724 break; // Allow the sete or setne to be generated from flags set by OR
726 // Emit a sequence of code which compares the high and low parts once
727 // each, then uses a conditional move to handle the overflow case. For
728 // example, a setlt for long would generate code like this:
730 // AL = lo(op1) < lo(op2) // Signedness depends on operands
731 // BL = hi(op1) < hi(op2) // Always unsigned comparison
732 // dest = hi(op1) == hi(op2) ? AL : BL;
735 // FIXME: This would be much better if we had hierarchical register
736 // classes! Until then, hardcode registers so that we can deal with their
737 // aliases (because we don't have conditional byte moves).
739 BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r);
740 BMI(MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
741 BMI(MBB, IP, X86::CMPrr32, 2).addReg(Op0r+1).addReg(Op1r+1);
742 BMI(MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
743 BMI(MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
744 BMI(MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
745 BMI(MBB, IP, X86::CMOVErr16, 2, X86::BX).addReg(X86::BX).addReg(X86::AX);
746 // NOTE: visitSetCondInst knows that the value is dumped into the BL
747 // register at this point for long values...
755 /// SetCC instructions - Here we just emit boilerplate code to set a byte-sized
756 /// register, then move it to wherever the result should be.
758 void ISel::visitSetCondInst(SetCondInst &I) {
759 if (canFoldSetCCIntoBranch(&I)) return; // Fold this into a branch...
761 unsigned DestReg = getReg(I);
762 MachineBasicBlock::iterator MII = BB->end();
763 emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(),
767 /// emitSetCCOperation - Common code shared between visitSetCondInst and
768 /// constant expression support.
769 void ISel::emitSetCCOperation(MachineBasicBlock *MBB,
770 MachineBasicBlock::iterator &IP,
771 Value *Op0, Value *Op1, unsigned Opcode,
772 unsigned TargetReg) {
773 unsigned OpNum = getSetCCNumber(Opcode);
774 OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP);
776 const Type *CompTy = Op0->getType();
777 unsigned CompClass = getClassB(CompTy);
778 bool isSigned = CompTy->isSigned() && CompClass != cFP;
780 if (CompClass != cLong || OpNum < 2) {
781 // Handle normal comparisons with a setcc instruction...
782 BMI(MBB, IP, SetCCOpcodeTab[isSigned][OpNum], 0, TargetReg);
784 // Handle long comparisons by copying the value which is already in BL into
785 // the register we want...
786 BMI(MBB, IP, X86::MOVrr8, 1, TargetReg).addReg(X86::BL);
793 /// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide
794 /// operand, in the specified target register.
795 void ISel::promote32(unsigned targetReg, const ValueRecord &VR) {
796 bool isUnsigned = VR.Ty->isUnsigned();
798 // Make sure we have the register number for this value...
799 unsigned Reg = VR.Val ? getReg(VR.Val) : VR.Reg;
801 switch (getClassB(VR.Ty)) {
803 // Extend value into target register (8->32)
805 BuildMI(BB, X86::MOVZXr32r8, 1, targetReg).addReg(Reg);
807 BuildMI(BB, X86::MOVSXr32r8, 1, targetReg).addReg(Reg);
810 // Extend value into target register (16->32)
812 BuildMI(BB, X86::MOVZXr32r16, 1, targetReg).addReg(Reg);
814 BuildMI(BB, X86::MOVSXr32r16, 1, targetReg).addReg(Reg);
817 // Move value into target register (32->32)
818 BuildMI(BB, X86::MOVrr32, 1, targetReg).addReg(Reg);
821 assert(0 && "Unpromotable operand class in promote32");
825 /// 'ret' instruction - Here we are interested in meeting the x86 ABI. As such,
826 /// we have the following possibilities:
828 /// ret void: No return value, simply emit a 'ret' instruction
829 /// ret sbyte, ubyte : Extend value into EAX and return
830 /// ret short, ushort: Extend value into EAX and return
831 /// ret int, uint : Move value into EAX and return
832 /// ret pointer : Move value into EAX and return
833 /// ret long, ulong : Move value into EAX/EDX and return
834 /// ret float/double : Top of FP stack
836 void ISel::visitReturnInst(ReturnInst &I) {
837 if (I.getNumOperands() == 0) {
838 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
842 Value *RetVal = I.getOperand(0);
843 unsigned RetReg = getReg(RetVal);
844 switch (getClassB(RetVal->getType())) {
845 case cByte: // integral return values: extend or move into EAX and return
848 promote32(X86::EAX, ValueRecord(RetReg, RetVal->getType()));
849 // Declare that EAX is live on exit
850 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
852 case cFP: // Floats & Doubles: Return in ST(0)
853 BuildMI(BB, X86::FpSETRESULT, 1).addReg(RetReg);
854 // Declare that top-of-stack is live on exit
855 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
858 BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(RetReg);
859 BuildMI(BB, X86::MOVrr32, 1, X86::EDX).addReg(RetReg+1);
860 // Declare that EAX & EDX are live on exit
861 BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
867 // Emit a 'ret' instruction
868 BuildMI(BB, X86::RET, 0);
871 // getBlockAfter - Return the basic block which occurs lexically after the
873 static inline BasicBlock *getBlockAfter(BasicBlock *BB) {
874 Function::iterator I = BB; ++I; // Get iterator to next block
875 return I != BB->getParent()->end() ? &*I : 0;
878 /// visitBranchInst - Handle conditional and unconditional branches here. Note
879 /// that since code layout is frozen at this point, that if we are trying to
880 /// jump to a block that is the immediate successor of the current block, we can
881 /// just make a fall-through (but we don't currently).
883 void ISel::visitBranchInst(BranchInst &BI) {
884 BasicBlock *NextBB = getBlockAfter(BI.getParent()); // BB after current one
886 if (!BI.isConditional()) { // Unconditional branch?
887 if (BI.getSuccessor(0) != NextBB)
888 BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
892 // See if we can fold the setcc into the branch itself...
893 SetCondInst *SCI = canFoldSetCCIntoBranch(BI.getCondition());
895 // Nope, cannot fold setcc into this branch. Emit a branch on a condition
896 // computed some other way...
897 unsigned condReg = getReg(BI.getCondition());
898 BuildMI(BB, X86::CMPri8, 2).addReg(condReg).addZImm(0);
899 if (BI.getSuccessor(1) == NextBB) {
900 if (BI.getSuccessor(0) != NextBB)
901 BuildMI(BB, X86::JNE, 1).addPCDisp(BI.getSuccessor(0));
903 BuildMI(BB, X86::JE, 1).addPCDisp(BI.getSuccessor(1));
905 if (BI.getSuccessor(0) != NextBB)
906 BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(0));
911 unsigned OpNum = getSetCCNumber(SCI->getOpcode());
912 MachineBasicBlock::iterator MII = BB->end();
913 OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB,MII);
915 const Type *CompTy = SCI->getOperand(0)->getType();
916 bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP;
919 // LLVM -> X86 signed X86 unsigned
920 // ----- ---------- ------------
928 // js // Used by comparison with 0 optimization
931 static const unsigned OpcodeTab[2][8] = {
932 { X86::JE, X86::JNE, X86::JB, X86::JAE, X86::JA, X86::JBE, 0, 0 },
933 { X86::JE, X86::JNE, X86::JL, X86::JGE, X86::JG, X86::JLE,
937 if (BI.getSuccessor(0) != NextBB) {
938 BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(0));
939 if (BI.getSuccessor(1) != NextBB)
940 BuildMI(BB, X86::JMP, 1).addPCDisp(BI.getSuccessor(1));
942 // Change to the inverse condition...
943 if (BI.getSuccessor(1) != NextBB) {
945 BuildMI(BB, OpcodeTab[isSigned][OpNum], 1).addPCDisp(BI.getSuccessor(1));
951 /// doCall - This emits an abstract call instruction, setting up the arguments
952 /// and the return value as appropriate. For the actual function call itself,
953 /// it inserts the specified CallMI instruction into the stream.
955 void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
956 const std::vector<ValueRecord> &Args) {
958 // Count how many bytes are to be pushed on the stack...
959 unsigned NumBytes = 0;
962 for (unsigned i = 0, e = Args.size(); i != e; ++i)
963 switch (getClassB(Args[i].Ty)) {
964 case cByte: case cShort: case cInt:
965 NumBytes += 4; break;
967 NumBytes += 8; break;
969 NumBytes += Args[i].Ty == Type::FloatTy ? 4 : 8;
971 default: assert(0 && "Unknown class!");
974 // Adjust the stack pointer for the new arguments...
975 BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addZImm(NumBytes);
977 // Arguments go on the stack in reverse order, as specified by the ABI.
978 unsigned ArgOffset = 0;
979 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
980 unsigned ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg;
981 switch (getClassB(Args[i].Ty)) {
984 // Promote arg to 32 bits wide into a temporary register...
985 unsigned R = makeAnotherReg(Type::UIntTy);
986 promote32(R, Args[i]);
987 addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
988 X86::ESP, ArgOffset).addReg(R);
992 addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
993 X86::ESP, ArgOffset).addReg(ArgReg);
996 addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
997 X86::ESP, ArgOffset).addReg(ArgReg);
998 addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
999 X86::ESP, ArgOffset+4).addReg(ArgReg+1);
1000 ArgOffset += 4; // 8 byte entry, not 4.
1004 if (Args[i].Ty == Type::FloatTy) {
1005 addRegOffset(BuildMI(BB, X86::FSTr32, 5),
1006 X86::ESP, ArgOffset).addReg(ArgReg);
1008 assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!");
1009 addRegOffset(BuildMI(BB, X86::FSTr64, 5),
1010 X86::ESP, ArgOffset).addReg(ArgReg);
1011 ArgOffset += 4; // 8 byte entry, not 4.
1015 default: assert(0 && "Unknown class!");
1020 BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addZImm(0);
1023 BB->push_back(CallMI);
1025 BuildMI(BB, X86::ADJCALLSTACKUP, 1).addZImm(NumBytes);
1027 // If there is a return value, scavenge the result from the location the call
1030 if (Ret.Ty != Type::VoidTy) {
1031 unsigned DestClass = getClassB(Ret.Ty);
1032 switch (DestClass) {
1036 // Integral results are in %eax, or the appropriate portion
1038 static const unsigned regRegMove[] = {
1039 X86::MOVrr8, X86::MOVrr16, X86::MOVrr32
1041 static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX };
1042 BuildMI(BB, regRegMove[DestClass], 1, Ret.Reg).addReg(AReg[DestClass]);
1045 case cFP: // Floating-point return values live in %ST(0)
1046 BuildMI(BB, X86::FpGETRESULT, 1, Ret.Reg);
1048 case cLong: // Long values are left in EDX:EAX
1049 BuildMI(BB, X86::MOVrr32, 1, Ret.Reg).addReg(X86::EAX);
1050 BuildMI(BB, X86::MOVrr32, 1, Ret.Reg+1).addReg(X86::EDX);
1052 default: assert(0 && "Unknown class!");
1058 /// visitCallInst - Push args on stack and do a procedure call instruction.
1059 void ISel::visitCallInst(CallInst &CI) {
1060 MachineInstr *TheCall;
1061 if (Function *F = CI.getCalledFunction()) {
1062 // Is it an intrinsic function call?
1063 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) {
1064 visitIntrinsicCall(ID, CI); // Special intrinsics are not handled here
1068 // Emit a CALL instruction with PC-relative displacement.
1069 TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true);
1070 } else { // Emit an indirect call...
1071 unsigned Reg = getReg(CI.getCalledValue());
1072 TheCall = BuildMI(X86::CALLr32, 1).addReg(Reg);
1075 std::vector<ValueRecord> Args;
1076 for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
1077 Args.push_back(ValueRecord(CI.getOperand(i)));
1079 unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0;
1080 doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args);
1084 void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
1085 unsigned TmpReg1, TmpReg2;
1087 case Intrinsic::va_start:
1088 // Get the address of the first vararg value...
1089 TmpReg1 = getReg(CI);
1090 addFrameReference(BuildMI(BB, X86::LEAr32, 5, TmpReg1), VarArgsFrameIndex);
1093 case Intrinsic::va_copy:
1094 TmpReg1 = getReg(CI);
1095 TmpReg2 = getReg(CI.getOperand(1));
1096 BuildMI(BB, X86::MOVrr32, 1, TmpReg1).addReg(TmpReg2);
1098 case Intrinsic::va_end: return; // Noop on X86
1100 case Intrinsic::longjmp:
1101 case Intrinsic::siglongjmp:
1102 BuildMI(BB, X86::CALLpcrel32, 1).addExternalSymbol("abort", true);
1105 case Intrinsic::setjmp:
1106 case Intrinsic::sigsetjmp:
1107 // Setjmp always returns zero...
1108 BuildMI(BB, X86::MOVir32, 1, getReg(CI)).addZImm(0);
1110 default: assert(0 && "Unknown intrinsic for X86!");
1115 /// visitSimpleBinary - Implement simple binary operators for integral types...
1116 /// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for
1118 void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
1119 unsigned DestReg = getReg(B);
1120 MachineBasicBlock::iterator MI = BB->end();
1121 emitSimpleBinaryOperation(BB, MI, B.getOperand(0), B.getOperand(1),
1122 OperatorClass, DestReg);
1125 /// emitSimpleBinaryOperation - Implement simple binary operators for integral
1126 /// types... OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for
1129 /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary
1130 /// and constant expression support.
1132 void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
1133 MachineBasicBlock::iterator &IP,
1134 Value *Op0, Value *Op1,
1135 unsigned OperatorClass, unsigned DestReg) {
1136 unsigned Class = getClassB(Op0->getType());
1138 // sub 0, X -> neg X
1139 if (OperatorClass == 1 && Class != cLong)
1140 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0))
1141 if (CI->isNullValue()) {
1142 unsigned op1Reg = getReg(Op1, MBB, IP);
1144 default: assert(0 && "Unknown class for this function!");
1146 BMI(MBB, IP, X86::NEGr8, 1, DestReg).addReg(op1Reg);
1149 BMI(MBB, IP, X86::NEGr16, 1, DestReg).addReg(op1Reg);
1152 BMI(MBB, IP, X86::NEGr32, 1, DestReg).addReg(op1Reg);
1157 if (!isa<ConstantInt>(Op1) || Class == cLong) {
1158 static const unsigned OpcodeTab[][4] = {
1159 // Arithmetic operators
1160 { X86::ADDrr8, X86::ADDrr16, X86::ADDrr32, X86::FpADD }, // ADD
1161 { X86::SUBrr8, X86::SUBrr16, X86::SUBrr32, X86::FpSUB }, // SUB
1163 // Bitwise operators
1164 { X86::ANDrr8, X86::ANDrr16, X86::ANDrr32, 0 }, // AND
1165 { X86:: ORrr8, X86:: ORrr16, X86:: ORrr32, 0 }, // OR
1166 { X86::XORrr8, X86::XORrr16, X86::XORrr32, 0 }, // XOR
1169 bool isLong = false;
1170 if (Class == cLong) {
1172 Class = cInt; // Bottom 32 bits are handled just like ints
1175 unsigned Opcode = OpcodeTab[OperatorClass][Class];
1176 assert(Opcode && "Floating point arguments to logical inst?");
1177 unsigned Op0r = getReg(Op0, MBB, IP);
1178 unsigned Op1r = getReg(Op1, MBB, IP);
1179 BMI(MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
1181 if (isLong) { // Handle the upper 32 bits of long values...
1182 static const unsigned TopTab[] = {
1183 X86::ADCrr32, X86::SBBrr32, X86::ANDrr32, X86::ORrr32, X86::XORrr32
1185 BMI(MBB, IP, TopTab[OperatorClass], 2,
1186 DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
1191 // Special case: op Reg, <const>
1192 ConstantInt *Op1C = cast<ConstantInt>(Op1);
1193 unsigned Op0r = getReg(Op0, MBB, IP);
1195 // xor X, -1 -> not X
1196 if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
1197 static unsigned const NOTTab[] = { X86::NOTr8, X86::NOTr16, X86::NOTr32 };
1198 BMI(MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
1202 // add X, -1 -> dec X
1203 if (OperatorClass == 0 && Op1C->isAllOnesValue()) {
1204 static unsigned const DECTab[] = { X86::DECr8, X86::DECr16, X86::DECr32 };
1205 BMI(MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
1209 // add X, 1 -> inc X
1210 if (OperatorClass == 0 && Op1C->equalsInt(1)) {
1211 static unsigned const DECTab[] = { X86::INCr8, X86::INCr16, X86::INCr32 };
1212 BMI(MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
1216 static const unsigned OpcodeTab[][3] = {
1217 // Arithmetic operators
1218 { X86::ADDri8, X86::ADDri16, X86::ADDri32 }, // ADD
1219 { X86::SUBri8, X86::SUBri16, X86::SUBri32 }, // SUB
1221 // Bitwise operators
1222 { X86::ANDri8, X86::ANDri16, X86::ANDri32 }, // AND
1223 { X86:: ORri8, X86:: ORri16, X86:: ORri32 }, // OR
1224 { X86::XORri8, X86::XORri16, X86::XORri32 }, // XOR
1227 assert(Class < 3 && "General code handles 64-bit integer types!");
1228 unsigned Opcode = OpcodeTab[OperatorClass][Class];
1229 uint64_t Op1v = cast<ConstantInt>(Op1C)->getRawValue();
1231 // Mask off any upper bits of the constant, if there are any...
1232 Op1v &= (1ULL << (8 << Class)) - 1;
1233 BMI(MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addZImm(Op1v);
1236 /// doMultiply - Emit appropriate instructions to multiply together the
1237 /// registers op0Reg and op1Reg, and put the result in DestReg. The type of the
1238 /// result should be given as DestTy.
1240 void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator &MBBI,
1241 unsigned DestReg, const Type *DestTy,
1242 unsigned op0Reg, unsigned op1Reg) {
1243 unsigned Class = getClass(DestTy);
1245 case cFP: // Floating point multiply
1246 BMI(BB, MBBI, X86::FpMUL, 2, DestReg).addReg(op0Reg).addReg(op1Reg);
1250 BMI(BB, MBBI, Class == cInt ? X86::IMULrr32 : X86::IMULrr16, 2, DestReg)
1251 .addReg(op0Reg).addReg(op1Reg);
1254 // Must use the MUL instruction, which forces use of AL...
1255 BMI(MBB, MBBI, X86::MOVrr8, 1, X86::AL).addReg(op0Reg);
1256 BMI(MBB, MBBI, X86::MULr8, 1).addReg(op1Reg);
1257 BMI(MBB, MBBI, X86::MOVrr8, 1, DestReg).addReg(X86::AL);
1260 case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
1264 // ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N. It
1265 // returns zero when the input is not exactly a power of two.
1266 static unsigned ExactLog2(unsigned Val) {
1267 if (Val == 0) return 0;
1270 if (Val & 1) return 0;
1277 void ISel::doMultiplyConst(MachineBasicBlock *MBB,
1278 MachineBasicBlock::iterator &IP,
1279 unsigned DestReg, const Type *DestTy,
1280 unsigned op0Reg, unsigned ConstRHS) {
1281 unsigned Class = getClass(DestTy);
1283 // If the element size is exactly a power of 2, use a shift to get it.
1284 if (unsigned Shift = ExactLog2(ConstRHS)) {
1286 default: assert(0 && "Unknown class for this function!");
1288 BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
1291 BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
1294 BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
1299 if (Class == cShort) {
1300 BMI(MBB, IP, X86::IMULri16, 2, DestReg).addReg(op0Reg).addZImm(ConstRHS);
1302 } else if (Class == cInt) {
1303 BMI(MBB, IP, X86::IMULri32, 2, DestReg).addReg(op0Reg).addZImm(ConstRHS);
1307 // Most general case, emit a normal multiply...
1308 static const unsigned MOVirTab[] = {
1309 X86::MOVir8, X86::MOVir16, X86::MOVir32
1312 unsigned TmpReg = makeAnotherReg(DestTy);
1313 BMI(MBB, IP, MOVirTab[Class], 1, TmpReg).addZImm(ConstRHS);
1315 // Emit a MUL to multiply the register holding the index by
1316 // elementSize, putting the result in OffsetReg.
1317 doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg);
1320 /// visitMul - Multiplies are not simple binary operators because they must deal
1321 /// with the EAX register explicitly.
1323 void ISel::visitMul(BinaryOperator &I) {
1324 unsigned Op0Reg = getReg(I.getOperand(0));
1325 unsigned DestReg = getReg(I);
1327 // Simple scalar multiply?
1328 if (I.getType() != Type::LongTy && I.getType() != Type::ULongTy) {
1329 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1))) {
1330 unsigned Val = (unsigned)CI->getRawValue(); // Cannot be 64-bit constant
1331 MachineBasicBlock::iterator MBBI = BB->end();
1332 doMultiplyConst(BB, MBBI, DestReg, I.getType(), Op0Reg, Val);
1334 unsigned Op1Reg = getReg(I.getOperand(1));
1335 MachineBasicBlock::iterator MBBI = BB->end();
1336 doMultiply(BB, MBBI, DestReg, I.getType(), Op0Reg, Op1Reg);
1339 unsigned Op1Reg = getReg(I.getOperand(1));
1341 // Long value. We have to do things the hard way...
1342 // Multiply the two low parts... capturing carry into EDX
1343 BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(Op0Reg);
1344 BuildMI(BB, X86::MULr32, 1).addReg(Op1Reg); // AL*BL
1346 unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
1347 BuildMI(BB, X86::MOVrr32, 1, DestReg).addReg(X86::EAX); // AL*BL
1348 BuildMI(BB, X86::MOVrr32, 1, OverflowReg).addReg(X86::EDX); // AL*BL >> 32
1350 MachineBasicBlock::iterator MBBI = BB->end();
1351 unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
1352 BMI(BB, MBBI, X86::IMULrr32, 2, AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);
1354 unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
1355 BuildMI(BB, X86::ADDrr32, 2, // AH*BL+(AL*BL >> 32)
1356 AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
1359 unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
1360 BMI(BB, MBBI, X86::IMULrr32, 2, ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);
1362 BuildMI(BB, X86::ADDrr32, 2, // AL*BH + AH*BL + (AL*BL >> 32)
1363 DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
1368 /// visitDivRem - Handle division and remainder instructions... these
1369 /// instruction both require the same instructions to be generated, they just
1370 /// select the result from a different register. Note that both of these
1371 /// instructions work differently for signed and unsigned operands.
1373 void ISel::visitDivRem(BinaryOperator &I) {
1374 unsigned Op0Reg = getReg(I.getOperand(0));
1375 unsigned Op1Reg = getReg(I.getOperand(1));
1376 unsigned ResultReg = getReg(I);
1378 MachineBasicBlock::iterator IP = BB->end();
1379 emitDivRemOperation(BB, IP, Op0Reg, Op1Reg, I.getOpcode() == Instruction::Div,
1380 I.getType(), ResultReg);
1383 void ISel::emitDivRemOperation(MachineBasicBlock *BB,
1384 MachineBasicBlock::iterator &IP,
1385 unsigned Op0Reg, unsigned Op1Reg, bool isDiv,
1386 const Type *Ty, unsigned ResultReg) {
1387 unsigned Class = getClass(Ty);
1389 case cFP: // Floating point divide
1391 BMI(BB, IP, X86::FpDIV, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
1392 } else { // Floating point remainder...
1393 MachineInstr *TheCall =
1394 BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("fmod", true);
1395 std::vector<ValueRecord> Args;
1396 Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy));
1397 Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy));
1398 doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args);
1402 static const char *FnName[] =
1403 { "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" };
1405 unsigned NameIdx = Ty->isUnsigned()*2 + isDiv;
1406 MachineInstr *TheCall =
1407 BuildMI(X86::CALLpcrel32, 1).addExternalSymbol(FnName[NameIdx], true);
1409 std::vector<ValueRecord> Args;
1410 Args.push_back(ValueRecord(Op0Reg, Type::LongTy));
1411 Args.push_back(ValueRecord(Op1Reg, Type::LongTy));
1412 doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args);
1415 case cByte: case cShort: case cInt:
1416 break; // Small integrals, handled below...
1417 default: assert(0 && "Unknown class!");
1420 static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
1421 static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
1422 static const unsigned SarOpcode[]={ X86::SARir8, X86::SARir16, X86::SARir32 };
1423 static const unsigned ClrOpcode[]={ X86::XORrr8, X86::XORrr16, X86::XORrr32 };
1424 static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
1426 static const unsigned DivOpcode[][4] = {
1427 { X86::DIVr8 , X86::DIVr16 , X86::DIVr32 , 0 }, // Unsigned division
1428 { X86::IDIVr8, X86::IDIVr16, X86::IDIVr32, 0 }, // Signed division
1431 bool isSigned = Ty->isSigned();
1432 unsigned Reg = Regs[Class];
1433 unsigned ExtReg = ExtRegs[Class];
1435 // Put the first operand into one of the A registers...
1436 BMI(BB, IP, MovOpcode[Class], 1, Reg).addReg(Op0Reg);
1439 // Emit a sign extension instruction...
1440 unsigned ShiftResult = makeAnotherReg(Ty);
1441 BMI(BB, IP, SarOpcode[Class], 2, ShiftResult).addReg(Op0Reg).addZImm(31);
1442 BMI(BB, IP, MovOpcode[Class], 1, ExtReg).addReg(ShiftResult);
1444 // If unsigned, emit a zeroing instruction... (reg = xor reg, reg)
1445 BMI(BB, IP, ClrOpcode[Class], 2, ExtReg).addReg(ExtReg).addReg(ExtReg);
1448 // Emit the appropriate divide or remainder instruction...
1449 BMI(BB, IP, DivOpcode[isSigned][Class], 1).addReg(Op1Reg);
1451 // Figure out which register we want to pick the result out of...
1452 unsigned DestReg = isDiv ? Reg : ExtReg;
1454 // Put the result into the destination register...
1455 BMI(BB, IP, MovOpcode[Class], 1, ResultReg).addReg(DestReg);
1459 /// Shift instructions: 'shl', 'sar', 'shr' - Some special cases here
1460 /// for constant immediate shift values, and for constant immediate
1461 /// shift values equal to 1. Even the general case is sort of special,
1462 /// because the shift amount has to be in CL, not just any old register.
1464 void ISel::visitShiftInst(ShiftInst &I) {
1465 MachineBasicBlock::iterator IP = BB->end ();
1466 emitShiftOperation (BB, IP, I.getOperand (0), I.getOperand (1),
1467 I.getOpcode () == Instruction::Shl, I.getType (),
1471 /// emitShiftOperation - Common code shared between visitShiftInst and
1472 /// constant expression support.
1473 void ISel::emitShiftOperation(MachineBasicBlock *MBB,
1474 MachineBasicBlock::iterator &IP,
1475 Value *Op, Value *ShiftAmount, bool isLeftShift,
1476 const Type *ResultTy, unsigned DestReg) {
1477 unsigned SrcReg = getReg (Op, MBB, IP);
1478 bool isSigned = ResultTy->isSigned ();
1479 unsigned Class = getClass (ResultTy);
1481 static const unsigned ConstantOperand[][4] = {
1482 { X86::SHRir8, X86::SHRir16, X86::SHRir32, X86::SHRDir32 }, // SHR
1483 { X86::SARir8, X86::SARir16, X86::SARir32, X86::SHRDir32 }, // SAR
1484 { X86::SHLir8, X86::SHLir16, X86::SHLir32, X86::SHLDir32 }, // SHL
1485 { X86::SHLir8, X86::SHLir16, X86::SHLir32, X86::SHLDir32 }, // SAL = SHL
1488 static const unsigned NonConstantOperand[][4] = {
1489 { X86::SHRrr8, X86::SHRrr16, X86::SHRrr32 }, // SHR
1490 { X86::SARrr8, X86::SARrr16, X86::SARrr32 }, // SAR
1491 { X86::SHLrr8, X86::SHLrr16, X86::SHLrr32 }, // SHL
1492 { X86::SHLrr8, X86::SHLrr16, X86::SHLrr32 }, // SAL = SHL
1495 // Longs, as usual, are handled specially...
1496 if (Class == cLong) {
1497 // If we have a constant shift, we can generate much more efficient code
1498 // than otherwise...
1500 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
1501 unsigned Amount = CUI->getValue();
1503 const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
1505 BMI(MBB, IP, Opc[3], 3,
1506 DestReg+1).addReg(SrcReg+1).addReg(SrcReg).addZImm(Amount);
1507 BMI(MBB, IP, Opc[2], 2, DestReg).addReg(SrcReg).addZImm(Amount);
1509 BMI(MBB, IP, Opc[3], 3,
1510 DestReg).addReg(SrcReg ).addReg(SrcReg+1).addZImm(Amount);
1511 BMI(MBB, IP, Opc[2], 2, DestReg+1).addReg(SrcReg+1).addZImm(Amount);
1513 } else { // Shifting more than 32 bits
1516 BMI(MBB, IP, X86::SHLir32, 2,
1517 DestReg + 1).addReg(SrcReg).addZImm(Amount);
1518 BMI(MBB, IP, X86::MOVir32, 1,
1519 DestReg).addZImm(0);
1521 unsigned Opcode = isSigned ? X86::SARir32 : X86::SHRir32;
1522 BMI(MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addZImm(Amount);
1523 BMI(MBB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
1527 unsigned TmpReg = makeAnotherReg(Type::IntTy);
1529 if (!isLeftShift && isSigned) {
1530 // If this is a SHR of a Long, then we need to do funny sign extension
1531 // stuff. TmpReg gets the value to use as the high-part if we are
1532 // shifting more than 32 bits.
1533 BMI(MBB, IP, X86::SARir32, 2, TmpReg).addReg(SrcReg).addZImm(31);
1535 // Other shifts use a fixed zero value if the shift is more than 32
1537 BMI(MBB, IP, X86::MOVir32, 1, TmpReg).addZImm(0);
1540 // Initialize CL with the shift amount...
1541 unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
1542 BMI(MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);
1544 unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
1545 unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
1547 // TmpReg2 = shld inHi, inLo
1548 BMI(MBB, IP, X86::SHLDrr32, 2, TmpReg2).addReg(SrcReg+1).addReg(SrcReg);
1549 // TmpReg3 = shl inLo, CL
1550 BMI(MBB, IP, X86::SHLrr32, 1, TmpReg3).addReg(SrcReg);
1552 // Set the flags to indicate whether the shift was by more than 32 bits.
1553 BMI(MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);
1555 // DestHi = (>32) ? TmpReg3 : TmpReg2;
1556 BMI(MBB, IP, X86::CMOVNErr32, 2,
1557 DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
1558 // DestLo = (>32) ? TmpReg : TmpReg3;
1559 BMI(MBB, IP, X86::CMOVNErr32, 2,
1560 DestReg).addReg(TmpReg3).addReg(TmpReg);
1562 // TmpReg2 = shrd inLo, inHi
1563 BMI(MBB, IP, X86::SHRDrr32, 2, TmpReg2).addReg(SrcReg).addReg(SrcReg+1);
1564 // TmpReg3 = s[ah]r inHi, CL
1565 BMI(MBB, IP, isSigned ? X86::SARrr32 : X86::SHRrr32, 1, TmpReg3)
1568 // Set the flags to indicate whether the shift was by more than 32 bits.
1569 BMI(MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addZImm(32);
1571 // DestLo = (>32) ? TmpReg3 : TmpReg2;
1572 BMI(MBB, IP, X86::CMOVNErr32, 2,
1573 DestReg).addReg(TmpReg2).addReg(TmpReg3);
1575 // DestHi = (>32) ? TmpReg : TmpReg3;
1576 BMI(MBB, IP, X86::CMOVNErr32, 2,
1577 DestReg+1).addReg(TmpReg3).addReg(TmpReg);
1583 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(ShiftAmount)) {
1584 // The shift amount is constant, guaranteed to be a ubyte. Get its value.
1585 assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?");
1587 const unsigned *Opc = ConstantOperand[isLeftShift*2+isSigned];
1588 BMI(MBB, IP, Opc[Class], 2,
1589 DestReg).addReg(SrcReg).addZImm(CUI->getValue());
1590 } else { // The shift amount is non-constant.
1591 unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP);
1592 BMI(MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);
1594 const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
1595 BMI(MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
1600 /// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
1601 /// instruction. The load and store instructions are the only place where we
1602 /// need to worry about the memory layout of the target machine.
1604 void ISel::visitLoadInst(LoadInst &I) {
1605 unsigned SrcAddrReg = getReg(I.getOperand(0));
1606 unsigned DestReg = getReg(I);
1608 unsigned Class = getClassB(I.getType());
1610 if (Class == cLong) {
1611 addDirectMem(BuildMI(BB, X86::MOVmr32, 4, DestReg), SrcAddrReg);
1612 addRegOffset(BuildMI(BB, X86::MOVmr32, 4, DestReg+1), SrcAddrReg, 4);
1616 static const unsigned Opcodes[] = {
1617 X86::MOVmr8, X86::MOVmr16, X86::MOVmr32, X86::FLDr32
1619 unsigned Opcode = Opcodes[Class];
1620 if (I.getType() == Type::DoubleTy) Opcode = X86::FLDr64;
1621 addDirectMem(BuildMI(BB, Opcode, 4, DestReg), SrcAddrReg);
1624 /// visitStoreInst - Implement LLVM store instructions in terms of the x86 'mov'
1627 void ISel::visitStoreInst(StoreInst &I) {
1628 unsigned ValReg = getReg(I.getOperand(0));
1629 unsigned AddressReg = getReg(I.getOperand(1));
1631 const Type *ValTy = I.getOperand(0)->getType();
1632 unsigned Class = getClassB(ValTy);
1634 if (Class == cLong) {
1635 addDirectMem(BuildMI(BB, X86::MOVrm32, 1+4), AddressReg).addReg(ValReg);
1636 addRegOffset(BuildMI(BB, X86::MOVrm32, 1+4), AddressReg,4).addReg(ValReg+1);
1640 static const unsigned Opcodes[] = {
1641 X86::MOVrm8, X86::MOVrm16, X86::MOVrm32, X86::FSTr32
1643 unsigned Opcode = Opcodes[Class];
1644 if (ValTy == Type::DoubleTy) Opcode = X86::FSTr64;
1645 addDirectMem(BuildMI(BB, Opcode, 1+4), AddressReg).addReg(ValReg);
1649 /// visitCastInst - Here we have various kinds of copying with or without
1650 /// sign extension going on.
1651 void ISel::visitCastInst(CastInst &CI) {
1652 Value *Op = CI.getOperand(0);
1653 // If this is a cast from a 32-bit integer to a Long type, and the only uses
1654 // of the case are GEP instructions, then the cast does not need to be
1655 // generated explicitly, it will be folded into the GEP.
1656 if (CI.getType() == Type::LongTy &&
1657 (Op->getType() == Type::IntTy || Op->getType() == Type::UIntTy)) {
1658 bool AllUsesAreGEPs = true;
1659 for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I)
1660 if (!isa<GetElementPtrInst>(*I)) {
1661 AllUsesAreGEPs = false;
1665 // No need to codegen this cast if all users are getelementptr instrs...
1666 if (AllUsesAreGEPs) return;
1669 unsigned DestReg = getReg(CI);
1670 MachineBasicBlock::iterator MI = BB->end();
1671 emitCastOperation(BB, MI, Op, CI.getType(), DestReg);
/// emitCastOperation - Common code shared between visitCastInst and
/// constant expression cast support.
void ISel::emitCastOperation(MachineBasicBlock *BB,
                             MachineBasicBlock::iterator &IP,
                             Value *Src, const Type *DestTy,
                             unsigned DestReg) {
  unsigned SrcReg = getReg(Src, BB, IP);
  const Type *SrcTy = Src->getType();
  unsigned SrcClass = getClassB(SrcTy);
  unsigned DestClass = getClassB(DestTy);

  // Implement casts to bool by using compare on the operand followed by set if
  // not zero on the result.
  if (DestTy == Type::BoolTy) {
    switch (SrcClass) {
    case cByte:
      BMI(BB, IP, X86::TESTrr8, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cShort:
      BMI(BB, IP, X86::TESTrr16, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cInt:
      BMI(BB, IP, X86::TESTrr32, 2).addReg(SrcReg).addReg(SrcReg);
      break;
    case cLong: {
      unsigned TmpReg = makeAnotherReg(Type::IntTy);
      BMI(BB, IP, X86::ORrr32, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
      break;
    }
    case cFP:
      assert(0 && "FIXME: implement cast FP to bool");
    }

    // If the zero flag is not set, then the value is true, set the byte to
    // true.
    BMI(BB, IP, X86::SETNEr, 1, DestReg);
    return;
  }
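  // Illustrative example (names are for exposition only): "%B = cast int %X to
  // bool" is selected roughly as "test %X, %X" followed by "setne %B"; for a
  // long source the two 32-bit halves are first OR'd together so that the zero
  // flag reflects the whole 64-bit value.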
  static const unsigned RegRegMove[] = {
    X86::MOVrr8, X86::MOVrr16, X86::MOVrr32, X86::FpMOV, X86::MOVrr32
  };

  // Implement casts between values of the same type class (as determined by
  // getClass) by using a register-to-register move.
  if (SrcClass == DestClass) {
    if (SrcClass <= cInt || (SrcClass == cFP && SrcTy == DestTy)) {
      BMI(BB, IP, RegRegMove[SrcClass], 1, DestReg).addReg(SrcReg);
    } else if (SrcClass == cFP) {
      if (SrcTy == Type::FloatTy) {  // float -> double
        assert(DestTy == Type::DoubleTy && "Unknown cFP member!");
        BMI(BB, IP, X86::FpMOV, 1, DestReg).addReg(SrcReg);
      } else {                       // double -> float
        assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy &&
               "Unknown cFP member!");
        // Truncate from double to float by storing to memory as a 32-bit
        // float, then reading it back.
        unsigned FltAlign = TM.getTargetData().getFloatAlignment();
        int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
        addFrameReference(BMI(BB, IP, X86::FSTr32, 5), FrameIdx).addReg(SrcReg);
        addFrameReference(BMI(BB, IP, X86::FLDr32, 5, DestReg), FrameIdx);
      }
    } else if (SrcClass == cLong) {
      BMI(BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
      BMI(BB, IP, X86::MOVrr32, 1, DestReg+1).addReg(SrcReg+1);
    } else {
      assert(0 && "Cannot handle this type of cast instruction!");
    }
    return;
  }
  // Handle cast of SMALLER int to LARGER int using a move with sign extension
  // or zero extension, depending on whether the source type was signed.
  if (SrcClass <= cInt && (DestClass <= cInt || DestClass == cLong) &&
      SrcClass < DestClass) {
    bool isLong = DestClass == cLong;
    if (isLong) DestClass = cInt;

    static const unsigned Opc[][4] = {
      { X86::MOVSXr16r8, X86::MOVSXr32r8, X86::MOVSXr32r16, X86::MOVrr32 }, // s
      { X86::MOVZXr16r8, X86::MOVZXr32r8, X86::MOVZXr32r16, X86::MOVrr32 }  // u
    };

    bool isUnsigned = SrcTy->isUnsigned();
    BMI(BB, IP, Opc[isUnsigned][SrcClass + DestClass - 1], 1,
        DestReg).addReg(SrcReg);

    if (isLong) {  // Handle upper 32 bits as appropriate...
      if (isUnsigned)     // Zero out top bits...
        BMI(BB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
      else                // Sign extend bottom half...
        BMI(BB, IP, X86::SARir32, 2, DestReg+1).addReg(DestReg).addZImm(31);
    }
    return;
  }
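  // Illustrative example (names are for exposition only): "%L = cast sbyte %B
  // to long" sign-extends %B into the low 32-bit half with movsx and fills the
  // high half with the sign via an arithmetic shift right by 31; an unsigned
  // source instead zero-extends and sets the high half to 0.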
  // Special case long -> int ...
  if (SrcClass == cLong && DestClass == cInt) {
    BMI(BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
    return;
  }
  // Handle cast of LARGER int to SMALLER int using a move to EAX followed by a
  // move out of AX or AL.
  if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt
      && SrcClass > DestClass) {
    static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX, 0, X86::EAX };
    BMI(BB, IP, RegRegMove[SrcClass], 1, AReg[SrcClass]).addReg(SrcReg);
    BMI(BB, IP, RegRegMove[DestClass], 1, DestReg).addReg(AReg[DestClass]);
    return;
  }
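  // Illustrative example (names are for exposition only): "%B = cast int %X to
  // ubyte" is selected as a move of %X into EAX followed by a byte move of AL
  // into %B, so only the low 8 bits survive the truncation.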
  // Handle casts from integer to floating point now...
  if (DestClass == cFP) {
    // Promote the integer to a type supported by FLD.  We do this because there
    // are no unsigned FLD instructions, so we must promote an unsigned value to
    // a larger signed value, then use FLD on the larger value.
    //
    const Type *PromoteType = 0;
    unsigned PromoteOpcode = 0;
    switch (SrcTy->getPrimitiveID()) {
    case Type::BoolTyID:
    case Type::SByteTyID:
      // We don't have the facilities for directly loading byte sized data from
      // memory (even signed).  Promote it to 16 bits.
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVSXr16r8;
      break;
    case Type::UByteTyID:
      PromoteType = Type::ShortTy;
      PromoteOpcode = X86::MOVZXr16r8;
      break;
    case Type::UShortTyID:
      PromoteType = Type::IntTy;
      PromoteOpcode = X86::MOVZXr32r16;
      break;
    case Type::UIntTyID: {
      // Make a 64 bit temporary... and zero out the top of it...
      unsigned TmpReg = makeAnotherReg(Type::LongTy);
      BMI(BB, IP, X86::MOVrr32, 1, TmpReg).addReg(SrcReg);
      BMI(BB, IP, X86::MOVir32, 1, TmpReg+1).addZImm(0);
      SrcTy = Type::LongTy;
      SrcClass = cLong;
      SrcReg = TmpReg;
      break;
    }
    case Type::ULongTyID:
      assert(0 && "FIXME: not implemented: cast ulong X to fp type!");
    default:  // No promotion needed...
      break;
    }

    if (PromoteType) {
      unsigned TmpReg = makeAnotherReg(PromoteType);
      BMI(BB, IP, PromoteOpcode, 1, TmpReg).addReg(SrcReg);
      SrcTy = PromoteType;
      SrcClass = getClass(PromoteType);
      SrcReg = TmpReg;
    }

    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());

    if (SrcClass == cLong) {
      addFrameReference(BMI(BB, IP, X86::MOVrm32, 5), FrameIdx).addReg(SrcReg);
      addFrameReference(BMI(BB, IP, X86::MOVrm32, 5),
                        FrameIdx, 4).addReg(SrcReg+1);
    } else {
      static const unsigned Op1[] = { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32 };
      addFrameReference(BMI(BB, IP, Op1[SrcClass], 5), FrameIdx).addReg(SrcReg);
    }

    static const unsigned Op2[] =
      { 0/*byte*/, X86::FILDr16, X86::FILDr32, 0/*FP*/, X86::FILDr64 };
    addFrameReference(BMI(BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);
    return;
  }
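  // Illustrative example (names are for exposition only): "%D = cast uint %X
  // to double" copies %X into the low half of a 64-bit temporary, zeroes the
  // high half, spills both halves to a stack slot, and then loads the value
  // with "fild QWORD PTR [slot]" so that the full unsigned range is preserved.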
  // Handle casts from floating point to integer now...
  if (SrcClass == cFP) {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    //
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BMI(BB, IP, X86::FNSTCWm16, 4), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
    addFrameReference(BMI(BB, IP, X86::MOVmr8, 4, HighPartOfCW), CWFrameIdx, 1);

    // Set the high part to be round to zero...
    addFrameReference(BMI(BB, IP, X86::MOVim8, 5), CWFrameIdx, 1).addZImm(12);

    // Reload the modified control word now...
    addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BMI(BB, IP, X86::MOVrm8, 5),
                      CWFrameIdx, 1).addReg(HighPartOfCW);

    // We don't have the facilities for directly storing byte sized data to
    // memory.  Promote it to 16 bits.  We also must promote unsigned values to
    // larger classes because we only have signed FP stores.
    unsigned StoreClass = DestClass;
    const Type *StoreTy = DestTy;
    if (StoreClass == cByte || DestTy->isUnsigned())
      switch (StoreClass) {
      case cByte:  StoreTy = Type::ShortTy; StoreClass = cShort; break;
      case cShort: StoreTy = Type::IntTy;   StoreClass = cInt;   break;
      case cInt:   StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      // The following treatment of cLong may not be perfectly right,
      // but it survives chains of casts of the form
      // double->ulong->double.
      case cLong:  StoreTy = Type::LongTy;  StoreClass = cLong;  break;
      default: assert(0 && "Unknown store class!");
      }

    // Spill the integer to memory and reload it from there...
    int FrameIdx =
      F->getFrameInfo()->CreateStackObject(StoreTy, TM.getTargetData());

    static const unsigned Op1[] =
      { 0, X86::FISTr16, X86::FISTr32, 0, X86::FISTPr64 };
    addFrameReference(BMI(BB, IP, Op1[StoreClass], 5), FrameIdx).addReg(SrcReg);

    if (DestClass == cLong) {
      addFrameReference(BMI(BB, IP, X86::MOVmr32, 4, DestReg), FrameIdx);
      addFrameReference(BMI(BB, IP, X86::MOVmr32, 4, DestReg+1), FrameIdx, 4);
    } else {
      static const unsigned Op2[] = { X86::MOVmr8, X86::MOVmr16, X86::MOVmr32 };
      addFrameReference(BMI(BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
    }

    // Reload the original control word now...
    addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
    return;
  }
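  // Illustrative example (names are for exposition only): "%I = cast double %D
  // to int" saves the FP control word with fnstcw, rewrites its high byte to
  // 0x0C to select round-toward-zero, reloads it with fldcw, stores the value
  // to a stack slot with fist, moves the result into %I, and finally restores
  // the original control word.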
  // Anything we haven't handled already, we can't (yet) handle at all.
  assert(0 && "Unhandled cast instruction!");
}
/// visitVANextInst - Implement the va_next instruction...
///
void ISel::visitVANextInst(VANextInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  unsigned Size;
  switch (I.getArgType()->getPrimitiveID()) {
  default:
    assert(0 && "Error: bad type for va_next instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    Size = 4;
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
  case Type::DoubleTyID:
    Size = 8;
    break;
  }

  // Increment the VAList pointer...
  BuildMI(BB, X86::ADDri32, 2, DestReg).addReg(VAList).addZImm(Size);
}
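// Illustrative note: va_next simply advances the va_list pointer by the size
// of the argument just read, e.g. an int or pointer argument adds 4 and a
// long or double argument adds 8, emitted as a single add of the constant.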
void ISel::visitVAArgInst(VAArgInst &I) {
  unsigned VAList = getReg(I.getOperand(0));
  unsigned DestReg = getReg(I);

  switch (I.getType()->getPrimitiveID()) {
  default:
    assert(0 && "Error: bad type for va_arg instruction!");
    return;
  case Type::PointerTyID:
  case Type::UIntTyID:
  case Type::IntTyID:
    addDirectMem(BuildMI(BB, X86::MOVmr32, 4, DestReg), VAList);
    break;
  case Type::ULongTyID:
  case Type::LongTyID:
    addDirectMem(BuildMI(BB, X86::MOVmr32, 4, DestReg), VAList);
    addRegOffset(BuildMI(BB, X86::MOVmr32, 4, DestReg+1), VAList, 4);
    break;
  case Type::DoubleTyID:
    addDirectMem(BuildMI(BB, X86::FLDr64, 4, DestReg), VAList);
    break;
  }
}
void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
  unsigned outputReg = getReg(I);
  MachineBasicBlock::iterator MI = BB->end();
  emitGEPOperation(BB, MI, I.getOperand(0),
                   I.op_begin()+1, I.op_end(), outputReg);
}
void ISel::emitGEPOperation(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator &IP,
                            Value *Src, User::op_iterator IdxBegin,
                            User::op_iterator IdxEnd, unsigned TargetReg) {
  const TargetData &TD = TM.getTargetData();
  const Type *Ty = Src->getType();
  unsigned BaseReg = getReg(Src, MBB, IP);

  // GEPs have zero or more indices; we must perform a struct access
  // or array access for each one.
  for (GetElementPtrInst::op_iterator oi = IdxBegin,
         oe = IdxEnd; oi != oe; ++oi) {
    Value *idx = *oi;
    unsigned NextReg = BaseReg;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      // It's a struct access.  idx is the index into the structure,
      // which names the field.  This index must have ubyte type.
      const ConstantUInt *CUI = cast<ConstantUInt>(idx);
      assert(CUI->getType() == Type::UByteTy
             && "Funny-looking structure index in GEP");
      // Use the TargetData structure to pick out what the layout of
      // the structure is in memory.  Since the structure index must
      // be constant, we can get its value and use it to find the
      // right byte offset from the StructLayout class's list of
      // structure member offsets.
      unsigned idxValue = CUI->getValue();
      unsigned FieldOff = TD.getStructLayout(StTy)->MemberOffsets[idxValue];
      if (FieldOff) {
        NextReg = makeAnotherReg(Type::UIntTy);
        // Emit an ADD to add FieldOff to the basePtr.
        BMI(MBB, IP, X86::ADDri32, 2, NextReg).addReg(BaseReg).addZImm(FieldOff);
      }
      // The next type is the member of the structure selected by the
      // index.
      Ty = StTy->getElementTypes()[idxValue];
    } else if (const SequentialType *SqTy = cast<SequentialType>(Ty)) {
      // It's an array or pointer access: [ArraySize x ElementType].

      // idx is the index into the array.  Unlike with structure
      // indices, we may not know its actual value at code-generation
      // time.
      assert(idx->getType() == Type::LongTy && "Bad GEP array index!");

      // Most GEP instructions use a [cast (int/uint) to LongTy] as their
      // operand on X86.  Handle this case directly now...
      if (CastInst *CI = dyn_cast<CastInst>(idx))
        if (CI->getOperand(0)->getType() == Type::IntTy ||
            CI->getOperand(0)->getType() == Type::UIntTy)
          idx = CI->getOperand(0);

      // We want to add BaseReg to (idxReg * sizeof ElementType).  First, we
      // must find the size of the pointed-to type (Not coincidentally, the next
      // type is the type of the elements in the array).
      Ty = SqTy->getElementType();
      unsigned elementSize = TD.getTypeSize(Ty);

      // If idxReg is a constant, we don't need to perform the multiply!
      if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
        if (!CSI->isNullValue()) {
          unsigned Offset = elementSize*CSI->getValue();
          NextReg = makeAnotherReg(Type::UIntTy);
          BMI(MBB, IP, X86::ADDri32, 2, NextReg).addReg(BaseReg).addZImm(Offset);
        }
      } else if (elementSize == 1) {
        // If the element size is 1, we don't have to multiply, just add
        unsigned idxReg = getReg(idx, MBB, IP);
        NextReg = makeAnotherReg(Type::UIntTy);
        BMI(MBB, IP, X86::ADDrr32, 2, NextReg).addReg(BaseReg).addReg(idxReg);
      } else {
        unsigned idxReg = getReg(idx, MBB, IP);
        unsigned OffsetReg = makeAnotherReg(Type::UIntTy);

        doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize);

        // Emit an ADD to add OffsetReg to the basePtr.
        NextReg = makeAnotherReg(Type::UIntTy);
        BMI(MBB, IP, X86::ADDrr32, 2, NextReg).addReg(BaseReg).addReg(OffsetReg);
      }
    }

    // Now that we are here, further indices refer to subtypes of this
    // one, so we don't need to worry about BaseReg itself, anymore.
    BaseReg = NextReg;
  }

  // After we have processed all the indices, the result is left in
  // BaseReg.  Move it to the register where we were expected to
  // put the answer.  A 32-bit move should do it, because we are in
  // ILP32 mode.
  BMI(MBB, IP, X86::MOVrr32, 1, TargetReg).addReg(BaseReg);
}
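// Illustrative example (names are for exposition only): for
//     %P = getelementptr %struct.Pair* %S, long 0, ubyte 1
// the constant struct index adds the field offset reported by StructLayout to
// the base pointer with an immediate add, while a variable array index such as
// "long %i" is multiplied by sizeof(element) via doMultiplyConst and added
// with a register-register add; the final address ends up in TargetReg.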
/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
/// frame manager, otherwise do it the hard way.
///
void ISel::visitAllocaInst(AllocaInst &I) {
  // Find the data size of the alloca inst's getAllocatedType.
  const Type *Ty = I.getAllocatedType();
  unsigned TySize = TM.getTargetData().getTypeSize(Ty);

  // If this is a fixed size alloca in the entry block for the function,
  // statically stack allocate the space.
  //
  if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(I.getArraySize())) {
    if (I.getParent() == I.getParent()->getParent()->begin()) {
      TySize *= CUI->getValue();   // Get total allocated size...
      unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty);

      // Create a new stack object using the frame manager...
      int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
      addFrameReference(BuildMI(BB, X86::LEAr32, 5, getReg(I)), FrameIdx);
      return;
    }
  }

  // Create a register to hold the temporary result of multiplying the type size
  // constant by the variable amount.
  //
  unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
  unsigned SrcReg1 = getReg(I.getArraySize());

  // TotalSizeReg = mul <numelements>, <TypeSize>
  MachineBasicBlock::iterator MBBI = BB->end();
  doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);

  // AddedSize = add <TotalSizeReg>, 15
  unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ADDri32, 2, AddedSizeReg).addReg(TotalSizeReg).addZImm(15);

  // AlignedSize = and <AddedSize>, ~15
  unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
  BuildMI(BB, X86::ANDri32, 2, AlignedSize).addReg(AddedSizeReg).addZImm(~15);

  // Subtract size from stack pointer, thereby allocating some space.
  BuildMI(BB, X86::SUBrr32, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);

  // Put a pointer to the space into the result register, by copying
  // the stack pointer.
  BuildMI(BB, X86::MOVrr32, 1, getReg(I)).addReg(X86::ESP);

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  F->getFrameInfo()->CreateVariableSizedObject();
}
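// Illustrative example (names are for exposition only): a dynamically sized
// "%A = alloca int, uint %N" multiplies %N by 4, rounds the byte count up to a
// multiple of 16 with "add ..., 15" and "and ..., ~15", subtracts it from ESP,
// and copies ESP into %A as the result pointer.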
/// visitMallocInst - Malloc instructions are code generated into direct calls
/// to the library malloc.
///
void ISel::visitMallocInst(MallocInst &I) {
  unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType());
  unsigned Arg;

  if (ConstantUInt *C = dyn_cast<ConstantUInt>(I.getOperand(0))) {
    Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize));
  } else {
    Arg = makeAnotherReg(Type::UIntTy);
    unsigned Op0Reg = getReg(I.getOperand(0));
    MachineBasicBlock::iterator MBBI = BB->end();
    doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize);
  }

  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(Arg, Type::UIntTy));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("malloc", true);
  doCall(ValueRecord(getReg(I), I.getType()), TheCall, Args);
}
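// Illustrative example (names are for exposition only): "%P = malloc int,
// uint %N" passes %N * 4 bytes as the single argument (folded to a constant
// when %N is a compile-time constant) and is emitted as a call to the C
// library's malloc, with the returned pointer copied into %P.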
/// visitFreeInst - Free instructions are code gen'd to call the free libc
/// function.
///
void ISel::visitFreeInst(FreeInst &I) {
  std::vector<ValueRecord> Args;
  Args.push_back(ValueRecord(I.getOperand(0)));
  MachineInstr *TheCall = BuildMI(X86::CALLpcrel32,
                                  1).addExternalSymbol("free", true);
  doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
}
/// createX86SimpleInstructionSelector - This pass converts an LLVM function
/// into a machine code representation in a very simple peep-hole fashion.  The
/// generated code sucks but the implementation is nice and simple.
///
FunctionPass *createX86SimpleInstructionSelector(TargetMachine &TM) {
  return new ISel(TM);
}

} // End llvm namespace