//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "SelectionDAGBuilder.h"
#include "FunctionLoweringInfo.h"
using namespace llvm;

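/// getRegForValue - Return the register that holds the value V, materializing
/// constants, static allocas, and other non-instruction values into a
/// register as needed. Returns 0 if the value cannot be handled by FastISel.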
unsigned FastISel::getRegForValue(Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  if (ValueMap.count(V))
    return ValueMap[V];
  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
      }
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (!SelectOperator(CE, CE->getOpcode())) return 0;
    Reg = LocalValueMap[CE];
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0)
    LocalValueMap[V] = Reg;
  return Reg;
}

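/// lookUpRegForValue - Like getRegForValue, but look the value up without
/// materializing anything; return 0 if no register has been assigned yet.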
unsigned FastISel::lookUpRegForValue(Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  if (ValueMap.count(V))
    return ValueMap[V];
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = ValueMap[I];
  if (AssignedReg == 0)
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
    TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
                     Reg, RegClass, RegClass);
  }
  return AssignedReg;
}

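/// getRegForGEPIndex - Get a register for the given GEP index value,
/// sign-extending or truncating it to pointer width as needed.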
unsigned FastISel::getRegForGEPIndex(Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return 0;

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT))
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  else if (IdxVT.bitsGT(PtrVT))
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  return IdxN;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// whose opcode directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

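/// SelectGetElementPtr - Select and emit code for a getelementptr
/// instruction by emitting the address arithmetic directly.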
bool FastISel::SelectGetElementPtr(User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      unsigned IdxN = getRegForGEPIndex(Idx);
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

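/// SelectCall - Select and emit code for a call instruction. Only intrinsics
/// with simple lowerings (debug info and exception handling intrinsics) are
/// handled here; other calls are left to the target or the SelectionDAG path.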
bool FastISel::SelectCall(User *I) {
  Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_declare: {
    DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIDescriptor::ValidDebugInfo(DI->getVariable(), CodeGenOpt::None) ||
        !DW || !DW->ShouldEmitDwarfDebug())
      return true;

    Value *Address = DI->getAddress();
    if (!Address)
      return true;
    AllocaInst *AI = dyn_cast<AllocaInst>(Address);
    // Don't handle byval struct arguments or VLAs, for example.
    if (!AI) break;
    DenseMap<const AllocaInst*, int>::iterator SI =
      StaticAllocaMap.find(AI);
    if (SI == StaticAllocaMap.end()) break; // VLAs.
    int FI = SI->second;
    if (MDNode *Dbg = DI->getMetadata("dbg"))
      MMI->setVariableDbgInfo(DI->getVariable(), FI, Dbg);

    // Building the map above is target independent. Generating DBG_VALUE
    // inline is target dependent; do this now.
    (void)TargetSelectInstruction(cast<Instruction>(I));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This requires target support, but right now X86 is the only Fast target.
    DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
                                     addMetadata(DI->getVariable());
    } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
                                     addMetadata(DI->getVariable());
    } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
                                     addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
                                     addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      // Insert an undef so we can see what we dropped.
      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
                                     addMetadata(DI->getVariable());
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                           Reg, RC, RC);
      assert(InsertedCopy && "Can't copy address registers!");
      InsertedCopy = InsertedCopy;
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (MMI) {
        if (MBB->isLandingPad())
          AddCatchInfo(*cast<CallInst>(I), MMI, MBB);
        else {
#ifndef NDEBUG
          CatchInfoLost.insert(cast<CallInst>(I));
#endif
          // FIXME: Mark exception selector register as live in. Hack for PR1508.
          unsigned Reg = TLI.getExceptionSelectorRegister();
          if (Reg) MBB->addLiveIn(Reg);
        }

        unsigned Reg = TLI.getExceptionSelectorRegister();
        EVT SrcVT = TLI.getPointerTy();
        const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
        unsigned ResultReg = createResultReg(RC);
        bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
                                             RC, RC);
        assert(InsertedCopy && "Can't copy address registers!");
        InsertedCopy = InsertedCopy;

        // Cast the register to the type of the selector.
        if (SrcVT.bitsGT(MVT::i32))
          ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                                 ResultReg);
        else if (SrcVT.bitsLT(MVT::i32))
          ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                                 ISD::SIGN_EXTEND, ResultReg);
        if (ResultReg == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;

        UpdateValueMap(I, ResultReg);
      } else {
        unsigned ResultReg =
          getRegForValue(Constant::getNullValue(I->getType()));
        UpdateValueMap(I, ResultReg);
      }
      return true;
    }
    }
    break;
  }
  }
  return false;
}

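/// SelectCast - Select and emit code for a cast instruction, using the given
/// ISD opcode for the conversion.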
bool FastISel::SelectCast(User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg);
    if (!InputReg)
      return false;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

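/// SelectBitCast - Select and emit code for a bitcast instruction, either as
/// a no-op, a register-to-register copy, or a BIT_CONVERT operation.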
bool FastISel::SelectBitCast(User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    ResultReg = createResultReg(DstClass);

    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         Op0, DstClass, SrcClass);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BIT_CONVERT, Op0);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

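/// SelectInstruction - Attempt to select the given instruction, first with
/// the target-independent code in SelectOperator and then with the target's
/// own fast-isel hooks. Returns true on success.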
bool
FastISel::SelectInstruction(Instruction *I) {
  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode()))
    return true;

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I))
    return true;

  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
  if (MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
  }
  MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BIT_CONVERT, OpReg);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR, IntReg,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BIT_CONVERT, IntResultReg);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

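/// SelectOperator - Dispatch the given opcode to the corresponding
/// target-independent selection routine.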
bool
FastISel::SelectOperator(User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
      FastEmitBranch(MSucc);
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::PHI:
    // PHI nodes are already emitted.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

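/// FastISel - Construct a FastISel object, capturing references to the
/// per-function lowering state that is shared with the SelectionDAG selector.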
FastISel::FastISel(MachineFunction &mf,
                   MachineModuleInfo *mmi,
                   DwarfWriter *dw,
                   DenseMap<const Value *, unsigned> &vm,
                   DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                   DenseMap<const AllocaInst *, int> &am
#ifndef NDEBUG
                   , SmallSet<Instruction*, 8> &cil
#endif
                   )
  : MBB(0),
    ValueMap(vm),
    MBBMap(bm),
    StaticAllocaMap(am),
#ifndef NDEBUG
    CatchInfoLost(cil),
#endif
    MF(mf),
    MMI(mmi),
    DW(dw),
    MRI(MF.getRegInfo()),
    MFI(*MF.getFrameInfo()),
    MCP(*MF.getConstantPool()),
    TM(MF.getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()) {
}

FastISel::~FastISel() {}

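// The default FastEmit_* implementations fail and return 0 so that a target
// only needs to override the forms it can actually select.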
unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned, unsigned /*Op0*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, unsigned /*Op1*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, uint64_t Imm,
                                MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, ConstantFP *FPImm,
                                MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

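/// createResultReg - Allocate a fresh virtual register of the given register
/// class to hold an instruction result.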
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

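// The FastEmitInst_* helpers below build a MachineInstr with the given opcode
// and operands and return the virtual register holding its result, copying
// from an implicit def when the instruction has no explicit result operand.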
unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0);
  else {
    BuildMI(MBB, DL, II).addReg(Op0);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, unsigned Op1) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addFPImm(FPImm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addFPImm(FPImm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, unsigned Op1, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, uint32_t Idx) {
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Idx);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addImm(Idx);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op) {
  return FastEmit_ri(VT, VT, ISD::AND, Op, 1);
}