//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
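
// For illustration only: a rough sketch of how a "-O0" selection driver is
// expected to drive this class. This is an assumption-laden outline, not the
// actual SelectionDAGISel code (names and details may differ):
//
//   FastISel *FastIS = TLI.createFastISel(...); // created by the target
//   for (each Instruction *I in a basic block)
//     if (!FastIS->SelectInstruction(I))
//       ; // fall back to the SelectionDAG selector for I
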
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "FunctionLoweringInfo.h"
using namespace llvm;
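
/// getRegForValue - Look up or create the virtual register for the given
/// value, materializing constants, static allocas, and undef values on
/// the fly if necessary. Returns 0 if the value cannot be handled.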
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  if (ValueMap.count(V))
    return ValueMap[V];
  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    // Try to emit the constant directly.
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode())) return 0;
    Reg = LocalValueMap[Op];
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0)
    LocalValueMap[V] = Reg;
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  if (ValueMap.count(V))
    return ValueMap[V];
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = ValueMap[I];
  if (AssignedReg == 0)
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
    TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
                     Reg, RegClass, RegClass);
  }
  return AssignedReg;
}
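
/// getRegForGEPIndex - This is a wrapper around getRegForValue that also
/// takes care of truncating or sign-extending the given getelementptr
/// index value to pointer width.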
unsigned FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return 0;

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT))
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  else if (IdxVT.bitsGT(PtrVT))
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  return IdxN;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}
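
/// SelectGetElementPtr - Select and emit code for a getelementptr instruction
/// by adding each index's scaled offset to the base pointer register.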
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*CI->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      unsigned IdxN = getRegForGEPIndex(Idx);
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}
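
/// SelectCall - Select and emit code for a call. Only a few intrinsics
/// (dbg_declare, dbg_value, eh_exception, and eh_selector) are handled
/// here; any other call causes "fast" selection to bail.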
bool FastISel::SelectCall(const User *I) {
  const Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIDescriptor::ValidDebugInfo(DI->getVariable(), CodeGenOpt::None) ||
        !MF.getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address)
      return true;
    if (isa<UndefValue>(Address))
      return true;
    const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
    // Don't handle byval struct arguments or VLAs, for example.
    if (!AI) break;
    DenseMap<const AllocaInst*, int>::iterator SI =
      StaticAllocaMap.find(AI);
    if (SI == StaticAllocaMap.end()) break; // VLAs.
    int FI = SI->second;
    if (!DI->getDebugLoc().isUnknown())
      MF.getMMI().setVariableDbgInfo(DI->getVariable(), FI, DI->getDebugLoc());

    // Building the map above is target independent. Generating DBG_VALUE
    // inline is target dependent; do this now.
    (void)TargetSelectInstruction(cast<Instruction>(I));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      // Insert an undef so we can see what we dropped.
      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                           Reg, RC, RC);
      assert(InsertedCopy && "Can't copy address registers!");
      (void)InsertedCopy; // Silence unused-variable warnings in release builds.
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (MBB->isLandingPad())
        AddCatchInfo(*cast<CallInst>(I), &MF.getMMI(), MBB);
      else {
#ifndef NDEBUG
        CatchInfoLost.insert(cast<CallInst>(I));
#endif
        // FIXME: Mark exception selector register as live in. Hack for PR1508.
        unsigned Reg = TLI.getExceptionSelectorRegister();
        if (Reg) MBB->addLiveIn(Reg);
      }

      unsigned Reg = TLI.getExceptionSelectorRegister();
      EVT SrcVT = TLI.getPointerTy();
      const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
                                           RC, RC);
      assert(InsertedCopy && "Can't copy address registers!");
      (void)InsertedCopy; // Silence unused-variable warnings in release builds.

      // Cast the register to the type of the selector.
      if (SrcVT.bitsGT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                               ResultReg);
      else if (SrcVT.bitsLT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                               ISD::SIGN_EXTEND, ResultReg);
      if (ResultReg == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      UpdateValueMap(I, ResultReg);

      return true;
    }
    }
    break;
  }
  }

  // An arbitrary call. Bail.
  return false;
}
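
/// SelectCast - Select and emit code for a cast instruction with the given
/// ISD opcode, handling i1 operands and results as special cases.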
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg);
    if (!InputReg)
      return false;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    ResultReg = createResultReg(DstClass);

    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         Op0, DstClass, SrcClass);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BIT_CONVERT, Op0);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
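
/// SelectInstruction - Do "fast" instruction selection for the given
/// LLVM IR instruction, trying target-independent selection first and
/// the target-specific hook second. Returns true on success.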
bool
FastISel::SelectInstruction(const Instruction *I) {
  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
  if (MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
  }
  MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BIT_CONVERT, OpReg);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR, IntReg,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BIT_CONVERT, IntResultReg);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
      FastEmitBranch(MSucc);
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(MachineFunction &mf,
                   DenseMap<const Value *, unsigned> &vm,
                   DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                   DenseMap<const AllocaInst *, int> &am
#ifndef NDEBUG
                   , SmallSet<const Instruction *, 8> &cil
#endif
                   )
  : MBB(0),
    ValueMap(vm),
    MBBMap(bm),
    StaticAllocaMap(am),
#ifndef NDEBUG
    CatchInfoLost(cil),
#endif
    MF(mf),
    MRI(MF.getRegInfo()),
    MFI(*MF.getFrameInfo()),
    MCP(*MF.getConstantPool()),
    TM(MF.getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()) {
}

FastISel::~FastISel() {}
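
// The FastEmit_* methods below are trivial default implementations that
// simply report failure by returning 0. Targets override them (largely
// with tablegen-generated selection code) to emit actual instructions
// for specific opcode and type combinations.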

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned, unsigned /*Op0*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, unsigned /*Op1*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, uint64_t Imm,
                                MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, const ConstantFP *FPImm,
                                MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}
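
// The FastEmitInst_* helpers below construct a MachineInstr with the given
// opcode and operands. If the instruction's descriptor has no explicit
// definitions, the result is instead copied out of the first implicit
// definition; if that copy cannot be inserted, 0 is returned.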

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0);
  else {
    BuildMI(MBB, DL, II).addReg(Op0);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, unsigned Op1) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addFPImm(FPImm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addFPImm(FPImm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, unsigned Op1, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, uint32_t Idx) {
  const TargetRegisterClass* RC = MRI.getRegClass(Op0);

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Idx);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addImm(Idx);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op) {
  return FastEmit_ri(VT, VT, ISD::AND, Op, 1);
}