//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
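// (As an illustrative, target-independent example: an IR add of a constant,
// such as "add i32 %x, 42", can usually be selected straight to an
// add-with-immediate machine instruction rather than first materializing 42
// into a register of its own.)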
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
40 //===----------------------------------------------------------------------===//
#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
          "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
          "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = 0;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

bool FastISel::LowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!FastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
         E = FuncInfo.Fn->arg_end(); I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[I] = VI->second;
  }
  return true;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
}

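/// hasTrivialKill - Return true if the register holding V can be marked
/// killed at its (sole) use: V must be defined by an instruction whose only
/// use is in the same basic block. No-op casts and all-zero-index GEPs defer
/// to the kill status of their operand.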
bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}

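/// getRegForValue - Look up or create the virtual register which is assigned
/// the given LLVM value, materializing constants and other local values on
/// demand.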
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      Reg = TargetMaterializeFloatZero(CF);
    } else {
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
    }

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

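/// lookUpRegForValue - Look up the value to see if its value is already
/// cached in a register. It may be defined by instructions across blocks or
/// defined locally.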
unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg+i] = Reg+i;

    AssignedReg = Reg;
  }
}

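/// getRegForGEPIndex - This is a wrapper around getRegForValue that also
/// takes care of truncating or sign-extending the given getelementptr
/// index value.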
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

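/// recomputeInsertPt - Reset InsertPt to prepare for inserting instructions
/// into the current block.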
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

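/// removeDeadCode - Remove all dead instructions between the I and E
/// iterators, and recompute the insert point.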
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert (I && E && std::distance(I, E) > 0 && "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

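/// enterLocalValueArea - Prepare InsertPt to begin inserting instructions
/// into the local value area and return the old insert position.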
FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

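/// leaveLocalValueArea - Reset InsertPt to the given old insert position.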
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

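/// SelectGetElementPtr - Select and emit code for a getelementptr
/// instruction, coalescing constant offsets into as few adds as possible.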
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += TD.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        // N = N + Offset
        TotalOffs +=
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (N == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

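/// SelectCall - Select and emit code for a call instruction. Simple inline
/// asms and a handful of intrinsics are handled here; all other calls are
/// left for the target or the SelectionDAG path.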
bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  ComputeUsesVAFloatArgument(*Call, &MMI);

  const Function *F = Call->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  switch (F->getIntrinsicID()) {
  default: break;
    // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
    return true;

  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
    DIVariable DIVar(DI->getVariable());
    assert((!DIVar || DIVar.isVariable()) &&
           "Variable in DbgDeclareInst should be either null or a DIVariable.");
    if (!DIVar ||
        !FuncInfo.MF->getMMI().hasDebugInfo()) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    unsigned Offset = 0;
    Optional<MachineOperand> Op;
    if (const Argument *Arg = dyn_cast<Argument>(Address))
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
    if (Offset)
      Op = MachineOperand::CreateFI(Offset);
    if (!Op)
      if (unsigned Reg = lookUpRegForValue(Address))
        Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op)
      if (Op->isReg()) {
        // Set the indirect flag if the type and the DIVariable's
        // indirect field are in disagreement: Indirectly-addressed
        // variables that are nonpointer types should be marked as
        // indirect, and VLAs should be marked as indirect even though
        // they are a pointer type.
        bool IsIndirect = DI->getAddress()->getType()->isPointerTy()
          ^ DIVar.isIndirect();
        Op->setIsDebug(true);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                TII.get(TargetOpcode::DBG_VALUE),
                IsIndirect, Op->getReg(), Offset, DI->getVariable());
      } else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                TII.get(TargetOpcode::DBG_VALUE)).addOperand(*Op).addImm(0)
          .addMetadata(DI->getVariable());
    else
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(Call);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addCImm(CI).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addImm(CI->getZExtValue()).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      bool IsIndirect = DI->getOffset() != 0;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect,
              Reg, DI->getOffset(), DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(Call->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (ResultReg == 0)
      return false;
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(Call->getArgOperand(0));
    if (ResultReg == 0)
      return false;
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  }

  // Usually, it does not make sense to initialize a value,
  // make an unrelated function call and use the value, because
  // it tends to be spilled on the stack. So, we move the pointer
  // to the last local value to the beginning of the block, so that
  // all the values which have already been materialized,
  // appear after the call. It also makes sense to skip intrinsics
  // since they tend to be inlined.
  if (!isa<IntrinsicInst>(Call))
    flushLocalValueMap();

  // An arbitrary call. Bail.
  return false;
}

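/// SelectCast - Select and emit code for a cast instruction, using the given
/// ISD opcode for the conversion.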
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

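/// SelectBitCast - Select and emit code for a bitcast, preferring a plain
/// register-to-register copy when source and destination share a register
/// class.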
bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

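/// SelectInstruction - Do "fast" instruction selection for the given LLVM IR
/// instruction, appending the generated machine instructions to the current
/// block on success.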
bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  MachineBasicBlock::iterator SavedInsertPt = FuncInfo.InsertPt;

  // As a special case, don't handle calls to builtin library functions that
  // may be translated directly to target instructions.
  if (const CallInst *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc::Func Func;
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;
  }

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    ++NumFastIselSuccessIndependent;
    DL = DebugLoc();
    return true;
  }
  // Remove dead code. However, ignore call instructions since we've flushed
  // the local value map and recomputed the insert point.
  if (!isa<CallInst>(I)) {
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
  }

  // Next, try calling the target to attempt to handle the instruction.
  SavedInsertPt = FuncInfo.InsertPt;
  if (TargetSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DL = DebugLoc();
    return true;
  }
  // Check for dead code and remove as necessary.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {

  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information if this is the only instruction
    // in the block then emit it, otherwise we have the unconditional
    // fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

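/// SelectExtractValue - Select and emit code for an extractvalue instruction;
/// the result register is computed as an offset from the aggregate's base
/// register.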
bool
FastISel::SelectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  UpdateValueMap(EVI, ResultReg);
  return true;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return SelectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo,
                   const TargetLibraryInfo *libInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getDataLayout()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()),
    LibInfo(libInfo) {
}

FastISel::~FastISel() {}

bool FastISel::FastLowerArguments() {
  return false;
}

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
                                        VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    assert (MaterialReg != 0 && "Unable to materialize imm.");
    if (MaterialReg == 0) return 0;
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

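/// createResultReg - Create a new virtual register in the given register
/// class to hold a result.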
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

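/// FastEmitInst_ - Emit a MachineInstr with no operands into the current
/// block and return its result register. The FastEmitInst_* variants that
/// follow do the same for the various operand shapes (registers, immediates,
/// FP immediates), copying out of an implicit def when the instruction has
/// no explicit definitions.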
unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill,
                                     uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

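/// FastEmitInst_extractsubreg - Emit a COPY extracting the given subregister
/// index from a (virtual) superregister, returning the result register.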
unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}

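/// tryToFoldLoad - Check whether the single-use load LI feeds FoldInst, and
/// if so ask the target to fold the load into that instruction. Returns true
/// on success.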
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->use_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->use_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (LoadReg == 0)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = &*RI;

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}