//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
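// Illustrative sketch, not taken from this file: a target typically plugs
// into fast instruction selection by subclassing FastISel and overriding the
// hooks exercised below (TargetSelectInstruction and friends). The class
// name here is hypothetical; only the hook signatures come from this
// framework.
//
//   class MyTargetFastISel : public FastISel {       // hypothetical name
//   public:
//     explicit MyTargetFastISel(FunctionLoweringInfo &funcInfo)
//       : FastISel(funcInfo) {}
//     // Called when the target-independent selectors below give up on I.
//     virtual bool TargetSelectInstruction(const Instruction *I);
//     // Optional hooks for materializing constants and allocas.
//     virtual unsigned TargetMaterializeConstant(const Constant *C);
//     virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
//   };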
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Start out as null, meaning no local-value instructions have
  // been emitted.
  LastLocalValue = 0;

  // Advance the last local value past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    LastLocalValue = I;
    ++I;
  }
}
bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;

  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}
/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    // Try to emit the constant directly.
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}
unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}
/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    FuncInfo.RegFixups[AssignedReg] = Reg;

    AssignedReg = Reg;
  }

  return AssignedReg;
}
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}
FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)   // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}
bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getArgOperand(0))) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  const Function *F = Call->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      if (Arg->hasByValAttr()) {
        // Byval arguments' frame index is recorded during argument lowering.
        // Use this info directly.
        Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
        if (Offset)
          Reg = TRI.getFrameRegister(*FuncInfo.MF);
      }
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(Call);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)!=TargetLowering::Expand)
      break;

    assert(FuncInfo.MBB->isLandingPad() &&
           "Call to eh.exception not in landing pad!");
    unsigned Reg = TLI.getExceptionAddressRegister();
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand)
      break;
    if (FuncInfo.MBB->isLandingPad())
      AddCatchInfo(*Call, &FuncInfo.MF->getMMI(), FuncInfo.MBB);
    else {
#ifndef NDEBUG
      FuncInfo.CatchInfoLost.insert(Call);
#endif
      // FIXME: Mark exception selector register as live in. Hack for PR1508.
      unsigned Reg = TLI.getExceptionSelectorRegister();
      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
    }

    unsigned Reg = TLI.getExceptionSelectorRegister();
    EVT SrcVT = TLI.getPointerTy();
    const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);

    bool ResultRegIsKill = hasTrivialKill(Call);

    // Cast the register to the type of the selector.
    if (SrcVT.bitsGT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                             ResultReg, ResultRegIsKill);
    else if (SrcVT.bitsLT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                             ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
    if (ResultReg == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;

    UpdateValueMap(Call, ResultReg);

    return true;
  }
  }

  // An arbitrary call. Bail.
  return false;
}
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg, InputRegIsKill);
    if (!InputReg)
      return false;
    InputRegIsKill = true;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}
/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}
/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}
FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}
// The default FastEmit_* implementations fail; returning 0 tells the caller
// that no register was produced, so targets override only the forms they
// can handle.
unsigned FastISel::FastEmit_(MVT, MVT, unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT, unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT, unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT, unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT, unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT, unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}
/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}
unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}