//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
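//
// Throughout this file the FastEmit_* helpers follow a simple operand-naming
// convention: "r" denotes a register operand, "i" an integer immediate, and
// "f" a floating-point immediate, so FastEmit_ri takes a register plus an
// integer immediate, FastEmit_rr two registers, and so on. Anything this
// target-independent code cannot handle is deferred to the target through
// hooks such as TargetSelectInstruction, TargetMaterializeConstant, and
// TargetMaterializeAlloca, which a target-specific FastISel subclass
// overrides.
//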
//===----------------------------------------------------------------------===//

#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Start out as null, meaning no local-value instructions have
  // been emitted.
  LastLocalValue = 0;

  // Advance the last local value past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    LastLocalValue = I;
    ++I;
  }
}

bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}
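
/// getRegForValue - Return the virtual register that holds the given value,
/// creating one on demand: values defined by instructions are simply assigned
/// a register here and materialized when the defining instruction is
/// selected, while constants and other non-instruction values are
/// materialized into the local-value area of the current block.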
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end()) {
    unsigned Reg = I->second;
    return Reg;
  }
  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local-value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    // Try to emit the constant directly.
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    FuncInfo.RegFixups[AssignedReg] = Reg;
    AssignedReg = Reg;
  }
  return AssignedReg;
}
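
/// getRegForGEPIndex - This is a wrapper around getRegForValue that also
/// takes care of truncating or sign-extending the given getelementptr index
/// value to the pointer width. For example, an i32 index is sign-extended
/// before SelectGetElementPtr scales it by the element size and adds it to
/// the base register.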
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill,
                                     CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}
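
// As a rough illustration of the GEP lowering below: each constant struct
// field index contributes a fixed byte offset that is added to the base
// register, while an array-style index is (if needed) scaled by the element's
// allocation size and then added, i.e. N = N + Idx * ElementSize.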
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

bool FastISel::SelectCall(const User *I) {
  const Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      if (Arg->hasByValAttr()) {
        // Byval arguments' frame index is recorded during argument lowering.
        // Use this info directly.
        Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
        if (Offset)
          Reg = TRI.getFrameRegister(*FuncInfo.MF);
      }
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(FuncInfo.MBB->isLandingPad() &&
             "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Reg);
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (FuncInfo.MBB->isLandingPad())
        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
      else {
#ifndef NDEBUG
        FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
        // FIXME: Mark exception selector register as live in. Hack for PR1508.
        unsigned Reg = TLI.getExceptionSelectorRegister();
        if (Reg) FuncInfo.MBB->addLiveIn(Reg);
      }

      unsigned Reg = TLI.getExceptionSelectorRegister();
      EVT SrcVT = TLI.getPointerTy();
      const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Reg);

      bool ResultRegIsKill = hasTrivialKill(I);

      // Cast the register to the type of the selector.
      if (SrcVT.bitsGT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                               ResultReg, ResultRegIsKill);
      else if (SrcVT.bitsLT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                               ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
      if (ResultReg == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  }

  // An arbitrary call. Bail.
  return false;
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg, InputRegIsKill);
    if (!InputReg)
      return false;
    InputRegIsKill = true;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}

// Default implementations of the FastEmit_* hooks. A target overrides the
// ones it supports; returning 0 means the requested operation is not handled.

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper around FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}
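
// For example, SelectGetElementPtr above uses this wrapper to add a constant
// offset to a base pointer register:
//
//   N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
//
// If the target has no reg+imm ADD for VT, the offset is materialized into a
// register and a reg+reg ADD is emitted instead.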

/// FastEmit_rf_ - This method is a wrapper around FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                const ConstantFP *FPImm, MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, Op0IsKill, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg, /*Kill=*/true);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    // The instruction has no explicit def; its result lands in the first
    // implicitly defined physical register, so copy it into ResultReg.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
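
// For example, SelectCast uses this when lowering 'zext i1 %b to i32': the i1
// value is widened simply by ANDing its register with 1, which clears every
// bit except the least significant one.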

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}