1 ///===-- FastISel.cpp - Implementation of the FastISel class --------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the implementation of the FastISel class.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Instructions.h"
15 #include "llvm/CodeGen/FastISel.h"
16 #include "llvm/CodeGen/MachineInstrBuilder.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/Target/TargetData.h"
19 #include "llvm/Target/TargetInstrInfo.h"
20 #include "llvm/Target/TargetLowering.h"
21 #include "llvm/Target/TargetMachine.h"
24 unsigned FastISel::getRegForValue(Value *V) {
// Materialize or look up the virtual register holding the value V.
// Returns 0 on failure (illegal type, unhandled constant kind, ...),
// which callers treat as "halt fast selection and bail".
// NOTE(review): this listing is elided (embedded source line numbers
// jump) -- e.g. the early return on a ValueMap hit and the declaration
// of the buffer `x` passed to convertToInteger are not visible here.
25 // Look up the value to see if we already have a register for it. We
26 // cache values defined by Instructions across blocks, and other values
27 // only locally. This is because Instructions already have the SSA
28 // def-dominates-use requirement enforced.
29 if (ValueMap.count(V))
// A nonzero Reg from the local map means V was already materialized in
// this block; zero means it still needs to be materialized below.
31 unsigned Reg = LocalValueMap[V];
35 MVT::SimpleValueType VT = TLI.getValueType(V->getType()).getSimpleVT();
37 // Ignore illegal types.
38 if (!TLI.isTypeLegal(VT)) {
39 // Promote MVT::i1 to a legal type though, because it's common and easy.
41 VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
// Dispatch on the kind of value being materialized.
46 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
47 if (CI->getValue().getActiveBits() <= 64)
48 Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
49 } else if (isa<AllocaInst>(V)) {
50 Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
51 } else if (isa<ConstantPointerNull>(V)) {
// A null pointer is materialized as the integer constant 0.
52 Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
53 } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
54 Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
// If the target couldn't emit the FP constant directly, try an integer
// constant converted with SINT_TO_FP -- this only helps FP values that
// convert to an integer exactly.
57 const APFloat &Flt = CF->getValueAPF();
58 MVT IntVT = TLI.getPointerTy();
61 uint32_t IntBitWidth = IntVT.getSizeInBits();
// Convoluted condition: `!status != opOK` evaluates to true exactly
// when convertToInteger returned APFloat::opOK (the conversion was
// exact), since opOK is the zero enumerator.
62 if (!Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
63 APFloat::rmTowardZero) != APFloat::opOK) {
64 APInt IntVal(IntBitWidth, 2, x);
66 unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
67 ISD::Constant, IntVal.getZExtValue());
69 Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
72 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
// Select the constant expression as if it were an instruction; its
// result register is then found in LocalValueMap.
73 if (!SelectOperator(CE, CE->getOpcode())) return 0;
74 Reg = LocalValueMap[CE];
75 } else if (isa<UndefValue>(V)) {
// Undef just needs *some* register; IMPLICIT_DEF defines one cheaply.
76 Reg = createResultReg(TLI.getRegClassFor(VT));
77 BuildMI(MBB, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
// Last resort for any constant: let the target materialize it (e.g.
// via a constant-pool load).
82 if (!Reg && isa<Constant>(V))
83 Reg = TargetMaterializeConstant(cast<Constant>(V));
85 // Don't cache constant materializations in the general ValueMap.
86 // To do so would require tracking what uses they dominate.
87 LocalValueMap[V] = Reg;
91 unsigned FastISel::lookUpRegForValue(Value *V) {
// Non-materializing variant of getRegForValue: only consults the two
// caches and never emits code. Returns 0 (DenseMap default) if V has no
// register yet. NOTE(review): the return on a ValueMap hit is elided
// from this listing.
92 // Look up the value to see if we already have a register for it. We
93 // cache values defined by Instructions across blocks, and other values
94 // only locally. This is because Instructions already have the SSA
95 // def-dominates-use requirement enforced.
96 if (ValueMap.count(V))
98 return LocalValueMap[V];
101 /// UpdateValueMap - Update the value map to include the new mapping for this
102 /// instruction, or insert an extra copy to get the result in a previous
103 /// determined register.
104 /// NOTE: This is only necessary because we might select a block that uses
105 /// a value before we select the block that defines the value. It might be
106 /// possible to fix this by selecting blocks in reverse postorder.
107 void FastISel::UpdateValueMap(Value* I, unsigned Reg) {
// Non-Instruction values (constants etc.) are only cached per-block.
108 if (!isa<Instruction>(I)) {
109 LocalValueMap[I] = Reg;
// If a register was already assigned to I (I was used before being
// selected), copy the new result into that previously chosen register
// rather than remapping. NOTE(review): the branch recording a
// first-time mapping is elided from this listing.
112 if (!ValueMap.count(I))
115 TII.copyRegToReg(*MBB, MBB->end(), ValueMap[I],
116 Reg, MRI.getRegClass(Reg), MRI.getRegClass(Reg));
119 /// SelectBinaryOp - Select and emit code for a binary operator instruction,
120 /// which has an opcode which directly corresponds to the given ISD opcode.
// Returns true if code was emitted; false halts "fast" selection.
// NOTE(review): several early-return statements are elided from this
// listing (the embedded source line numbers jump).
122 bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
123 MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
124 if (VT == MVT::Other || !VT.isSimple())
125 // Unhandled type. Halt "fast" selection and bail.
128 // We only handle legal types. For example, on x86-32 the instruction
129 // selector contains all of the 64-bit instructions from x86-64,
130 // under the assumption that i64 won't be used if the target doesn't
132 if (!TLI.isTypeLegal(VT)) {
133 // MVT::i1 is special. Allow AND and OR (but not XOR) because they
134 // don't require additional zeroing, which makes them easy.
136 (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR))
137 VT = TLI.getTypeToTransformTo(VT);
142 unsigned Op0 = getRegForValue(I->getOperand(0));
144 // Unhandled operand. Halt "fast" selection and bail.
// Prefer the register+immediate form when operand 1 is a constant;
// this avoids materializing the constant into a register.
147 // Check if the second operand is a constant and handle it appropriately.
148 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
149 unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
150 ISDOpcode, Op0, CI->getZExtValue());
151 if (ResultReg != 0) {
152 // We successfully emitted code for the given LLVM Instruction.
153 UpdateValueMap(I, ResultReg);
158 // Check if the second operand is a constant float.
159 if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
160 unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
162 if (ResultReg != 0) {
163 // We successfully emitted code for the given LLVM Instruction.
164 UpdateValueMap(I, ResultReg);
// General fallback: both operands in registers.
169 unsigned Op1 = getRegForValue(I->getOperand(1));
171 // Unhandled operand. Halt "fast" selection and bail.
174 // Now we have both operands in registers. Emit the instruction.
175 unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
176 ISDOpcode, Op0, Op1);
178 // Target-specific code wasn't able to find a machine opcode for
179 // the given ISD opcode and type. Halt "fast" selection and bail.
182 // We successfully emitted code for the given LLVM Instruction.
183 UpdateValueMap(I, ResultReg);
187 bool FastISel::SelectGetElementPtr(User *I) {
// Lower a GEP into a chain of ADD/MUL operations on the base pointer,
// walking each index operand. Returns false to halt "fast" selection.
// NOTE(review): the loop header's increment/body lines and several
// early returns are elided from this listing.
188 unsigned N = getRegForValue(I->getOperand(0));
190 // Unhandled operand. Halt "fast" selection and bail.
193 const Type *Ty = I->getOperand(0)->getType();
194 MVT::SimpleValueType VT = TLI.getPointerTy().getSimpleVT();
195 for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
// Struct field indices are always constant: add the field's offset
// from the target's struct layout.
198 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
199 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
202 uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
203 // FIXME: This can be optimized by combining the add with a
205 N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
207 // Unhandled operand. Halt "fast" selection and bail.
210 Ty = StTy->getElementType(Field);
212 Ty = cast<SequentialType>(Ty)->getElementType();
214 // If this is a constant subscript, handle it quickly.
215 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
// Index 0 contributes nothing to the address.
216 if (CI->getZExtValue() == 0) continue;
218 TD.getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
219 N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
221 // Unhandled operand. Halt "fast" selection and bail.
226 // N = N + Idx * ElementSize;
227 uint64_t ElementSize = TD.getABITypeSize(Ty);
228 unsigned IdxN = getRegForValue(Idx);
230 // Unhandled operand. Halt "fast" selection and bail.
233 // If the index is smaller or larger than intptr_t, truncate or extend
// GEP indices are signed, hence SIGN_EXTEND for widening.
235 MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
236 if (IdxVT.bitsLT(VT))
237 IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::SIGN_EXTEND, IdxN);
238 else if (IdxVT.bitsGT(VT))
239 IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::TRUNCATE, IdxN);
241 // Unhandled operand. Halt "fast" selection and bail.
// Scale by the element size unless it's a no-op (size 1).
244 if (ElementSize != 1) {
245 IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
247 // Unhandled operand. Halt "fast" selection and bail.
250 N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
252 // Unhandled operand. Halt "fast" selection and bail.
257 // We successfully emitted code for the given LLVM Instruction.
258 UpdateValueMap(I, N);
262 bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
// Emit a unary cast (zext/sext/trunc/FP conversions) via FastEmit_r.
// Returns false to halt "fast" selection when the types are illegal or
// the target has no matching opcode. NOTE(review): the failure check on
// ResultReg and the final return are elided from this listing.
263 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
264 MVT DstVT = TLI.getValueType(I->getType());
266 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
267 DstVT == MVT::Other || !DstVT.isSimple() ||
268 !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
269 // Unhandled type. Halt "fast" selection and bail.
272 unsigned InputReg = getRegForValue(I->getOperand(0));
274 // Unhandled operand. Halt "fast" selection and bail.
277 unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
284 UpdateValueMap(I, ResultReg);
288 bool FastISel::SelectBitCast(User *I) {
// Select a bitcast: a no-op when the types match, otherwise a reg-reg
// copy (same simple VT) or a BIT_CONVERT node. Returns false to halt
// "fast" selection. NOTE(review): some early returns and the
// InsertedCopy failure handling are elided from this listing.
289 // If the bitcast doesn't change the type, just use the operand value.
290 if (I->getType() == I->getOperand(0)->getType()) {
291 unsigned Reg = getRegForValue(I->getOperand(0));
294 UpdateValueMap(I, Reg);
298 // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
299 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
300 MVT DstVT = TLI.getValueType(I->getType());
302 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
303 DstVT == MVT::Other || !DstVT.isSimple() ||
304 !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
305 // Unhandled type. Halt "fast" selection and bail.
308 unsigned Op0 = getRegForValue(I->getOperand(0));
310 // Unhandled operand. Halt "fast" selection and bail.
313 // First, try to perform the bitcast by inserting a reg-reg copy.
314 unsigned ResultReg = 0;
315 if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
316 TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
317 TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
318 ResultReg = createResultReg(DstClass);
320 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
321 Op0, DstClass, SrcClass);
326 // If the reg-reg copy failed, select a BIT_CONVERT opcode.
328 ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
329 ISD::BIT_CONVERT, Op0);
334 UpdateValueMap(I, ResultReg);
// Select a single IR instruction by dispatching on its opcode.
339 FastISel::SelectInstruction(Instruction *I) {
340 return SelectOperator(I, I->getOpcode());
// Dispatch a User (Instruction or ConstantExpr) to the appropriate
// Select* helper based on its opcode. Returns false for anything
// unhandled, halting "fast" selection so SelectionDAG can take over.
// NOTE(review): the switch keyword, several return statements and
// closing braces are elided from this listing.
344 FastISel::SelectOperator(User *I, unsigned Opcode) {
// Add/Sub/Mul pick the FP or integer ISD opcode based on the type.
346 case Instruction::Add: {
347 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FADD : ISD::ADD;
348 return SelectBinaryOp(I, Opc);
350 case Instruction::Sub: {
351 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FSUB : ISD::SUB;
352 return SelectBinaryOp(I, Opc);
354 case Instruction::Mul: {
355 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FMUL : ISD::MUL;
356 return SelectBinaryOp(I, Opc);
358 case Instruction::SDiv:
359 return SelectBinaryOp(I, ISD::SDIV);
360 case Instruction::UDiv:
361 return SelectBinaryOp(I, ISD::UDIV);
362 case Instruction::FDiv:
363 return SelectBinaryOp(I, ISD::FDIV);
364 case Instruction::SRem:
365 return SelectBinaryOp(I, ISD::SREM);
366 case Instruction::URem:
367 return SelectBinaryOp(I, ISD::UREM);
368 case Instruction::FRem:
369 return SelectBinaryOp(I, ISD::FREM);
370 case Instruction::Shl:
371 return SelectBinaryOp(I, ISD::SHL);
372 case Instruction::LShr:
373 return SelectBinaryOp(I, ISD::SRL);
374 case Instruction::AShr:
375 return SelectBinaryOp(I, ISD::SRA);
376 case Instruction::And:
377 return SelectBinaryOp(I, ISD::AND);
378 case Instruction::Or:
379 return SelectBinaryOp(I, ISD::OR);
380 case Instruction::Xor:
381 return SelectBinaryOp(I, ISD::XOR);
383 case Instruction::GetElementPtr:
384 return SelectGetElementPtr(I);
386 case Instruction::Br: {
387 BranchInst *BI = cast<BranchInst>(I);
389 if (BI->isUnconditional()) {
390 MachineFunction::iterator NextMBB =
391 next(MachineFunction::iterator(MBB));
392 BasicBlock *LLVMSucc = BI->getSuccessor(0);
393 MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
// If the successor is the next block in layout order, no branch
// instruction is needed at all.
395 if (NextMBB != MF.end() && MSucc == NextMBB) {
396 // The unconditional fall-through case, which needs no instructions.
398 // The unconditional branch case.
399 TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
// Keep CFG successor info consistent either way.
401 MBB->addSuccessor(MSucc);
405 // Conditional branches are not handed yet.
406 // Halt "fast" selection and bail.
410 case Instruction::Unreachable:
414 case Instruction::PHI:
415 // PHI nodes are already emitted.
418 case Instruction::Alloca:
419 // FunctionLowering has the static-sized case covered.
420 if (StaticAllocaMap.count(cast<AllocaInst>(I)))
423 // Dynamic-sized alloca is not handled yet.
426 case Instruction::BitCast:
427 return SelectBitCast(I);
429 case Instruction::FPToSI:
430 return SelectCast(I, ISD::FP_TO_SINT);
431 case Instruction::ZExt:
432 return SelectCast(I, ISD::ZERO_EXTEND);
433 case Instruction::SExt:
434 return SelectCast(I, ISD::SIGN_EXTEND);
435 case Instruction::Trunc:
436 return SelectCast(I, ISD::TRUNCATE);
437 case Instruction::SIToFP:
438 return SelectCast(I, ISD::SINT_TO_FP);
// Pointer/integer conversions are a zext, trunc, or pure no-op
// depending on the relative bit widths.
440 case Instruction::IntToPtr: // Deliberate fall-through.
441 case Instruction::PtrToInt: {
442 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
443 MVT DstVT = TLI.getValueType(I->getType());
444 if (DstVT.bitsGT(SrcVT))
445 return SelectCast(I, ISD::ZERO_EXTEND);
446 if (DstVT.bitsLT(SrcVT))
447 return SelectCast(I, ISD::TRUNCATE);
// Same width: just reuse the operand's register.
448 unsigned Reg = getRegForValue(I->getOperand(0));
449 if (Reg == 0) return false;
450 UpdateValueMap(I, Reg);
455 // Unhandled instruction. Halt "fast" selection and bail.
// Constructor: caches references to the per-function state (maps shared
// with SelectionDAGISel) and the target hooks (instr info, lowering,
// data layout) used throughout selection. NOTE(review): several member
// initializers (e.g. for the map/MF/TM members) are elided from this
// listing.
460 FastISel::FastISel(MachineFunction &mf,
461 MachineModuleInfo *mmi,
462 DenseMap<const Value *, unsigned> &vm,
463 DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
464 DenseMap<const AllocaInst *, int> &am)
471 MRI(MF.getRegInfo()),
472 MFI(*MF.getFrameInfo()),
473 MCP(*MF.getConstantPool()),
475 TD(*TM.getTargetData()),
476 TII(*TM.getInstrInfo()),
477 TLI(*TM.getTargetLowering()) {
// Out-of-line, intentionally empty destructor.
480 FastISel::~FastISel() {}
// Default implementations of the FastEmit_* hooks. Targets override
// these (typically via tablegen'd code) to emit real instructions; the
// base versions report failure, which callers check for (a result of 0
// is treated as "unhandled" throughout this file). NOTE(review): the
// `return 0;` bodies and closing braces are elided from this listing.
482 unsigned FastISel::FastEmit_(MVT::SimpleValueType, MVT::SimpleValueType,
487 unsigned FastISel::FastEmit_r(MVT::SimpleValueType, MVT::SimpleValueType,
488 ISD::NodeType, unsigned /*Op0*/) {
492 unsigned FastISel::FastEmit_rr(MVT::SimpleValueType, MVT::SimpleValueType,
493 ISD::NodeType, unsigned /*Op0*/,
498 unsigned FastISel::FastEmit_i(MVT::SimpleValueType, MVT::SimpleValueType,
499 ISD::NodeType, uint64_t /*Imm*/) {
503 unsigned FastISel::FastEmit_f(MVT::SimpleValueType, MVT::SimpleValueType,
504 ISD::NodeType, ConstantFP * /*FPImm*/) {
508 unsigned FastISel::FastEmit_ri(MVT::SimpleValueType, MVT::SimpleValueType,
509 ISD::NodeType, unsigned /*Op0*/,
514 unsigned FastISel::FastEmit_rf(MVT::SimpleValueType, MVT::SimpleValueType,
515 ISD::NodeType, unsigned /*Op0*/,
516 ConstantFP * /*FPImm*/) {
520 unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
522 unsigned /*Op0*/, unsigned /*Op1*/,
527 /// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
528 /// to emit an instruction with an immediate operand using FastEmit_ri.
529 /// If that fails, it materializes the immediate into a register and try
530 /// FastEmit_rr instead.
531 unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
532 unsigned Op0, uint64_t Imm,
533 MVT::SimpleValueType ImmType) {
// NOTE(review): the success-return after this FastEmit_ri call is
// elided from this listing.
534 // First check if immediate type is legal. If not, we can't use the ri form.
535 unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
// ri form failed: materialize Imm into a register, then use the
// register+register form. 0 from FastEmit_i means "unhandled".
538 unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
539 if (MaterialReg == 0)
541 return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
544 /// FastEmit_rf_ - This method is a wrapper of FastEmit_ri. It first tries
545 /// to emit an instruction with a floating-point immediate operand using
546 /// FastEmit_rf. If that fails, it materializes the immediate into a register
547 /// and try FastEmit_rr instead.
548 unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
549 unsigned Op0, ConstantFP *FPImm,
550 MVT::SimpleValueType ImmType) {
// NOTE(review): the success-return after this FastEmit_rf call, the
// declaration of the buffer `x`, and some failure returns are elided
// from this listing.
551 // First check if immediate type is legal. If not, we can't use the rf form.
552 unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
556 // Materialize the constant in a register.
557 unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
558 if (MaterialReg == 0) {
559 // If the target doesn't have a way to directly enter a floating-point
560 // value into a register, use an alternate approach.
561 // TODO: The current approach only supports floating-point constants
562 // that can be constructed by conversion from integer values. This should
563 // be replaced by code that creates a load from a constant-pool entry,
564 // which will require some target-specific work.
565 const APFloat &Flt = FPImm->getValueAPF();
566 MVT IntVT = TLI.getPointerTy();
569 uint32_t IntBitWidth = IntVT.getSizeInBits();
// Bail out unless the FP value converts to an integer exactly
// (truncation toward zero must report opOK).
570 if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
571 APFloat::rmTowardZero) != APFloat::opOK)
573 APInt IntVal(IntBitWidth, 2, x);
575 unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
576 ISD::Constant, IntVal.getZExtValue());
// Convert the materialized integer to FP with SINT_TO_FP.
579 MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
580 ISD::SINT_TO_FP, IntegerReg);
581 if (MaterialReg == 0)
584 return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
// Allocate a fresh virtual register of the given class to hold a result.
587 unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
588 return MRI.createVirtualRegister(RC);
// Emit a machine instruction with no operands, defining a fresh virtual
// register of class RC. NOTE(review): the return statement is elided
// from this listing.
591 unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
592 const TargetRegisterClass* RC) {
593 unsigned ResultReg = createResultReg(RC);
594 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
596 BuildMI(MBB, II, ResultReg);
// Emit a one-register-operand machine instruction into a fresh virtual
// register. If the instruction has no explicit def, its result is an
// implicit def, so copy ImplicitDefs[0] into ResultReg instead.
// NOTE(review): the InsertedCopy failure handling and return are elided
// from this listing.
600 unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
601 const TargetRegisterClass *RC,
603 unsigned ResultReg = createResultReg(RC);
604 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
606 if (II.getNumDefs() >= 1)
607 BuildMI(MBB, II, ResultReg).addReg(Op0);
609 BuildMI(MBB, II).addReg(Op0);
610 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
611 II.ImplicitDefs[0], RC, RC);
// Emit a two-register-operand machine instruction into a fresh virtual
// register; falls back to copying the implicit def when the instruction
// has no explicit def. NOTE(review): failure handling/return elided.
619 unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
620 const TargetRegisterClass *RC,
621 unsigned Op0, unsigned Op1) {
622 unsigned ResultReg = createResultReg(RC);
623 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
625 if (II.getNumDefs() >= 1)
626 BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1);
628 BuildMI(MBB, II).addReg(Op0).addReg(Op1);
629 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
630 II.ImplicitDefs[0], RC, RC);
// Emit a register+immediate machine instruction into a fresh virtual
// register; falls back to copying the implicit def when the instruction
// has no explicit def. NOTE(review): failure handling/return elided.
637 unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
638 const TargetRegisterClass *RC,
639 unsigned Op0, uint64_t Imm) {
640 unsigned ResultReg = createResultReg(RC);
641 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
643 if (II.getNumDefs() >= 1)
644 BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Imm);
646 BuildMI(MBB, II).addReg(Op0).addImm(Imm);
647 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
648 II.ImplicitDefs[0], RC, RC);
// Emit a register+FP-immediate machine instruction into a fresh virtual
// register; falls back to copying the implicit def when the instruction
// has no explicit def. NOTE(review): failure handling/return elided.
655 unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
656 const TargetRegisterClass *RC,
657 unsigned Op0, ConstantFP *FPImm) {
658 unsigned ResultReg = createResultReg(RC);
659 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
661 if (II.getNumDefs() >= 1)
662 BuildMI(MBB, II, ResultReg).addReg(Op0).addFPImm(FPImm);
664 BuildMI(MBB, II).addReg(Op0).addFPImm(FPImm);
665 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
666 II.ImplicitDefs[0], RC, RC);
// Emit a register+register+immediate machine instruction into a fresh
// virtual register; falls back to copying the implicit def when the
// instruction has no explicit def. NOTE(review): failure handling/return
// elided.
673 unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
674 const TargetRegisterClass *RC,
675 unsigned Op0, unsigned Op1, uint64_t Imm) {
676 unsigned ResultReg = createResultReg(RC);
677 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
679 if (II.getNumDefs() >= 1)
680 BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
682 BuildMI(MBB, II).addReg(Op0).addReg(Op1).addImm(Imm);
683 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
684 II.ImplicitDefs[0], RC, RC);
// Emit an immediate-only machine instruction into a fresh virtual
// register; falls back to copying the implicit def when the instruction
// has no explicit def. NOTE(review): failure handling/return elided.
691 unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
692 const TargetRegisterClass *RC,
694 unsigned ResultReg = createResultReg(RC);
695 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
697 if (II.getNumDefs() >= 1)
698 BuildMI(MBB, II, ResultReg).addImm(Imm);
700 BuildMI(MBB, II).addImm(Imm);
701 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
702 II.ImplicitDefs[0], RC, RC);
// Emit an EXTRACT_SUBREG of Op0 at subregister index Idx, producing a
// register of the corresponding subregister class. Note Idx is 1-based
// here (hence the `+Idx-1` into subregclasses). NOTE(review): this
// definition continues past the end of this listing (the final
// failure-handling/return is not visible).
709 unsigned FastISel::FastEmitInst_extractsubreg(unsigned Op0, uint32_t Idx) {
710 const TargetRegisterClass* RC = MRI.getRegClass(Op0);
711 const TargetRegisterClass* SRC = *(RC->subregclasses_begin()+Idx-1);
713 unsigned ResultReg = createResultReg(SRC);
714 const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);
716 if (II.getNumDefs() >= 1)
717 BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Idx);
719 BuildMI(MBB, II).addReg(Op0).addImm(Idx);
720 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
721 II.ImplicitDefs[0], RC, RC);