//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the implementation of the FastISel class.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Instructions.h"
15 #include "llvm/CodeGen/FastISel.h"
16 #include "llvm/CodeGen/MachineInstrBuilder.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/Target/TargetData.h"
19 #include "llvm/Target/TargetInstrInfo.h"
20 #include "llvm/Target/TargetLowering.h"
21 #include "llvm/Target/TargetMachine.h"
24 unsigned FastISel::getRegForValue(Value *V) {
25 // Look up the value to see if we already have a register for it. We
26 // cache values defined by Instructions across blocks, and other values
27 // only locally. This is because Instructions already have the SSA
28 // def-dominatess-use requirement enforced.
29 if (ValueMap.count(V))
31 unsigned Reg = LocalValueMap[V];
35 MVT::SimpleValueType VT = TLI.getValueType(V->getType()).getSimpleVT();
37 // Ignore illegal types.
38 if (!TLI.isTypeLegal(VT)) {
39 // Promote MVT::i1 to a legal type though, because it's common and easy.
41 VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
46 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
47 if (CI->getValue().getActiveBits() > 64)
48 return TargetMaterializeConstant(CI);
49 // Don't cache constant materializations. To do so would require
50 // tracking what uses they dominate.
51 Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
52 } else if (isa<GlobalValue>(V)) {
53 return TargetMaterializeConstant(cast<Constant>(V));
54 } else if (isa<AllocaInst>(V)) {
55 return TargetMaterializeAlloca(cast<AllocaInst>(V));
56 } else if (isa<ConstantPointerNull>(V)) {
57 Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
58 } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
59 Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
62 const APFloat &Flt = CF->getValueAPF();
63 MVT IntVT = TLI.getPointerTy();
66 uint32_t IntBitWidth = IntVT.getSizeInBits();
67 if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
68 APFloat::rmTowardZero) != APFloat::opOK)
69 return TargetMaterializeConstant(CF);
70 APInt IntVal(IntBitWidth, 2, x);
72 unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
73 ISD::Constant, IntVal.getZExtValue());
75 return TargetMaterializeConstant(CF);
76 Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
78 return TargetMaterializeConstant(CF);
80 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
81 if (!SelectOperator(CE, CE->getOpcode())) return 0;
82 Reg = LocalValueMap[CE];
83 } else if (isa<UndefValue>(V)) {
84 Reg = createResultReg(TLI.getRegClassFor(VT));
85 BuildMI(MBB, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
90 if (!Reg && isa<Constant>(V))
91 return TargetMaterializeConstant(cast<Constant>(V));
93 LocalValueMap[V] = Reg;
97 unsigned FastISel::lookUpRegForValue(Value *V) {
98 // Look up the value to see if we already have a register for it. We
99 // cache values defined by Instructions across blocks, and other values
100 // only locally. This is because Instructions already have the SSA
101 // def-dominatess-use requirement enforced.
102 if (ValueMap.count(V))
104 return LocalValueMap[V];
107 /// UpdateValueMap - Update the value map to include the new mapping for this
108 /// instruction, or insert an extra copy to get the result in a previous
109 /// determined register.
110 /// NOTE: This is only necessary because we might select a block that uses
111 /// a value before we select the block that defines the value. It might be
112 /// possible to fix this by selecting blocks in reverse postorder.
113 void FastISel::UpdateValueMap(Value* I, unsigned Reg) {
114 if (!isa<Instruction>(I)) {
115 LocalValueMap[I] = Reg;
118 if (!ValueMap.count(I))
121 TII.copyRegToReg(*MBB, MBB->end(), ValueMap[I],
122 Reg, MRI.getRegClass(Reg), MRI.getRegClass(Reg));
125 /// SelectBinaryOp - Select and emit code for a binary operator instruction,
126 /// which has an opcode which directly corresponds to the given ISD opcode.
128 bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
129 MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
130 if (VT == MVT::Other || !VT.isSimple())
131 // Unhandled type. Halt "fast" selection and bail.
134 // We only handle legal types. For example, on x86-32 the instruction
135 // selector contains all of the 64-bit instructions from x86-64,
136 // under the assumption that i64 won't be used if the target doesn't
138 if (!TLI.isTypeLegal(VT)) {
139 // MVT::i1 is special. Allow AND and OR (but not XOR) because they
140 // don't require additional zeroing, which makes them easy.
142 (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR))
143 VT = TLI.getTypeToTransformTo(VT);
148 unsigned Op0 = getRegForValue(I->getOperand(0));
150 // Unhandled operand. Halt "fast" selection and bail.
153 // Check if the second operand is a constant and handle it appropriately.
154 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
155 unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
156 ISDOpcode, Op0, CI->getZExtValue());
157 if (ResultReg != 0) {
158 // We successfully emitted code for the given LLVM Instruction.
159 UpdateValueMap(I, ResultReg);
164 // Check if the second operand is a constant float.
165 if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
166 unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
168 if (ResultReg != 0) {
169 // We successfully emitted code for the given LLVM Instruction.
170 UpdateValueMap(I, ResultReg);
175 unsigned Op1 = getRegForValue(I->getOperand(1));
177 // Unhandled operand. Halt "fast" selection and bail.
180 // Now we have both operands in registers. Emit the instruction.
181 unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
182 ISDOpcode, Op0, Op1);
184 // Target-specific code wasn't able to find a machine opcode for
185 // the given ISD opcode and type. Halt "fast" selection and bail.
188 // We successfully emitted code for the given LLVM Instruction.
189 UpdateValueMap(I, ResultReg);
193 bool FastISel::SelectGetElementPtr(User *I) {
194 unsigned N = getRegForValue(I->getOperand(0));
196 // Unhandled operand. Halt "fast" selection and bail.
199 const Type *Ty = I->getOperand(0)->getType();
200 MVT::SimpleValueType VT = TLI.getPointerTy().getSimpleVT();
201 for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
204 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
205 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
208 uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
209 // FIXME: This can be optimized by combining the add with a
211 N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
213 // Unhandled operand. Halt "fast" selection and bail.
216 Ty = StTy->getElementType(Field);
218 Ty = cast<SequentialType>(Ty)->getElementType();
220 // If this is a constant subscript, handle it quickly.
221 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
222 if (CI->getZExtValue() == 0) continue;
224 TD.getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
225 N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
227 // Unhandled operand. Halt "fast" selection and bail.
232 // N = N + Idx * ElementSize;
233 uint64_t ElementSize = TD.getABITypeSize(Ty);
234 unsigned IdxN = getRegForValue(Idx);
236 // Unhandled operand. Halt "fast" selection and bail.
239 // If the index is smaller or larger than intptr_t, truncate or extend
241 MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
242 if (IdxVT.bitsLT(VT))
243 IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::SIGN_EXTEND, IdxN);
244 else if (IdxVT.bitsGT(VT))
245 IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::TRUNCATE, IdxN);
247 // Unhandled operand. Halt "fast" selection and bail.
250 if (ElementSize != 1) {
251 IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
253 // Unhandled operand. Halt "fast" selection and bail.
256 N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
258 // Unhandled operand. Halt "fast" selection and bail.
263 // We successfully emitted code for the given LLVM Instruction.
264 UpdateValueMap(I, N);
268 bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
269 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
270 MVT DstVT = TLI.getValueType(I->getType());
272 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
273 DstVT == MVT::Other || !DstVT.isSimple() ||
274 !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
275 // Unhandled type. Halt "fast" selection and bail.
278 unsigned InputReg = getRegForValue(I->getOperand(0));
280 // Unhandled operand. Halt "fast" selection and bail.
283 unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
290 UpdateValueMap(I, ResultReg);
294 bool FastISel::SelectBitCast(User *I) {
295 // If the bitcast doesn't change the type, just use the operand value.
296 if (I->getType() == I->getOperand(0)->getType()) {
297 unsigned Reg = getRegForValue(I->getOperand(0));
300 UpdateValueMap(I, Reg);
304 // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
305 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
306 MVT DstVT = TLI.getValueType(I->getType());
308 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
309 DstVT == MVT::Other || !DstVT.isSimple() ||
310 !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
311 // Unhandled type. Halt "fast" selection and bail.
314 unsigned Op0 = getRegForValue(I->getOperand(0));
316 // Unhandled operand. Halt "fast" selection and bail.
319 // First, try to perform the bitcast by inserting a reg-reg copy.
320 unsigned ResultReg = 0;
321 if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
322 TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
323 TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
324 ResultReg = createResultReg(DstClass);
326 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
327 Op0, DstClass, SrcClass);
332 // If the reg-reg copy failed, select a BIT_CONVERT opcode.
334 ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
335 ISD::BIT_CONVERT, Op0);
340 UpdateValueMap(I, ResultReg);
345 FastISel::SelectInstruction(Instruction *I) {
346 return SelectOperator(I, I->getOpcode());
350 FastISel::SelectOperator(User *I, unsigned Opcode) {
352 case Instruction::Add: {
353 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FADD : ISD::ADD;
354 return SelectBinaryOp(I, Opc);
356 case Instruction::Sub: {
357 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FSUB : ISD::SUB;
358 return SelectBinaryOp(I, Opc);
360 case Instruction::Mul: {
361 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FMUL : ISD::MUL;
362 return SelectBinaryOp(I, Opc);
364 case Instruction::SDiv:
365 return SelectBinaryOp(I, ISD::SDIV);
366 case Instruction::UDiv:
367 return SelectBinaryOp(I, ISD::UDIV);
368 case Instruction::FDiv:
369 return SelectBinaryOp(I, ISD::FDIV);
370 case Instruction::SRem:
371 return SelectBinaryOp(I, ISD::SREM);
372 case Instruction::URem:
373 return SelectBinaryOp(I, ISD::UREM);
374 case Instruction::FRem:
375 return SelectBinaryOp(I, ISD::FREM);
376 case Instruction::Shl:
377 return SelectBinaryOp(I, ISD::SHL);
378 case Instruction::LShr:
379 return SelectBinaryOp(I, ISD::SRL);
380 case Instruction::AShr:
381 return SelectBinaryOp(I, ISD::SRA);
382 case Instruction::And:
383 return SelectBinaryOp(I, ISD::AND);
384 case Instruction::Or:
385 return SelectBinaryOp(I, ISD::OR);
386 case Instruction::Xor:
387 return SelectBinaryOp(I, ISD::XOR);
389 case Instruction::GetElementPtr:
390 return SelectGetElementPtr(I);
392 case Instruction::Br: {
393 BranchInst *BI = cast<BranchInst>(I);
395 if (BI->isUnconditional()) {
396 MachineFunction::iterator NextMBB =
397 next(MachineFunction::iterator(MBB));
398 BasicBlock *LLVMSucc = BI->getSuccessor(0);
399 MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
401 if (NextMBB != MF.end() && MSucc == NextMBB) {
402 // The unconditional fall-through case, which needs no instructions.
404 // The unconditional branch case.
405 TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
407 MBB->addSuccessor(MSucc);
411 // Conditional branches are not handed yet.
412 // Halt "fast" selection and bail.
416 case Instruction::Unreachable:
420 case Instruction::PHI:
421 // PHI nodes are already emitted.
424 case Instruction::Alloca:
425 // FunctionLowering has the static-sized case covered.
426 if (StaticAllocaMap.count(cast<AllocaInst>(I)))
429 // Dynamic-sized alloca is not handled yet.
432 case Instruction::BitCast:
433 return SelectBitCast(I);
435 case Instruction::FPToSI:
436 return SelectCast(I, ISD::FP_TO_SINT);
437 case Instruction::ZExt:
438 return SelectCast(I, ISD::ZERO_EXTEND);
439 case Instruction::SExt:
440 return SelectCast(I, ISD::SIGN_EXTEND);
441 case Instruction::Trunc:
442 return SelectCast(I, ISD::TRUNCATE);
443 case Instruction::SIToFP:
444 return SelectCast(I, ISD::SINT_TO_FP);
446 case Instruction::IntToPtr: // Deliberate fall-through.
447 case Instruction::PtrToInt: {
448 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
449 MVT DstVT = TLI.getValueType(I->getType());
450 if (DstVT.bitsGT(SrcVT))
451 return SelectCast(I, ISD::ZERO_EXTEND);
452 if (DstVT.bitsLT(SrcVT))
453 return SelectCast(I, ISD::TRUNCATE);
454 unsigned Reg = getRegForValue(I->getOperand(0));
455 if (Reg == 0) return false;
456 UpdateValueMap(I, Reg);
461 // Unhandled instruction. Halt "fast" selection and bail.
466 FastISel::FastISel(MachineFunction &mf,
467 DenseMap<const Value *, unsigned> &vm,
468 DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
469 DenseMap<const AllocaInst *, int> &am)
475 MRI(MF.getRegInfo()),
476 MFI(*MF.getFrameInfo()),
477 MCP(*MF.getConstantPool()),
479 TD(*TM.getTargetData()),
480 TII(*TM.getInstrInfo()),
481 TLI(*TM.getTargetLowering()) {
// Out-of-line empty destructor; FastISel holds only references and maps
// owned elsewhere, so there is nothing to release here.
FastISel::~FastISel() {}
486 unsigned FastISel::FastEmit_(MVT::SimpleValueType, MVT::SimpleValueType,
491 unsigned FastISel::FastEmit_r(MVT::SimpleValueType, MVT::SimpleValueType,
492 ISD::NodeType, unsigned /*Op0*/) {
496 unsigned FastISel::FastEmit_rr(MVT::SimpleValueType, MVT::SimpleValueType,
497 ISD::NodeType, unsigned /*Op0*/,
502 unsigned FastISel::FastEmit_i(MVT::SimpleValueType, MVT::SimpleValueType,
503 ISD::NodeType, uint64_t /*Imm*/) {
507 unsigned FastISel::FastEmit_f(MVT::SimpleValueType, MVT::SimpleValueType,
508 ISD::NodeType, ConstantFP * /*FPImm*/) {
512 unsigned FastISel::FastEmit_ri(MVT::SimpleValueType, MVT::SimpleValueType,
513 ISD::NodeType, unsigned /*Op0*/,
518 unsigned FastISel::FastEmit_rf(MVT::SimpleValueType, MVT::SimpleValueType,
519 ISD::NodeType, unsigned /*Op0*/,
520 ConstantFP * /*FPImm*/) {
524 unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
526 unsigned /*Op0*/, unsigned /*Op1*/,
531 /// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
532 /// to emit an instruction with an immediate operand using FastEmit_ri.
533 /// If that fails, it materializes the immediate into a register and try
534 /// FastEmit_rr instead.
535 unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
536 unsigned Op0, uint64_t Imm,
537 MVT::SimpleValueType ImmType) {
538 // First check if immediate type is legal. If not, we can't use the ri form.
539 unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
542 unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
543 if (MaterialReg == 0)
545 return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
548 /// FastEmit_rf_ - This method is a wrapper of FastEmit_ri. It first tries
549 /// to emit an instruction with a floating-point immediate operand using
550 /// FastEmit_rf. If that fails, it materializes the immediate into a register
551 /// and try FastEmit_rr instead.
552 unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
553 unsigned Op0, ConstantFP *FPImm,
554 MVT::SimpleValueType ImmType) {
555 // First check if immediate type is legal. If not, we can't use the rf form.
556 unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
560 // Materialize the constant in a register.
561 unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
562 if (MaterialReg == 0) {
563 // If the target doesn't have a way to directly enter a floating-point
564 // value into a register, use an alternate approach.
565 // TODO: The current approach only supports floating-point constants
566 // that can be constructed by conversion from integer values. This should
567 // be replaced by code that creates a load from a constant-pool entry,
568 // which will require some target-specific work.
569 const APFloat &Flt = FPImm->getValueAPF();
570 MVT IntVT = TLI.getPointerTy();
573 uint32_t IntBitWidth = IntVT.getSizeInBits();
574 if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
575 APFloat::rmTowardZero) != APFloat::opOK)
577 APInt IntVal(IntBitWidth, 2, x);
579 unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
580 ISD::Constant, IntVal.getZExtValue());
583 MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
584 ISD::SINT_TO_FP, IntegerReg);
585 if (MaterialReg == 0)
588 return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
591 unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
592 return MRI.createVirtualRegister(RC);
595 unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
596 const TargetRegisterClass* RC) {
597 unsigned ResultReg = createResultReg(RC);
598 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
600 BuildMI(MBB, II, ResultReg);
604 unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
605 const TargetRegisterClass *RC,
607 unsigned ResultReg = createResultReg(RC);
608 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
610 if (II.getNumDefs() >= 1)
611 BuildMI(MBB, II, ResultReg).addReg(Op0);
613 BuildMI(MBB, II).addReg(Op0);
614 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
615 II.ImplicitDefs[0], RC, RC);
623 unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
624 const TargetRegisterClass *RC,
625 unsigned Op0, unsigned Op1) {
626 unsigned ResultReg = createResultReg(RC);
627 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
629 if (II.getNumDefs() >= 1)
630 BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1);
632 BuildMI(MBB, II).addReg(Op0).addReg(Op1);
633 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
634 II.ImplicitDefs[0], RC, RC);
641 unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
642 const TargetRegisterClass *RC,
643 unsigned Op0, uint64_t Imm) {
644 unsigned ResultReg = createResultReg(RC);
645 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
647 if (II.getNumDefs() >= 1)
648 BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Imm);
650 BuildMI(MBB, II).addReg(Op0).addImm(Imm);
651 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
652 II.ImplicitDefs[0], RC, RC);
659 unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
660 const TargetRegisterClass *RC,
661 unsigned Op0, ConstantFP *FPImm) {
662 unsigned ResultReg = createResultReg(RC);
663 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
665 if (II.getNumDefs() >= 1)
666 BuildMI(MBB, II, ResultReg).addReg(Op0).addFPImm(FPImm);
668 BuildMI(MBB, II).addReg(Op0).addFPImm(FPImm);
669 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
670 II.ImplicitDefs[0], RC, RC);
677 unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
678 const TargetRegisterClass *RC,
679 unsigned Op0, unsigned Op1, uint64_t Imm) {
680 unsigned ResultReg = createResultReg(RC);
681 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
683 if (II.getNumDefs() >= 1)
684 BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
686 BuildMI(MBB, II).addReg(Op0).addReg(Op1).addImm(Imm);
687 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
688 II.ImplicitDefs[0], RC, RC);
695 unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
696 const TargetRegisterClass *RC,
698 unsigned ResultReg = createResultReg(RC);
699 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
701 if (II.getNumDefs() >= 1)
702 BuildMI(MBB, II, ResultReg).addImm(Imm);
704 BuildMI(MBB, II).addImm(Imm);
705 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
706 II.ImplicitDefs[0], RC, RC);
/// FastEmitInst_extractsubreg - Emit an EXTRACT_SUBREG extracting
/// subregister index Idx from virtual register Op0, returning a fresh
/// virtual register in the subregister's class.
/// NOTE(review): this definition continues past the end of this chunk;
/// the trailing cleanup/return is not visible here.
unsigned FastISel::FastEmitInst_extractsubreg(unsigned Op0, uint32_t Idx) {
  // Subregister class table is indexed from Idx-1 because subregister
  // indices are 1-based.
  const TargetRegisterClass* RC = MRI.getRegClass(Op0);
  const TargetRegisterClass* SRC = *(RC->subregclasses_begin()+Idx-1);

  unsigned ResultReg = createResultReg(SRC);
  const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Idx);
  // NOTE(review): the other FastEmitInst_* helpers wrap the following two
  // statements in an `else {` block; an `else` appears to be missing here in
  // this listing — as written, both BuildMI calls execute. Confirm against
  // the canonical source.
  BuildMI(MBB, II).addReg(Op0).addImm(Idx);
  bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                       II.ImplicitDefs[0], RC, RC);