1 //===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the X86-specific support for the FastISel class. Much
11 // of the target-specific code is generated by tablegen in the file
12 // X86GenFastISel.inc, which is #included here.
14 //===----------------------------------------------------------------------===//
17 #include "X86InstrBuilder.h"
18 #include "X86ISelLowering.h"
19 #include "X86RegisterInfo.h"
20 #include "X86Subtarget.h"
21 #include "X86TargetMachine.h"
22 #include "llvm/Instructions.h"
23 #include "llvm/DerivedTypes.h"
24 #include "llvm/CodeGen/FastISel.h"
25 #include "llvm/CodeGen/MachineConstantPool.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
/// X86FastISel - X86-specific fast instruction selector. Derives from the
/// target-independent FastISel base class; per-instruction selection helpers
/// are declared below. NOTE(review): this listing embeds original line
/// numbers and elides interior lines (access specifiers, closing braces) --
/// only visible members are documented.
30 class X86FastISel : public FastISel {
31 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
32 /// make the right decision when generating code for different targets.
33 const X86Subtarget *Subtarget;
// Constructor: forwards the function and value/block maps to FastISel, then
// caches the subtarget from the owning TargetMachine (TM is a FastISel member).
36 explicit X86FastISel(MachineFunction &mf,
37 DenseMap<const Value *, unsigned> &vm,
38 DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
39 : FastISel(mf, vm, bm) {
40 Subtarget = &TM.getSubtarget<X86Subtarget>();
/// TargetSelectInstruction - entry point invoked for each IR instruction;
/// dispatches to the X86Select* helpers below (definition near end of file).
43 virtual bool TargetSelectInstruction(Instruction *I);
// Tablegen-generated matcher is textually included into the class body.
45 #include "X86GenFastISel.inc"
// Emit a load of type VT from either register address Op0 or global V;
// returns the result register through RR.
48 bool X86FastEmitLoad(MVT VT, unsigned Op0, Value *V, unsigned &RR);
// Emit a store of register Op0 (type VT) to register address Op1 or global V.
50 bool X86FastEmitStore(MVT VT, unsigned Op0, unsigned Op1, Value *V);
// Materialize a constant address (currently only GlobalValues) into Op0.
52 bool X86SelectConstAddr(Value *V, unsigned &Op0);
// One selector per IR opcode family handled outside the tablegen matcher.
54 bool X86SelectLoad(Instruction *I);
56 bool X86SelectStore(Instruction *I);
58 bool X86SelectCmp(Instruction *I);
60 bool X86SelectZExt(Instruction *I);
62 bool X86SelectBranch(Instruction *I);
64 bool X86SelectShift(Instruction *I);
66 bool X86SelectSelect(Instruction *I);
68 bool X86SelectTrunc(Instruction *I);
// Returns the virtual register holding constant C, or 0 on failure.
70 unsigned TargetMaterializeConstant(Constant *C, MachineConstantPool* MCP);
73 /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
74 /// The address is either pre-computed, i.e. Op0, or a GlobalAddress, i.e. V.
75 /// Return true and the result register by reference if it is possible.
76 bool X86FastISel::X86FastEmitLoad(MVT VT, unsigned Op0, Value *V,
77 unsigned &ResultReg) {
78 // Get opcode and regclass of the output for the given load instruction.
80 const TargetRegisterClass *RC = NULL;
// NOTE(review): the case labels and the MOVrm opcode assignments for this
// switch are elided in this listing; only the register-class choices remain.
81 switch (VT.getSimpleVT()) {
82 default: return false;
85 RC = X86::GR8RegisterClass;
89 RC = X86::GR16RegisterClass;
93 RC = X86::GR32RegisterClass;
96 // Must be in x86-64 mode.
98 RC = X86::GR64RegisterClass;
// f32: use an XMM register when SSE1 is available, otherwise the x87 stack.
101 if (Subtarget->hasSSE1()) {
103 RC = X86::FR32RegisterClass;
106 RC = X86::RFP32RegisterClass;
// f64: use an XMM register when SSE2 is available, otherwise the x87 stack.
110 if (Subtarget->hasSSE2()) {
112 RC = X86::FR64RegisterClass;
115 RC = X86::RFP64RegisterClass;
// f80 always lives on the x87 stack.
120 RC = X86::RFP80RegisterClass;
124 ResultReg = createResultReg(RC);
127 // Address is in register.
// Global-address path: AM is an X86 addressing-mode struct (its declaration
// and the register-address branch are elided from this listing).
130 AM.GV = cast<GlobalValue>(V);
131 addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
135 /// X86FastEmitStore - Emit a machine instruction to store a value Op0 of
136 /// type VT. The address is either pre-computed, i.e. Op1, or a GlobalAddress,
137 /// i.e. V. Return true if it is possible.
139 X86FastISel::X86FastEmitStore(MVT VT, unsigned Op0, unsigned Op1, Value *V) {
140 // Get opcode and regclass of the output for the given load instruction.
142 const TargetRegisterClass *RC = NULL;
// NOTE(review): case labels and MOVmr opcode assignments elided; structure
// mirrors X86FastEmitLoad above (integer widths, then SSE-vs-x87 for FP).
143 switch (VT.getSimpleVT()) {
144 default: return false;
147 RC = X86::GR8RegisterClass;
151 RC = X86::GR16RegisterClass;
155 RC = X86::GR32RegisterClass;
158 // Must be in x86-64 mode.
160 RC = X86::GR64RegisterClass;
163 if (Subtarget->hasSSE1()) {
165 RC = X86::FR32RegisterClass;
168 RC = X86::RFP32RegisterClass;
172 if (Subtarget->hasSSE2()) {
174 RC = X86::FR64RegisterClass;
177 RC = X86::RFP64RegisterClass;
182 RC = X86::RFP80RegisterClass;
188 // Address is in register.
// Global-address path: store Op0 to the full address AM (AM's declaration
// and the register-address branch are elided from this listing).
191 AM.GV = cast<GlobalValue>(V);
192 addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Op0);
196 /// X86SelectConstAddr - Select and emit code to materialize constant address.
// Returns the materialized address register through the (elided) Op0
// reference parameter; bails for anything that is not a GlobalValue.
198 bool X86FastISel::X86SelectConstAddr(Value *V,
200 // FIXME: Only GlobalAddress for now.
201 GlobalValue *GV = dyn_cast<GlobalValue>(V);
// Globals that must be reached through an indirection (e.g. DSO stubs) need
// an extra pointer-sized load of the stub slot.
205 if (Subtarget->GVRequiresExtraLoad(GV, TM, false)) {
206 // Issue load from stub if necessary.
208 const TargetRegisterClass *RC = NULL;
// Pointer-width load: 32-bit vs 64-bit register class (MOV opcode
// assignments are elided from this listing).
209 if (TLI.getPointerTy() == MVT::i32) {
211 RC = X86::GR32RegisterClass;
214 RC = X86::GR64RegisterClass;
216 Op0 = createResultReg(RC);
219 addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
220 // Prevent loading GV stub multiple times in same MBB.
221 LocalValueMap[V] = Op0;
226 /// X86SelectStore - Select and emit code to implement store instructions.
227 bool X86FastISel::X86SelectStore(Instruction* I) {
// Determine the MVT of the stored value; pointers are normalized to the
// target's pointer type (the isa<PointerType> check is elided here).
228 MVT VT = MVT::getMVT(I->getOperand(0)->getType());
229 if (VT == MVT::Other || !VT.isSimple())
230 // Unhandled type. Halt "fast" selection and bail.
234 VT = TLI.getPointerTy();
235 // We only handle legal types. For example, on x86-32 the instruction
236 // selector contains all of the 64-bit instructions from x86-64,
237 // under the assumption that i64 won't be used if the target doesn't
239 if (!TLI.isTypeLegal(VT))
// Operand 0 is the stored value; operand 1 is the address.
241 unsigned Op0 = getRegForValue(I->getOperand(0));
243 // Unhandled operand. Halt "fast" selection and bail.
246 Value *V = I->getOperand(1);
247 unsigned Op1 = getRegForValue(V);
249 // Handle constant load address.
// If the address did not already get a register, try materializing it as a
// constant (global) address before giving up.
250 if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op1))
251 // Unhandled operand. Halt "fast" selection and bail.
255 return X86FastEmitStore(VT, Op0, Op1, V);
258 /// X86SelectLoad - Select and emit code to implement load instructions.
260 bool X86FastISel::X86SelectLoad(Instruction *I) {
// Result type of the load; pointers normalize to the target pointer type
// (the isa<PointerType> check is elided in this listing).
261 MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
262 if (VT == MVT::Other || !VT.isSimple())
263 // Unhandled type. Halt "fast" selection and bail.
267 VT = TLI.getPointerTy();
268 // We only handle legal types. For example, on x86-32 the instruction
269 // selector contains all of the 64-bit instructions from x86-64,
270 // under the assumption that i64 won't be used if the target doesn't
272 if (!TLI.isTypeLegal(VT))
// Operand 0 is the load address.
275 Value *V = I->getOperand(0);
276 unsigned Op0 = getRegForValue(V);
278 // Handle constant load address.
279 // FIXME: If load type is something we can't handle, this can result in
280 // a dead stub load instruction.
281 if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op0))
282 // Unhandled operand. Halt "fast" selection and bail.
// On success, record the result register for later uses of this load.
286 unsigned ResultReg = 0;
287 if (X86FastEmitLoad(VT, Op0, V, ResultReg)) {
288 UpdateValueMap(I, ResultReg);
// X86SelectCmp - Lower an integer or FP compare into a CMPrr/UCOMIS
// instruction followed by a SETcc producing an i8 boolean result.
// NOTE(review): the `break;` after each predicate case and the final
// `return true` are elided in this listing.
294 bool X86FastISel::X86SelectCmp(Instruction *I) {
295 CmpInst *CI = cast<CmpInst>(I);
297 MVT VT = TLI.getValueType(I->getOperand(0)->getType());
298 if (!TLI.isTypeLegal(VT))
301 unsigned Op0Reg = getRegForValue(CI->getOperand(0));
302 if (Op0Reg == 0) return false;
303 unsigned Op1Reg = getRegForValue(CI->getOperand(1));
304 if (Op1Reg == 0) return false;
// Pick the register-register compare opcode by operand width/kind.
307 switch (VT.getSimpleVT()) {
308 case MVT::i8: Opc = X86::CMP8rr; break;
309 case MVT::i16: Opc = X86::CMP16rr; break;
310 case MVT::i32: Opc = X86::CMP32rr; break;
311 case MVT::i64: Opc = X86::CMP64rr; break;
312 case MVT::f32: Opc = X86::UCOMISSrr; break;
313 case MVT::f64: Opc = X86::UCOMISDrr; break;
314 default: return false;
317 unsigned ResultReg = createResultReg(&X86::GR8RegClass);
318 switch (CI->getPredicate()) {
// Ordered-equal has no single SETcc: combine "equal" (SETE) with
// "not parity" (SETNP, i.e. the compare was ordered) via AND.
319 case CmpInst::FCMP_OEQ: {
320 unsigned EReg = createResultReg(&X86::GR8RegClass);
321 unsigned NPReg = createResultReg(&X86::GR8RegClass);
322 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
323 BuildMI(MBB, TII.get(X86::SETEr), EReg);
324 BuildMI(MBB, TII.get(X86::SETNPr), NPReg);
325 BuildMI(MBB, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
// Unordered-not-equal is the dual: "not equal" (SETNE) OR "parity"
// (SETP, i.e. the compare was unordered).
328 case CmpInst::FCMP_UNE: {
329 unsigned NEReg = createResultReg(&X86::GR8RegClass);
330 unsigned PReg = createResultReg(&X86::GR8RegClass);
331 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
332 BuildMI(MBB, TII.get(X86::SETNEr), NEReg);
333 BuildMI(MBB, TII.get(X86::SETPr), PReg);
334 BuildMI(MBB, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
// Remaining FP predicates use a single SETcc; for OLT/OLE/UGT/UGE the
// operands are swapped so the test can be expressed with A/AE or B/BE.
337 case CmpInst::FCMP_OGT:
338 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
339 BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
341 case CmpInst::FCMP_OGE:
342 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
343 BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
345 case CmpInst::FCMP_OLT:
346 BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
347 BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
349 case CmpInst::FCMP_OLE:
350 BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
351 BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
353 case CmpInst::FCMP_ONE:
354 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
355 BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
357 case CmpInst::FCMP_ORD:
358 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
359 BuildMI(MBB, TII.get(X86::SETNPr), ResultReg);
361 case CmpInst::FCMP_UNO:
362 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
363 BuildMI(MBB, TII.get(X86::SETPr), ResultReg);
365 case CmpInst::FCMP_UEQ:
366 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
367 BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
369 case CmpInst::FCMP_UGT:
370 BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
371 BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
373 case CmpInst::FCMP_UGE:
374 BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
375 BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
377 case CmpInst::FCMP_ULT:
378 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
379 BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
381 case CmpInst::FCMP_ULE:
382 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
383 BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
// Integer predicates: unsigned use A/AE/B/BE, signed use G/GE/L/LE.
385 case CmpInst::ICMP_EQ:
386 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
387 BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
389 case CmpInst::ICMP_NE:
390 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
391 BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
393 case CmpInst::ICMP_UGT:
394 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
395 BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
397 case CmpInst::ICMP_UGE:
398 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
399 BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
401 case CmpInst::ICMP_ULT:
402 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
403 BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
405 case CmpInst::ICMP_ULE:
406 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
407 BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
409 case CmpInst::ICMP_SGT:
410 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
411 BuildMI(MBB, TII.get(X86::SETGr), ResultReg);
413 case CmpInst::ICMP_SGE:
414 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
415 BuildMI(MBB, TII.get(X86::SETGEr), ResultReg);
417 case CmpInst::ICMP_SLT:
418 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
419 BuildMI(MBB, TII.get(X86::SETLr), ResultReg);
421 case CmpInst::ICMP_SLE:
422 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
423 BuildMI(MBB, TII.get(X86::SETLEr), ResultReg);
// Record the i8 result register for later uses of the compare.
429 UpdateValueMap(I, ResultReg);
// X86SelectZExt - Handle only the i1 -> i8 zero-extension, which is a no-op
// at the register level: the i1 producer already zeroed the high bits, so
// the source register is reused directly (closing lines elided).
433 bool X86FastISel::X86SelectZExt(Instruction *I) {
434 // Special-case hack: The only i1 values we know how to produce currently
435 // set the upper bits of an i8 value to zero.
436 if (I->getType() == Type::Int8Ty &&
437 I->getOperand(0)->getType() == Type::Int1Ty) {
438 unsigned ResultReg = getRegForValue(I->getOperand(0));
439 if (ResultReg == 0) return false;
440 UpdateValueMap(I, ResultReg);
// X86SelectBranch - Lower a conditional branch as TEST8rr on the i1
// condition, JNE to the true block, and an unconditional JMP to the false
// block, then record both CFG successors.
447 bool X86FastISel::X86SelectBranch(Instruction *I) {
448 BranchInst *BI = cast<BranchInst>(I);
449 // Unconditional branches are selected by tablegen-generated code.
450 unsigned OpReg = getRegForValue(BI->getCondition());
451 if (OpReg == 0) return false;
452 MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
453 MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];
// TEST reg,reg sets ZF from the condition byte; JNE taken when nonzero.
455 BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
456 BuildMI(MBB, TII.get(X86::JNE)).addMBB(TrueMBB);
457 BuildMI(MBB, TII.get(X86::JMP)).addMBB(FalseMBB);
459 MBB->addSuccessor(TrueMBB);
460 MBB->addSuccessor(FalseMBB);
// X86SelectShift - Lower lshr/ashr/shl by a variable amount using the
// CL-register shift forms: copy the shift amount into the count register,
// then emit SHR/SAR/SHL rCL on the value operand.
// NOTE(review): the declarations/assignments of CReg (presumably
// CL/CX/ECX/RCX per width -- confirm) and VT are elided from this listing.
465 bool X86FastISel::X86SelectShift(Instruction *I) {
468 const TargetRegisterClass *RC = NULL;
469 if (I->getType() == Type::Int8Ty) {
471 RC = &X86::GR8RegClass;
472 switch (I->getOpcode()) {
473 case Instruction::LShr: Opc = X86::SHR8rCL; break;
474 case Instruction::AShr: Opc = X86::SAR8rCL; break;
475 case Instruction::Shl: Opc = X86::SHL8rCL; break;
476 default: return false;
478 } else if (I->getType() == Type::Int16Ty) {
480 RC = &X86::GR16RegClass;
481 switch (I->getOpcode()) {
482 case Instruction::LShr: Opc = X86::SHR16rCL; break;
483 case Instruction::AShr: Opc = X86::SAR16rCL; break;
484 case Instruction::Shl: Opc = X86::SHL16rCL; break;
485 default: return false;
487 } else if (I->getType() == Type::Int32Ty) {
489 RC = &X86::GR32RegClass;
490 switch (I->getOpcode()) {
491 case Instruction::LShr: Opc = X86::SHR32rCL; break;
492 case Instruction::AShr: Opc = X86::SAR32rCL; break;
493 case Instruction::Shl: Opc = X86::SHL32rCL; break;
494 default: return false;
496 } else if (I->getType() == Type::Int64Ty) {
498 RC = &X86::GR64RegClass;
499 switch (I->getOpcode()) {
500 case Instruction::LShr: Opc = X86::SHR64rCL; break;
501 case Instruction::AShr: Opc = X86::SAR64rCL; break;
502 case Instruction::Shl: Opc = X86::SHL64rCL; break;
503 default: return false;
// Reject types the target cannot hold in a register.
509 MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
510 if (VT == MVT::Other || !TLI.isTypeLegal(VT))
513 unsigned Op0Reg = getRegForValue(I->getOperand(0));
514 if (Op0Reg == 0) return false;
515 unsigned Op1Reg = getRegForValue(I->getOperand(1));
516 if (Op1Reg == 0) return false;
// Move the shift amount into the fixed count register, then shift; the
// rCL opcodes implicitly read the count register.
517 TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);
518 unsigned ResultReg = createResultReg(RC);
519 BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op0Reg);
520 UpdateValueMap(I, ResultReg);
// X86SelectSelect - Lower a select as TEST8rr on the i1 condition followed
// by a CMOVE of the appropriate width. Only i16/i32/i64 (and pointer,
// normalized to intptr) are handled -- there is no 8-bit CMOV.
524 bool X86FastISel::X86SelectSelect(Instruction *I) {
525 const Type *Ty = I->getType();
526 if (isa<PointerType>(Ty))
527 Ty = TLI.getTargetData()->getIntPtrType();
530 const TargetRegisterClass *RC = NULL;
531 if (Ty == Type::Int16Ty) {
532 Opc = X86::CMOVE16rr;
533 RC = &X86::GR16RegClass;
534 } else if (Ty == Type::Int32Ty) {
535 Opc = X86::CMOVE32rr;
536 RC = &X86::GR32RegClass;
537 } else if (Ty == Type::Int64Ty) {
538 Opc = X86::CMOVE64rr;
539 RC = &X86::GR64RegClass;
544 MVT VT = MVT::getMVT(Ty, /*HandleUnknown=*/true);
545 if (VT == MVT::Other || !TLI.isTypeLegal(VT))
// Operand 0 is the condition, operands 1/2 the two select values.
548 unsigned Op0Reg = getRegForValue(I->getOperand(0));
549 if (Op0Reg == 0) return false;
550 unsigned Op1Reg = getRegForValue(I->getOperand(1));
551 if (Op1Reg == 0) return false;
552 unsigned Op2Reg = getRegForValue(I->getOperand(2));
553 if (Op2Reg == 0) return false;
// TEST sets ZF; CMOVE then picks between the two value registers.
555 BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
556 unsigned ResultReg = createResultReg(RC);
557 BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
558 UpdateValueMap(I, ResultReg);
// X86SelectTrunc - Handle i16/i32 -> i8 truncation on x86-32 only. The
// source is first copied into the GR16_/GR32_ class (registers that expose
// an 8-bit subregister on 32-bit targets -- confirm), then the low byte is
// taken with extract_subreg. x86-64 and all other cases go to tablegen.
562 bool X86FastISel::X86SelectTrunc(Instruction *I) {
563 if (Subtarget->is64Bit())
564 // All other cases should be handled by the tblgen generated code.
566 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
567 MVT DstVT = TLI.getValueType(I->getType());
568 if (DstVT != MVT::i8)
569 // All other cases should be handled by the tblgen generated code.
571 if (SrcVT != MVT::i16 && SrcVT != MVT::i32)
572 // All other cases should be handled by the tblgen generated code.
575 unsigned InputReg = getRegForValue(I->getOperand(0));
577 // Unhandled operand. Halt "fast" selection and bail.
580 // First issue a copy to GR16_ or GR32_.
581 unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16to16_ : X86::MOV32to32_;
582 const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
583 ? X86::GR16_RegisterClass : X86::GR32_RegisterClass;
584 unsigned CopyReg = createResultReg(CopyRC);
585 BuildMI(MBB, TII.get(CopyOpc), CopyReg).addReg(InputReg);
587 // Then issue an extract_subreg.
588 unsigned ResultReg = FastEmitInst_extractsubreg(CopyReg,1); // x86_subreg_8bit
592 UpdateValueMap(I, ResultReg);
// TargetSelectInstruction - Dispatch an IR instruction to the matching
// X86Select* helper; returns true when the helper emitted code. Opcodes
// not listed fall through to the (elided) default, letting the
// tablegen-generated matcher try them.
597 X86FastISel::TargetSelectInstruction(Instruction *I) {
598 switch (I->getOpcode()) {
600 case Instruction::Load:
601 return X86SelectLoad(I);
602 case Instruction::Store:
603 return X86SelectStore(I);
604 case Instruction::ICmp:
605 case Instruction::FCmp:
606 return X86SelectCmp(I);
607 case Instruction::ZExt:
608 return X86SelectZExt(I);
609 case Instruction::Br:
610 return X86SelectBranch(I);
611 case Instruction::LShr:
612 case Instruction::AShr:
613 case Instruction::Shl:
614 return X86SelectShift(I);
615 case Instruction::Select:
616 return X86SelectSelect(I);
617 case Instruction::Trunc:
618 return X86SelectTrunc(I);
// TargetMaterializeConstant - Emit code that produces constant C in a fresh
// virtual register and return that register, or 0 on failure. Globals go
// through X86SelectConstAddr; other constants are loaded from the constant
// pool. NOTE(review): this listing elides interior lines (load opcode
// assignments, some returns/braces); structure mirrors X86FastEmitLoad.
624 unsigned X86FastISel::TargetMaterializeConstant(Constant *C,
625 MachineConstantPool* MCP) {
626 // Can't handle PIC-mode yet.
627 if (TM.getRelocationModel() == Reloc::PIC_)
630 MVT VT = MVT::getMVT(C->getType(), /*HandleUnknown=*/true);
631 if (VT == MVT::Other || !VT.isSimple())
632 // Unhandled type. Halt "fast" selection and bail.
636 VT = TLI.getPointerTy();
637 // We only handle legal types. For example, on x86-32 the instruction
638 // selector contains all of the 64-bit instructions from x86-64,
639 // under the assumption that i64 won't be used if the target doesn't
641 if (!TLI.isTypeLegal(VT))
644 // Get opcode and regclass of the output for the given load instruction.
646 const TargetRegisterClass *RC = NULL;
647 switch (VT.getSimpleVT()) {
// Return 0 (the "no register" sentinel), not false: this function returns
// an unsigned register number, and `false` here only worked via implicit
// bool->unsigned conversion.
648 default: return 0;
651 RC = X86::GR8RegisterClass;
655 RC = X86::GR16RegisterClass;
659 RC = X86::GR32RegisterClass;
662 // Must be in x86-64 mode.
664 RC = X86::GR64RegisterClass;
// f32/f64: XMM class when SSE is available, x87 stack class otherwise.
667 if (Subtarget->hasSSE1()) {
669 RC = X86::FR32RegisterClass;
672 RC = X86::RFP32RegisterClass;
676 if (Subtarget->hasSSE2()) {
678 RC = X86::FR64RegisterClass;
681 RC = X86::RFP64RegisterClass;
686 RC = X86::RFP80RegisterClass;
690 unsigned ResultReg = createResultReg(RC);
691 if (isa<GlobalValue>(C)) {
692 // FIXME: If store value type is something we can't handle, this can result
693 // in a dead stub load instruction.
694 if (X86SelectConstAddr(C, ResultReg))
699 // MachineConstantPool wants an explicit alignment.
701 TM.getTargetData()->getPreferredTypeAlignmentShift(C->getType());
703 // Alignment of vector types. FIXME!
// Fallback alignment: log2 of the ABI type size.
704 Align = TM.getTargetData()->getABITypeSize(C->getType());
705 Align = Log2_64(Align);
// Place C in the constant pool and load it via a constant-pool reference.
708 unsigned MCPOffset = MCP->getConstantPoolIndex(C, Align);
709 addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset);
/// createFastISel - Factory entry point: construct an X86FastISel for the
/// given machine function and value/block maps. Caller owns the returned
/// selector (closing brace elided in this listing).
714 llvm::FastISel *X86::createFastISel(MachineFunction &mf,
715 DenseMap<const Value *, unsigned> &vm,
716 DenseMap<const BasicBlock *, MachineBasicBlock *> &bm) {
717 return new X86FastISel(mf, vm, bm);