//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"

using namespace llvm;

class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// StackPtr - Register used as the stack pointer.
  unsigned StackPtr;

  /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
  /// floating point ops.
  /// When SSE is available, use it for f32 operations.
  /// When SSE2 is available, use it for f64 operations.
  bool X86ScalarSSEf64;
  bool X86ScalarSSEf32;

public:
  explicit X86FastISel(MachineFunction &mf,
                       DenseMap<const Value *, unsigned> &vm,
                       DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                       DenseMap<const AllocaInst *, int> &am)
    : FastISel(mf, vm, bm, am) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
    StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
    X86ScalarSSEf64 = Subtarget->hasSSE2();
    X86ScalarSSEf32 = Subtarget->hasSSE1();
  }

  virtual bool TargetSelectInstruction(Instruction *I);

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitLoad(MVT VT, const X86AddressMode &AM, unsigned &RR);

  bool X86FastEmitStore(MVT VT, unsigned Val,
                        const X86AddressMode &AM);

  bool X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT, unsigned Src, MVT SrcVT,
                         unsigned &ResultReg);

  bool X86SelectConstAddr(Value *V, unsigned &Op0,
                          bool isCall = false, bool inReg = false);

  bool X86SelectAddress(Value *V, X86AddressMode &AM);

  bool X86SelectLoad(Instruction *I);

  bool X86SelectStore(Instruction *I);

  bool X86SelectCmp(Instruction *I);

  bool X86SelectZExt(Instruction *I);

  bool X86SelectBranch(Instruction *I);

  bool X86SelectShift(Instruction *I);

  bool X86SelectSelect(Instruction *I);

  bool X86SelectTrunc(Instruction *I);

  bool X86SelectFPExt(Instruction *I);
  bool X86SelectFPTrunc(Instruction *I);

  bool X86SelectCall(Instruction *I);

  CCAssignFn *CCAssignFnForCall(unsigned CC, bool isTailCall = false);

  unsigned TargetMaterializeConstant(Constant *C);

  unsigned TargetMaterializeAlloca(AllocaInst *C);

  /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
  /// computed in an SSE register, not on the X87 floating point stack.
  bool isScalarFPTypeInSSEReg(MVT VT) const {
    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 when SSE2 is available.
           (VT == MVT::f32 && X86ScalarSSEf32);   // f32 when SSE1 is available.
  }
};

static bool isTypeLegal(const Type *Ty, const TargetLowering &TLI, MVT &VT,
                        bool AllowI1 = false) {
  VT = MVT::getMVT(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
}

#include "X86GenCallingConv.inc"

/// CCAssignFnForCall - Selects the correct CCAssignFn for a given calling
/// convention.
CCAssignFn *X86FastISel::CCAssignFnForCall(unsigned CC, bool isTailCall) {
  if (Subtarget->is64Bit()) {
    if (Subtarget->isTargetWin64())
      return CC_X86_Win64_C;
    else if (CC == CallingConv::Fast && isTailCall)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && isTailCall)
    return CC_X86_32_TailCall;
  else if (CC == CallingConv::Fast)
    return CC_X86_32_FastCC;
  else
    return CC_X86_32_C;
}

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(MVT VT, const X86AddressMode &AM,
                                  unsigned &ResultReg) {
  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
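  // For scalar FP, prefer SSE registers when the subtarget supports them;
  // otherwise fall back to the x87 floating point stack.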
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
  return true;
}

/// X86FastEmitStore - Emit a machine instruction to store a value Val of
/// type VT. The address is either pre-computed, consisting of a base ptr
/// Ptr and a displacement offset, or a GlobalAddress, i.e. V.
/// Return true if it is possible.
bool
X86FastISel::X86FastEmitStore(MVT VT, unsigned Val,
                              const X86AddressMode &AM) {
  // Get opcode and regclass of the output for the given store instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8mr;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16mr;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32mr;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64mr;
    RC  = X86::GR64RegisterClass;
    break;
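  // As with loads, scalar FP stores go through SSE registers when available
  // and through the x87 stack otherwise.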
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSmr;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::ST_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDmr;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::ST_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    // x87 has no non-popping f80 store, so use the popping pseudo.
    Opc = X86::ST_FpP80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Val);
  return true;
}

/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
/// ISD::SIGN_EXTEND).
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT,
                                    unsigned Src, MVT SrcVT,
                                    unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

/// X86SelectConstAddr - Select and emit code to materialize constant address.
///
bool X86FastISel::X86SelectConstAddr(Value *V, unsigned &Op0,
                                     bool isCall, bool inReg) {
  // FIXME: Only GlobalAddress for now.
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  if (!GV)
    return false;

  if (Subtarget->GVRequiresExtraLoad(GV, TM, isCall)) {
    // Issue load from stub if necessary.
    unsigned Opc = 0;
    const TargetRegisterClass *RC = NULL;
    if (TLI.getPointerTy() == MVT::i32) {
      Opc = X86::MOV32rm;
      RC  = X86::GR32RegisterClass;
    } else {
      Opc = X86::MOV64rm;
      RC  = X86::GR64RegisterClass;
    }
    Op0 = createResultReg(RC);
    X86AddressMode AM;
    AM.GV = GV;
    addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
    // Prevent loading GV stub multiple times in same MBB.
    LocalValueMap[V] = Op0;
  } else if (inReg) {
    unsigned Opc = 0;
    const TargetRegisterClass *RC = NULL;
    if (TLI.getPointerTy() == MVT::i32) {
      Opc = X86::LEA32r;
      RC  = X86::GR32RegisterClass;
    } else {
      Opc = X86::LEA64r;
      RC  = X86::GR64RegisterClass;
    }
    Op0 = createResultReg(RC);
    X86AddressMode AM;
    AM.GV = GV;
    addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
    // Prevent materializing GV address multiple times in same MBB.
    LocalValueMap[V] = Op0;
  }

  return true;
}

/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
  // Look past bitcasts.
  if (const BitCastInst *BC = dyn_cast<BitCastInst>(V))
    return X86SelectAddress(BC->getOperand(0), AM);

  if (const AllocaInst *A = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI = StaticAllocaMap.find(A);
    if (SI == StaticAllocaMap.end())
      return false;
    AM.BaseType = X86AddressMode::FrameIndexBase;
    AM.Base.FrameIndex = SI->second;
  } else if (unsigned Ptr = lookUpRegForValue(V)) {
    AM.Base.Reg = Ptr;
  } else {
    // Handle constant address.
    // FIXME: If load type is something we can't handle, this can result in
    // a dead stub load instruction.
    if (isa<Constant>(V) && X86SelectConstAddr(V, AM.Base.Reg)) {
      if (AM.Base.Reg == 0)
        AM.GV = cast<GlobalValue>(V);
    } else {
      AM.Base.Reg = getRegForValue(V);
      if (AM.Base.Reg == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  return true;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), TLI, VT))
    return false;
  unsigned Val = getRegForValue(I->getOperand(0));
  if (Val == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(1), AM))
    return false;

  return X86FastEmitStore(VT, Val, AM);
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), TLI, VT))
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(0), AM))
    return false;

  unsigned ResultReg = 0;
  if (X86FastEmitLoad(VT, AM, ResultReg)) {
    UpdateValueMap(I, ResultReg);
    return true;
  }
  return false;
}

bool X86FastISel::X86SelectCmp(Instruction *I) {
  CmpInst *CI = cast<CmpInst>(I);

  MVT VT = TLI.getValueType(I->getOperand(0)->getType());
  if (!TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(CI->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(CI->getOperand(1));
  if (Op1Reg == 0) return false;

  unsigned Opc = 0;
  switch (VT.getSimpleVT()) {
  case MVT::i8: Opc = X86::CMP8rr; break;
  case MVT::i16: Opc = X86::CMP16rr; break;
  case MVT::i32: Opc = X86::CMP32rr; break;
  case MVT::i64: Opc = X86::CMP64rr; break;
  case MVT::f32: Opc = X86::UCOMISSrr; break;
  case MVT::f64: Opc = X86::UCOMISDrr; break;
  default: return false;
  }

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
  switch (CI->getPredicate()) {
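  // FCMP_OEQ has no single setcc: UCOMISS/UCOMISD set PF on an unordered
  // result, so "ordered and equal" is ZF set *and* PF clear (SETE && SETNP).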
  case CmpInst::FCMP_OEQ: {
    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), EReg);
    BuildMI(MBB, TII.get(X86::SETNPr), NPReg);
    BuildMI(MBB, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    break;
  }
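  // Dually, FCMP_UNE ("unordered or not equal") is ZF clear *or* PF set
  // (SETNE || SETP).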
  case CmpInst::FCMP_UNE: {
    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), NEReg);
    BuildMI(MBB, TII.get(X86::SETPr), PReg);
    BuildMI(MBB, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    break;
  }
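  // The remaining FP predicates map onto the unsigned-style setccs
  // (A/AE/B/BE read CF/ZF, which is where UCOMIS* leaves its result);
  // OLT/OLE and UGT/UGE swap the operands so an existing condition code
  // can be reused.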
  case CmpInst::FCMP_OGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_OLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_ONE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::FCMP_ORD:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNPr), ResultReg);
    break;
  case CmpInst::FCMP_UNO:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETPr), ResultReg);
    break;
  case CmpInst::FCMP_UEQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::FCMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::FCMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_EQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::ICMP_NE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::ICMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::ICMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::ICMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::ICMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_SGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGr), ResultReg);
    break;
  case CmpInst::ICMP_SGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGEr), ResultReg);
    break;
  case CmpInst::ICMP_SLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLr), ResultReg);
    break;
  case CmpInst::ICMP_SLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLEr), ResultReg);
    break;
  default:
    return false;
  }

  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectZExt(Instruction *I) {
  // Special-case hack: The only i1 values we know how to produce currently
  // set the upper bits of an i8 value to zero.
  if (I->getType() == Type::Int8Ty &&
      I->getOperand(0)->getType() == Type::Int1Ty) {
    unsigned ResultReg = getRegForValue(I->getOperand(0));
    if (ResultReg == 0) return false;
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}

bool X86FastISel::X86SelectBranch(Instruction *I) {
  BranchInst *BI = cast<BranchInst>(I);
  // Unconditional branches are selected by tablegen-generated code.
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;
  MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];
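  // Test the i1 condition against itself; JNE branches to the true block,
  // and an explicit JMP covers the false edge.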
  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
  BuildMI(MBB, TII.get(X86::JNE)).addMBB(TrueMBB);
  BuildMI(MBB, TII.get(X86::JMP)).addMBB(FalseMBB);

  MBB->addSuccessor(TrueMBB);
  MBB->addSuccessor(FalseMBB);

  return true;
}

bool X86FastISel::X86SelectShift(Instruction *I) {
  unsigned CReg = 0, Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (I->getType() == Type::Int8Ty) {
    CReg = X86::CL;
    RC = &X86::GR8RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR8rCL; break;
    case Instruction::AShr: Opc = X86::SAR8rCL; break;
    case Instruction::Shl:  Opc = X86::SHL8rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int16Ty) {
    CReg = X86::CX;
    RC = &X86::GR16RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR16rCL; break;
    case Instruction::AShr: Opc = X86::SAR16rCL; break;
    case Instruction::Shl:  Opc = X86::SHL16rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int32Ty) {
    CReg = X86::ECX;
    RC = &X86::GR32RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR32rCL; break;
    case Instruction::AShr: Opc = X86::SAR32rCL; break;
    case Instruction::Shl:  Opc = X86::SHL32rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int64Ty) {
    CReg = X86::RCX;
    RC = &X86::GR64RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR64rCL; break;
    case Instruction::AShr: Opc = X86::SAR64rCL; break;
    case Instruction::Shl:  Opc = X86::SHL64rCL; break;
    default: return false;
    }
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
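  // Variable shifts take their amount in CL, so copy the shift amount into
  // the fixed CL/CX/ECX/RCX register before issuing the rCL-form shift.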
  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectSelect(Instruction *I) {
  const Type *Ty = I->getType();
  if (isa<PointerType>(Ty))
    Ty = TLI.getTargetData()->getIntPtrType();

  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (Ty == Type::Int16Ty) {
    Opc = X86::CMOVE16rr;
    RC = &X86::GR16RegClass;
  } else if (Ty == Type::Int32Ty) {
    Opc = X86::CMOVE32rr;
    RC = &X86::GR32RegClass;
  } else if (Ty == Type::Int64Ty) {
    Opc = X86::CMOVE64rr;
    RC = &X86::GR64RegClass;
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;
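  // TEST sets ZF when the condition is zero; CMOVE then picks the false
  // operand (Op2), otherwise the result keeps the true operand (Op1).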
  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectFPExt(Instruction *I) {
  if (Subtarget->hasSSE2()) {
    if (I->getType() == Type::DoubleTy) {
      Value *V = I->getOperand(0);
      if (V->getType() == Type::FloatTy) {
        unsigned OpReg = getRegForValue(V);
        if (OpReg == 0) return false;
        unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
        BuildMI(MBB, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
        UpdateValueMap(I, ResultReg);
        return true;
      }
    }
  }

  return false;
}

bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
  if (Subtarget->hasSSE2()) {
    if (I->getType() == Type::FloatTy) {
      Value *V = I->getOperand(0);
      if (V->getType() == Type::DoubleTy) {
        unsigned OpReg = getRegForValue(V);
        if (OpReg == 0) return false;
        unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
        BuildMI(MBB, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
        UpdateValueMap(I, ResultReg);
        return true;
      }
    }
  }

  return false;
}

bool X86FastISel::X86SelectTrunc(Instruction *I) {
  if (Subtarget->is64Bit())
    // All other cases should be handled by the tblgen generated code.
    return false;
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());
  if (DstVT != MVT::i8)
    // All other cases should be handled by the tblgen generated code.
    return false;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i32)
    // All other cases should be handled by the tblgen generated code.
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;
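  // On x86-32 only EAX/EBX/ECX/EDX have an addressable 8-bit subregister, so
  // copy into GR16_/GR32_ (register classes restricted to those registers)
  // before taking the 8-bit subreg.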
  // First issue a copy to GR16_ or GR32_.
  unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16to16_ : X86::MOV32to32_;
  const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
    ? X86::GR16_RegisterClass : X86::GR32_RegisterClass;
  unsigned CopyReg = createResultReg(CopyRC);
  BuildMI(MBB, TII.get(CopyOpc), CopyReg).addReg(InputReg);

  // Then issue an extract_subreg.
  unsigned ResultReg = FastEmitInst_extractsubreg(CopyReg, 1); // x86_subreg_8bit
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectCall(Instruction *I) {
  CallInst *CI = cast<CallInst>(I);
  Value *Callee = I->getOperand(0);

  // Can't handle inline asm yet.
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: Handle some intrinsics.
  if (Function *F = CI->getCalledFunction()) {
    if (F->isDeclaration() && F->getIntrinsicID())
      return false;
  }

  // Handle only C and fastcc calling conventions for now.
  CallSite CS(CI);
  unsigned CC = CS.getCallingConv();
  if (CC != CallingConv::C &&
      CC != CallingConv::Fast &&
      CC != CallingConv::X86_FastCall)
    return false;

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = CS.getType();
  MVT RetVT;
  if (RetTy == Type::VoidTy)
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, TLI, RetVT, true))
    return false;

  // Materialize callee address in a register. FIXME: GV address can be
  // handled with a CALLpcrel32 instead.
  unsigned CalleeOp = 0;
  if (!isa<Constant>(Callee) || !X86SelectConstAddr(Callee, CalleeOp, true)) {
    CalleeOp = getRegForValue(Callee);
    if (CalleeOp == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // Allow calls which produce i1 results.
  bool AndToI1 = false;
  if (RetVT == MVT::i1) {
    RetVT = MVT::i8;
    AndToI1 = true;
  }

  // Deal with call operands first.
  SmallVector<unsigned, 4> Args;
  SmallVector<MVT, 4> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 4> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, ParamAttr::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, ParamAttr::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, ParamAttr::InReg) ||
        CS.paramHasAttr(AttrInd, ParamAttr::StructRet) ||
        CS.paramHasAttr(AttrInd, ParamAttr::Nest) ||
        CS.paramHasAttr(AttrInd, ParamAttr::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, TLI, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  BuildMI(MBB, TII.get(X86::ADJCALLSTACKDOWN)).addImm(NumBytes);

  // Process arguments: walk the register/memloc assignments, inserting
  // copies / loads.
  SmallVector<unsigned, 4> RegArgs;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = Args[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a sext!");
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a zext!");
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      // Try any-extend first; fall back to zero- or sign-extend.
      bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);

      assert(Emitted && "Failed to emit an aext!");
      ArgVT = VA.getLocVT();
      break;
    }
    }

    if (VA.isRegLoc()) {
      TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
      bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
                                      Arg, RC, RC);
      assert(Emitted && "Failed to emit a copy instruction!");
      RegArgs.push_back(VA.getLocReg());
    } else {
      // Stack argument: store it relative to the stack pointer at the
      // offset assigned by the calling convention.
      unsigned LocMemOffset = VA.getLocMemOffset();
      X86AddressMode AM;
      AM.Base.Reg = StackPtr;
      AM.Disp = LocMemOffset;
      X86FastEmitStore(ArgVT, Arg, AM);
    }
  }

  // Issue the call.
  unsigned CallOpc = CalleeOp
    ? (Subtarget->is64Bit() ? X86::CALL64r       : X86::CALL32r)
    : (Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32);
  MachineInstrBuilder MIB = CalleeOp
    ? BuildMI(MBB, TII.get(CallOpc)).addReg(CalleeOp)
    : BuildMI(MBB, TII.get(CallOpc)).addGlobalAddress(cast<GlobalValue>(Callee));
  // Add implicit physical register uses to the call.
  while (!RegArgs.empty()) {
    MIB.addReg(RegArgs.back());
    RegArgs.pop_back();
  }

  // Issue CALLSEQ_END
  BuildMI(MBB, TII.get(X86::ADJCALLSTACKUP)).addImm(NumBytes).addImm(0);

  // Now handle call return value (if any).
  if (RetVT.getSimpleVT() != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs);
    CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);

    // Copy all of the result registers out of their specified physreg.
    assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
    MVT CopyVT = RVLocs[0].getValVT();
    TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
    TargetRegisterClass *SrcRC = DstRC;

    // If this is a call to a function that returns an fp value on the x87 fp
    // stack, but where we prefer to use the value in xmm registers, copy it
    // out as F80 and use a truncate to move it from fp stack reg to xmm reg.
    if ((RVLocs[0].getLocReg() == X86::ST0 ||
         RVLocs[0].getLocReg() == X86::ST1) &&
        isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
      CopyVT = MVT::f80;
      SrcRC = X86::RSTRegisterClass;
      DstRC = X86::RFP80RegisterClass;
    }

    unsigned ResultReg = createResultReg(DstRC);
    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                    RVLocs[0].getLocReg(), DstRC, SrcRC);
    assert(Emitted && "Failed to emit a copy instruction!");
    if (CopyVT != RVLocs[0].getValVT()) {
      // Round the F80 to the right size, which also moves it to the
      // appropriate xmm register. This is accomplished by storing the F80
      // value in memory and then loading it back. Ewww...
      MVT ResVT = RVLocs[0].getValVT();
      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
      unsigned MemSize = ResVT.getSizeInBits()/8;
      int FI = MFI.CreateStackObject(MemSize, MemSize);
      addFrameReference(BuildMI(MBB, TII.get(Opc)), FI).addReg(ResultReg);
      DstRC = ResVT == MVT::f32
        ? X86::FR32RegisterClass : X86::FR64RegisterClass;
      Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
      ResultReg = createResultReg(DstRC);
      addFrameReference(BuildMI(MBB, TII.get(Opc), ResultReg), FI);
    }

    if (AndToI1) {
      // Mask out all but the lowest bit for a call which produces an i1.
      unsigned AndResult = createResultReg(X86::GR8RegisterClass);
      BuildMI(MBB, TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
      ResultReg = AndResult;
    }

    UpdateValueMap(I, ResultReg);
  }

  return true;
}

bool
X86FastISel::TargetSelectInstruction(Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  case Instruction::Call:
    return X86SelectCall(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::Select:
    return X86SelectSelect(I);
  case Instruction::Trunc:
    return X86SelectTrunc(I);
  case Instruction::FPExt:
    return X86SelectFPExt(I);
  case Instruction::FPTrunc:
    return X86SelectFPTrunc(I);
  }

  return false;
}

unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
  // Can't handle PIC-mode yet.
  if (TM.getRelocationModel() == Reloc::PIC_)
    return 0;

  MVT VT;
  if (!isTypeLegal(C->getType(), TLI, VT))
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return 0;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
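  // FP constants are materialized with a load from the constant pool, into
  // an SSE register when available, otherwise onto the x87 stack.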
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  unsigned ResultReg = createResultReg(RC);
  if (isa<GlobalValue>(C)) {
    if (X86SelectConstAddr(C, ResultReg, false, true))
      return ResultReg;
    return 0;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align =
    TM.getTargetData()->getPreferredTypeAlignmentShift(C->getType());
  if (Align == 0) {
    // Alignment of vector types. FIXME!
    Align = TM.getTargetData()->getABITypeSize(C->getType());
    Align = Log2_64(Align);
  }

  unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
  addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset);
  return ResultReg;
}

unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
  X86AddressMode AM;
  if (!X86SelectAddress(C, AM))
    return 0;
  unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
  unsigned ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
  return ResultReg;
}

namespace llvm {
  llvm::FastISel *X86::createFastISel(MachineFunction &mf,
                        DenseMap<const Value *, unsigned> &vm,
                        DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                        DenseMap<const AllocaInst *, int> &am) {
    return new X86FastISel(mf, vm, bm, am);
  }
}