//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"

using namespace llvm;
class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// StackPtr - Register used as the stack pointer.
  unsigned StackPtr;

  /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
  /// floating point ops.
  /// When SSE is available, use it for f32 operations.
  /// When SSE2 is available, use it for f64 operations.
  bool X86ScalarSSEf64;
  bool X86ScalarSSEf32;

public:
  explicit X86FastISel(MachineFunction &mf,
                       DenseMap<const Value *, unsigned> &vm,
                       DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                       DenseMap<const AllocaInst *, int> &am)
    : FastISel(mf, vm, bm, am) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
    StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
    X86ScalarSSEf64 = Subtarget->hasSSE2();
    X86ScalarSSEf32 = Subtarget->hasSSE1();
  }

  virtual bool TargetSelectInstruction(Instruction *I);

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitLoad(MVT VT, const X86AddressMode &AM, unsigned &RR);

  bool X86FastEmitStore(MVT VT, unsigned Val,
                        const X86AddressMode &AM);

  bool X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT, unsigned Src, MVT SrcVT,
                         unsigned &ResultReg);

  bool X86SelectConstAddr(Value *V, unsigned &Op0,
                          bool isCall = false, bool inReg = false);

  bool X86SelectAddress(Value *V, X86AddressMode &AM);

  bool X86SelectLoad(Instruction *I);

  bool X86SelectStore(Instruction *I);

  bool X86SelectCmp(Instruction *I);

  bool X86SelectZExt(Instruction *I);

  bool X86SelectBranch(Instruction *I);

  bool X86SelectShift(Instruction *I);

  bool X86SelectSelect(Instruction *I);

  bool X86SelectTrunc(Instruction *I);

  bool X86SelectFPExt(Instruction *I);
  bool X86SelectFPTrunc(Instruction *I);

  bool X86SelectCall(Instruction *I);

  CCAssignFn *CCAssignFnForCall(unsigned CC, bool isTailCall = false);

  unsigned TargetMaterializeConstant(Constant *C);

  unsigned TargetMaterializeAlloca(AllocaInst *C);

  /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
  /// computed in an SSE register, not on the X87 floating point stack.
  bool isScalarFPTypeInSSEReg(MVT VT) const {
    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is legal if SSE2
           (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is legal if SSE1
  }
};
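
// Note: whether a scalar FP value lives in an XMM register or on the x87
// stack decides which opcodes and register classes are used throughout this
// file. For example, with SSE2 an f64 load goes into an FR64 register
// (MOVSDrm); without it, the same load becomes LD_Fp64m onto the RFP64
// (x87) stack.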

static bool isTypeLegal(const Type *Ty, const TargetLowering &TLI, MVT &VT,
                        bool AllowI1 = false) {
  VT = MVT::getMVT(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
}
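
// For example, i64 is not a legal type on x86-32, so isTypeLegal() rejects
// it there and selection falls back to the SelectionDAG path. With AllowI1,
// callers such as X86SelectCall can additionally accept i1 values.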

#include "X86GenCallingConv.inc"

/// CCAssignFnForCall - Selects the correct CCAssignFn for a given calling
/// convention.
CCAssignFn *X86FastISel::CCAssignFnForCall(unsigned CC, bool isTailCall) {
  if (Subtarget->is64Bit()) {
    if (Subtarget->isTargetWin64())
      return CC_X86_Win64_C;
    else if (CC == CallingConv::Fast && isTailCall)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && isTailCall)
    return CC_X86_32_TailCall;
  else if (CC == CallingConv::Fast)
    return CC_X86_32_FastCC;
  else
    return CC_X86_32_C;
}

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(MVT VT, const X86AddressMode &AM,
                                  unsigned &ResultReg) {
  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
  return true;
}
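
// Note: addFullAddress() (from X86InstrBuilder.h) appends the full
// base/scale/index/displacement (and global, if any) memory operands
// described by the X86AddressMode, so e.g. an i32 load from a frame slot
// becomes a single MOV32rm with the frame index folded in.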

/// X86FastEmitStore - Emit a machine instruction to store a value Val of
/// type VT. The address is either pre-computed, consisting of a base ptr,
/// Ptr, and a displacement offset, or a GlobalAddress, i.e. V.
/// Return true if it is possible.
bool
X86FastISel::X86FastEmitStore(MVT VT, unsigned Val,
                              const X86AddressMode &AM) {
  // Get opcode and regclass of the output for the given store instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8mr;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16mr;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32mr;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64mr;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSmr;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::ST_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDmr;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::ST_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::ST_FpP80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Val);
  return true;
}

/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
/// ISD::SIGN_EXTEND).
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT,
                                    unsigned Src, MVT SrcVT,
                                    unsigned &ResultReg) {
  ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
  return ResultReg != 0;
}
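
// FastEmit_r dispatches into the tablegen-generated selector, so e.g.
// X86FastEmitExtend(ISD::ZERO_EXTEND, MVT::i32, Reg, MVT::i8, ResultReg)
// picks whatever pattern X86GenFastISel.inc provides (typically a MOVZX
// form); a result register of 0 means no pattern matched and the caller
// must bail.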

/// X86SelectConstAddr - Select and emit code to materialize constant address.
///
bool X86FastISel::X86SelectConstAddr(Value *V, unsigned &Op0,
                                     bool isCall, bool inReg) {
  // FIXME: Only GlobalAddress for now.
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  if (!GV)
    return false;

  if (Subtarget->GVRequiresExtraLoad(GV, TM, isCall)) {
    // Issue load from stub if necessary.
    unsigned Opc = 0;
    const TargetRegisterClass *RC = NULL;
    if (TLI.getPointerTy() == MVT::i32) {
      Opc = X86::MOV32rm;
      RC  = X86::GR32RegisterClass;
    } else {
      Opc = X86::MOV64rm;
      RC  = X86::GR64RegisterClass;
    }
    Op0 = createResultReg(RC);
    X86AddressMode AM;
    AM.GV = GV;
    addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
    // Prevent loading GV stub multiple times in same MBB.
    LocalValueMap[V] = Op0;
  } else if (inReg) {
    unsigned Opc = 0;
    const TargetRegisterClass *RC = NULL;
    if (TLI.getPointerTy() == MVT::i32) {
      Opc = X86::LEA32r;
      RC  = X86::GR32RegisterClass;
    } else {
      Opc = X86::LEA64r;
      RC  = X86::GR64RegisterClass;
    }
    Op0 = createResultReg(RC);
    X86AddressMode AM;
    AM.GV = GV;
    addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
    // Prevent materializing GV address multiple times in same MBB.
    LocalValueMap[V] = Op0;
  }
  return true;
}
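
// Note: when neither branch fires, Op0 is left at 0 and the caller (see
// X86SelectAddress below) folds the GlobalValue directly into the
// addressing mode instead of going through a register. The LocalValueMap
// caching keeps a stub load or address materialization from being
// re-emitted for every use in the same block.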

/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
  // Look past bitcasts.
  if (const BitCastInst *BC = dyn_cast<BitCastInst>(V))
    return X86SelectAddress(BC->getOperand(0), AM);

  if (const AllocaInst *A = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI = StaticAllocaMap.find(A);
    if (SI == StaticAllocaMap.end())
      return false;
    AM.BaseType = X86AddressMode::FrameIndexBase;
    AM.Base.FrameIndex = SI->second;
  } else if (unsigned Ptr = lookUpRegForValue(V)) {
    AM.Base.Reg = Ptr;
  } else {
    // Handle constant address.
    // FIXME: If load type is something we can't handle, this can result in
    // a dead stub load instruction.
    if (isa<Constant>(V) && X86SelectConstAddr(V, AM.Base.Reg)) {
      if (AM.Base.Reg == 0)
        AM.GV = cast<GlobalValue>(V);
    } else {
      AM.Base.Reg = getRegForValue(V);
      if (AM.Base.Reg == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  return true;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction* I) {
  MVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), TLI, VT))
    return false;
  unsigned Val = getRegForValue(I->getOperand(0));
  if (Val == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(1), AM))
    return false;

  return X86FastEmitStore(VT, Val, AM);
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), TLI, VT))
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(0), AM))
    return false;

  unsigned ResultReg = 0;
  if (X86FastEmitLoad(VT, AM, ResultReg)) {
    UpdateValueMap(I, ResultReg);
    return true;
  }
  return false;
}

bool X86FastISel::X86SelectCmp(Instruction *I) {
  CmpInst *CI = cast<CmpInst>(I);

  MVT VT = TLI.getValueType(I->getOperand(0)->getType());
  if (!TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(CI->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(CI->getOperand(1));
  if (Op1Reg == 0) return false;

  unsigned Opc = 0;
  switch (VT.getSimpleVT()) {
  case MVT::i8: Opc = X86::CMP8rr; break;
  case MVT::i16: Opc = X86::CMP16rr; break;
  case MVT::i32: Opc = X86::CMP32rr; break;
  case MVT::i64: Opc = X86::CMP64rr; break;
  case MVT::f32: Opc = X86::UCOMISSrr; break;
  case MVT::f64: Opc = X86::UCOMISDrr; break;
  default: return false;
  }

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
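  // UCOMISS/UCOMISD set ZF/PF/CF: ZF=1 on equal, PF=1 on unordered (NaN),
  // CF=1 on "below". The ordered/unordered predicates below are built from
  // SETcc on those flags, swapping the operand order where the needed
  // condition is only expressible as "above"/"below".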
  switch (CI->getPredicate()) {
  case CmpInst::FCMP_OEQ: {
    // Ordered equal: ZF=1 and PF=0, so AND together SETE and SETNP.
    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), EReg);
    BuildMI(MBB, TII.get(X86::SETNPr), NPReg);
    BuildMI(MBB, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    break;
  }
  case CmpInst::FCMP_UNE: {
    // Unordered not-equal: ZF=0 or PF=1, so OR together SETNE and SETP.
    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), NEReg);
    BuildMI(MBB, TII.get(X86::SETPr), PReg);
    BuildMI(MBB, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    break;
  }
  case CmpInst::FCMP_OGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_OLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_ONE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::FCMP_ORD:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNPr), ResultReg);
    break;
  case CmpInst::FCMP_UNO:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETPr), ResultReg);
    break;
  case CmpInst::FCMP_UEQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::FCMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::FCMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_EQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::ICMP_NE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::ICMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::ICMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::ICMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::ICMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_SGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGr), ResultReg);
    break;
  case CmpInst::ICMP_SGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGEr), ResultReg);
    break;
  case CmpInst::ICMP_SLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLr), ResultReg);
    break;
  case CmpInst::ICMP_SLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLEr), ResultReg);
    break;
  default:
    return false;
  }

  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectZExt(Instruction *I) {
  // Special-case hack: The only i1 values we know how to produce currently
  // set the upper bits of an i8 value to zero.
  if (I->getType() == Type::Int8Ty &&
      I->getOperand(0)->getType() == Type::Int1Ty) {
    unsigned ResultReg = getRegForValue(I->getOperand(0));
    if (ResultReg == 0) return false;
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}
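
// This works because every i1 this selector produces already lives in a GR8
// register with its upper bits clear: one defined by X86SelectCmp comes from
// a SETcc, which writes exactly 0 or 1, and a call result is masked with
// AND8ri in X86SelectCall. The zext to i8 can therefore reuse the same
// register with no code emitted.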

bool X86FastISel::X86SelectBranch(Instruction *I) {
  BranchInst *BI = cast<BranchInst>(I);
  // Unconditional branches are selected by tablegen-generated code.
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;
  MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];

  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
  BuildMI(MBB, TII.get(X86::JNE)).addMBB(TrueMBB);
  BuildMI(MBB, TII.get(X86::JMP)).addMBB(FalseMBB);

  MBB->addSuccessor(TrueMBB);
  MBB->addSuccessor(FalseMBB);

  return true;
}
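
// TEST8rr ANDs the condition byte with itself purely to set ZF, so JNE is
// taken exactly when the i1 condition is non-zero; the explicit JMP to the
// false block can be cleaned up by later branch folding when that block
// falls through.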

bool X86FastISel::X86SelectShift(Instruction *I) {
  unsigned CReg = 0, Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (I->getType() == Type::Int8Ty) {
    CReg = X86::CL;
    RC = &X86::GR8RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR8rCL; break;
    case Instruction::AShr: Opc = X86::SAR8rCL; break;
    case Instruction::Shl: Opc = X86::SHL8rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int16Ty) {
    CReg = X86::CX;
    RC = &X86::GR16RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR16rCL; break;
    case Instruction::AShr: Opc = X86::SAR16rCL; break;
    case Instruction::Shl: Opc = X86::SHL16rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int32Ty) {
    CReg = X86::ECX;
    RC = &X86::GR32RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR32rCL; break;
    case Instruction::AShr: Opc = X86::SAR32rCL; break;
    case Instruction::Shl: Opc = X86::SHL32rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int64Ty) {
    CReg = X86::RCX;
    RC = &X86::GR64RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR64rCL; break;
    case Instruction::AShr: Opc = X86::SAR64rCL; break;
    case Instruction::Shl: Opc = X86::SHL64rCL; break;
    default: return false;
    }
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}
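
// Variable-count x86 shifts take their count implicitly in CL, so the shift
// amount is first copied into CL/CX/ECX/RCX (matching the operand width) and
// the SHx/SARxrCL opcodes then read it from there.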

bool X86FastISel::X86SelectSelect(Instruction *I) {
  const Type *Ty = I->getType();
  if (isa<PointerType>(Ty))
    Ty = TLI.getTargetData()->getIntPtrType();

  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (Ty == Type::Int16Ty) {
    Opc = X86::CMOVE16rr;
    RC = &X86::GR16RegClass;
  } else if (Ty == Type::Int32Ty) {
    Opc = X86::CMOVE32rr;
    RC = &X86::GR32RegClass;
  } else if (Ty == Type::Int64Ty) {
    Opc = X86::CMOVE64rr;
    RC = &X86::GR64RegClass;
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}
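
// CMOVE moves its second source when ZF=1, and TEST sets ZF=1 exactly when
// the condition byte is 0. ResultReg starts as Op1 (the tied first operand,
// i.e. the true value) and is replaced by Op2 only when the condition is
// false, which matches the IR select semantics.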

bool X86FastISel::X86SelectFPExt(Instruction *I) {
  if (Subtarget->hasSSE2()) {
    if (I->getType() == Type::DoubleTy) {
      Value *V = I->getOperand(0);
      if (V->getType() == Type::FloatTy) {
        unsigned OpReg = getRegForValue(V);
        if (OpReg == 0) return false;
        unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
        BuildMI(MBB, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
        UpdateValueMap(I, ResultReg);
        return true;
      }
    }
  }

  return false;
}

bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
  if (Subtarget->hasSSE2()) {
    if (I->getType() == Type::FloatTy) {
      Value *V = I->getOperand(0);
      if (V->getType() == Type::DoubleTy) {
        unsigned OpReg = getRegForValue(V);
        if (OpReg == 0) return false;
        unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
        BuildMI(MBB, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
        UpdateValueMap(I, ResultReg);
        return true;
      }
    }
  }

  return false;
}

bool X86FastISel::X86SelectTrunc(Instruction *I) {
  if (Subtarget->is64Bit())
    // All other cases should be handled by the tblgen generated code.
    return false;
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());
  if (DstVT != MVT::i8)
    // All other cases should be handled by the tblgen generated code.
    return false;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i32)
    // All other cases should be handled by the tblgen generated code.
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First issue a copy to GR16_ or GR32_.
  unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16to16_ : X86::MOV32to32_;
  const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
    ? X86::GR16_RegisterClass : X86::GR32_RegisterClass;
  unsigned CopyReg = createResultReg(CopyRC);
  BuildMI(MBB, TII.get(CopyOpc), CopyReg).addReg(InputReg);

  // Then issue an extract_subreg.
  unsigned ResultReg = FastEmitInst_extractsubreg(CopyReg,1); // x86_subreg_8bit
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
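
// The detour through GR16_/GR32_ exists because in 32-bit mode only
// EAX/EBX/ECX/EDX have addressable 8-bit subregisters; those constrained
// register classes contain exactly such registers, making the
// extract_subreg to the low byte safe. In 64-bit mode every GPR has an
// 8-bit subreg, so tablegen handles the truncate directly (hence the early
// bail above).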

bool X86FastISel::X86SelectCall(Instruction *I) {
  CallInst *CI = cast<CallInst>(I);
  Value *Callee = I->getOperand(0);

  // Can't handle inline asm yet.
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: Handle some intrinsics.
  if (Function *F = CI->getCalledFunction()) {
    if (F->isDeclaration() && F->getIntrinsicID())
      return false;
  }

  // Materialize callee address in a register. FIXME: GV address can be
  // handled with a CALLpcrel32 instead.
  unsigned CalleeOp = getRegForValue(Callee);
  if (CalleeOp == 0) {
    if (!isa<Constant>(Callee) || !X86SelectConstAddr(Callee, CalleeOp, true))
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // Handle only C and fastcc calling conventions for now.
  CallSite CS(CI);
  unsigned CC = CS.getCallingConv();
  if (CC != CallingConv::C &&
      CC != CallingConv::Fast &&
      CC != CallingConv::X86_FastCall)
    return false;

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = CS.getType();
  MVT RetVT;
  if (!isTypeLegal(RetTy, TLI, RetVT, true))
    return false;

  // Allow calls which produce i1 results.
  bool AndToI1 = false;
  if (RetVT == MVT::i1) {
    RetVT = MVT::i8;
    AndToI1 = true;
  }

  // Deal with call operands first.
  SmallVector<unsigned, 4> Args;
  SmallVector<MVT, 4> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 4> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, ParamAttr::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, ParamAttr::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, ParamAttr::InReg) ||
        CS.paramHasAttr(AttrInd, ParamAttr::StructRet) ||
        CS.paramHasAttr(AttrInd, ParamAttr::Nest) ||
        CS.paramHasAttr(AttrInd, ParamAttr::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, TLI, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  BuildMI(MBB, TII.get(X86::ADJCALLSTACKDOWN)).addImm(NumBytes);

  // Process arguments: walk the register/memloc assignments, inserting
  // copies / stores.
  SmallVector<unsigned, 4> RegArgs;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = Args[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a sext!");
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a zext!");
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit an aext!");
      ArgVT = VA.getLocVT();
      break;
    }
    }

    if (VA.isRegLoc()) {
      TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
      bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
                                      Arg, RC, RC);
      assert(Emitted && "Failed to emit a copy instruction!");
      RegArgs.push_back(VA.getLocReg());
    } else {
      unsigned LocMemOffset = VA.getLocMemOffset();
      X86AddressMode AM;
      AM.Base.Reg = StackPtr;
      AM.Disp = LocMemOffset;
      X86FastEmitStore(ArgVT, Arg, AM);
    }
  }

  // Issue the call.
  unsigned CallOpc = CalleeOp
    ? (Subtarget->is64Bit() ? X86::CALL64r       : X86::CALL32r)
    : (Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32);
  MachineInstrBuilder MIB = CalleeOp
    ? BuildMI(MBB, TII.get(CallOpc)).addReg(CalleeOp)
    : BuildMI(MBB, TII.get(CallOpc)).addGlobalAddress(cast<GlobalValue>(Callee));
  // Add implicit physical register uses to the call.
  while (!RegArgs.empty()) {
    MIB.addReg(RegArgs.back());
    RegArgs.pop_back();
  }

  // Issue CALLSEQ_END
  BuildMI(MBB, TII.get(X86::ADJCALLSTACKUP)).addImm(NumBytes).addImm(0);

  // Now handle call return value (if any).
  if (RetVT.getSimpleVT() != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs);
    CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);

    // Copy all of the result registers out of their specified physreg.
    assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
    MVT CopyVT = RVLocs[0].getValVT();
    TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
    TargetRegisterClass *SrcRC = DstRC;

    // If this is a call to a function that returns an fp value on the x87 fp
    // stack, but where we prefer to use the value in xmm registers, copy it
    // out as F80 and use a truncate to move it from fp stack reg to xmm reg.
    if ((RVLocs[0].getLocReg() == X86::ST0 ||
         RVLocs[0].getLocReg() == X86::ST1) &&
        isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
      CopyVT = MVT::f80;
      SrcRC = X86::RSTRegisterClass;
      DstRC = X86::RFP80RegisterClass;
    }

    unsigned ResultReg = createResultReg(DstRC);
    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                    RVLocs[0].getLocReg(), DstRC, SrcRC);
    assert(Emitted && "Failed to emit a copy instruction!");
    if (CopyVT != RVLocs[0].getValVT()) {
      // Round the F80 to the right size, which also moves it to the
      // appropriate xmm register. This is accomplished by storing the F80
      // value in memory and then loading it back. Ewww...
      MVT ResVT = RVLocs[0].getValVT();
      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
      unsigned MemSize = ResVT.getSizeInBits()/8;
      int FI = MFI.CreateStackObject(MemSize, MemSize);
      addFrameReference(BuildMI(MBB, TII.get(Opc)), FI).addReg(ResultReg);
      DstRC = ResVT == MVT::f32
        ? X86::FR32RegisterClass : X86::FR64RegisterClass;
      Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
      ResultReg = createResultReg(DstRC);
      addFrameReference(BuildMI(MBB, TII.get(Opc), ResultReg), FI);
    }

    if (AndToI1) {
      // Mask out all but the lowest bit for a call which produces an i1.
      unsigned AndResult = createResultReg(X86::GR8RegisterClass);
      BuildMI(MBB, TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
      ResultReg = AndResult;
    }

    UpdateValueMap(I, ResultReg);
  }

  return true;
}

bool
X86FastISel::TargetSelectInstruction(Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  case Instruction::Call:
    return X86SelectCall(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::Select:
    return X86SelectSelect(I);
  case Instruction::Trunc:
    return X86SelectTrunc(I);
  case Instruction::FPExt:
    return X86SelectFPExt(I);
  case Instruction::FPTrunc:
    return X86SelectFPTrunc(I);
  }

  return false;
}

unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
  // Can't handle PIC-mode yet.
  if (TM.getRelocationModel() == Reloc::PIC_)
    return 0;

  MVT VT;
  if (!isTypeLegal(C->getType(), TLI, VT))
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return 0;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  unsigned ResultReg = createResultReg(RC);
  if (isa<GlobalValue>(C)) {
    if (X86SelectConstAddr(C, ResultReg, false, true))
      return ResultReg;
    return 0;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align =
    TM.getTargetData()->getPreferredTypeAlignmentShift(C->getType());
  if (Align == 0) {
    // Alignment of vector types. FIXME!
    Align = TM.getTargetData()->getABITypeSize(C->getType());
    Align = Log2_64(Align);
  }

  unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
  addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset);
  return ResultReg;
}
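
// Note: in TargetMaterializeConstant above, both
// getPreferredTypeAlignmentShift() and the Log2_64() fallback produce a
// log2 alignment, which is the form getConstantPoolIndex() takes here;
// addConstantPoolReference() then folds the pool entry's address into the
// load's memory operands.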

unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
  X86AddressMode AM;
  if (!X86SelectAddress(C, AM))
    return 0;
  unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
  unsigned ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
  return ResultReg;
}

namespace llvm {
  llvm::FastISel *X86::createFastISel(MachineFunction &mf,
                        DenseMap<const Value *, unsigned> &vm,
                        DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                        DenseMap<const AllocaInst *, int> &am) {
    return new X86FastISel(mf, vm, bm, am);
  }
}