1 //===-- X86ISelPattern.cpp - A pattern matching inst selector for X86 -----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a pattern matching instruction selector for X86.
12 //===----------------------------------------------------------------------===//
15 #include "X86InstrBuilder.h"
16 #include "X86RegisterInfo.h"
17 #include "llvm/CallingConv.h"
18 #include "llvm/Constants.h"
19 #include "llvm/Instructions.h"
20 #include "llvm/Function.h"
21 #include "llvm/CodeGen/MachineConstantPool.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/SelectionDAG.h"
25 #include "llvm/CodeGen/SelectionDAGISel.h"
26 #include "llvm/CodeGen/SSARegMap.h"
27 #include "llvm/Target/TargetData.h"
28 #include "llvm/Target/TargetLowering.h"
29 #include "llvm/Target/TargetOptions.h"
30 #include "llvm/Support/CFG.h"
31 #include "llvm/Support/MathExtras.h"
32 #include "llvm/ADT/Statistic.h"
38 #include "llvm/Support/CommandLine.h"
// EnableFastCC - Hidden command-line flag that gates use of the X86 'fast'
// calling convention; checked by LowerArguments/LowerCallTo below before
// dispatching to the LowerFastCC* paths.
39 static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
40 cl::desc("Enable fastcc on X86"));
// NOTE(review): this listing is elided (the embedded line numbers jump).
// The enum's opening (presumably "namespace X86ISD { enum NodeType {") and
// the enumerator lines documented below (FILD64m, CALL, TAILCALL) are not
// visible here — confirm against the full file.
43 // X86 Specific DAG Nodes
46 // Start the numbering where the builtin ops leave off.
47 FIRST_NUMBER = ISD::BUILTIN_OP_END,
49 /// FILD64m - This instruction implements SINT_TO_FP with a
50 /// 64-bit source in memory and a FP reg result. This corresponds to
51 /// the X86::FILD64m instruction. It has two inputs (token chain and
52 /// address) and two outputs (FP value and token chain).
55 /// CALL/TAILCALL - These operations represent an abstract X86 call
56 /// instruction, which includes a bunch of information. In particular the
57 /// operands of these node are:
59 /// #0 - The incoming token chain
61 /// #2 - The number of arg bytes the caller pushes on the stack.
62 /// #3 - The number of arg bytes the callee pops off the stack.
63 /// #4 - The value to pass in AL/AX/EAX (optional)
64 /// #5 - The value to pass in DL/DX/EDX (optional)
66 /// The result values of these nodes are:
68 /// #0 - The outgoing token chain
69 /// #1 - The first register result value (optional)
70 /// #2 - The second register result value (optional)
72 /// The CALL vs TAILCALL distinction boils down to whether the callee is
73 /// known not to modify the caller's stack frame, as is standard with
81 //===----------------------------------------------------------------------===//
82 // X86TargetLowering - X86 Implementation of the TargetLowering interface
// NOTE(review): several member lines and the closing "};" of this class are
// elided from this listing (embedded line numbers jump) — do not assume the
// visible declarations are exhaustive.
84 class X86TargetLowering : public TargetLowering {
85 int VarArgsFrameIndex; // FrameIndex for start of varargs area.
86 int ReturnAddrIndex; // FrameIndex for return slot.
87 int BytesToPopOnReturn; // Number of arg bytes ret should pop.
88 int BytesCallerReserves; // Number of arg bytes caller makes.
// Constructor: declares legal types/register classes, marks which DAG
// operations need Custom/Expand/Promote handling, and registers the FP
// immediates that are legal on x87.
90 X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
91 // Set up the TargetLowering object.
93 // X86 is weird, it always uses i8 for shift amounts and setcc results.
94 setShiftAmountType(MVT::i8);
95 setSetCCResultType(MVT::i8);
96 setSetCCResultContents(ZeroOrOneSetCCResult);
97 setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
99 // Set up the register classes.
100 addRegisterClass(MVT::i8, X86::R8RegisterClass);
101 addRegisterClass(MVT::i16, X86::R16RegisterClass);
102 addRegisterClass(MVT::i32, X86::R32RegisterClass);
103 addRegisterClass(MVT::f64, X86::RFPRegisterClass);
105 // FIXME: Eliminate these two classes when legalize can handle promotions
107 /**/ addRegisterClass(MVT::i1, X86::R8RegisterClass);
// i64 SINT_TO_FP is custom-lowered (see LowerOperation/FILD64m); the rest
// below are handed to the legalizer to expand.
109 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
110 setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
111 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
112 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
113 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
114 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
115 setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
116 setOperationAction(ISD::SREM , MVT::f64 , Expand);
// Bit-counting operations are expanded for every integer type.
117 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
118 setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
119 setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
120 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
121 setOperationAction(ISD::CTTZ , MVT::i16 , Expand);
122 setOperationAction(ISD::CTLZ , MVT::i16 , Expand);
123 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
124 setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
125 setOperationAction(ISD::CTLZ , MVT::i32 , Expand);
// The READIO/WRITEIO nodes are not selected directly for any width.
127 setOperationAction(ISD::READIO , MVT::i1 , Expand);
128 setOperationAction(ISD::READIO , MVT::i8 , Expand);
129 setOperationAction(ISD::READIO , MVT::i16 , Expand);
130 setOperationAction(ISD::READIO , MVT::i32 , Expand);
131 setOperationAction(ISD::WRITEIO , MVT::i1 , Expand);
132 setOperationAction(ISD::WRITEIO , MVT::i8 , Expand);
133 setOperationAction(ISD::WRITEIO , MVT::i16 , Expand);
134 setOperationAction(ISD::WRITEIO , MVT::i32 , Expand);
137 setOperationAction(ISD::FSIN , MVT::f64 , Expand);
138 setOperationAction(ISD::FCOS , MVT::f64 , Expand);
141 // These should be promoted to a larger select which is supported.
142 /**/ setOperationAction(ISD::SELECT , MVT::i1 , Promote);
143 setOperationAction(ISD::SELECT , MVT::i8 , Promote);
145 computeRegisterProperties();
// FP constants with a direct x87 load sequence (see mnemonic comments).
147 addLegalFPImmediate(+0.0); // FLD0
148 addLegalFPImmediate(+1.0); // FLD1
149 addLegalFPImmediate(-0.0); // FLD0/FCHS
150 addLegalFPImmediate(-1.0); // FLD1/FCHS
153 // Return the number of bytes that a function should pop when it returns (in
154 // addition to the space used by the return address).
156 unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }
158 // Return the number of bytes that the caller reserves for arguments passed
160 unsigned getBytesCallerReserves() const { return BytesCallerReserves; }
162 /// LowerOperation - Provide custom lowering hooks for some operations.
164 virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
166 /// LowerArguments - This hook must be implemented to indicate how we should
167 /// lower the arguments for the specified function, into the specified DAG.
168 virtual std::vector<SDOperand>
169 LowerArguments(Function &F, SelectionDAG &DAG);
171 /// LowerCallTo - This hook lowers an abstract call to a function into an
173 virtual std::pair<SDOperand, SDOperand>
174 LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg, unsigned CC,
175 bool isTailCall, SDOperand Callee, ArgListTy &Args,
178 virtual std::pair<SDOperand, SDOperand>
179 LowerVAStart(SDOperand Chain, SelectionDAG &DAG);
181 virtual std::pair<SDOperand,SDOperand>
182 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
183 const Type *ArgTy, SelectionDAG &DAG);
185 virtual std::pair<SDOperand, SDOperand>
186 LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
189 SDOperand getReturnAddressFrameIndex(SelectionDAG &DAG);
192 // C Calling Convention implementation.
193 std::vector<SDOperand> LowerCCCArguments(Function &F, SelectionDAG &DAG);
194 std::pair<SDOperand, SDOperand>
195 LowerCCCCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
197 SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
199 // Fast Calling Convention implementation.
200 std::vector<SDOperand> LowerFastCCArguments(Function &F, SelectionDAG &DAG);
201 std::pair<SDOperand, SDOperand>
202 LowerFastCCCallTo(SDOperand Chain, const Type *RetTy, bool isTailCall,
203 SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
// LowerArguments - Dispatch incoming-argument lowering: use the fastcc path
// only when the function is marked CallingConv::Fast AND -enable-x86-fastcc
// is on; otherwise fall back to the C calling convention path.
207 std::vector<SDOperand>
208 X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
209 if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
210 return LowerFastCCArguments(F, DAG);
211 return LowerCCCArguments(F, DAG);
// LowerCallTo - Dispatch outgoing-call lowering to the fastcc or C path,
// mirroring LowerArguments. Varargs are only permitted with the C convention.
// NOTE(review): two parameter lines (presumably "bool isTailCall" and
// "SelectionDAG &DAG") are elided from this listing — isTailCall and DAG are
// used in the body below.
214 std::pair<SDOperand, SDOperand>
215 X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
216 bool isVarArg, unsigned CallingConv,
218 SDOperand Callee, ArgListTy &Args,
220 assert((!isVarArg || CallingConv == CallingConv::C) &&
221 "Only C takes varargs!");
222 if (CallingConv == CallingConv::Fast && EnableFastCC)
223 return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
224 return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
227 //===----------------------------------------------------------------------===//
228 // C Calling Convention implementation
229 //===----------------------------------------------------------------------===//
// LowerCCCArguments - Lower incoming arguments for the C calling convention:
// every argument lives on the stack, 4-byte slots (8 for i64/f64), starting
// just above the return address. Also records the varargs frame index and the
// caller-pops bookkeeping (BytesToPopOnReturn = 0 for C).
231 std::vector<SDOperand>
232 X86TargetLowering::LowerCCCArguments(Function &F, SelectionDAG &DAG) {
233 std::vector<SDOperand> ArgValues;
235 MachineFunction &MF = DAG.getMachineFunction();
236 MachineFrameInfo *MFI = MF.getFrameInfo();
238 // Add DAG nodes to load the arguments... On entry to a function on the X86,
239 // the stack frame looks like this:
241 // [ESP] -- return address
242 // [ESP + 4] -- first argument (leftmost lexically)
243 // [ESP + 8] -- second argument, if first argument is four bytes in size
246 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
247 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
248 MVT::ValueType ObjectVT = getValueType(I->getType());
249 unsigned ArgIncrement = 4;
// NOTE(review): the "unsigned ObjSize" declaration and the
// "switch (ObjectVT) {" opener for the cases below are elided here.
252 default: assert(0 && "Unhandled argument type!");
254 case MVT::i8: ObjSize = 1; break;
255 case MVT::i16: ObjSize = 2; break;
256 case MVT::i32: ObjSize = 4; break;
257 case MVT::i64: ObjSize = ArgIncrement = 8; break;
258 case MVT::f32: ObjSize = 4; break;
259 case MVT::f64: ObjSize = ArgIncrement = 8; break;
261 // Create the frame index object for this incoming parameter...
262 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
264 // Create the SelectionDAG nodes corresponding to a load from this parameter
265 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
267 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
// Live arguments are loaded from their fixed stack slot; dead ones get a
// cheap zero constant of the right type instead of a load.
271 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
272 DAG.getSrcValue(NULL));
274 if (MVT::isInteger(ObjectVT))
275 ArgValue = DAG.getConstant(0, ObjectVT);
277 ArgValue = DAG.getConstantFP(0, ObjectVT);
279 ArgValues.push_back(ArgValue);
281 ArgOffset += ArgIncrement; // Move on to the next argument...
284 // If the function takes variable number of arguments, make a frame index for
285 // the start of the first vararg value... for expansion of llvm.va_start.
287 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
288 ReturnAddrIndex = 0; // No return address slot generated yet.
289 BytesToPopOnReturn = 0; // Callee pops nothing.
290 BytesCallerReserves = ArgOffset;
292 // Finally, inform the code generator which regs we return values in.
// NOTE(review): the case labels between the entries below (the integer,
// i64, and FP return cases) are elided from this listing.
293 switch (getValueType(F.getReturnType())) {
294 default: assert(0 && "Unknown type!");
295 case MVT::isVoid: break;
300 MF.addLiveOut(X86::EAX);
303 MF.addLiveOut(X86::EAX);
304 MF.addLiveOut(X86::EDX);
308 MF.addLiveOut(X86::ST0);
// LowerCCCCallTo - Lower an outgoing C-convention call: size the outgoing
// argument area, emit CALLSEQ_START, store each argument to its stack slot
// (promoting sub-32-bit integers to i32 first), emit the CALL/TAILCALL node,
// then CALLSEQ_END, and finally adapt the returned value(s) back to RetTy.
314 std::pair<SDOperand, SDOperand>
315 X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
316 bool isVarArg, bool isTailCall,
317 SDOperand Callee, ArgListTy &Args,
319 // Count how many bytes are to be pushed on the stack.
320 unsigned NumBytes = 0;
// NOTE(review): embedded line numbers jump throughout this function; the
// empty-args special case, the per-type NumBytes accumulation cases, and
// several case labels are elided from this listing.
324 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
325 DAG.getConstant(0, getPointerTy()));
327 for (unsigned i = 0, e = Args.size(); i != e; ++i)
328 switch (getValueType(Args[i].second)) {
329 default: assert(0 && "Unknown value type!");
343 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
344 DAG.getConstant(NumBytes, getPointerTy()));
346 // Arguments go on the stack in reverse order, as specified by the ABI.
347 unsigned ArgOffset = 0;
348 SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,
350 std::vector<SDOperand> Stores;
352 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
// PtrOff = ESP + running offset of this argument's slot.
353 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
354 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
356 switch (getValueType(Args[i].second)) {
357 default: assert(0 && "Unexpected ValueType for argument!");
361 // Promote the integer to 32 bits. If the input type is signed use a
362 // sign extend, otherwise use a zero extend.
363 if (Args[i].second->isSigned())
364 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
366 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
371 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
372 Args[i].first, PtrOff,
373 DAG.getSrcValue(NULL)));
378 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
379 Args[i].first, PtrOff,
380 DAG.getSrcValue(NULL)));
// Glue all the independent argument stores together with a TokenFactor.
385 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
388 std::vector<MVT::ValueType> RetVals;
389 MVT::ValueType RetTyVT = getValueType(RetTy);
// Result #0 of the call node is always the output token chain.
390 RetVals.push_back(MVT::Other);
392 // The result values produced have to be legal. Promote the result.
394 case MVT::isVoid: break;
396 RetVals.push_back(RetTyVT);
401 RetVals.push_back(MVT::i32);
404 RetVals.push_back(MVT::f64);
// i64 results come back as two i32 register values.
407 RetVals.push_back(MVT::i32);
408 RetVals.push_back(MVT::i32);
// CALL/TAILCALL operands: chain, callee, bytes-pushed, bytes-callee-pops
// (0 for the C convention — caller cleans up).
411 std::vector<SDOperand> Ops;
412 Ops.push_back(Chain);
413 Ops.push_back(Callee);
414 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
415 Ops.push_back(DAG.getConstant(0, getPointerTy()));
416 SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
418 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);
422 case MVT::isVoid: break;
424 ResultVal = TheCall.getValue(1);
429 ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
432 // FIXME: we would really like to remember that this FP_ROUND operation is
433 // okay to eliminate if we allow excess FP precision.
434 ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
437 ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
438 TheCall.getValue(2));
442 return std::make_pair(ResultVal, Chain);
// LowerVAStart - Lower llvm.va_start; no stores are needed because
// LowerCCCArguments already recorded where the varargs begin.
445 std::pair<SDOperand, SDOperand>
446 X86TargetLowering::LowerVAStart(SDOperand Chain, SelectionDAG &DAG) {
447 // vastart just returns the address of the VarArgsFrameIndex slot.
448 return std::make_pair(DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32), Chain);
// LowerVAArgNext - Lower llvm.va_arg / va_next: load the current value from
// the va_list pointer and/or advance the pointer by the argument's size.
// NOTE(review): several lines are elided here (the isVANext branch structure,
// the Result declaration, and the Amt computation are not fully visible).
451 std::pair<SDOperand,SDOperand> X86TargetLowering::
452 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
453 const Type *ArgTy, SelectionDAG &DAG) {
454 MVT::ValueType ArgVT = getValueType(ArgTy);
457 Result = DAG.getLoad(ArgVT, DAG.getEntryNode(), VAList,
458 DAG.getSrcValue(NULL));
461 if (ArgVT == MVT::i32)
// Only i32 (4 bytes) and i64/f64 (8 bytes) reach here; smaller types were
// promoted before being passed as varargs.
464 assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
465 "Other types should have been promoted for varargs!");
468 Result = DAG.getNode(ISD::ADD, VAList.getValueType(), VAList,
469 DAG.getConstant(Amt, VAList.getValueType()));
471 return std::make_pair(Result, Chain);
474 //===----------------------------------------------------------------------===//
475 // Fast Calling Convention implementation
476 //===----------------------------------------------------------------------===//
478 // The X86 'fast' calling convention passes up to two integer arguments in
479 // registers (an appropriate portion of EAX/EDX), passes arguments in C order,
480 // and requires that the callee pop its arguments off the stack (allowing proper
481 // tail calls), and has the same return value conventions as C calling convs.
483 // This calling convention always arranges for the callee pop value to be 8n+4
484 // bytes, which is needed for tail recursion elimination and stack alignment
487 // Note that this can be enhanced in the future to pass fp vals in registers
488 // (when we have a global fp allocator) and do other tricks.
491 /// AddLiveIn - This helper function adds the specified physical register to the
492 /// MachineFunction as a live in value. It also creates a corresponding virtual
// NOTE(review): the trailing "return VReg;" (and closing brace) are elided
// from this listing; the function returns the created virtual register.
494 static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
495 TargetRegisterClass *RC) {
496 assert(RC->contains(PReg) && "Not the correct regclass!");
497 unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
498 MF.addLiveIn(PReg, VReg);
// LowerFastCCArguments - Lower incoming arguments for the X86 'fast' calling
// convention: the first two integer arguments arrive in (portions of)
// EAX/EDX; everything else comes on the stack. Sets BytesToPopOnReturn so the
// callee cleans up its own stack arguments.
// NOTE(review): embedded line numbers jump throughout — switch openers,
// several closing braces, NumIntRegs increments, and some fall-through code
// are elided from this listing.
503 std::vector<SDOperand>
504 X86TargetLowering::LowerFastCCArguments(Function &F, SelectionDAG &DAG) {
505 std::vector<SDOperand> ArgValues;
507 MachineFunction &MF = DAG.getMachineFunction();
508 MachineFrameInfo *MFI = MF.getFrameInfo();
510 // Add DAG nodes to load the arguments... On entry to a function the stack
511 // frame looks like this:
513 // [ESP] -- return address
514 // [ESP + 4] -- first nonreg argument (leftmost lexically)
515 // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
517 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
519 // Keep track of the number of integer regs passed so far. This can be either
520 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
522 unsigned NumIntRegs = 0;
524 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
525 MVT::ValueType ObjectVT = getValueType(I->getType());
526 unsigned ArgIncrement = 4;
527 unsigned ObjSize = 0;
531 default: assert(0 && "Unhandled argument type!");
// i8 in a register: low byte of EAX (AL) or EDX (DL).
534 if (NumIntRegs < 2) {
535 if (!I->use_empty()) {
536 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
537 X86::R8RegisterClass);
538 ArgValue = DAG.getCopyFromReg(VReg, MVT::i8, DAG.getRoot());
539 DAG.setRoot(ArgValue.getValue(1));
// i16 in a register: AX or DX.
548 if (NumIntRegs < 2) {
549 if (!I->use_empty()) {
550 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
551 X86::R16RegisterClass);
552 ArgValue = DAG.getCopyFromReg(VReg, MVT::i16, DAG.getRoot());
553 DAG.setRoot(ArgValue.getValue(1));
// i32 in a register: EAX or EDX.
561 if (NumIntRegs < 2) {
562 if (!I->use_empty()) {
563 unsigned VReg = AddLiveIn(MF,NumIntRegs ? X86::EDX : X86::EAX,
564 X86::R32RegisterClass);
565 ArgValue = DAG.getCopyFromReg(VReg, MVT::i32, DAG.getRoot());
566 DAG.setRoot(ArgValue.getValue(1));
// i64: both halves in EAX:EDX when both regs are free...
574 if (NumIntRegs == 0) {
575 if (!I->use_empty()) {
576 unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
577 unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
579 SDOperand Low=DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
580 SDOperand Hi =DAG.getCopyFromReg(TopReg, MVT::i32, Low.getValue(1));
581 DAG.setRoot(Hi.getValue(1));
583 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
// ...or split: low half in EDX, high half from the stack.
587 } else if (NumIntRegs == 1) {
588 if (!I->use_empty()) {
589 unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
590 SDOperand Low = DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
591 DAG.setRoot(Low.getValue(1));
593 // Load the high part from memory.
594 // Create the frame index object for this incoming parameter...
595 int FI = MFI->CreateFixedObject(4, ArgOffset);
596 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
597 SDOperand Hi = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
598 DAG.getSrcValue(NULL));
599 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
605 ObjSize = ArgIncrement = 8;
607 case MVT::f32: ObjSize = 4; break;
608 case MVT::f64: ObjSize = ArgIncrement = 8; break;
611 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
// ObjSize != 0 means this argument (or part of it) lives on the stack.
613 if (ObjSize && !I->use_empty()) {
614 // Create the frame index object for this incoming parameter...
615 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
617 // Create the SelectionDAG nodes corresponding to a load from this
619 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
621 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
622 DAG.getSrcValue(NULL));
623 } else if (ArgValue.Val == 0) {
624 if (MVT::isInteger(ObjectVT))
625 ArgValue = DAG.getConstant(0, ObjectVT);
627 ArgValue = DAG.getConstantFP(0, ObjectVT);
629 ArgValues.push_back(ArgValue);
632 ArgOffset += ArgIncrement; // Move on to the next argument.
635 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
636 // arguments and the arguments after the retaddr has been pushed are aligned.
637 if ((ArgOffset & 7) == 0)
640 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
641 ReturnAddrIndex = 0; // No return address slot generated yet.
642 BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
643 BytesCallerReserves = 0;
645 // Finally, inform the code generator which regs we return values in.
// NOTE(review): the case labels between the entries below are elided.
646 switch (getValueType(F.getReturnType())) {
647 default: assert(0 && "Unknown type!");
648 case MVT::isVoid: break;
653 MF.addLiveOut(X86::EAX);
656 MF.addLiveOut(X86::EAX);
657 MF.addLiveOut(X86::EDX);
661 MF.addLiveOut(X86::ST0);
// LowerFastCCCallTo - Lower an outgoing fastcc call: the first two integer
// argument words travel in registers (collected in RegValuesToPass and
// appended to the CALL node's operands); the rest are stored to the outgoing
// stack area. The callee-pops operand equals the full stack-argument size.
// NOTE(review): embedded line numbers jump throughout — the NumBytes
// accumulation cases, several case labels, NumIntRegs updates, and the
// "NumBytes = (NumBytes+7) & ~7 ... +4" style alignment adjustments after the
// 8n+4 checks are elided from this listing.
667 std::pair<SDOperand, SDOperand>
668 X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
669 bool isTailCall, SDOperand Callee,
670 ArgListTy &Args, SelectionDAG &DAG) {
671 // Count how many bytes are to be pushed on the stack.
672 unsigned NumBytes = 0;
674 // Keep track of the number of integer regs passed so far. This can be either
675 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
677 unsigned NumIntRegs = 0;
// First pass: compute the stack space needed (register-passed words don't
// count toward NumBytes).
679 for (unsigned i = 0, e = Args.size(); i != e; ++i)
680 switch (getValueType(Args[i].second)) {
681 default: assert(0 && "Unknown value type!");
686 if (NumIntRegs < 2) {
695 if (NumIntRegs == 0) {
698 } else if (NumIntRegs == 1) {
710 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
711 // arguments and the arguments after the retaddr has been pushed are aligned.
712 if ((NumBytes & 7) == 0)
715 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
716 DAG.getConstant(NumBytes, getPointerTy()));
718 // Arguments go on the stack in reverse order, as specified by the ABI.
719 unsigned ArgOffset = 0;
720 SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,
723 std::vector<SDOperand> Stores;
724 std::vector<SDOperand> RegValuesToPass;
// Second pass: route each argument either into RegValuesToPass or into a
// store at ESP + ArgOffset.
725 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
726 switch (getValueType(Args[i].second)) {
727 default: assert(0 && "Unexpected ValueType for argument!");
732 if (NumIntRegs < 2) {
733 RegValuesToPass.push_back(Args[i].first);
739 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
740 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
741 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
742 Args[i].first, PtrOff,
743 DAG.getSrcValue(NULL)));
// i64: split into Lo/Hi words; pass as many words in regs as remain free,
// spilling the high word to memory if only one register is available.
748 if (NumIntRegs < 2) { // Can pass part of it in regs?
749 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
750 Args[i].first, DAG.getConstant(1, MVT::i32));
751 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
752 Args[i].first, DAG.getConstant(0, MVT::i32));
753 RegValuesToPass.push_back(Lo);
755 if (NumIntRegs < 2) { // Pass both parts in regs?
756 RegValuesToPass.push_back(Hi);
759 // Pass the high part in memory.
760 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
761 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
762 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
763 Hi, PtrOff, DAG.getSrcValue(NULL)));
770 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
771 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
772 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
773 Args[i].first, PtrOff,
774 DAG.getSrcValue(NULL)));
780 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
782 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
783 // arguments and the arguments after the retaddr has been pushed are aligned.
784 if ((ArgOffset & 7) == 0)
787 std::vector<MVT::ValueType> RetVals;
788 MVT::ValueType RetTyVT = getValueType(RetTy);
// Result #0 of the call node is always the output token chain.
790 RetVals.push_back(MVT::Other);
792 // The result values produced have to be legal. Promote the result.
794 case MVT::isVoid: break;
796 RetVals.push_back(RetTyVT);
801 RetVals.push_back(MVT::i32);
804 RetVals.push_back(MVT::f64);
807 RetVals.push_back(MVT::i32);
808 RetVals.push_back(MVT::i32);
// CALL/TAILCALL operands: chain, callee, bytes-pushed, bytes-callee-pops —
// both byte counts are ArgOffset because fastcc callees clean their stack.
812 std::vector<SDOperand> Ops;
813 Ops.push_back(Chain);
814 Ops.push_back(Callee);
815 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
816 // Callee pops all arg values on the stack.
817 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
819 // Pass register arguments as needed.
820 Ops.insert(Ops.end(), RegValuesToPass.begin(), RegValuesToPass.end());
822 SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
824 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);
828 case MVT::isVoid: break;
830 ResultVal = TheCall.getValue(1);
835 ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
838 // FIXME: we would really like to remember that this FP_ROUND operation is
839 // okay to eliminate if we allow excess FP precision.
840 ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
843 ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
844 TheCall.getValue(2));
848 return std::make_pair(ResultVal, Chain);
// getReturnAddressFrameIndex - Lazily create (then cache in ReturnAddrIndex)
// a fixed frame object at offset -4 covering the 4-byte return-address slot,
// and return it as a FrameIndex node.
851 SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
852 if (ReturnAddrIndex == 0) {
853 // Set up a frame object for the return address.
854 MachineFunction &MF = DAG.getMachineFunction();
855 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
858 return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
// LowerFrameReturnAddress - Lower llvm.returnaddress/llvm.frameaddress.
// Depth > 0 is unimplemented and yields constant 0. For depth 0: the return
// address is loaded from its frame slot; the frame address is computed as
// the slot address minus 4.
// NOTE(review): the "SelectionDAG &DAG" parameter line, the Result
// declaration, and the if/else structure selecting between the load and the
// SUB are elided from this listing.
863 std::pair<SDOperand, SDOperand> X86TargetLowering::
864 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
867 if (Depth) // Depths > 0 not supported yet!
868 Result = DAG.getConstant(0, getPointerTy());
870 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
872 // Just load the return address
873 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
874 DAG.getSrcValue(NULL));
876 Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
877 DAG.getConstant(4, MVT::i32));
879 return std::make_pair(Result, Chain);
882 /// LowerOperation - Provide custom lowering hooks for some operations.
// The only Custom-marked operation (see the constructor) is i64 SINT_TO_FP:
// store the i64 to an 8-byte-aligned stack temporary, then emit an
// X86ISD::FILD64m that loads/converts it, producing (f64, chain).
884 SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
885 switch (Op.getOpcode()) {
886 default: assert(0 && "Should not custom lower this!");
887 case ISD::SINT_TO_FP:
888 assert(Op.getValueType() == MVT::f64 &&
889 Op.getOperand(0).getValueType() == MVT::i64 &&
890 "Unknown SINT_TO_FP to lower!");
891 // We lower sint64->FP into a store to a temporary stack slot, followed by a
893 MachineFunction &MF = DAG.getMachineFunction();
894 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
895 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
896 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
897 Op.getOperand(0), StackSlot, DAG.getSrcValue(NULL))
898 std::vector<MVT::ValueType> RTs;
899 RTs.push_back(MVT::f64);
900 RTs.push_back(MVT::Other);
901 std::vector<SDOperand> Ops;
902 Ops.push_back(Store);
903 Ops.push_back(StackSlot);
904 return DAG.getNode(X86ISD::FILD64m, RTs, Ops);
909 //===----------------------------------------------------------------------===//
910 // Pattern Matcher Implementation
911 //===----------------------------------------------------------------------===//
914 /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
915 /// SDOperand's instead of register numbers for the leaves of the matched
// NOTE(review): this struct is heavily elided in this listing — the BaseType
// enum, the Base union members, Scale/IndexReg/Disp/GV fields, and the
// constructor header are not visible; only the anonymous-struct comment and
// part of the constructor's init list remain.
917 struct X86ISelAddressMode {
923 struct { // This is really a union, discriminated by BaseType!
934 : BaseType(RegBase), Scale(1), IndexReg(), Disp(), GV(0) {
// NOTE(review): the declaration line introducing this Statistic (presumably
// "static Statistic<> NumFPKill") is elided from this listing; this is its
// constructor-argument line.
942 NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
944 //===--------------------------------------------------------------------===//
945 /// ISel - X86 specific code to select X86 machine instructions for
946 /// SelectionDAG operations.
// NOTE(review): member lines (e.g. the ContainsFPCode bool declaration, the
// constructor body, several closing braces) and the class's closing "};" are
// elided from this listing.
948 class ISel : public SelectionDAGISel {
949 /// ContainsFPCode - Every instruction we select that uses or defines a FP
950 /// register should set this to true.
953 /// X86Lowering - This object fully describes how to lower LLVM code to an
954 /// X86-specific SelectionDAG.
955 X86TargetLowering X86Lowering;
957 /// RegPressureMap - This keeps an approximate count of the number of
958 /// registers required to evaluate each node in the graph.
959 std::map<SDNode*, unsigned> RegPressureMap;
961 /// ExprMap - As shared expressions are codegen'd, we keep track of which
962 /// vreg the value is produced in, so we only emit one copy of each compiled
964 std::map<SDOperand, unsigned> ExprMap;
966 /// TheDAG - The DAG being selected during Select* operations.
967 SelectionDAG *TheDAG;
969 ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
972 virtual const char *getPassName() const {
973 return "X86 Pattern Instruction Selection";
// getRegPressure - Look up the cached pressure for O's node (computed by
// ComputeRegPressure).
976 unsigned getRegPressure(SDOperand O) {
977 return RegPressureMap[O.Val];
979 unsigned ComputeRegPressure(SDOperand O);
981 /// InstructionSelectBasicBlock - This callback is invoked by
982 /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
983 virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
985 virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);
// Selection helpers: load folding, compare/branch/select emission, address
// matching, and tail-call emission (defined later in the file).
987 bool isFoldableLoad(SDOperand Op, SDOperand OtherOp,
988 bool FloatPromoteOk = false);
989 void EmitFoldedLoad(SDOperand Op, X86AddressMode &AM);
990 bool TryToFoldLoadOpStore(SDNode *Node);
991 bool EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg);
992 void EmitCMP(SDOperand LHS, SDOperand RHS, bool isOnlyUse);
993 bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, SDOperand Cond);
994 void EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
995 unsigned RTrue, unsigned RFalse, unsigned RDest);
996 unsigned SelectExpr(SDOperand N);
998 X86AddressMode SelectAddrExprs(const X86ISelAddressMode &IAM);
999 bool MatchAddress(SDOperand N, X86ISelAddressMode &AM);
1000 void SelectAddress(SDOperand N, X86AddressMode &AM);
1001 bool EmitPotentialTailCall(SDNode *Node);
1002 void EmitFastCCToFastCCTailCall(SDNode *TailCallNode);
1003 void Select(SDOperand N);
1007 /// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
1008 /// the main function.
// Concretely: rewrite the x87 control word in a 2-byte stack temporary —
// FNSTCW stores it, a byte store patches the precision-control field, and
// FLDCW reloads it.
1009 static void EmitSpecialCodeForMain(MachineBasicBlock *BB,
1010 MachineFrameInfo *MFI) {
1011 // Switch the FPU to 64-bit precision mode for better compatibility and speed.
1012 int CWFrameIdx = MFI->CreateStackObject(2, 2);
1013 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
1015 // Set the high part to be 64-bit precision.
1016 addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
1017 CWFrameIdx, 1).addImm(2);
1019 // Reload the modified control word now.
1020 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
// EmitFunctionEntryCode - Emit entry-block setup: copy each physreg live-in
// into its virtual register (choosing the move opcode by register class),
// and emit the FPU-control-word fixup when compiling main().
1023 void ISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
1024 // If this function has live-in values, emit the copies from pregs to vregs at
1025 // the top of the function, before anything else.
1026 MachineBasicBlock *BB = MF.begin();
1027 if (MF.livein_begin() != MF.livein_end()) {
1028 SSARegMap *RegMap = MF.getSSARegMap();
1029 for (MachineFunction::livein_iterator LI = MF.livein_begin(),
1030 E = MF.livein_end(); LI != E; ++LI) {
// LI->first is the physical register, LI->second the virtual register
// (see AddLiveIn above).
1031 const TargetRegisterClass *RC = RegMap->getRegClass(LI->second);
1032 if (RC == X86::R8RegisterClass) {
1033 BuildMI(BB, X86::MOV8rr, 1, LI->second).addReg(LI->first);
1034 } else if (RC == X86::R16RegisterClass) {
1035 BuildMI(BB, X86::MOV16rr, 1, LI->second).addReg(LI->first);
1036 } else if (RC == X86::R32RegisterClass) {
1037 BuildMI(BB, X86::MOV32rr, 1, LI->second).addReg(LI->first);
1038 } else if (RC == X86::RFPRegisterClass) {
1039 BuildMI(BB, X86::FpMOV, 1, LI->second).addReg(LI->first);
1041 assert(0 && "Unknown regclass!");
1047 // If this is main, emit special code for main.
1048 if (Fn.hasExternalLinkage() && Fn.getName() == "main")
1049 EmitSpecialCodeForMain(BB, MF.getFrameInfo());
1053 /// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
1054 /// when it has created a SelectionDAG for us to codegen.
1055 void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
1056 // While we're doing this, keep track of whether we see any FP code for
1057 // FP_REG_KILL insertion.
1058 ContainsFPCode = false;
1059 MachineFunction *MF = BB->getParent();
1061 // Scan the PHI nodes that already are inserted into this basic block. If any
1062 // of them is a PHI of a floating point value, we need to insert an
1064 SSARegMap *RegMap = MF->getSSARegMap();
// The entry block has no PHIs to scan; only check non-entry blocks.
1065 if (BB != MF->begin())
1066 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
1068 assert(I->getOpcode() == X86::PHI &&
1069 "Isn't just PHI nodes?");
// An FP-class PHI result means this block touches the x87 stack.
1070 if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
1071 X86::RFPRegisterClass) {
1072 ContainsFPCode = true;
1077 // Compute the RegPressureMap, which is an approximation for the number of
1078 // registers required to compute each node.
1079 ComputeRegPressure(DAG.getRoot());
1083 // Codegen the basic block.
1084 Select(DAG.getRoot());
1088 // Finally, look at all of the successors of this block. If any contain a PHI
1089 // node of FP type, we need to insert an FP_REG_KILL in this block.
// Note the "!ContainsFPCode" loop condition: scanning stops as soon as one
// FP PHI is found anywhere.
1090 for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
1091 E = BB->succ_end(); SI != E && !ContainsFPCode; ++SI)
1092 for (MachineBasicBlock::iterator I = (*SI)->begin(), E = (*SI)->end();
1093 I != E && I->getOpcode() == X86::PHI; ++I) {
1094 if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
1095 X86::RFPRegisterClass) {
1096 ContainsFPCode = true;
1101 // Final check, check LLVM BB's that are successors to the LLVM BB
1102 // corresponding to BB for FP PHI nodes.
// This catches FP PHIs in LLVM IR successors whose machine PHIs have not been
// created yet at this point.
1103 const BasicBlock *LLVMBB = BB->getBasicBlock();
1105 if (!ContainsFPCode)
1106 for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
1107 SI != E && !ContainsFPCode; ++SI)
1108 for (BasicBlock::const_iterator II = SI->begin();
1109 (PN = dyn_cast<PHINode>(II)); ++II)
1110 if (PN->getType()->isFloatingPoint()) {
1111 ContainsFPCode = true;
1116 // Insert FP_REG_KILL instructions into basic blocks that need them. This
1117 // only occurs due to the floating point stackifier not being aggressive
1118 // enough to handle arbitrary global stackification.
1120 // Currently we insert an FP_REG_KILL instruction into each block that uses or
1121 // defines a floating point virtual register.
1123 // When the global register allocators (like linear scan) finally update live
1124 // variable analysis, we can keep floating point values in registers across
1125 // basic blocks. This will be a huge win, but we are waiting on the global
1126 // allocators before we can do this.
1128 if (ContainsFPCode) {
// Place the kill just before the block's terminator so all FP values defined
// in this block are dead at the block boundary.
1129 BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
1133 // Clear state used for selection.
// RegPressureMap is per-DAG; it must not leak into the next basic block.
1135 RegPressureMap.clear();
1139 // ComputeRegPressure - Compute the RegPressureMap, which is an approximation
1140 // for the number of registers required to compute each node. This is basically
1141 // computing a generalized form of the Sethi-Ullman number for each node.
1142 unsigned ISel::ComputeRegPressure(SDOperand O) {
// Memoized: a nonzero entry in RegPressureMap means this node was already
// computed; note that Result is a reference into the map, so assigning it
// below caches the value.
1144 unsigned &Result = RegPressureMap[N];
1145 if (Result) return Result;
1147 // FIXME: Should operations like CALL (which clobber lots o regs) have a
1148 // higher fixed cost??
// Leaf nodes (no operands) take the base-cost path.
1150 if (N->getNumOperands() == 0) {
1153 unsigned MaxRegUse = 0;
1154 unsigned NumExtraMaxRegUsers = 0;
// Recursively compute pressure for each operand, tracking the maximum and
// how many additional operands tie that maximum (Sethi-Ullman style).
1155 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
// Constants are free: they never occupy a register here.
1157 if (N->getOperand(i).getOpcode() == ISD::Constant)
1160 Regs = ComputeRegPressure(N->getOperand(i));
1161 if (Regs > MaxRegUse) {
1163 NumExtraMaxRegUsers = 0;
// Ties only count when the operand produces a real value (not a chain).
1164 } else if (Regs == MaxRegUse &&
1165 N->getOperand(i).getValueType() != MVT::Other) {
1166 ++NumExtraMaxRegUsers;
// TokenFactor nodes produce no value, so they get a reduced cost below.
1170 if (O.getOpcode() != ISD::TokenFactor)
1171 Result = MaxRegUse+NumExtraMaxRegUsers;
1173 Result = MaxRegUse == 1 ? 0 : MaxRegUse-1;
1176 //std::cerr << " WEIGHT: " << Result << " "; N->dump(); std::cerr << "\n";
1180 /// NodeTransitivelyUsesValue - Return true if N or any of its uses uses Op.
1181 /// The DAG cannot have cycles in it, by definition, so the visited set is not
1182 /// needed to prevent infinite loops. The DAG CAN, however, have unbounded
1183 /// reuse, so it prevents exponential cases.
1185 static bool NodeTransitivelyUsesValue(SDOperand N, SDOperand Op,
1186 std::set<SDNode*> &Visited) {
1187 if (N == Op) return true; // Found it.
1188 SDNode *Node = N.Val;
// Depth pruning: operands always have strictly greater depth than their
// users, so a node no deeper than Op cannot transitively reach it.
1189 if (Node->getNumOperands() == 0 || // Leaf?
1190 Node->getNodeDepth() <= Op.getNodeDepth()) return false; // Can't find it?
1191 if (!Visited.insert(Node).second) return false; // Already visited?
1193 // Recurse for the first N-1 operands.
// NOTE: operands 1..e-1 are searched here; operand 0 is handled last so the
// final call can be a tail call.
1194 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
1195 if (NodeTransitivelyUsesValue(Node->getOperand(i), Op, Visited))
1198 // Tail recurse for the last operand.
1199 return NodeTransitivelyUsesValue(Node->getOperand(0), Op, Visited);
// SelectAddrExprs - Lower a matched X86ISelAddressMode (still holding DAG
// operands) into a concrete X86AddressMode with emitted registers. Emission
// order of the base and index expressions is chosen by register pressure,
// unless one expression transitively uses the other, which forces an order.
1202 X86AddressMode ISel::SelectAddrExprs(const X86ISelAddressMode &IAM) {
1203 X86AddressMode Result;
1205 // If we need to emit two register operands, emit the one with the highest
1206 // register pressure first.
1207 if (IAM.BaseType == X86ISelAddressMode::RegBase &&
1208 IAM.Base.Reg.Val && IAM.IndexReg.Val) {
1209 bool EmitBaseThenIndex;
// Base is more expensive: prefer emitting it first...
1210 if (getRegPressure(IAM.Base.Reg) > getRegPressure(IAM.IndexReg)) {
1211 std::set<SDNode*> Visited;
1212 EmitBaseThenIndex = true;
1213 // If Base ends up pointing to Index, we must emit index first. This is
1214 // because of the way we fold loads, we may end up doing bad things with
1216 if (NodeTransitivelyUsesValue(IAM.Base.Reg, IAM.IndexReg, Visited))
1217 EmitBaseThenIndex = false;
// ...otherwise prefer index first, with the symmetric dependence override.
1219 std::set<SDNode*> Visited;
1220 EmitBaseThenIndex = false;
1221 // If Base ends up pointing to Index, we must emit index first. This is
1222 // because of the way we fold loads, we may end up doing bad things with
1224 if (NodeTransitivelyUsesValue(IAM.IndexReg, IAM.Base.Reg, Visited))
1225 EmitBaseThenIndex = true;
1228 if (EmitBaseThenIndex) {
1229 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
1230 Result.IndexReg = SelectExpr(IAM.IndexReg);
1232 Result.IndexReg = SelectExpr(IAM.IndexReg);
1233 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
// Only one of base/index present: emit whichever exists.
1236 } else if (IAM.BaseType == X86ISelAddressMode::RegBase && IAM.Base.Reg.Val) {
1237 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
1238 } else if (IAM.IndexReg.Val) {
1239 Result.IndexReg = SelectExpr(IAM.IndexReg);
// Copy over the non-register pieces of the addressing mode.
1242 switch (IAM.BaseType) {
1243 case X86ISelAddressMode::RegBase:
1244 Result.BaseType = X86AddressMode::RegBase;
1246 case X86ISelAddressMode::FrameIndexBase:
1247 Result.BaseType = X86AddressMode::FrameIndexBase;
1248 Result.Base.FrameIndex = IAM.Base.FrameIndex;
1251 assert(0 && "Unknown base type!");
1254 Result.Scale = IAM.Scale;
1255 Result.Disp = IAM.Disp;
1260 /// SelectAddress - Pattern match the maximal addressing mode for this node and
1261 /// emit all of the leaf registers.
1262 void ISel::SelectAddress(SDOperand N, X86AddressMode &AM) {
// Two-phase: MatchAddress only pattern-matches (no code emitted);
// SelectAddrExprs then emits the leaf register expressions.
1263 X86ISelAddressMode IAM;
1264 MatchAddress(N, IAM);
1265 AM = SelectAddrExprs(IAM);
1268 /// MatchAddress - Add the specified node to the specified addressing mode,
1269 /// returning true if it cannot be done. This just pattern matches for the
1270 /// addressing mode, it does not cause any code to be emitted. For that, use
1272 bool ISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM) {
1273 switch (N.getOpcode()) {
// Frame index can serve as the base if no register base is in use yet.
1275 case ISD::FrameIndex:
1276 if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
1277 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
1278 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
// Global addresses fold into the GV field of the addressing mode.
1282 case ISD::GlobalAddress:
1284 AM.GV = cast<GlobalAddressSDNode>(N)->getGlobal();
// Integer constants accumulate into the displacement.
1289 AM.Disp += cast<ConstantSDNode>(N)->getValue();
1292 // We might have folded the load into this shift, so don't regen the value
1294 if (ExprMap.count(N)) break;
// Shift-left by 1/2/3 becomes a scale of 2/4/8 on the index register.
1296 if (AM.IndexReg.Val == 0 && AM.Scale == 1)
1297 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
1298 unsigned Val = CN->getValue();
1299 if (Val == 1 || Val == 2 || Val == 3) {
1300 AM.Scale = 1 << Val;
1301 SDOperand ShVal = N.Val->getOperand(0);
1303 // Okay, we know that we have a scale by now. However, if the scaled
1304 // value is an add of something and a constant, we can fold the
1305 // constant into the disp field here.
1306 if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
1307 isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
1308 AM.IndexReg = ShVal.Val->getOperand(0);
1309 ConstantSDNode *AddVal =
1310 cast<ConstantSDNode>(ShVal.Val->getOperand(1));
// The folded constant must be pre-scaled by the shift amount.
1311 AM.Disp += AddVal->getValue() << Val;
1313 AM.IndexReg = ShVal;
1320 // We might have folded the load into this mul, so don't regen the value if
1322 if (ExprMap.count(N)) break;
1324 // X*[3,5,9] -> X+X*[2,4,8]
// Requires both base and index to be free: X becomes the base AND the
// scaled index simultaneously.
1325 if (AM.IndexReg.Val == 0 && AM.BaseType == X86ISelAddressMode::RegBase &&
1326 AM.Base.Reg.Val == 0)
1327 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
1328 if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
1329 AM.Scale = unsigned(CN->getValue())-1;
1331 SDOperand MulVal = N.Val->getOperand(0);
1334 // Okay, we know that we have a scale by now. However, if the scaled
1335 // value is an add of something and a constant, we can fold the
1336 // constant into the disp field here.
1337 if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
1338 isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
1339 Reg = MulVal.Val->getOperand(0);
1340 ConstantSDNode *AddVal =
1341 cast<ConstantSDNode>(MulVal.Val->getOperand(1));
1342 AM.Disp += AddVal->getValue() * CN->getValue();
1344 Reg = N.Val->getOperand(0);
1347 AM.IndexReg = AM.Base.Reg = Reg;
1353 // We might have folded the load into this mul, so don't regen the value if
1355 if (ExprMap.count(N)) break;
// Try matching both operands of the add into AM, in either order; Backup
// allows restoring the mode if a partial match has to be abandoned.
1357 X86ISelAddressMode Backup = AM;
1358 if (!MatchAddress(N.Val->getOperand(0), AM) &&
1359 !MatchAddress(N.Val->getOperand(1), AM))
1362 if (!MatchAddress(N.Val->getOperand(1), AM) &&
1363 !MatchAddress(N.Val->getOperand(0), AM))
1370 // Is the base register already occupied?
1371 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
1372 // If so, check to see if the scale index register is set.
1373 if (AM.IndexReg.Val == 0) {
1379 // Otherwise, we cannot select it.
1383 // Default, generate it as a register.
1384 AM.BaseType = X86ISelAddressMode::RegBase;
1389 /// Emit2SetCCsAndLogical - Emit the following sequence of instructions,
1390 /// assuming that the temporary registers are in the 8-bit register class.
1394 /// DestReg = logicalop Tmp1, Tmp2
1396 static void Emit2SetCCsAndLogical(MachineBasicBlock *BB, unsigned SetCC1,
1397 unsigned SetCC2, unsigned LogicalOp,
1399 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
// Two scratch 8-bit vregs hold the individual condition results, which are
// then combined with the given logical opcode (AND/OR) into DestReg.
1400 unsigned Tmp1 = RegMap->createVirtualRegister(X86::R8RegisterClass);
1401 unsigned Tmp2 = RegMap->createVirtualRegister(X86::R8RegisterClass);
1402 BuildMI(BB, SetCC1, 0, Tmp1);
1403 BuildMI(BB, SetCC2, 0, Tmp2);
1404 BuildMI(BB, LogicalOp, 2, DestReg).addReg(Tmp1).addReg(Tmp2);
1407 /// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
1408 /// condition codes match the specified SetCCOpcode. Note that some conditions
1409 /// require multiple instructions to generate the correct value.
1410 static void EmitSetCC(MachineBasicBlock *BB, unsigned DestReg,
1411 ISD::CondCode SetCCOpcode, bool isFP) {
// Integer path: every condition maps 1:1 to a single SETcc opcode.
1414 switch (SetCCOpcode) {
1415 default: assert(0 && "Illegal integer SetCC!");
1416 case ISD::SETEQ: Opc = X86::SETEr; break;
1417 case ISD::SETGT: Opc = X86::SETGr; break;
1418 case ISD::SETGE: Opc = X86::SETGEr; break;
1419 case ISD::SETLT: Opc = X86::SETLr; break;
1420 case ISD::SETLE: Opc = X86::SETLEr; break;
1421 case ISD::SETNE: Opc = X86::SETNEr; break;
1422 case ISD::SETULT: Opc = X86::SETBr; break;
1423 case ISD::SETUGT: Opc = X86::SETAr; break;
1424 case ISD::SETULE: Opc = X86::SETBEr; break;
1425 case ISD::SETUGE: Opc = X86::SETAEr; break;
1428 // On a floating point condition, the flags are set as follows:
1430 // 0 | 0 | 0 | X > Y
1431 // 0 | 0 | 1 | X < Y
1432 // 1 | 0 | 0 | X == Y
1433 // 1 | 1 | 1 | unordered
1435 switch (SetCCOpcode) {
1436 default: assert(0 && "Invalid FP setcc!");
1439 Opc = X86::SETEr; // True if ZF = 1
1443 Opc = X86::SETAr; // True if CF = 0 and ZF = 0
1447 Opc = X86::SETAEr; // True if CF = 0
1451 Opc = X86::SETBr; // True if CF = 1
1455 Opc = X86::SETBEr; // True if CF = 1 or ZF = 1
1459 Opc = X86::SETNEr; // True if ZF = 0
1462 Opc = X86::SETPr; // True if PF = 1
1465 Opc = X86::SETNPr; // True if PF = 0
// Ordered/unordered conditions that need two flag tests are synthesized from
// two SETcc's combined with AND/OR via Emit2SetCCsAndLogical.
1467 case ISD::SETOEQ: // !PF & ZF
1468 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETEr, X86::AND8rr, DestReg);
1470 case ISD::SETOLT: // !PF & CF
1471 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBr, X86::AND8rr, DestReg);
1473 case ISD::SETOLE: // !PF & (CF || ZF)
1474 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBEr, X86::AND8rr, DestReg);
1476 case ISD::SETUGT: // PF | (!ZF & !CF)
1477 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAr, X86::OR8rr, DestReg);
1479 case ISD::SETUGE: // PF | !CF
1480 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAEr, X86::OR8rr, DestReg);
1482 case ISD::SETUNE: // PF | !ZF
1483 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETNEr, X86::OR8rr, DestReg);
// Single-instruction conditions land here with Opc chosen above.
1487 BuildMI(BB, Opc, 0, DestReg);
1491 /// EmitBranchCC - Emit code into BB that arranges for control to transfer to
1492 /// the Dest block if the Cond condition is true. If we cannot fold this
1493 /// condition into the branch, return true.
1495 bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
1497 // FIXME: Evaluate whether it would be good to emit code like (X < Y) | (A >
1498 // B) using two conditional branches instead of one condbr, two setcc's, and
// Case 1: branch on the result of a single-use AND/OR -- the logical op
// already set ZF, so we can jump on JNE without a separate TEST.
1500 if ((Cond.getOpcode() == ISD::OR ||
1501 Cond.getOpcode() == ISD::AND) && Cond.Val->hasOneUse()) {
1502 // And and or set the flags for us, so there is no need to emit a TST of the
1503 // result. It is only safe to do this if there is only a single use of the
1504 // AND/OR though, otherwise we don't know it will be emitted here.
1507 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
1511 // Codegen br not C -> JE.
// Case 2: branch on (C xor -1), i.e. "not C": test C and invert the jump.
1512 if (Cond.getOpcode() == ISD::XOR)
1513 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(Cond.Val->getOperand(1)))
1514 if (NC->isAllOnesValue()) {
// Emit chain and condition in register-pressure order.
1516 if (getRegPressure(Chain) > getRegPressure(Cond)) {
1518 CondR = SelectExpr(Cond.Val->getOperand(0));
1520 CondR = SelectExpr(Cond.Val->getOperand(0));
1523 BuildMI(BB, X86::TEST8rr, 2).addReg(CondR).addReg(CondR);
1524 BuildMI(BB, X86::JE, 1).addMBB(Dest);
// Case 3: branch on a SetCC node, folding the compare into the branch.
1528 SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond);
1530 return true; // Can only handle simple setcc's so far.
1534 // Handle integer conditions first.
1535 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
1536 switch (SetCC->getCondition()) {
1537 default: assert(0 && "Illegal integer SetCC!");
1538 case ISD::SETEQ: Opc = X86::JE; break;
1539 case ISD::SETGT: Opc = X86::JG; break;
1540 case ISD::SETGE: Opc = X86::JGE; break;
1541 case ISD::SETLT: Opc = X86::JL; break;
1542 case ISD::SETLE: Opc = X86::JLE; break;
1543 case ISD::SETNE: Opc = X86::JNE; break;
1544 case ISD::SETULT: Opc = X86::JB; break;
1545 case ISD::SETUGT: Opc = X86::JA; break;
1546 case ISD::SETULE: Opc = X86::JBE; break;
1547 case ISD::SETUGE: Opc = X86::JAE; break;
1550 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse());
1551 BuildMI(BB, Opc, 1).addMBB(Dest);
// FP conditions may require two branch instructions (Opc and Opc2).
1555 unsigned Opc2 = 0; // Second branch if needed.
1557 // On a floating point condition, the flags are set as follows:
1559 // 0 | 0 | 0 | X > Y
1560 // 0 | 0 | 1 | X < Y
1561 // 1 | 0 | 0 | X == Y
1562 // 1 | 1 | 1 | unordered
1564 switch (SetCC->getCondition()) {
1565 default: assert(0 && "Invalid FP setcc!");
1567 case ISD::SETEQ: Opc = X86::JE; break; // True if ZF = 1
1569 case ISD::SETGT: Opc = X86::JA; break; // True if CF = 0 and ZF = 0
1571 case ISD::SETGE: Opc = X86::JAE; break; // True if CF = 0
1573 case ISD::SETLT: Opc = X86::JB; break; // True if CF = 1
1575 case ISD::SETLE: Opc = X86::JBE; break; // True if CF = 1 or ZF = 1
1577 case ISD::SETNE: Opc = X86::JNE; break; // True if ZF = 0
1578 case ISD::SETUO: Opc = X86::JP; break; // True if PF = 1
1579 case ISD::SETO: Opc = X86::JNP; break; // True if PF = 0
1580 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
1581 Opc = X86::JA; // ZF = 0 & CF = 0
1582 Opc2 = X86::JP; // PF = 1
1584 case ISD::SETUGE: // PF = 1 | CF = 0
1585 Opc = X86::JAE; // CF = 0
1586 Opc2 = X86::JP; // PF = 1
1588 case ISD::SETUNE: // PF = 1 | ZF = 0
1589 Opc = X86::JNE; // ZF = 0
1590 Opc2 = X86::JP; // PF = 1
// AND-of-flags conditions cannot be expressed as two branches to the same
// target; bail out and let the caller materialize the setcc.
1592 case ISD::SETOEQ: // PF = 0 & ZF = 1
1595 return true; // FIXME: Emit more efficient code for this branch.
1596 case ISD::SETOLT: // PF = 0 & CF = 1
1599 return true; // FIXME: Emit more efficient code for this branch.
1600 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
1601 //X86::JNP, X86::JBE
1603 return true; // FIXME: Emit more efficient code for this branch.
1607 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse());
1608 BuildMI(BB, Opc, 1).addMBB(Dest);
1610 BuildMI(BB, Opc2, 1).addMBB(Dest);
1614 /// EmitSelectCC - Emit code into BB that performs a select operation between
1615 /// the two registers RTrue and RFalse, generating a result into RDest. Return
1616 /// true if the fold cannot be performed.
1618 void ISel::EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
1619 unsigned RTrue, unsigned RFalse, unsigned RDest) {
// Local condition-code enum used to index the CMOV opcode tables below.
1621 EQ, NE, LT, LE, GT, GE, B, BE, A, AE, P, NP,
1623 } CondCode = NOT_SET;
// Opcode tables, indexed by CondCode, for 16-bit, 32-bit, and FP selects.
1625 static const unsigned CMOVTAB16[] = {
1626 X86::CMOVE16rr, X86::CMOVNE16rr, X86::CMOVL16rr, X86::CMOVLE16rr,
1627 X86::CMOVG16rr, X86::CMOVGE16rr, X86::CMOVB16rr, X86::CMOVBE16rr,
1628 X86::CMOVA16rr, X86::CMOVAE16rr, X86::CMOVP16rr, X86::CMOVNP16rr,
1630 static const unsigned CMOVTAB32[] = {
1631 X86::CMOVE32rr, X86::CMOVNE32rr, X86::CMOVL32rr, X86::CMOVLE32rr,
1632 X86::CMOVG32rr, X86::CMOVGE32rr, X86::CMOVB32rr, X86::CMOVBE32rr,
1633 X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
// FCMOV has no signed-compare forms; the zero entries mark unsupported slots.
1635 static const unsigned CMOVTABFP[] = {
1636 X86::FCMOVE , X86::FCMOVNE, /*missing*/0, /*missing*/0,
1637 /*missing*/0, /*missing*/0, X86::FCMOVB , X86::FCMOVBE,
1638 X86::FCMOVA , X86::FCMOVAE, X86::FCMOVP , X86::FCMOVNP
// If the condition is a simple setcc, map it to a CondCode for table lookup.
1641 if (SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond)) {
1642 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
1643 switch (SetCC->getCondition()) {
1644 default: assert(0 && "Unknown integer comparison!");
1645 case ISD::SETEQ: CondCode = EQ; break;
1646 case ISD::SETGT: CondCode = GT; break;
1647 case ISD::SETGE: CondCode = GE; break;
1648 case ISD::SETLT: CondCode = LT; break;
1649 case ISD::SETLE: CondCode = LE; break;
1650 case ISD::SETNE: CondCode = NE; break;
1651 case ISD::SETULT: CondCode = B; break;
1652 case ISD::SETUGT: CondCode = A; break;
1653 case ISD::SETULE: CondCode = BE; break;
1654 case ISD::SETUGE: CondCode = AE; break;
1657 // On a floating point condition, the flags are set as follows:
1659 // 0 | 0 | 0 | X > Y
1660 // 0 | 0 | 1 | X < Y
1661 // 1 | 0 | 0 | X == Y
1662 // 1 | 1 | 1 | unordered
1664 switch (SetCC->getCondition()) {
1665 default: assert(0 && "Unknown FP comparison!");
1667 case ISD::SETEQ: CondCode = EQ; break; // True if ZF = 1
1669 case ISD::SETGT: CondCode = A; break; // True if CF = 0 and ZF = 0
1671 case ISD::SETGE: CondCode = AE; break; // True if CF = 0
1673 case ISD::SETLT: CondCode = B; break; // True if CF = 1
1675 case ISD::SETLE: CondCode = BE; break; // True if CF = 1 or ZF = 1
1677 case ISD::SETNE: CondCode = NE; break; // True if ZF = 0
1678 case ISD::SETUO: CondCode = P; break; // True if PF = 1
1679 case ISD::SETO: CondCode = NP; break; // True if PF = 0
// Conditions needing two flag tests fall through to the generic path below.
1680 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
1681 case ISD::SETUGE: // PF = 1 | CF = 0
1682 case ISD::SETUNE: // PF = 1 | ZF = 0
1683 case ISD::SETOEQ: // PF = 0 & ZF = 1
1684 case ISD::SETOLT: // PF = 0 & CF = 1
1685 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
1686 // We cannot emit this comparison as a single cmov.
// A mapped CondCode selects the CMOV opcode by value type.
1693 if (CondCode != NOT_SET) {
1695 default: assert(0 && "Cannot select this type!");
1696 case MVT::i16: Opc = CMOVTAB16[CondCode]; break;
1697 case MVT::i32: Opc = CMOVTAB32[CondCode]; break;
1698 case MVT::f64: Opc = CMOVTABFP[CondCode]; break;
1702 // Finally, if we weren't able to fold this, just emit the condition and test
// Fallback: materialize the condition as a byte, TEST it, and CMOVE on ZF.
1704 if (CondCode == NOT_SET || Opc == 0) {
1705 // Get the condition into the zero flag.
1706 unsigned CondReg = SelectExpr(Cond);
1707 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
1710 default: assert(0 && "Cannot select this type!");
1711 case MVT::i16: Opc = X86::CMOVE16rr; break;
1712 case MVT::i32: Opc = X86::CMOVE32rr; break;
1713 case MVT::f64: Opc = X86::FCMOVE; break;
1716 // FIXME: CMP R, 0 -> TEST R, R
1717 EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.Val->hasOneUse());
// Operand order is swapped so the chosen CMOV condition picks correctly.
1718 std::swap(RTrue, RFalse);
1720 BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse);
// EmitCMP - Emit a compare of LHS against RHS, setting EFLAGS for a following
// conditional branch/set/cmov. Folds memory operands and immediates into the
// CMP when profitable; HasOneUse gates load folding.
1723 void ISel::EmitCMP(SDOperand LHS, SDOperand RHS, bool HasOneUse) {
// RHS is an integer constant: prefer CMP mem,imm or CMP reg,imm forms.
1725 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
1727 if (HasOneUse && isFoldableLoad(LHS, RHS)) {
1728 switch (RHS.getValueType()) {
1731 case MVT::i8: Opc = X86::CMP8mi; break;
1732 case MVT::i16: Opc = X86::CMP16mi; break;
1733 case MVT::i32: Opc = X86::CMP32mi; break;
1737 EmitFoldedLoad(LHS, AM);
1738 addFullAddress(BuildMI(BB, Opc, 5), AM).addImm(CN->getValue());
1743 switch (RHS.getValueType()) {
1746 case MVT::i8: Opc = X86::CMP8ri; break;
1747 case MVT::i16: Opc = X86::CMP16ri; break;
1748 case MVT::i32: Opc = X86::CMP32ri; break;
1751 unsigned Tmp1 = SelectExpr(LHS);
1752 BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue());
// RHS is +/-0.0: use FTST, then copy FPSW into AH and SAHF to get EFLAGS.
1755 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(RHS)) {
1756 if (CN->isExactlyValue(+0.0) ||
1757 CN->isExactlyValue(-0.0)) {
1758 unsigned Reg = SelectExpr(LHS);
1759 BuildMI(BB, X86::FTST, 1).addReg(Reg);
1760 BuildMI(BB, X86::FNSTSW8r, 0);
1761 BuildMI(BB, X86::SAHF, 1);
// LHS is a foldable load: emit CMP mem,reg.
1767 if (HasOneUse && isFoldableLoad(LHS, RHS)) {
1768 switch (RHS.getValueType()) {
1771 case MVT::i8: Opc = X86::CMP8mr; break;
1772 case MVT::i16: Opc = X86::CMP16mr; break;
1773 case MVT::i32: Opc = X86::CMP32mr; break;
1777 EmitFoldedLoad(LHS, AM);
1778 unsigned Reg = SelectExpr(RHS);
1779 addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(Reg);
// General register-register compare; FP uses FUCOMI which writes EFLAGS.
1784 switch (LHS.getValueType()) {
1785 default: assert(0 && "Cannot compare this value!");
1787 case MVT::i8: Opc = X86::CMP8rr; break;
1788 case MVT::i16: Opc = X86::CMP16rr; break;
1789 case MVT::i32: Opc = X86::CMP32rr; break;
1790 case MVT::f64: Opc = X86::FUCOMIr; break;
1792 unsigned Tmp1, Tmp2;
// Emit the higher-pressure operand first to minimize live registers.
1793 if (getRegPressure(LHS) > getRegPressure(RHS)) {
1794 Tmp1 = SelectExpr(LHS);
1795 Tmp2 = SelectExpr(RHS);
1797 Tmp2 = SelectExpr(RHS);
1798 Tmp1 = SelectExpr(LHS);
1800 BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
1803 /// isFoldableLoad - Return true if this is a load instruction that can safely
1804 /// be folded into an operation that uses it.
1805 bool ISel::isFoldableLoad(SDOperand Op, SDOperand OtherOp, bool FloatPromoteOk){
1806 if (Op.getOpcode() == ISD::LOAD) {
1807 // FIXME: currently can't fold constant pool indexes.
1808 if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
// EXTLOAD from f32 is also foldable when the caller allows float promotion.
1810 } else if (FloatPromoteOk && Op.getOpcode() == ISD::EXTLOAD &&
1811 cast<MVTSDNode>(Op)->getExtraValueType() == MVT::f32) {
1812 // FIXME: currently can't fold constant pool indexes.
1813 if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
1819 // If this load has already been emitted, we clearly can't fold it.
1820 assert(Op.ResNo == 0 && "Not a use of the value of the load?");
// ExprMap keyed on the load's token-chain result (value #1) records emission.
1821 if (ExprMap.count(Op.getValue(1))) return false;
1822 assert(!ExprMap.count(Op.getValue(0)) && "Value in map but not token chain?");
1823 assert(!ExprMap.count(Op.getValue(1))&&"Token lowered but value not in map?");
1825 // If there is not just one use of its value, we cannot fold.
1826 if (!Op.Val->hasNUsesOfValue(1, 0)) return false;
1828 // Finally, we cannot fold the load into the operation if this would induce a
1829 // cycle into the resultant dag. To check for this, see if OtherOp (the other
1830 // operand of the operation we are folding the load into) can possible use the
1831 // chain node defined by the load.
1832 if (OtherOp.Val && !Op.Val->hasNUsesOfValue(0, 1)) { // Has uses of chain?
1833 std::set<SDNode*> Visited;
1834 if (NodeTransitivelyUsesValue(OtherOp, Op.getValue(1), Visited))
1841 /// EmitFoldedLoad - Ensure that the arguments of the load are code generated,
1842 /// and compute the address being loaded into AM.
1843 void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) {
1844 SDOperand Chain = Op.getOperand(0);
1845 SDOperand Address = Op.getOperand(1);
// Emit the chain and the address in register-pressure order.
1847 if (getRegPressure(Chain) > getRegPressure(Address)) {
1849 SelectAddress(Address, AM);
1851 SelectAddress(Address, AM);
1855 // The chain for this load is now lowered.
// Mark the load's token-chain result as emitted so it is never re-selected;
// a second insertion of the same key would indicate double emission.
1856 assert(ExprMap.count(SDOperand(Op.Val, 1)) == 0 &&
1857 "Load emitted more than once?");
1858 if (!ExprMap.insert(std::make_pair(Op.getValue(1), 1)).second)
1859 assert(0 && "Load emitted more than once!");
1862 // EmitOrOpOp - Pattern match the expression (Op1|Op2), where we know that op1
1863 // and op2 are i8/i16/i32 values with one use each (the or). If we can form a
1864 // SHLD or SHRD, emit the instruction (generating the value into DestReg) and
1866 bool ISel::EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg) {
// Normalize so Op1 is the SHL and Op2 is the SRL; otherwise no match.
1867 if (Op1.getOpcode() == ISD::SHL && Op2.getOpcode() == ISD::SRL) {
1869 } else if (Op2.getOpcode() == ISD::SHL && Op1.getOpcode() == ISD::SRL) {
1870 std::swap(Op1, Op2); // Op1 is the SHL now.
1872 return false; // No match
1875 SDOperand ShlVal = Op1.getOperand(0);
1876 SDOperand ShlAmt = Op1.getOperand(1);
1877 SDOperand ShrVal = Op2.getOperand(0);
1878 SDOperand ShrAmt = Op2.getOperand(1);
1880 unsigned RegSize = MVT::getSizeInBits(Op1.getValueType());
1882 // Find out if ShrAmt = 32-ShlAmt or ShlAmt = 32-ShrAmt.
// Variant 1: shift-left amount is (RegSize - ShrAmt), a variable rotate/SHRD.
1883 if (ShlAmt.getOpcode() == ISD::SUB && ShlAmt.getOperand(1) == ShrAmt)
1884 if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShlAmt.getOperand(0)))
1885 if (SubCST->getValue() == RegSize) {
1886 // (A >> ShrAmt) | (A << (32-ShrAmt)) ==> ROR A, ShrAmt
1887 // (A >> ShrAmt) | (B << (32-ShrAmt)) ==> SHRD A, B, ShrAmt
1888 if (ShrVal == ShlVal) {
1889 unsigned Reg, ShAmt;
// Emit the higher-pressure expression first.
1890 if (getRegPressure(ShrVal) > getRegPressure(ShrAmt)) {
1891 Reg = SelectExpr(ShrVal);
1892 ShAmt = SelectExpr(ShrAmt);
1894 ShAmt = SelectExpr(ShrAmt);
1895 Reg = SelectExpr(ShrVal);
// Variable shift amounts must live in CL for the rCL instruction forms.
1897 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
1898 unsigned Opc = RegSize == 8 ? X86::ROR8rCL :
1899 (RegSize == 16 ? X86::ROR16rCL : X86::ROR32rCL);
1900 BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
// SHRD exists only for 16/32-bit operands.
1902 } else if (RegSize != 8) {
1903 unsigned AReg, BReg;
1904 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
1905 BReg = SelectExpr(ShlVal);
1906 AReg = SelectExpr(ShrVal);
1908 AReg = SelectExpr(ShrVal);
1909 BReg = SelectExpr(ShlVal);
1911 unsigned ShAmt = SelectExpr(ShrAmt);
1912 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
1913 unsigned Opc = RegSize == 16 ? X86::SHRD16rrCL : X86::SHRD32rrCL;
1914 BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
// Variant 2: shift-right amount is (RegSize - ShlAmt), mirrored for ROL/SHLD.
1919 if (ShrAmt.getOpcode() == ISD::SUB && ShrAmt.getOperand(1) == ShlAmt)
1920 if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShrAmt.getOperand(0)))
1921 if (SubCST->getValue() == RegSize) {
1922 // (A << ShlAmt) | (A >> (32-ShlAmt)) ==> ROL A, ShrAmt
1923 // (A << ShlAmt) | (B >> (32-ShlAmt)) ==> SHLD A, B, ShrAmt
1924 if (ShrVal == ShlVal) {
1925 unsigned Reg, ShAmt;
1926 if (getRegPressure(ShrVal) > getRegPressure(ShlAmt)) {
1927 Reg = SelectExpr(ShrVal);
1928 ShAmt = SelectExpr(ShlAmt);
1930 ShAmt = SelectExpr(ShlAmt);
1931 Reg = SelectExpr(ShrVal);
1933 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
1934 unsigned Opc = RegSize == 8 ? X86::ROL8rCL :
1935 (RegSize == 16 ? X86::ROL16rCL : X86::ROL32rCL);
1936 BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
1938 } else if (RegSize != 8) {
1939 unsigned AReg, BReg;
1940 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
1941 AReg = SelectExpr(ShlVal);
1942 BReg = SelectExpr(ShrVal);
1944 BReg = SelectExpr(ShrVal);
1945 AReg = SelectExpr(ShlVal);
1947 unsigned ShAmt = SelectExpr(ShlAmt);
1948 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
1949 unsigned Opc = RegSize == 16 ? X86::SHLD16rrCL : X86::SHLD32rrCL;
1950 BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
// Variant 3: both shift amounts are constants summing to RegSize.
1955 if (ConstantSDNode *ShrCst = dyn_cast<ConstantSDNode>(ShrAmt))
1956 if (ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(ShlAmt))
1957 if (ShrCst->getValue() < RegSize && ShlCst->getValue() < RegSize)
1958 if (ShrCst->getValue() == RegSize-ShlCst->getValue()) {
1959 // (A >> 5) | (A << 27) --> ROR A, 5
1960 // (A >> 5) | (B << 27) --> SHRD A, B, 5
1961 if (ShrVal == ShlVal) {
1962 unsigned Reg = SelectExpr(ShrVal);
1963 unsigned Opc = RegSize == 8 ? X86::ROR8ri :
1964 (RegSize == 16 ? X86::ROR16ri : X86::ROR32ri);
1965 BuildMI(BB, Opc, 2, DestReg).addReg(Reg).addImm(ShrCst->getValue());
1967 } else if (RegSize != 8) {
1968 unsigned AReg, BReg;
1969 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
1970 BReg = SelectExpr(ShlVal);
1971 AReg = SelectExpr(ShrVal);
1973 AReg = SelectExpr(ShrVal);
1974 BReg = SelectExpr(ShlVal);
1976 unsigned Opc = RegSize == 16 ? X86::SHRD16rri8 : X86::SHRD32rri8;
1977 BuildMI(BB, Opc, 3, DestReg).addReg(AReg).addReg(BReg)
1978 .addImm(ShrCst->getValue());
1986 unsigned ISel::SelectExpr(SDOperand N) {
1988 unsigned Tmp1, Tmp2, Tmp3;
1990 SDNode *Node = N.Val;
1993 if (Node->getOpcode() == ISD::CopyFromReg) {
1994 if (MRegisterInfo::isVirtualRegister(cast<RegSDNode>(Node)->getReg()) ||
1995 cast<RegSDNode>(Node)->getReg() == X86::ESP) {
1996 // Just use the specified register as our input.
1997 return cast<RegSDNode>(Node)->getReg();
2001 unsigned &Reg = ExprMap[N];
2002 if (Reg) return Reg;
2004 switch (N.getOpcode()) {
2006 Reg = Result = (N.getValueType() != MVT::Other) ?
2007 MakeReg(N.getValueType()) : 1;
2009 case X86ISD::TAILCALL:
2011 // If this is a call instruction, make sure to prepare ALL of the result
2012 // values as well as the chain.
2013 ExprMap[N.getValue(0)] = 1;
2014 if (Node->getNumValues() > 1) {
2015 Result = MakeReg(Node->getValueType(1));
2016 ExprMap[N.getValue(1)] = Result;
2017 for (unsigned i = 2, e = Node->getNumValues(); i != e; ++i)
2018 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
2023 case ISD::ADD_PARTS:
2024 case ISD::SUB_PARTS:
2025 case ISD::SHL_PARTS:
2026 case ISD::SRL_PARTS:
2027 case ISD::SRA_PARTS:
2028 Result = MakeReg(Node->getValueType(0));
2029 ExprMap[N.getValue(0)] = Result;
2030 for (unsigned i = 1, e = N.Val->getNumValues(); i != e; ++i)
2031 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
2035 switch (N.getOpcode()) {
2038 assert(0 && "Node not handled!\n");
2039 case ISD::CopyFromReg:
2040 Select(N.getOperand(0));
2042 Reg = Result = ExprMap[N.getValue(0)] =
2043 MakeReg(N.getValue(0).getValueType());
2045 switch (Node->getValueType(0)) {
2046 default: assert(0 && "Cannot CopyFromReg this!");
2049 BuildMI(BB, X86::MOV8rr, 1,
2050 Result).addReg(cast<RegSDNode>(Node)->getReg());
2053 BuildMI(BB, X86::MOV16rr, 1,
2054 Result).addReg(cast<RegSDNode>(Node)->getReg());
2057 BuildMI(BB, X86::MOV32rr, 1,
2058 Result).addReg(cast<RegSDNode>(Node)->getReg());
2062 case ISD::FrameIndex:
2063 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
2064 addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
2066 case ISD::ConstantPool:
2067 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
2068 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
2070 case ISD::ConstantFP:
2071 ContainsFPCode = true;
2072 Tmp1 = Result; // Intermediate Register
2073 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
2074 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
2075 Tmp1 = MakeReg(MVT::f64);
2077 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
2078 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
2079 BuildMI(BB, X86::FLD0, 0, Tmp1);
2080 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
2081 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
2082 BuildMI(BB, X86::FLD1, 0, Tmp1);
2084 assert(0 && "Unexpected constant!");
2086 BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1);
2089 switch (N.getValueType()) {
2090 default: assert(0 && "Cannot use constants of this type!");
2092 case MVT::i8: Opc = X86::MOV8ri; break;
2093 case MVT::i16: Opc = X86::MOV16ri; break;
2094 case MVT::i32: Opc = X86::MOV32ri; break;
2096 BuildMI(BB, Opc, 1,Result).addImm(cast<ConstantSDNode>(N)->getValue());
2099 if (Node->getValueType(0) == MVT::f64) {
2100 // FIXME: SHOULD TEACH STACKIFIER ABOUT UNDEF VALUES!
2101 BuildMI(BB, X86::FLD0, 0, Result);
2103 BuildMI(BB, X86::IMPLICIT_DEF, 0, Result);
2106 case ISD::GlobalAddress: {
2107 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
2108 BuildMI(BB, X86::MOV32ri, 1, Result).addGlobalAddress(GV);
2111 case ISD::ExternalSymbol: {
2112 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
2113 BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
2116 case ISD::ZERO_EXTEND: {
2117 int DestIs16 = N.getValueType() == MVT::i16;
2118 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
2120 // FIXME: This hack is here for zero extension casts from bool to i8. This
2121 // would not be needed if bools were promoted by Legalize.
2122 if (N.getValueType() == MVT::i8) {
2123 Tmp1 = SelectExpr(N.getOperand(0));
2124 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
2128 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
2129 static const unsigned Opc[3] = {
2130 X86::MOVZX32rm8, X86::MOVZX32rm16, X86::MOVZX16rm8
2134 EmitFoldedLoad(N.getOperand(0), AM);
2135 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
2140 static const unsigned Opc[3] = {
2141 X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8
2143 Tmp1 = SelectExpr(N.getOperand(0));
2144 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
2147 case ISD::SIGN_EXTEND: {
2148 int DestIs16 = N.getValueType() == MVT::i16;
2149 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
2151 // FIXME: Legalize should promote bools to i8!
2152 assert(N.getOperand(0).getValueType() != MVT::i1 &&
2153 "Sign extend from bool not implemented!");
2155 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
2156 static const unsigned Opc[3] = {
2157 X86::MOVSX32rm8, X86::MOVSX32rm16, X86::MOVSX16rm8
2161 EmitFoldedLoad(N.getOperand(0), AM);
2162 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
2166 static const unsigned Opc[3] = {
2167 X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8
2169 Tmp1 = SelectExpr(N.getOperand(0));
2170 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
2174 // Fold TRUNCATE (LOAD P) into a smaller load from P.
2175 // FIXME: This should be performed by the DAGCombiner.
2176 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
2177 switch (N.getValueType()) {
2178 default: assert(0 && "Unknown truncate!");
2180 case MVT::i8: Opc = X86::MOV8rm; break;
2181 case MVT::i16: Opc = X86::MOV16rm; break;
2184 EmitFoldedLoad(N.getOperand(0), AM);
2185 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
2189 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
2190 // a move out of AX or AL.
2191 switch (N.getOperand(0).getValueType()) {
2192 default: assert(0 && "Unknown truncate!");
2193 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
2194 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
2195 case MVT::i32: Tmp2 = X86::EAX; Opc = X86::MOV32rr; break;
2197 Tmp1 = SelectExpr(N.getOperand(0));
2198 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
2200 switch (N.getValueType()) {
2201 default: assert(0 && "Unknown truncate!");
2203 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
2204 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
2206 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
2209 case ISD::SINT_TO_FP:
2210 case ISD::UINT_TO_FP: {
2211 // FIXME: Most of this grunt work should be done by legalize!
2212 ContainsFPCode = true;
2214 // Promote the integer to a type supported by FLD. We do this because there
2215 // are no unsigned FLD instructions, so we must promote an unsigned value to
2216 // a larger signed value, then use FLD on the larger value.
2218 MVT::ValueType PromoteType = MVT::Other;
2219 MVT::ValueType SrcTy = N.getOperand(0).getValueType();
2220 unsigned PromoteOpcode = 0;
2221 unsigned RealDestReg = Result;
2225 // We don't have the facilities for directly loading byte sized data from
2226 // memory (even signed). Promote it to 16 bits.
2227 PromoteType = MVT::i16;
2228 PromoteOpcode = Node->getOpcode() == ISD::SINT_TO_FP ?
2229 X86::MOVSX16rr8 : X86::MOVZX16rr8;
2232 if (Node->getOpcode() == ISD::UINT_TO_FP) {
2233 PromoteType = MVT::i32;
2234 PromoteOpcode = X86::MOVZX32rr16;
2238 // Don't fild into the real destination.
2239 if (Node->getOpcode() == ISD::UINT_TO_FP)
2240 Result = MakeReg(Node->getValueType(0));
2244 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
2246 if (PromoteType != MVT::Other) {
2247 Tmp2 = MakeReg(PromoteType);
2248 BuildMI(BB, PromoteOpcode, 1, Tmp2).addReg(Tmp1);
2249 SrcTy = PromoteType;
2253 // Spill the integer to memory and reload it from there.
2254 unsigned Size = MVT::getSizeInBits(SrcTy)/8;
2255 MachineFunction *F = BB->getParent();
2256 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
2260 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
2261 FrameIdx).addReg(Tmp1);
2262 addFrameReference(BuildMI(BB, X86::FILD32m, 5, Result), FrameIdx);
2265 addFrameReference(BuildMI(BB, X86::MOV16mr, 5),
2266 FrameIdx).addReg(Tmp1);
2267 addFrameReference(BuildMI(BB, X86::FILD16m, 5, Result), FrameIdx);
2269 default: break; // No promotion required.
2272 if (Node->getOpcode() == ISD::UINT_TO_FP && Result != RealDestReg) {
2273 // If this is a cast from uint -> double, we need to be careful when if
2274 // the "sign" bit is set. If so, we don't want to make a negative number,
2275 // we want to make a positive number. Emit code to add an offset if the
2278 // Compute whether the sign bit is set by shifting the reg right 31 bits.
2279 unsigned IsNeg = MakeReg(MVT::i32);
2280 BuildMI(BB, X86::SHR32ri, 2, IsNeg).addReg(Tmp1).addImm(31);
2282 // Create a CP value that has the offset in one word and 0 in the other.
2283 static ConstantInt *TheOffset = ConstantUInt::get(Type::ULongTy,
2284 0x4f80000000000000ULL);
2285 unsigned CPI = F->getConstantPool()->getConstantPoolIndex(TheOffset);
2286 BuildMI(BB, X86::FADD32m, 5, RealDestReg).addReg(Result)
2287 .addConstantPoolIndex(CPI).addZImm(4).addReg(IsNeg).addSImm(0);
2291 case ISD::FP_TO_SINT:
2292 case ISD::FP_TO_UINT: {
2293 // FIXME: Most of this grunt work should be done by legalize!
2294 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
2296 // Change the floating point control register to use "round towards zero"
2297 // mode when truncating to an integer value.
2299 MachineFunction *F = BB->getParent();
2300 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
2301 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
2303 // Load the old value of the high byte of the control word...
2304 unsigned HighPartOfCW = MakeReg(MVT::i8);
2305 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, HighPartOfCW),
2308 // Set the high part to be round to zero...
2309 addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
2310 CWFrameIdx, 1).addImm(12);
2312 // Reload the modified control word now...
2313 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
2315 // Restore the memory image of control word to original value
2316 addFrameReference(BuildMI(BB, X86::MOV8mr, 5),
2317 CWFrameIdx, 1).addReg(HighPartOfCW);
2319 // We don't have the facilities for directly storing byte sized data to
2320 // memory. Promote it to 16 bits. We also must promote unsigned values to
2321 // larger classes because we only have signed FP stores.
2322 MVT::ValueType StoreClass = Node->getValueType(0);
2323 if (StoreClass == MVT::i8 || Node->getOpcode() == ISD::FP_TO_UINT)
2324 switch (StoreClass) {
2326 case MVT::i8: StoreClass = MVT::i16; break;
2327 case MVT::i16: StoreClass = MVT::i32; break;
2328 case MVT::i32: StoreClass = MVT::i64; break;
2329 default: assert(0 && "Unknown store class!");
2332 // Spill the integer to memory and reload it from there.
2333 unsigned Size = MVT::getSizeInBits(StoreClass)/8;
2334 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
2336 switch (StoreClass) {
2337 default: assert(0 && "Unknown store class!");
2339 addFrameReference(BuildMI(BB, X86::FIST16m, 5), FrameIdx).addReg(Tmp1);
2342 addFrameReference(BuildMI(BB, X86::FIST32m, 5), FrameIdx).addReg(Tmp1);
2345 addFrameReference(BuildMI(BB, X86::FISTP64m, 5), FrameIdx).addReg(Tmp1);
2348 switch (Node->getValueType(0)) {
2350 assert(0 && "Unknown integer type!");
2352 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx);
2355 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Result), FrameIdx);
2359 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Result), FrameIdx);
2363 // Reload the original control word now.
2364 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
2368 Op0 = N.getOperand(0);
2369 Op1 = N.getOperand(1);
2371 if (isFoldableLoad(Op0, Op1, true)) {
2372 std::swap(Op0, Op1);
2376 if (isFoldableLoad(Op1, Op0, true)) {
2378 switch (N.getValueType()) {
2379 default: assert(0 && "Cannot add this type!");
2381 case MVT::i8: Opc = X86::ADD8rm; break;
2382 case MVT::i16: Opc = X86::ADD16rm; break;
2383 case MVT::i32: Opc = X86::ADD32rm; break;
2385 // For F64, handle promoted load operations (from F32) as well!
2386 Opc = Op1.getOpcode() == ISD::LOAD ? X86::FADD64m : X86::FADD32m;
2390 EmitFoldedLoad(Op1, AM);
2391 Tmp1 = SelectExpr(Op0);
2392 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2396 // See if we can codegen this as an LEA to fold operations together.
2397 if (N.getValueType() == MVT::i32) {
2399 X86ISelAddressMode AM;
2400 MatchAddress(N, AM);
2401 ExprMap[N] = Result;
2403 // If this is not just an add, emit the LEA. For a simple add (like
2404 // reg+reg or reg+imm), we just emit an add. It might be a good idea to
2405 // leave this as LEA, then peephole it to 'ADD' after two address elim
2407 if (AM.Scale != 1 || AM.BaseType == X86ISelAddressMode::FrameIndexBase||
2408 AM.GV || (AM.Base.Reg.Val && AM.IndexReg.Val && AM.Disp)) {
2409 X86AddressMode XAM = SelectAddrExprs(AM);
2410 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), XAM);
2415 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2417 if (CN->getValue() == 1) { // add X, 1 -> inc X
2418 switch (N.getValueType()) {
2419 default: assert(0 && "Cannot integer add this type!");
2420 case MVT::i8: Opc = X86::INC8r; break;
2421 case MVT::i16: Opc = X86::INC16r; break;
2422 case MVT::i32: Opc = X86::INC32r; break;
2424 } else if (CN->isAllOnesValue()) { // add X, -1 -> dec X
2425 switch (N.getValueType()) {
2426 default: assert(0 && "Cannot integer add this type!");
2427 case MVT::i8: Opc = X86::DEC8r; break;
2428 case MVT::i16: Opc = X86::DEC16r; break;
2429 case MVT::i32: Opc = X86::DEC32r; break;
2434 Tmp1 = SelectExpr(Op0);
2435 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2439 switch (N.getValueType()) {
2440 default: assert(0 && "Cannot add this type!");
2441 case MVT::i8: Opc = X86::ADD8ri; break;
2442 case MVT::i16: Opc = X86::ADD16ri; break;
2443 case MVT::i32: Opc = X86::ADD32ri; break;
2446 Tmp1 = SelectExpr(Op0);
2447 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
2452 switch (N.getValueType()) {
2453 default: assert(0 && "Cannot add this type!");
2454 case MVT::i8: Opc = X86::ADD8rr; break;
2455 case MVT::i16: Opc = X86::ADD16rr; break;
2456 case MVT::i32: Opc = X86::ADD32rr; break;
2457 case MVT::f64: Opc = X86::FpADD; break;
2460 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2461 Tmp1 = SelectExpr(Op0);
2462 Tmp2 = SelectExpr(Op1);
2464 Tmp2 = SelectExpr(Op1);
2465 Tmp1 = SelectExpr(Op0);
2468 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
2476 assert(N.getValueType()==MVT::f64 && "Illegal type for this operation");
2477 Tmp1 = SelectExpr(Node->getOperand(0));
2478 switch (N.getOpcode()) {
2479 default: assert(0 && "Unreachable!");
2480 case ISD::FABS: BuildMI(BB, X86::FABS, 1, Result).addReg(Tmp1); break;
2481 case ISD::FNEG: BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1); break;
2482 case ISD::FSQRT: BuildMI(BB, X86::FSQRT, 1, Result).addReg(Tmp1); break;
2483 case ISD::FSIN: BuildMI(BB, X86::FSIN, 1, Result).addReg(Tmp1); break;
2484 case ISD::FCOS: BuildMI(BB, X86::FCOS, 1, Result).addReg(Tmp1); break;
2489 switch (N.getValueType()) {
2490 default: assert(0 && "Unsupported VT!");
2491 case MVT::i8: Tmp2 = X86::MUL8r; break;
2492 case MVT::i16: Tmp2 = X86::MUL16r; break;
2493 case MVT::i32: Tmp2 = X86::MUL32r; break;
2497 unsigned MovOpc, LowReg, HiReg;
2498 switch (N.getValueType()) {
2499 default: assert(0 && "Unsupported VT!");
2501 MovOpc = X86::MOV8rr;
2507 MovOpc = X86::MOV16rr;
2513 MovOpc = X86::MOV32rr;
2519 if (Node->getOpcode() != ISD::MULHS)
2520 Opc = Tmp2; // Get the MULHU opcode.
2522 Op0 = Node->getOperand(0);
2523 Op1 = Node->getOperand(1);
2524 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2525 Tmp1 = SelectExpr(Op0);
2526 Tmp2 = SelectExpr(Op1);
2528 Tmp2 = SelectExpr(Op1);
2529 Tmp1 = SelectExpr(Op0);
2532 // FIXME: Implement folding of loads into the memory operands here!
2533 BuildMI(BB, MovOpc, 1, LowReg).addReg(Tmp1);
2534 BuildMI(BB, Opc, 1).addReg(Tmp2);
2535 BuildMI(BB, MovOpc, 1, Result).addReg(HiReg);
2544 static const unsigned SUBTab[] = {
2545 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
2546 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FSUB32m, X86::FSUB64m,
2547 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB , X86::FpSUB,
2549 static const unsigned MULTab[] = {
2550 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
2551 0, X86::IMUL16rm , X86::IMUL32rm, X86::FMUL32m, X86::FMUL64m,
2552 0, X86::IMUL16rr , X86::IMUL32rr, X86::FpMUL , X86::FpMUL,
2554 static const unsigned ANDTab[] = {
2555 X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, 0,
2556 X86::AND8rm, X86::AND16rm, X86::AND32rm, 0, 0,
2557 X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, 0,
2559 static const unsigned ORTab[] = {
2560 X86::OR8ri, X86::OR16ri, X86::OR32ri, 0, 0,
2561 X86::OR8rm, X86::OR16rm, X86::OR32rm, 0, 0,
2562 X86::OR8rr, X86::OR16rr, X86::OR32rr, 0, 0,
2564 static const unsigned XORTab[] = {
2565 X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, 0,
2566 X86::XOR8rm, X86::XOR16rm, X86::XOR32rm, 0, 0,
2567 X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, 0,
2570 Op0 = Node->getOperand(0);
2571 Op1 = Node->getOperand(1);
2573 if (Node->getOpcode() == ISD::OR && Op0.hasOneUse() && Op1.hasOneUse())
2574 if (EmitOrOpOp(Op0, Op1, Result)) // Match SHLD, SHRD, and rotates.
2577 if (Node->getOpcode() == ISD::SUB)
2578 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(0)))
2579 if (CN->isNullValue()) { // 0 - N -> neg N
2580 switch (N.getValueType()) {
2581 default: assert(0 && "Cannot sub this type!");
2583 case MVT::i8: Opc = X86::NEG8r; break;
2584 case MVT::i16: Opc = X86::NEG16r; break;
2585 case MVT::i32: Opc = X86::NEG32r; break;
2587 Tmp1 = SelectExpr(N.getOperand(1));
2588 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2592 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2593 if (CN->isAllOnesValue() && Node->getOpcode() == ISD::XOR) {
2595 switch (N.getValueType()) {
2596 default: assert(0 && "Cannot add this type!");
2597 case MVT::i1: break; // Not supported, don't invert upper bits!
2598 case MVT::i8: Opc = X86::NOT8r; break;
2599 case MVT::i16: Opc = X86::NOT16r; break;
2600 case MVT::i32: Opc = X86::NOT32r; break;
2603 Tmp1 = SelectExpr(Op0);
2604 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2609 // Fold common multiplies into LEA instructions.
2610 if (Node->getOpcode() == ISD::MUL && N.getValueType() == MVT::i32) {
2611 switch ((int)CN->getValue()) {
2616 // Remove N from exprmap so SelectAddress doesn't get confused.
2619 SelectAddress(N, AM);
2620 // Restore it to the map.
2621 ExprMap[N] = Result;
2622 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
2627 switch (N.getValueType()) {
2628 default: assert(0 && "Cannot xor this type!");
2630 case MVT::i8: Opc = 0; break;
2631 case MVT::i16: Opc = 1; break;
2632 case MVT::i32: Opc = 2; break;
2634 switch (Node->getOpcode()) {
2635 default: assert(0 && "Unreachable!");
2636 case ISD::SUB: Opc = SUBTab[Opc]; break;
2637 case ISD::MUL: Opc = MULTab[Opc]; break;
2638 case ISD::AND: Opc = ANDTab[Opc]; break;
2639 case ISD::OR: Opc = ORTab[Opc]; break;
2640 case ISD::XOR: Opc = XORTab[Opc]; break;
2642 if (Opc) { // Can't fold MUL:i8 R, imm
2643 Tmp1 = SelectExpr(Op0);
2644 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
2649 if (isFoldableLoad(Op0, Op1, true))
2650 if (Node->getOpcode() != ISD::SUB) {
2651 std::swap(Op0, Op1);
2654 // For FP, emit 'reverse' subract, with a memory operand.
2655 if (N.getValueType() == MVT::f64) {
2656 if (Op0.getOpcode() == ISD::EXTLOAD)
2657 Opc = X86::FSUBR32m;
2659 Opc = X86::FSUBR64m;
2662 EmitFoldedLoad(Op0, AM);
2663 Tmp1 = SelectExpr(Op1);
2664 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2669 if (isFoldableLoad(Op1, Op0, true)) {
2671 switch (N.getValueType()) {
2672 default: assert(0 && "Cannot operate on this type!");
2674 case MVT::i8: Opc = 5; break;
2675 case MVT::i16: Opc = 6; break;
2676 case MVT::i32: Opc = 7; break;
2677 // For F64, handle promoted load operations (from F32) as well!
2678 case MVT::f64: Opc = Op1.getOpcode() == ISD::LOAD ? 9 : 8; break;
2680 switch (Node->getOpcode()) {
2681 default: assert(0 && "Unreachable!");
2682 case ISD::SUB: Opc = SUBTab[Opc]; break;
2683 case ISD::MUL: Opc = MULTab[Opc]; break;
2684 case ISD::AND: Opc = ANDTab[Opc]; break;
2685 case ISD::OR: Opc = ORTab[Opc]; break;
2686 case ISD::XOR: Opc = XORTab[Opc]; break;
2690 EmitFoldedLoad(Op1, AM);
2691 Tmp1 = SelectExpr(Op0);
2693 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2695 assert(Node->getOpcode() == ISD::MUL &&
2696 N.getValueType() == MVT::i8 && "Unexpected situation!");
2697 // Must use the MUL instruction, which forces use of AL.
2698 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
2699 addFullAddress(BuildMI(BB, X86::MUL8m, 1), AM);
2700 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
2705 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2706 Tmp1 = SelectExpr(Op0);
2707 Tmp2 = SelectExpr(Op1);
2709 Tmp2 = SelectExpr(Op1);
2710 Tmp1 = SelectExpr(Op0);
2713 switch (N.getValueType()) {
2714 default: assert(0 && "Cannot add this type!");
2716 case MVT::i8: Opc = 10; break;
2717 case MVT::i16: Opc = 11; break;
2718 case MVT::i32: Opc = 12; break;
2719 case MVT::f32: Opc = 13; break;
2720 case MVT::f64: Opc = 14; break;
2722 switch (Node->getOpcode()) {
2723 default: assert(0 && "Unreachable!");
2724 case ISD::SUB: Opc = SUBTab[Opc]; break;
2725 case ISD::MUL: Opc = MULTab[Opc]; break;
2726 case ISD::AND: Opc = ANDTab[Opc]; break;
2727 case ISD::OR: Opc = ORTab[Opc]; break;
2728 case ISD::XOR: Opc = XORTab[Opc]; break;
2731 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
2733 assert(Node->getOpcode() == ISD::MUL &&
2734 N.getValueType() == MVT::i8 && "Unexpected situation!");
2735 // Must use the MUL instruction, which forces use of AL.
2736 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
2737 BuildMI(BB, X86::MUL8r, 1).addReg(Tmp2);
2738 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
2742 case ISD::ADD_PARTS:
2743 case ISD::SUB_PARTS: {
2744 assert(N.getNumOperands() == 4 && N.getValueType() == MVT::i32 &&
2745 "Not an i64 add/sub!");
2746 // Emit all of the operands.
2747 std::vector<unsigned> InVals;
2748 for (unsigned i = 0, e = N.getNumOperands(); i != e; ++i)
2749 InVals.push_back(SelectExpr(N.getOperand(i)));
2750 if (N.getOpcode() == ISD::ADD_PARTS) {
2751 BuildMI(BB, X86::ADD32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
2752 BuildMI(BB, X86::ADC32rr,2,Result+1).addReg(InVals[1]).addReg(InVals[3]);
2754 BuildMI(BB, X86::SUB32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
2755 BuildMI(BB, X86::SBB32rr, 2,Result+1).addReg(InVals[1]).addReg(InVals[3]);
2757 return Result+N.ResNo;
2760 case ISD::SHL_PARTS:
2761 case ISD::SRA_PARTS:
2762 case ISD::SRL_PARTS: {
2763 assert(N.getNumOperands() == 3 && N.getValueType() == MVT::i32 &&
2764 "Not an i64 shift!");
2765 unsigned ShiftOpLo = SelectExpr(N.getOperand(0));
2766 unsigned ShiftOpHi = SelectExpr(N.getOperand(1));
2767 unsigned TmpReg = MakeReg(MVT::i32);
2768 if (N.getOpcode() == ISD::SRA_PARTS) {
2769 // If this is a SHR of a Long, then we need to do funny sign extension
2770 // stuff. TmpReg gets the value to use as the high-part if we are
2771 // shifting more than 32 bits.
2772 BuildMI(BB, X86::SAR32ri, 2, TmpReg).addReg(ShiftOpHi).addImm(31);
2774 // Other shifts use a fixed zero value if the shift is more than 32 bits.
2775 BuildMI(BB, X86::MOV32ri, 1, TmpReg).addImm(0);
2778 // Initialize CL with the shift amount.
2779 unsigned ShiftAmountReg = SelectExpr(N.getOperand(2));
2780 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
2782 unsigned TmpReg2 = MakeReg(MVT::i32);
2783 unsigned TmpReg3 = MakeReg(MVT::i32);
2784 if (N.getOpcode() == ISD::SHL_PARTS) {
2785 // TmpReg2 = shld inHi, inLo
2786 BuildMI(BB, X86::SHLD32rrCL, 2,TmpReg2).addReg(ShiftOpHi)
2788 // TmpReg3 = shl inLo, CL
2789 BuildMI(BB, X86::SHL32rCL, 1, TmpReg3).addReg(ShiftOpLo);
2791 // Set the flags to indicate whether the shift was by more than 32 bits.
2792 BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
2794 // DestHi = (>32) ? TmpReg3 : TmpReg2;
2795 BuildMI(BB, X86::CMOVNE32rr, 2,
2796 Result+1).addReg(TmpReg2).addReg(TmpReg3);
2797 // DestLo = (>32) ? TmpReg : TmpReg3;
2798 BuildMI(BB, X86::CMOVNE32rr, 2,
2799 Result).addReg(TmpReg3).addReg(TmpReg);
2801 // TmpReg2 = shrd inLo, inHi
2802 BuildMI(BB, X86::SHRD32rrCL,2,TmpReg2).addReg(ShiftOpLo)
2804 // TmpReg3 = s[ah]r inHi, CL
2805 BuildMI(BB, N.getOpcode() == ISD::SRA_PARTS ? X86::SAR32rCL
2806 : X86::SHR32rCL, 1, TmpReg3)
2809 // Set the flags to indicate whether the shift was by more than 32 bits.
2810 BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
2812 // DestLo = (>32) ? TmpReg3 : TmpReg2;
2813 BuildMI(BB, X86::CMOVNE32rr, 2,
2814 Result).addReg(TmpReg2).addReg(TmpReg3);
2816 // DestHi = (>32) ? TmpReg : TmpReg3;
2817 BuildMI(BB, X86::CMOVNE32rr, 2,
2818 Result+1).addReg(TmpReg3).addReg(TmpReg);
2820 return Result+N.ResNo;
2824 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
2825 Tmp2 = SelectExpr(N.getOperand(1));
2826 Tmp3 = SelectExpr(N.getOperand(2));
2828 Tmp3 = SelectExpr(N.getOperand(2));
2829 Tmp2 = SelectExpr(N.getOperand(1));
2831 EmitSelectCC(N.getOperand(0), N.getValueType(), Tmp2, Tmp3, Result);
2838 assert((N.getOpcode() != ISD::SREM || MVT::isInteger(N.getValueType())) &&
2839 "We don't support this operator!");
2841 if (N.getOpcode() == ISD::SDIV) {
2842 // We can fold loads into FpDIVs, but not really into any others.
2843 if (N.getValueType() == MVT::f64) {
2844 // Check for reversed and unreversed DIV.
2845 if (isFoldableLoad(N.getOperand(0), N.getOperand(1), true)) {
2846 if (N.getOperand(0).getOpcode() == ISD::EXTLOAD)
2847 Opc = X86::FDIVR32m;
2849 Opc = X86::FDIVR64m;
2851 EmitFoldedLoad(N.getOperand(0), AM);
2852 Tmp1 = SelectExpr(N.getOperand(1));
2853 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2855 } else if (isFoldableLoad(N.getOperand(1), N.getOperand(0), true) &&
2856 N.getOperand(1).getOpcode() == ISD::LOAD) {
2857 if (N.getOperand(1).getOpcode() == ISD::EXTLOAD)
2862 EmitFoldedLoad(N.getOperand(1), AM);
2863 Tmp1 = SelectExpr(N.getOperand(0));
2864 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2869 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2870 // FIXME: These special cases should be handled by the lowering impl!
2871 unsigned RHS = CN->getValue();
2877 if (RHS && (RHS & (RHS-1)) == 0) { // Signed division by power of 2?
2878 unsigned Log = log2(RHS);
2879 unsigned SAROpc, SHROpc, ADDOpc, NEGOpc;
2880 switch (N.getValueType()) {
2881 default: assert("Unknown type to signed divide!");
2883 SAROpc = X86::SAR8ri;
2884 SHROpc = X86::SHR8ri;
2885 ADDOpc = X86::ADD8rr;
2886 NEGOpc = X86::NEG8r;
2889 SAROpc = X86::SAR16ri;
2890 SHROpc = X86::SHR16ri;
2891 ADDOpc = X86::ADD16rr;
2892 NEGOpc = X86::NEG16r;
2895 SAROpc = X86::SAR32ri;
2896 SHROpc = X86::SHR32ri;
2897 ADDOpc = X86::ADD32rr;
2898 NEGOpc = X86::NEG32r;
2901 unsigned RegSize = MVT::getSizeInBits(N.getValueType());
2902 Tmp1 = SelectExpr(N.getOperand(0));
2905 TmpReg = MakeReg(N.getValueType());
2906 BuildMI(BB, SAROpc, 2, TmpReg).addReg(Tmp1).addImm(Log-1);
2910 unsigned TmpReg2 = MakeReg(N.getValueType());
2911 BuildMI(BB, SHROpc, 2, TmpReg2).addReg(TmpReg).addImm(RegSize-Log);
2912 unsigned TmpReg3 = MakeReg(N.getValueType());
2913 BuildMI(BB, ADDOpc, 2, TmpReg3).addReg(Tmp1).addReg(TmpReg2);
2915 unsigned TmpReg4 = isNeg ? MakeReg(N.getValueType()) : Result;
2916 BuildMI(BB, SAROpc, 2, TmpReg4).addReg(TmpReg3).addImm(Log);
2918 BuildMI(BB, NEGOpc, 1, Result).addReg(TmpReg4);
2924 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2925 Tmp1 = SelectExpr(N.getOperand(0));
2926 Tmp2 = SelectExpr(N.getOperand(1));
2928 Tmp2 = SelectExpr(N.getOperand(1));
2929 Tmp1 = SelectExpr(N.getOperand(0));
2932 bool isSigned = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::SREM;
2933 bool isDiv = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::UDIV;
2934 unsigned LoReg, HiReg, DivOpcode, MovOpcode, ClrOpcode, SExtOpcode;
2935 switch (N.getValueType()) {
2936 default: assert(0 && "Cannot sdiv this type!");
2938 DivOpcode = isSigned ? X86::IDIV8r : X86::DIV8r;
2941 MovOpcode = X86::MOV8rr;
2942 ClrOpcode = X86::MOV8ri;
2943 SExtOpcode = X86::CBW;
2946 DivOpcode = isSigned ? X86::IDIV16r : X86::DIV16r;
2949 MovOpcode = X86::MOV16rr;
2950 ClrOpcode = X86::MOV16ri;
2951 SExtOpcode = X86::CWD;
2954 DivOpcode = isSigned ? X86::IDIV32r : X86::DIV32r;
2957 MovOpcode = X86::MOV32rr;
2958 ClrOpcode = X86::MOV32ri;
2959 SExtOpcode = X86::CDQ;
2962 BuildMI(BB, X86::FpDIV, 2, Result).addReg(Tmp1).addReg(Tmp2);
2966 // Set up the low part.
2967 BuildMI(BB, MovOpcode, 1, LoReg).addReg(Tmp1);
2970 // Sign extend the low part into the high part.
2971 BuildMI(BB, SExtOpcode, 0);
2973 // Zero out the high part, effectively zero extending the input.
2974 BuildMI(BB, ClrOpcode, 1, HiReg).addImm(0);
2977 // Emit the DIV/IDIV instruction.
2978 BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
2980 // Get the result of the divide or rem.
2981 BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
2986 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2987 if (CN->getValue() == 1) { // X = SHL Y, 1 -> X = ADD Y, Y
2988 switch (N.getValueType()) {
2989 default: assert(0 && "Cannot shift this type!");
2990 case MVT::i8: Opc = X86::ADD8rr; break;
2991 case MVT::i16: Opc = X86::ADD16rr; break;
2992 case MVT::i32: Opc = X86::ADD32rr; break;
2994 Tmp1 = SelectExpr(N.getOperand(0));
2995 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp1);
2999 switch (N.getValueType()) {
3000 default: assert(0 && "Cannot shift this type!");
3001 case MVT::i8: Opc = X86::SHL8ri; break;
3002 case MVT::i16: Opc = X86::SHL16ri; break;
3003 case MVT::i32: Opc = X86::SHL32ri; break;
3005 Tmp1 = SelectExpr(N.getOperand(0));
3006 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3010 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3011 Tmp1 = SelectExpr(N.getOperand(0));
3012 Tmp2 = SelectExpr(N.getOperand(1));
3014 Tmp2 = SelectExpr(N.getOperand(1));
3015 Tmp1 = SelectExpr(N.getOperand(0));
3018 switch (N.getValueType()) {
3019 default: assert(0 && "Cannot shift this type!");
3020 case MVT::i8 : Opc = X86::SHL8rCL; break;
3021 case MVT::i16: Opc = X86::SHL16rCL; break;
3022 case MVT::i32: Opc = X86::SHL32rCL; break;
3024 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3025 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3028 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3029 switch (N.getValueType()) {
3030 default: assert(0 && "Cannot shift this type!");
3031 case MVT::i8: Opc = X86::SHR8ri; break;
3032 case MVT::i16: Opc = X86::SHR16ri; break;
3033 case MVT::i32: Opc = X86::SHR32ri; break;
3035 Tmp1 = SelectExpr(N.getOperand(0));
3036 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3040 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3041 Tmp1 = SelectExpr(N.getOperand(0));
3042 Tmp2 = SelectExpr(N.getOperand(1));
3044 Tmp2 = SelectExpr(N.getOperand(1));
3045 Tmp1 = SelectExpr(N.getOperand(0));
3048 switch (N.getValueType()) {
3049 default: assert(0 && "Cannot shift this type!");
3050 case MVT::i8 : Opc = X86::SHR8rCL; break;
3051 case MVT::i16: Opc = X86::SHR16rCL; break;
3052 case MVT::i32: Opc = X86::SHR32rCL; break;
3054 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3055 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3058 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3059 switch (N.getValueType()) {
3060 default: assert(0 && "Cannot shift this type!");
3061 case MVT::i8: Opc = X86::SAR8ri; break;
3062 case MVT::i16: Opc = X86::SAR16ri; break;
3063 case MVT::i32: Opc = X86::SAR32ri; break;
3065 Tmp1 = SelectExpr(N.getOperand(0));
3066 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3070 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3071 Tmp1 = SelectExpr(N.getOperand(0));
3072 Tmp2 = SelectExpr(N.getOperand(1));
3074 Tmp2 = SelectExpr(N.getOperand(1));
3075 Tmp1 = SelectExpr(N.getOperand(0));
3078 switch (N.getValueType()) {
3079 default: assert(0 && "Cannot shift this type!");
3080 case MVT::i8 : Opc = X86::SAR8rCL; break;
3081 case MVT::i16: Opc = X86::SAR16rCL; break;
3082 case MVT::i32: Opc = X86::SAR32rCL; break;
3084 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3085 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3089 EmitCMP(N.getOperand(0), N.getOperand(1), Node->hasOneUse());
3090 EmitSetCC(BB, Result, cast<SetCCSDNode>(N)->getCondition(),
3091 MVT::isFloatingPoint(N.getOperand(1).getValueType()));
3094 // Make sure we generate both values.
3095 if (Result != 1) { // Generate the token
3096 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3097 assert(0 && "Load already emitted!?");
3099 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3101 switch (Node->getValueType(0)) {
3102 default: assert(0 && "Cannot load this type!");
3104 case MVT::i8: Opc = X86::MOV8rm; break;
3105 case MVT::i16: Opc = X86::MOV16rm; break;
3106 case MVT::i32: Opc = X86::MOV32rm; break;
3107 case MVT::f64: Opc = X86::FLD64m; ContainsFPCode = true; break;
3110 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))){
3111 Select(N.getOperand(0));
3112 addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex());
3116 SDOperand Chain = N.getOperand(0);
3117 SDOperand Address = N.getOperand(1);
3118 if (getRegPressure(Chain) > getRegPressure(Address)) {
3120 SelectAddress(Address, AM);
3122 SelectAddress(Address, AM);
3126 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
3129 case X86ISD::FILD64m:
3130 // Make sure we generate both values.
3131 assert(Result != 1 && N.getValueType() == MVT::f64);
3132 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3133 assert(0 && "Load already emitted!?");
3138 SDOperand Chain = N.getOperand(0);
3139 SDOperand Address = N.getOperand(1);
3140 if (getRegPressure(Chain) > getRegPressure(Address)) {
3142 SelectAddress(Address, AM);
3144 SelectAddress(Address, AM);
3148 addFullAddress(BuildMI(BB, X86::FILD64m, 4, Result), AM);
3152 case ISD::EXTLOAD: // Arbitrarily codegen extloads as MOVZX*
3153 case ISD::ZEXTLOAD: {
3154 // Make sure we generate both values.
3156 ExprMap[N.getValue(1)] = 1; // Generate the token
3158 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3160 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1)))
3161 if (Node->getValueType(0) == MVT::f64) {
3162 assert(cast<MVTSDNode>(Node)->getExtraValueType() == MVT::f32 &&
3164 addConstantPoolReference(BuildMI(BB, X86::FLD32m, 4, Result),
3170 if (getRegPressure(Node->getOperand(0)) >
3171 getRegPressure(Node->getOperand(1))) {
3172 Select(Node->getOperand(0)); // chain
3173 SelectAddress(Node->getOperand(1), AM);
3175 SelectAddress(Node->getOperand(1), AM);
3176 Select(Node->getOperand(0)); // chain
3179 switch (Node->getValueType(0)) {
3180 default: assert(0 && "Unknown type to sign extend to.");
3182 assert(cast<MVTSDNode>(Node)->getExtraValueType() == MVT::f32 &&
3184 addFullAddress(BuildMI(BB, X86::FLD32m, 5, Result), AM);
3187 switch (cast<MVTSDNode>(Node)->getExtraValueType()) {
3189 assert(0 && "Bad zero extend!");
3192 addFullAddress(BuildMI(BB, X86::MOVZX32rm8, 5, Result), AM);
3195 addFullAddress(BuildMI(BB, X86::MOVZX32rm16, 5, Result), AM);
3200 assert(cast<MVTSDNode>(Node)->getExtraValueType() <= MVT::i8 &&
3201 "Bad zero extend!");
3202 addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM);
3205 assert(cast<MVTSDNode>(Node)->getExtraValueType() == MVT::i1 &&
3206 "Bad zero extend!");
3207 addFullAddress(BuildMI(BB, X86::MOV8rm, 5, Result), AM);
3212 case ISD::SEXTLOAD: {
3213 // Make sure we generate both values.
3215 ExprMap[N.getValue(1)] = 1; // Generate the token
3217 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3220 if (getRegPressure(Node->getOperand(0)) >
3221 getRegPressure(Node->getOperand(1))) {
3222 Select(Node->getOperand(0)); // chain
3223 SelectAddress(Node->getOperand(1), AM);
3225 SelectAddress(Node->getOperand(1), AM);
3226 Select(Node->getOperand(0)); // chain
3229 switch (Node->getValueType(0)) {
3230 case MVT::i8: assert(0 && "Cannot sign extend from bool!");
3231 default: assert(0 && "Unknown type to sign extend to.");
3233 switch (cast<MVTSDNode>(Node)->getExtraValueType()) {
3235 case MVT::i1: assert(0 && "Cannot sign extend from bool!");
3237 addFullAddress(BuildMI(BB, X86::MOVSX32rm8, 5, Result), AM);
3240 addFullAddress(BuildMI(BB, X86::MOVSX32rm16, 5, Result), AM);
3245 assert(cast<MVTSDNode>(Node)->getExtraValueType() == MVT::i8 &&
3246 "Cannot sign extend from bool!");
3247 addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM);
3253 case ISD::DYNAMIC_STACKALLOC:
3254 // Generate both result values.
3256 ExprMap[N.getValue(1)] = 1; // Generate the token
3258 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3260 // FIXME: We are currently ignoring the requested alignment for handling
3261 // greater than the stack alignment. This will need to be revisited at some
3262 // point. Align = N.getOperand(2);
3264 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
3265 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
3266 std::cerr << "Cannot allocate stack object with greater alignment than"
3267 << " the stack alignment yet!";
3271 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3272 Select(N.getOperand(0));
3273 BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
3274 .addImm(CN->getValue());
3276 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3277 Select(N.getOperand(0));
3278 Tmp1 = SelectExpr(N.getOperand(1));
3280 Tmp1 = SelectExpr(N.getOperand(1));
3281 Select(N.getOperand(0));
3284 // Subtract size from stack pointer, thereby allocating some space.
3285 BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(Tmp1);
3288 // Put a pointer to the space into the result register, by copying the stack
3290 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::ESP);
3293 case X86ISD::TAILCALL:
3294 case X86ISD::CALL: {
3295 // The chain for this call is now lowered.
3296 ExprMap.insert(std::make_pair(N.getValue(0), 1));
3298 bool isDirect = isa<GlobalAddressSDNode>(N.getOperand(1)) ||
3299 isa<ExternalSymbolSDNode>(N.getOperand(1));
3300 unsigned Callee = 0;
3302 Select(N.getOperand(0));
3304 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3305 Select(N.getOperand(0));
3306 Callee = SelectExpr(N.getOperand(1));
3308 Callee = SelectExpr(N.getOperand(1));
3309 Select(N.getOperand(0));
3313 // If this call has values to pass in registers, do so now.
3314 if (Node->getNumOperands() > 4) {
3315 // The first value is passed in (a part of) EAX, the second in EDX.
3316 unsigned RegOp1 = SelectExpr(N.getOperand(4));
3318 Node->getNumOperands() > 5 ? SelectExpr(N.getOperand(5)) : 0;
3320 switch (N.getOperand(4).getValueType()) {
3321 default: assert(0 && "Bad thing to pass in regs");
3323 case MVT::i8: BuildMI(BB, X86::MOV8rr , 1,X86::AL).addReg(RegOp1); break;
3324 case MVT::i16: BuildMI(BB, X86::MOV16rr, 1,X86::AX).addReg(RegOp1); break;
3325 case MVT::i32: BuildMI(BB, X86::MOV32rr, 1,X86::EAX).addReg(RegOp1);break;
3328 switch (N.getOperand(5).getValueType()) {
3329 default: assert(0 && "Bad thing to pass in regs");
3332 BuildMI(BB, X86::MOV8rr , 1, X86::DL).addReg(RegOp2);
3335 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
3338 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
3343 if (GlobalAddressSDNode *GASD =
3344 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
3345 BuildMI(BB, X86::CALLpcrel32, 1).addGlobalAddress(GASD->getGlobal(),true);
3346 } else if (ExternalSymbolSDNode *ESSDN =
3347 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1))) {
3348 BuildMI(BB, X86::CALLpcrel32,
3349 1).addExternalSymbol(ESSDN->getSymbol(), true);
3351 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3352 Select(N.getOperand(0));
3353 Tmp1 = SelectExpr(N.getOperand(1));
3355 Tmp1 = SelectExpr(N.getOperand(1));
3356 Select(N.getOperand(0));
3359 BuildMI(BB, X86::CALL32r, 1).addReg(Tmp1);
3362 // Get caller stack amount and amount the callee added to the stack pointer.
3363 Tmp1 = cast<ConstantSDNode>(N.getOperand(2))->getValue();
3364 Tmp2 = cast<ConstantSDNode>(N.getOperand(3))->getValue();
3365 BuildMI(BB, X86::ADJCALLSTACKUP, 2).addImm(Tmp1).addImm(Tmp2);
3367 if (Node->getNumValues() != 1)
3368 switch (Node->getValueType(1)) {
3369 default: assert(0 && "Unknown value type for call result!");
3370 case MVT::Other: return 1;
3373 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3376 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3379 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3380 if (Node->getNumValues() == 3 && Node->getValueType(2) == MVT::i32)
3381 BuildMI(BB, X86::MOV32rr, 1, Result+1).addReg(X86::EDX);
3383 case MVT::f64: // Floating-point return values live in %ST(0)
3384 ContainsFPCode = true;
3385 BuildMI(BB, X86::FpGETRESULT, 1, Result);
3388 return Result+N.ResNo-1;
3391 // First, determine that the size of the operand falls within the acceptable
3392 // range for this architecture.
3394 if (Node->getOperand(1).getValueType() != MVT::i16) {
3395 std::cerr << "llvm.readport: Address size is not 16 bits\n";
3399 // Make sure we generate both values.
3400 if (Result != 1) { // Generate the token
3401 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3402 assert(0 && "readport already emitted!?");
3404 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3406 Select(Node->getOperand(0)); // Select the chain.
3408 // If the port is a single-byte constant, use the immediate form.
3409 if (ConstantSDNode *Port = dyn_cast<ConstantSDNode>(Node->getOperand(1)))
3410 if ((Port->getValue() & 255) == Port->getValue()) {
3411 switch (Node->getValueType(0)) {
3413 BuildMI(BB, X86::IN8ri, 1).addImm(Port->getValue());
3414 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3417 BuildMI(BB, X86::IN16ri, 1).addImm(Port->getValue());
3418 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3421 BuildMI(BB, X86::IN32ri, 1).addImm(Port->getValue());
3422 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3428 // Now, move the I/O port address into the DX register and use the IN
3429 // instruction to get the input data.
3431 Tmp1 = SelectExpr(Node->getOperand(1));
3432 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Tmp1);
3433 switch (Node->getValueType(0)) {
3435 BuildMI(BB, X86::IN8rr, 0);
3436 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3439 BuildMI(BB, X86::IN16rr, 0);
3440 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3443 BuildMI(BB, X86::IN32rr, 0);
3444 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3447 std::cerr << "Cannot do input on this data type";
3456 /// TryToFoldLoadOpStore - Given a store node, try to fold together a
3457 /// load/op/store instruction. If successful return true.
3458 bool ISel::TryToFoldLoadOpStore(SDNode *Node) {
3459 assert(Node->getOpcode() == ISD::STORE && "Can only do this for stores!");
// Decompose the store node: op#0 = token chain, op#1 = value being stored,
// op#2 = destination address.
3460 SDOperand Chain = Node->getOperand(0);
3461 SDOperand StVal = Node->getOperand(1);
3462 SDOperand StPtr = Node->getOperand(2);
3464 // The chain has to be a load, the stored value must be an integer binary
3465 // operation with one use.
3466 if (!StVal.Val->hasOneUse() || StVal.Val->getNumOperands() != 2 ||
3467 MVT::isFloatingPoint(StVal.getValueType()))
3470 // Token chain must either be a factor node or the load to fold.
3471 if (Chain.getOpcode() != ISD::LOAD && Chain.getOpcode() != ISD::TokenFactor)
3476 // Check to see if there is a load from the same pointer that we're storing
3477 // to in either operand of the binop.
3478 if (StVal.getOperand(0).getOpcode() == ISD::LOAD &&
3479 StVal.getOperand(0).getOperand(1) == StPtr)
3480 TheLoad = StVal.getOperand(0);
3481 else if (StVal.getOperand(1).getOpcode() == ISD::LOAD &&
3482 StVal.getOperand(1).getOperand(1) == StPtr)
3483 TheLoad = StVal.getOperand(1);
3485 return false; // No matching load operand.
3487 // We can only fold the load if there are no intervening side-effecting
3488 // operations. This means that the store uses the load as its token chain, or
3489 // there are only token factor nodes in between the store and load.
3490 if (Chain != TheLoad.getValue(1)) {
3491 // Okay, the other option is that we have a store referring to (possibly
3492 // nested) token factor nodes. For now, just try peeking through one level
3493 // of token factors to see if this is the case.
3494 bool ChainOk = false;
3495 if (Chain.getOpcode() == ISD::TokenFactor) {
3496 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3497 if (Chain.getOperand(i) == TheLoad.getValue(1)) {
3503 if (!ChainOk) return false;
// The load must read from exactly the address the store writes, otherwise
// this is not a read-modify-write of one memory location.
3506 if (TheLoad.getOperand(1) != StPtr)
3509 // Make sure that one of the operands of the binop is the load, and that the
3510 // load folds into the binop.
3511 if (((StVal.getOperand(0) != TheLoad ||
3512 !isFoldableLoad(TheLoad, StVal.getOperand(1))) &&
3513 (StVal.getOperand(1) != TheLoad ||
3514 !isFoldableLoad(TheLoad, StVal.getOperand(0)))))
3517 // Finally, check to see if this is one of the ops we can handle!
// Opcode tables, indexed by operand width: entries [0..2] are the
// memory,immediate forms (i8/i16/i32) and entries [3..5] the memory,register
// forms. A 0 entry means no single instruction exists for that combination.
3518 static const unsigned ADDTAB[] = {
3519 X86::ADD8mi, X86::ADD16mi, X86::ADD32mi,
3520 X86::ADD8mr, X86::ADD16mr, X86::ADD32mr,
3522 static const unsigned SUBTAB[] = {
3523 X86::SUB8mi, X86::SUB16mi, X86::SUB32mi,
3524 X86::SUB8mr, X86::SUB16mr, X86::SUB32mr,
3526 static const unsigned ANDTAB[] = {
3527 X86::AND8mi, X86::AND16mi, X86::AND32mi,
3528 X86::AND8mr, X86::AND16mr, X86::AND32mr,
3530 static const unsigned ORTAB[] = {
3531 X86::OR8mi, X86::OR16mi, X86::OR32mi,
3532 X86::OR8mr, X86::OR16mr, X86::OR32mr,
3534 static const unsigned XORTAB[] = {
3535 X86::XOR8mi, X86::XOR16mi, X86::XOR32mi,
3536 X86::XOR8mr, X86::XOR16mr, X86::XOR32mr,
// Shift-by-register on x86 requires the amount in CL, so the mem,reg slots
// of the shift tables are 0 (not handled by this simple fold).
3538 static const unsigned SHLTAB[] = {
3539 X86::SHL8mi, X86::SHL16mi, X86::SHL32mi,
3540 /*Have to put the reg in CL*/0, 0, 0,
3542 static const unsigned SARTAB[] = {
3543 X86::SAR8mi, X86::SAR16mi, X86::SAR32mi,
3544 /*Have to put the reg in CL*/0, 0, 0,
3546 static const unsigned SHRTAB[] = {
3547 X86::SHR8mi, X86::SHR16mi, X86::SHR32mi,
3548 /*Have to put the reg in CL*/0, 0, 0,
// Pick the table for the binop; division/remainder have no [mem] op= form.
3551 const unsigned *TabPtr = 0;
3552 switch (StVal.getOpcode()) {
3554 std::cerr << "CANNOT [mem] op= val: ";
3555 StVal.Val->dump(); std::cerr << "\n";
3560 case ISD::UREM: return false;
3562 case ISD::ADD: TabPtr = ADDTAB; break;
3563 case ISD::SUB: TabPtr = SUBTAB; break;
3564 case ISD::AND: TabPtr = ANDTAB; break;
3565 case ISD:: OR: TabPtr = ORTAB; break;
3566 case ISD::XOR: TabPtr = XORTAB; break;
3567 case ISD::SHL: TabPtr = SHLTAB; break;
3568 case ISD::SRA: TabPtr = SARTAB; break;
3569 case ISD::SRL: TabPtr = SHRTAB; break;
3572 // Handle: [mem] op= CST
3573 SDOperand Op0 = StVal.getOperand(0);
3574 SDOperand Op1 = StVal.getOperand(1);
3576 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
3577 switch (Op0.getValueType()) { // Use Op0's type because of shifts.
3580 case MVT::i8: Opc = TabPtr[0]; break;
3581 case MVT::i16: Opc = TabPtr[1]; break;
3582 case MVT::i32: Opc = TabPtr[2]; break;
// Mark the load's token result as already emitted so it is not selected
// again on its own.
3586 if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
3587 assert(0 && "Already emitted?");
// Select the chain and the address in whichever order has the higher
// register pressure first.
3591 if (getRegPressure(TheLoad.getOperand(0)) >
3592 getRegPressure(TheLoad.getOperand(1))) {
3593 Select(TheLoad.getOperand(0));
3594 SelectAddress(TheLoad.getOperand(1), AM);
3596 SelectAddress(TheLoad.getOperand(1), AM);
3597 Select(TheLoad.getOperand(0));
// Special-case [X] += 1 as INC [X] ...
3600 if (StVal.getOpcode() == ISD::ADD) {
3601 if (CN->getValue() == 1) {
3602 switch (Op0.getValueType()) {
3605 addFullAddress(BuildMI(BB, X86::INC8m, 4), AM);
3607 case MVT::i16: Opc = TabPtr[1];
3608 addFullAddress(BuildMI(BB, X86::INC16m, 4), AM);
3610 case MVT::i32: Opc = TabPtr[2];
3611 addFullAddress(BuildMI(BB, X86::INC32m, 4), AM);
3614 } else if (CN->getValue()+1 == 0) { // [X] += -1 -> DEC [X]
3615 switch (Op0.getValueType()) {
3618 addFullAddress(BuildMI(BB, X86::DEC8m, 4), AM);
3620 case MVT::i16: Opc = TabPtr[1];
3621 addFullAddress(BuildMI(BB, X86::DEC16m, 4), AM);
3623 case MVT::i32: Opc = TabPtr[2];
3624 addFullAddress(BuildMI(BB, X86::DEC32m, 4), AM);
// General case: emit the op-mem-immediate instruction.
3630 addFullAddress(BuildMI(BB, Opc, 4+1),AM).addImm(CN->getValue());
3635 // If we have [mem] = V op [mem], try to turn it into:
3636 // [mem] = [mem] op V.
// Only commutative ops may be swapped; SUB and the shifts are excluded.
3637 if (Op1 == TheLoad && StVal.getOpcode() != ISD::SUB &&
3638 StVal.getOpcode() != ISD::SHL && StVal.getOpcode() != ISD::SRA &&
3639 StVal.getOpcode() != ISD::SRL)
3640 std::swap(Op0, Op1);
3642 if (Op0 != TheLoad) return false;
// Memory,register forms live in table slots [3..5].
3644 switch (Op0.getValueType()) {
3645 default: return false;
3647 case MVT::i8: Opc = TabPtr[3]; break;
3648 case MVT::i16: Opc = TabPtr[4]; break;
3649 case MVT::i32: Opc = TabPtr[5]; break;
3652 // Table entry doesn't exist?
3653 if (Opc == 0) return false;
3655 if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
3656 assert(0 && "Already emitted?");
3658 Select(TheLoad.getOperand(0));
3661 SelectAddress(TheLoad.getOperand(1), AM);
3662 unsigned Reg = SelectExpr(Op1);
3663 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Reg);
3667 /// If node is a ret(tailcall) node, emit the specified tail call and return
3668 /// true, otherwise return false.
3670 /// FIXME: This whole thing should be a post-legalize optimization pass which
3671 /// recognizes and transforms the dag. We don't want the selection phase doing
3674 bool ISel::EmitPotentialTailCall(SDNode *RetNode) {
3675 assert(RetNode->getOpcode() == ISD::RET && "Not a return");
// Walk back from the return along its token chain looking for a call that
// can be turned into a tail call.
3677 SDOperand Chain = RetNode->getOperand(0);
3679 // If this is a token factor node where one operand is a call, dig into it.
3680 SDOperand TokFactor;
3681 unsigned TokFactorOperand = 0;
3682 if (Chain.getOpcode() == ISD::TokenFactor) {
3683 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3684 if (Chain.getOperand(i).getOpcode() == ISD::CALLSEQ_END ||
3685 Chain.getOperand(i).getOpcode() == X86ISD::TAILCALL) {
// Remember which token-factor operand was the call so it can be
// excluded when the other operands are emitted below.
3686 TokFactorOperand = i;
3688 Chain = Chain.getOperand(i);
3691 if (TokFactor.Val == 0) return false; // No call operand.
3694 // Skip the CALLSEQ_END node if present.
3695 if (Chain.getOpcode() == ISD::CALLSEQ_END)
3696 Chain = Chain.getOperand(0);
3698 // Is a tailcall the last control operation that occurs before the return?
3699 if (Chain.getOpcode() != X86ISD::TAILCALL)
3702 // If we return a value, is it the value produced by the call?
3703 if (RetNode->getNumOperands() > 1) {
3704 // Not returning the ret val of the call?
3705 if (Chain.Val->getNumValues() == 1 ||
3706 RetNode->getOperand(1) != Chain.getValue(1))
// A two-register return must also match the call's second result value.
3709 if (RetNode->getNumOperands() > 2) {
3710 if (Chain.Val->getNumValues() == 2 ||
3711 RetNode->getOperand(2) != Chain.getValue(2))
3714 assert(RetNode->getNumOperands() <= 3);
3717 // CalleeCallArgAmt - The total number of bytes used for the callee arg area.
3718 // For FastCC, this will always be > 0.
3719 unsigned CalleeCallArgAmt =
3720 cast<ConstantSDNode>(Chain.getOperand(2))->getValue();
3722 // CalleeCallArgPopAmt - The number of bytes in the call area popped by the
3723 // callee. For FastCC this will always be > 0, for CCC this is always 0.
3724 unsigned CalleeCallArgPopAmt =
3725 cast<ConstantSDNode>(Chain.getOperand(3))->getValue();
3727 // There are several cases we can handle here. First, if the caller and
3728 // callee are both CCC functions, we can tailcall if the callee takes <= the
3729 // number of argument bytes that the caller does.
3730 if (CalleeCallArgPopAmt == 0 && // Callee is C CallingConv?
3731 X86Lowering.getBytesToPopOnReturn() == 0) { // Caller is C CallingConv?
3732 // Check to see if caller arg area size >= callee arg area size.
3733 if (X86Lowering.getBytesCallerReserves() >= CalleeCallArgAmt) {
// NOTE: the CCC->CCC tail-call emission itself is not implemented yet;
// the call below is intentionally commented out.
3734 //std::cerr << "CCC TAILCALL UNIMP!\n";
3735 // If TokFactor is non-null, emit all operands.
3737 //EmitCCCToCCCTailCall(Chain.Val);
3743 // Second, if both are FastCC functions, we can always perform the tail call.
3744 if (CalleeCallArgPopAmt && X86Lowering.getBytesToPopOnReturn()) {
3745 // If TokFactor is non-null, emit all operands before the call.
3746 if (TokFactor.Val) {
3747 for (unsigned i = 0, e = TokFactor.getNumOperands(); i != e; ++i)
3748 if (i != TokFactorOperand)
3749 Select(TokFactor.getOperand(i));
3752 EmitFastCCToFastCCTailCall(Chain.Val);
3756 // We don't support mixed calls, due to issues with alignment. We could in
3757 // theory handle some mixed calls from CCC -> FastCC if the stack is properly
3758 // aligned (which depends on the number of arguments to the callee). TODO.
// GetAdjustedArgumentStores - Recursively rewrite the chain of outgoing
// argument stores for a tail call so each store targets a fixed frame index
// at `Offset` bytes relative to the incoming ESP, instead of [ESP+C] in the
// current frame. Recurses through TokenFactor nodes and stops (stripping the
// node) at CALLSEQ_START.
3762 static SDOperand GetAdjustedArgumentStores(SDOperand Chain, int Offset,
3763 SelectionDAG &DAG) {
3764 MVT::ValueType StoreVT;
3765 switch (Chain.getOpcode()) {
3766 case ISD::CALLSEQ_START:
3767 // If we found the start of the call sequence, we're done. We actually
3768 // strip off the CALLSEQ_START node, to avoid generating the
3769 // ADJCALLSTACKDOWN marker for the tail call.
3770 return Chain.getOperand(0);
3771 case ISD::TokenFactor: {
// Rebuild the token factor with each operand adjusted recursively.
3772 std::vector<SDOperand> Ops;
3773 Ops.reserve(Chain.getNumOperands());
3774 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3775 Ops.push_back(GetAdjustedArgumentStores(Chain.getOperand(i), Offset,DAG));
3776 return DAG.getNode(ISD::TokenFactor, MVT::Other, Ops);
3778 case ISD::STORE: // Normal store
3779 StoreVT = Chain.getOperand(1).getValueType();
3781 case ISD::TRUNCSTORE: // FLOAT store
3782 StoreVT = cast<MVTSDNode>(Chain)->getExtraValueType();
// Determine the original byte offset of the store destination from ESP.
3786 SDOperand OrigDest = Chain.getOperand(2);
3787 unsigned OrigOffset;
3789 if (OrigDest.getOpcode() == ISD::CopyFromReg) {
3791 assert(cast<RegSDNode>(OrigDest)->getReg() == X86::ESP);
3793 // We expect only (ESP+C)
3794 assert(OrigDest.getOpcode() == ISD::ADD &&
3795 isa<ConstantSDNode>(OrigDest.getOperand(1)) &&
3796 OrigDest.getOperand(0).getOpcode() == ISD::CopyFromReg &&
3797 cast<RegSDNode>(OrigDest.getOperand(0))->getReg() == X86::ESP);
3798 OrigOffset = cast<ConstantSDNode>(OrigDest.getOperand(1))->getValue();
3801 // Compute the new offset from the incoming ESP value we wish to use.
3802 unsigned NewOffset = OrigOffset + Offset;
// Create a fixed stack object at the adjusted offset and store there.
3804 unsigned OpSize = (MVT::getSizeInBits(StoreVT)+7)/8; // Bits -> Bytes
3805 MachineFunction &MF = DAG.getMachineFunction();
3806 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, NewOffset);
3807 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
// Adjust the rest of the chain before rebuilding this store on top of it.
3809 SDOperand InChain = GetAdjustedArgumentStores(Chain.getOperand(0), Offset,
3811 if (Chain.getOpcode() == ISD::STORE)
3812 return DAG.getNode(ISD::STORE, MVT::Other, InChain, Chain.getOperand(1),
3814 assert(Chain.getOpcode() == ISD::TRUNCSTORE);
3815 return DAG.getNode(ISD::TRUNCSTORE, MVT::Other, InChain, Chain.getOperand(1),
3816 FIN, DAG.getSrcValue(NULL), StoreVT);
3820 /// EmitFastCCToFastCCTailCall - Given a tailcall in the tail position to a
3821 /// fastcc function from a fastcc function, emit the code to emit a 'proper'
3823 void ISel::EmitFastCCToFastCCTailCall(SDNode *TailCallNode) {
// Arg-area sizes: the callee's (from the tail call node) and the caller's
// (the bytes this function pops on return, i.e. its own incoming arg area).
3824 unsigned CalleeCallArgSize =
3825 cast<ConstantSDNode>(TailCallNode->getOperand(2))->getValue();
3826 unsigned CallerArgSize = X86Lowering.getBytesToPopOnReturn();
3828 //std::cerr << "****\n*** EMITTING TAIL CALL!\n****\n";
3830 // Adjust argument stores. Instead of storing to [ESP], f.e., store to frame
3831 // indexes that are relative to the incoming ESP. If the incoming and
3832 // outgoing arg sizes are the same we will store to [InESP] instead of
3833 // [CurESP] and the ESP referenced will be relative to the incoming function
3835 int ESPOffset = CallerArgSize-CalleeCallArgSize;
3836 SDOperand AdjustedArgStores =
3837 GetAdjustedArgumentStores(TailCallNode->getOperand(0), ESPOffset, *TheDAG);
3839 // Copy the return address of the caller into a virtual register so we don't
// clobber it while writing the outgoing arguments over the old frame.
3843 SDOperand RetValAddr = X86Lowering.getReturnAddressFrameIndex(*TheDAG);
3844 RetVal = TheDAG->getLoad(MVT::i32, TheDAG->getEntryNode(),
3845 RetValAddr, TheDAG->getSrcValue(NULL));
3849 // Codegen all of the argument stores.
3850 Select(AdjustedArgStores);
3853 // Emit a store of the saved ret value to the new location.
3854 MachineFunction &MF = TheDAG->getMachineFunction();
3855 int ReturnAddrFI = MF.getFrameInfo()->CreateFixedObject(4, ESPOffset-4);
3856 SDOperand RetValAddr = TheDAG->getFrameIndex(ReturnAddrFI, MVT::i32);
3857 Select(TheDAG->getNode(ISD::STORE, MVT::Other, TheDAG->getEntryNode(),
3858 RetVal, RetValAddr));
3861 // Get the destination value.
3862 SDOperand Callee = TailCallNode->getOperand(1);
3863 bool isDirect = isa<GlobalAddressSDNode>(Callee) ||
3864 isa<ExternalSymbolSDNode>(Callee);
// Indirect calls need the callee address materialized in a register.
3866 if (!isDirect) CalleeReg = SelectExpr(Callee);
// Operands 4 and 5, when present, are values passed in registers:
// the first in (a part of) EAX, the second in EDX.
3868 unsigned RegOp1 = 0;
3869 unsigned RegOp2 = 0;
3871 if (TailCallNode->getNumOperands() > 4) {
3872 // The first value is passed in (a part of) EAX, the second in EDX.
3873 RegOp1 = SelectExpr(TailCallNode->getOperand(4));
3874 if (TailCallNode->getNumOperands() > 5)
3875 RegOp2 = SelectExpr(TailCallNode->getOperand(5));
3877 switch (TailCallNode->getOperand(4).getValueType()) {
3878 default: assert(0 && "Bad thing to pass in regs");
3881 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(RegOp1);
3885 BuildMI(BB, X86::MOV16rr, 1,X86::AX).addReg(RegOp1);
3889 BuildMI(BB, X86::MOV32rr, 1,X86::EAX).addReg(RegOp1);
3894 switch (TailCallNode->getOperand(5).getValueType()) {
3895 default: assert(0 && "Bad thing to pass in regs");
3898 BuildMI(BB, X86::MOV8rr, 1, X86::DL).addReg(RegOp2);
3902 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
3906 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
// Slide ESP to where the callee expects it (difference between caller and
// callee arg-area sizes), then transfer control with a tail jump.
3914 BuildMI(BB, X86::ADJSTACKPTRri, 2,
3915 X86::ESP).addReg(X86::ESP).addImm(ESPOffset);
3917 // TODO: handle jmp [mem]
3919 BuildMI(BB, X86::TAILJMPr, 1).addReg(CalleeReg);
3920 } else if (GlobalAddressSDNode *GASD = dyn_cast<GlobalAddressSDNode>(Callee)){
3921 BuildMI(BB, X86::TAILJMPd, 1).addGlobalAddress(GASD->getGlobal(), true);
3923 ExternalSymbolSDNode *ESSDN = cast<ExternalSymbolSDNode>(Callee);
3924 BuildMI(BB, X86::TAILJMPd, 1).addExternalSymbol(ESSDN->getSymbol(), true);
3926 // ADD IMPLICIT USE RegOp1/RegOp2's
3930 void ISel::Select(SDOperand N) {
3931 unsigned Tmp1, Tmp2, Opc;
3933 if (!ExprMap.insert(std::make_pair(N, 1)).second)
3934 return; // Already selected.
3936 SDNode *Node = N.Val;
3938 switch (Node->getOpcode()) {
3940 Node->dump(); std::cerr << "\n";
3941 assert(0 && "Node not handled yet!");
3942 case ISD::EntryToken: return; // Noop
3943 case ISD::TokenFactor:
3944 if (Node->getNumOperands() == 2) {
3946 getRegPressure(Node->getOperand(1))>getRegPressure(Node->getOperand(0));
3947 Select(Node->getOperand(OneFirst));
3948 Select(Node->getOperand(!OneFirst));
3950 std::vector<std::pair<unsigned, unsigned> > OpsP;
3951 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
3952 OpsP.push_back(std::make_pair(getRegPressure(Node->getOperand(i)), i));
3953 std::sort(OpsP.begin(), OpsP.end());
3954 std::reverse(OpsP.begin(), OpsP.end());
3955 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
3956 Select(Node->getOperand(OpsP[i].second));
3959 case ISD::CopyToReg:
3960 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3961 Select(N.getOperand(0));
3962 Tmp1 = SelectExpr(N.getOperand(1));
3964 Tmp1 = SelectExpr(N.getOperand(1));
3965 Select(N.getOperand(0));
3967 Tmp2 = cast<RegSDNode>(N)->getReg();
3970 switch (N.getOperand(1).getValueType()) {
3971 default: assert(0 && "Invalid type for operation!");
3973 case MVT::i8: Opc = X86::MOV8rr; break;
3974 case MVT::i16: Opc = X86::MOV16rr; break;
3975 case MVT::i32: Opc = X86::MOV32rr; break;
3976 case MVT::f64: Opc = X86::FpMOV; ContainsFPCode = true; break;
3978 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
3982 if (N.getOperand(0).getOpcode() == ISD::CALLSEQ_END ||
3983 N.getOperand(0).getOpcode() == X86ISD::TAILCALL ||
3984 N.getOperand(0).getOpcode() == ISD::TokenFactor)
3985 if (EmitPotentialTailCall(Node))
3988 switch (N.getNumOperands()) {
3990 assert(0 && "Unknown return instruction!");
3992 assert(N.getOperand(1).getValueType() == MVT::i32 &&
3993 N.getOperand(2).getValueType() == MVT::i32 &&
3994 "Unknown two-register value!");
3995 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
3996 Tmp1 = SelectExpr(N.getOperand(1));
3997 Tmp2 = SelectExpr(N.getOperand(2));
3999 Tmp2 = SelectExpr(N.getOperand(2));
4000 Tmp1 = SelectExpr(N.getOperand(1));
4002 Select(N.getOperand(0));
4004 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4005 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(Tmp2);
4008 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4009 Select(N.getOperand(0));
4010 Tmp1 = SelectExpr(N.getOperand(1));
4012 Tmp1 = SelectExpr(N.getOperand(1));
4013 Select(N.getOperand(0));
4015 switch (N.getOperand(1).getValueType()) {
4016 default: assert(0 && "All other types should have been promoted!!");
4018 BuildMI(BB, X86::FpSETRESULT, 1).addReg(Tmp1);
4021 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4026 Select(N.getOperand(0));
4029 if (X86Lowering.getBytesToPopOnReturn() == 0)
4030 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
4032 BuildMI(BB, X86::RETI, 1).addImm(X86Lowering.getBytesToPopOnReturn());
4035 Select(N.getOperand(0));
4036 MachineBasicBlock *Dest =
4037 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
4038 BuildMI(BB, X86::JMP, 1).addMBB(Dest);
4043 MachineBasicBlock *Dest =
4044 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
4046 // Try to fold a setcc into the branch. If this fails, emit a test/jne
4048 if (EmitBranchCC(Dest, N.getOperand(0), N.getOperand(1))) {
4049 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4050 Select(N.getOperand(0));
4051 Tmp1 = SelectExpr(N.getOperand(1));
4053 Tmp1 = SelectExpr(N.getOperand(1));
4054 Select(N.getOperand(0));
4056 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
4057 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
4064 // If this load could be folded into the only using instruction, and if it
4065 // is safe to emit the instruction here, try to do so now.
4066 if (Node->hasNUsesOfValue(1, 0)) {
4067 SDOperand TheVal = N.getValue(0);
4069 for (SDNode::use_iterator UI = Node->use_begin(); ; ++UI) {
4070 assert(UI != Node->use_end() && "Didn't find use!");
4072 for (unsigned i = 0, e = UN->getNumOperands(); i != e; ++i)
4073 if (UN->getOperand(i) == TheVal) {
4079 // Only handle unary operators right now.
4080 if (User->getNumOperands() == 1) {
4082 SelectExpr(SDOperand(User, 0));
4093 case ISD::DYNAMIC_STACKALLOC:
4094 case X86ISD::TAILCALL:
4099 case ISD::CopyFromReg:
4100 case X86ISD::FILD64m:
4102 SelectExpr(N.getValue(0));
4105 case ISD::TRUNCSTORE: { // truncstore chain, val, ptr :storety
4106 // On X86, we can represent all types except for Bool and Float natively.
4108 MVT::ValueType StoredTy = cast<MVTSDNode>(Node)->getExtraValueType();
4109 assert((StoredTy == MVT::i1 || StoredTy == MVT::f32 ||
4110 StoredTy == MVT::i16 /*FIXME: THIS IS JUST FOR TESTING!*/)
4111 && "Unsupported TRUNCSTORE for this target!");
4113 if (StoredTy == MVT::i16) {
4114 // FIXME: This is here just to allow testing. X86 doesn't really have a
4115 // TRUNCSTORE i16 operation, but this is required for targets that do not
4116 // have 16-bit integer registers. We occasionally disable 16-bit integer
4117 // registers to test the promotion code.
4118 Select(N.getOperand(0));
4119 Tmp1 = SelectExpr(N.getOperand(1));
4120 SelectAddress(N.getOperand(2), AM);
4122 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4123 addFullAddress(BuildMI(BB, X86::MOV16mr, 5), AM).addReg(X86::AX);
4127 // Store of constant bool?
4128 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
4129 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4130 Select(N.getOperand(0));
4131 SelectAddress(N.getOperand(2), AM);
4133 SelectAddress(N.getOperand(2), AM);
4134 Select(N.getOperand(0));
4136 addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CN->getValue());
4141 default: assert(0 && "Cannot truncstore this type!");
4142 case MVT::i1: Opc = X86::MOV8mr; break;
4143 case MVT::f32: Opc = X86::FST32m; break;
4146 std::vector<std::pair<unsigned, unsigned> > RP;
4147 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
4148 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
4149 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
4150 std::sort(RP.begin(), RP.end());
4152 Tmp1 = 0; // Silence a warning.
4153 for (unsigned i = 0; i != 3; ++i)
4154 switch (RP[2-i].second) {
4155 default: assert(0 && "Unknown operand number!");
4156 case 0: Select(N.getOperand(0)); break;
4157 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
4158 case 2: SelectAddress(N.getOperand(2), AM); break;
4161 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
// STORE lowering.  Three special paths before the generic register store:
// (1) storing a constant: use a store-immediate (MOVmi) form, so the value
//     never occupies a register.
4167 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
4169 switch (CN->getValueType(0)) {
4170 default: assert(0 && "Invalid type for operation!");
4172 case MVT::i8: Opc = X86::MOV8mi; break;
4173 case MVT::i16: Opc = X86::MOV16mi; break;
4174 case MVT::i32: Opc = X86::MOV32mi; break;
4175 case MVT::f64: break; // NOTE(review): f64 constants appear to take a path elided from this excerpt.
// Evaluate whichever of chain / address has higher register pressure first.
4178 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4179 Select(N.getOperand(0));
4180 SelectAddress(N.getOperand(2), AM);
4182 SelectAddress(N.getOperand(2), AM);
4183 Select(N.getOperand(0));
4185 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
// (2) storing a global's address: emit MOV32mi with a global-address operand.
4188 } else if (GlobalAddressSDNode *GA =
4189 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
4190 assert(GA->getValueType(0) == MVT::i32 && "Bad pointer operand");
4192 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4193 Select(N.getOperand(0));
4194 SelectAddress(N.getOperand(2), AM);
4196 SelectAddress(N.getOperand(2), AM);
4197 Select(N.getOperand(0));
4199 addFullAddress(BuildMI(BB, X86::MOV32mi, 4+1),
4200 AM).addGlobalAddress(GA->getGlobal());
// (3) Check to see if this is a load/op/store combination that can be folded
// into a single read-modify-write memory instruction.
4204 // Check to see if this is a load/op/store combination.
4205 if (TryToFoldLoadOpStore(Node))
// Generic path: pick the register-store opcode by value type, then select
// operands in decreasing register-pressure order (same scheme as TRUNCSTORE).
4208 switch (N.getOperand(1).getValueType()) {
4209 default: assert(0 && "Cannot store this type!");
4211 case MVT::i8: Opc = X86::MOV8mr; break;
4212 case MVT::i16: Opc = X86::MOV16mr; break;
4213 case MVT::i32: Opc = X86::MOV32mr; break;
4214 case MVT::f64: Opc = X86::FST64m; break;
4217 std::vector<std::pair<unsigned, unsigned> > RP;
4218 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
4219 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
4220 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
4221 std::sort(RP.begin(), RP.end());
4223 Tmp1 = 0; // Silence a warning.
4224 for (unsigned i = 0; i != 3; ++i)
4225 switch (RP[2-i].second) {
4226 default: assert(0 && "Unknown operand number!");
4227 case 0: Select(N.getOperand(0)); break;
4228 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
4229 case 2: SelectAddress(N.getOperand(2), AM); break;
4232 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
// Call-sequence markers: operand 0 is the incoming chain, operand 1 the
// byte count, which becomes the immediate of the stack-adjust pseudo-op.
4235 case ISD::CALLSEQ_START:
4236 Select(N.getOperand(0));
4238 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
4239 BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(Tmp1);
4241 case ISD::CALLSEQ_END:
4242 Select(N.getOperand(0)); // Select the chain; the matching stack-restore is emitted below (elided here).
// MEMSET lowering to a REP STOS{B,W,D}.  Operands: 1 = destination pointer,
// 2 = fill value, 3 = byte count, 4 = alignment (constant).
4245 Select(N.getOperand(0)); // Select the chain.
4247 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
4248 if (Align == 0) Align = 1;
4250 // Turn the byte count into the # of iterations of the rep-string op.
4253 if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
4254 unsigned Val = ValC->getValue() & 255;
4256 // If the value is a constant, then we can potentially use larger sets:
4256 // replicate the byte into AX/EAX and store word/dword at a time.
4257 switch (Align & 3) {
4258 case 2: // WORD aligned: count = bytes/2, pattern byte doubled into AX.
4259 CountReg = MakeReg(MVT::i32);
4260 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4261 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
4263 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4264 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
4266 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
4267 Opcode = X86::REP_STOSW;
4269 case 0: // DWORD aligned: count = bytes/4, byte replicated 4x into EAX.
4270 CountReg = MakeReg(MVT::i32);
4271 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4272 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
4274 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4275 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
4277 Val = (Val << 8) | Val;
4278 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
4279 Opcode = X86::REP_STOSD;
4281 default: // BYTE aligned: store one byte per iteration from AL.
4282 CountReg = SelectExpr(Node->getOperand(3));
4283 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
4284 Opcode = X86::REP_STOSB;
4288 // If it's not a constant value we are storing, just fall back. We could
4289 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
4290 unsigned ValReg = SelectExpr(Node->getOperand(2));
4291 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
4292 CountReg = SelectExpr(Node->getOperand(3));
4293 Opcode = X86::REP_STOSB;
4296 // No matter what the alignment is, the destination pointer goes in EDI
4297 // and the iteration count in ECX (REP STOS needs no source register).
4298 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
4299 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
4300 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
4301 BuildMI(BB, Opcode, 0);
// MEMCPY lowering to a REP MOVS{B,W,D}.  Operands: 1 = destination pointer,
// 2 = source pointer, 3 = byte count, 4 = alignment (constant).
4305 Select(N.getOperand(0)); // Select the chain.
4307 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
4308 if (Align == 0) Align = 1;
4310 // Turn the byte count into the # of iterations of the rep-string op.
4313 switch (Align & 3) {
4314 case 2: // WORD aligned: copy two bytes per iteration, count = bytes/2.
4315 CountReg = MakeReg(MVT::i32);
4316 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4317 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
4319 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4320 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
4322 Opcode = X86::REP_MOVSW;
4324 case 0: // DWORD aligned: copy four bytes per iteration, count = bytes/4.
4325 CountReg = MakeReg(MVT::i32);
4326 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4327 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
4329 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4330 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
4332 Opcode = X86::REP_MOVSD;
4334 default: // BYTE aligned: count is already in bytes.
4335 CountReg = SelectExpr(Node->getOperand(3));
4336 Opcode = X86::REP_MOVSB;
4340 // No matter what the alignment is, we put the source in ESI, the
4341 // destination in EDI, and the count in ECX.
4342 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
4343 unsigned TmpReg2 = SelectExpr(Node->getOperand(2));
4344 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
4345 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
4346 BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
4347 BuildMI(BB, Opcode, 0);
// WRITEPORT (llvm.writeport) lowering to an OUT instruction.  Operand 1 is
// the value to write, operand 2 the i16 port number.
4350 case ISD::WRITEPORT:
4351 if (Node->getOperand(2).getValueType() != MVT::i16) {
4352 std::cerr << "llvm.writeport: Address size is not 16 bits\n";
4355 Select(Node->getOperand(0)); // Emit the chain.
// Place the value in AL/AX/EAX according to its type, and pick both the
// immediate-port (Tmp2) and DX-port (Opc) flavors of OUT.
4357 Tmp1 = SelectExpr(Node->getOperand(1));
4358 switch (Node->getOperand(1).getValueType()) {
4360 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
4361 Tmp2 = X86::OUT8ir; Opc = X86::OUT8rr;
4364 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(Tmp1);
4365 Tmp2 = X86::OUT16ir; Opc = X86::OUT16rr;
4368 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4369 Tmp2 = X86::OUT32ir; Opc = X86::OUT32rr;
4372 std::cerr << "llvm.writeport: invalid data type for X86 target";
4376 // If the port is a single-byte constant, use the immediate form.
4377 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Node->getOperand(2)))
4378 if ((CN->getValue() & 255) == CN->getValue()) {
4379 BuildMI(BB, Tmp2, 1).addImm(CN->getValue());
4383 // Otherwise, move the I/O port address into the DX register.
4384 unsigned Reg = SelectExpr(Node->getOperand(2));
4385 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
4386 BuildMI(BB, Opc, 0);
// Any opcode not handled above is a selection bug.
4389 assert(0 && "Should not be reached!");
4393 /// createX86PatternInstructionSelector - This pass converts an LLVM function
4394 /// into a machine code representation using pattern matching and a machine
4395 /// description file.
4397 FunctionPass *llvm::createX86PatternInstructionSelector(TargetMachine &TM) {
4398 return new ISel(TM); // Raw new: the pass manager is expected to take ownership of the returned pass.