1 //===-- X86ISelPattern.cpp - A pattern matching inst selector for X86 -----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a pattern matching instruction selector for X86.
12 //===----------------------------------------------------------------------===//
15 #include "X86InstrBuilder.h"
16 #include "X86RegisterInfo.h"
17 #include "llvm/Constants.h" // FIXME: REMOVE
18 #include "llvm/Function.h"
19 #include "llvm/CodeGen/MachineConstantPool.h" // FIXME: REMOVE
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/SelectionDAGISel.h"
24 #include "llvm/CodeGen/SSARegMap.h"
25 #include "llvm/Target/TargetData.h"
26 #include "llvm/Target/TargetLowering.h"
27 #include "llvm/Support/MathExtras.h"
28 #include "llvm/ADT/Statistic.h"
33 //===----------------------------------------------------------------------===//
34 // X86TargetLowering - X86 Implementation of the TargetLowering interface
// X86TargetLowering - Declares which MVT value types live in which X86
// register classes, marks ISD operations the target cannot handle (so the
// legalizer expands them), and records the FP immediates (+/-0.0, +/-1.0)
// that can be materialized directly with FLD0/FLD1 (optionally + FCHS).
// NOTE(review): the embedded line numbering jumps (38 -> 40, 63 -> 66, ...);
// interior lines of this class appear elided from this dump -- verify
// against the full file before editing.
36 class X86TargetLowering : public TargetLowering {
37 int VarArgsFrameIndex; // FrameIndex for start of varargs area.
38 int ReturnAddrIndex; // FrameIndex for return slot.
40 X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
41 // Set up the TargetLowering object.
// i8/i16/i32 map onto the 8/16/32-bit GPR classes; f64 onto the FP stack class.
42 addRegisterClass(MVT::i8, X86::R8RegisterClass);
43 addRegisterClass(MVT::i16, X86::R16RegisterClass);
44 addRegisterClass(MVT::i32, X86::R32RegisterClass);
45 addRegisterClass(MVT::f64, X86::RFPRegisterClass);
47 // FIXME: Eliminate these two classes when legalize can handle promotions
49 addRegisterClass(MVT::i1, X86::R8RegisterClass);
50 addRegisterClass(MVT::f32, X86::RFPRegisterClass);
52 computeRegisterProperties();
// Operations the selector cannot emit directly; the legalizer must expand
// or promote these before selection.
54 setOperationUnsupported(ISD::MEMMOVE, MVT::Other);
56 setOperationUnsupported(ISD::MUL, MVT::i8);
57 setOperationUnsupported(ISD::SELECT, MVT::i1);
58 setOperationUnsupported(ISD::SELECT, MVT::i8);
// FP constants loadable without a constant pool entry.
60 addLegalFPImmediate(+0.0); // FLD0
61 addLegalFPImmediate(+1.0); // FLD1
62 addLegalFPImmediate(-0.0); // FLD0/FCHS
63 addLegalFPImmediate(-1.0); // FLD1/FCHS
66 /// LowerArguments - This hook must be implemented to indicate how we should
67 /// lower the arguments for the specified function, into the specified DAG.
68 virtual std::vector<SDOperand>
69 LowerArguments(Function &F, SelectionDAG &DAG);
71 /// LowerCallTo - This hook lowers an abstract call to a function into an
73 virtual std::pair<SDOperand, SDOperand>
74 LowerCallTo(SDOperand Chain, const Type *RetTy, SDOperand Callee,
75 ArgListTy &Args, SelectionDAG &DAG);
// Returns the address of the VarArgsFrameIndex slot (see definition below).
77 virtual std::pair<SDOperand, SDOperand>
78 LowerVAStart(SDOperand Chain, SelectionDAG &DAG);
80 virtual std::pair<SDOperand,SDOperand>
81 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
82 const Type *ArgTy, SelectionDAG &DAG);
84 virtual std::pair<SDOperand, SDOperand>
85 LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
// LowerArguments - Build DAG loads for each incoming argument from its fixed
// stack slot ([ESP+4], [ESP+8], ...), returning one SDOperand per argument.
// Also sets up VarArgsFrameIndex past the last named argument and resets
// ReturnAddrIndex for this function.
// NOTE(review): embedded numbering jumps (109 -> 112, 127 -> 131, ...) --
// the `switch (ObjectVT)` header, the dead-argument check, and several
// closing braces appear elided from this dump; verify against the full file.
91 std::vector<SDOperand>
92 X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
93 std::vector<SDOperand> ArgValues;
95 // Add DAG nodes to load the arguments... On entry to a function on the X86,
96 // the stack frame looks like this:
98 // [ESP] -- return address
99 // [ESP + 4] -- first argument (leftmost lexically)
100 // [ESP + 8] -- second argument, if first argument is four bytes in size
103 MachineFunction &MF = DAG.getMachineFunction();
104 MachineFrameInfo *MFI = MF.getFrameInfo();
106 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
107 for (Function::aiterator I = F.abegin(), E = F.aend(); I != E; ++I) {
108 MVT::ValueType ObjectVT = getValueType(I->getType());
109 unsigned ArgIncrement = 4;
// Per-type slot sizes: 64-bit values occupy two 4-byte stack slots.
112 default: assert(0 && "Unhandled argument type!");
114 case MVT::i8: ObjSize = 1; break;
115 case MVT::i16: ObjSize = 2; break;
116 case MVT::i32: ObjSize = 4; break;
117 case MVT::i64: ObjSize = ArgIncrement = 8; break;
118 case MVT::f32: ObjSize = 4; break;
119 case MVT::f64: ObjSize = ArgIncrement = 8; break;
121 // Create the frame index object for this incoming parameter...
122 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
124 // Create the SelectionDAG nodes corresponding to a load from this parameter
125 SDOperand FIN = DAG.getFrameIndex(FIN == FIN ? FI : FI, MVT::i32);
127 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
131 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN);
// Dead arguments become constant zero of the right type instead of a load.
133 if (MVT::isInteger(ObjectVT))
134 ArgValue = DAG.getConstant(0, ObjectVT);
136 ArgValue = DAG.getConstantFP(0, ObjectVT);
138 ArgValues.push_back(ArgValue);
140 ArgOffset += ArgIncrement; // Move on to the next argument...
143 // If the function takes variable number of arguments, make a frame index for
144 // the start of the first vararg value... for expansion of llvm.va_start.
146 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
147 ReturnAddrIndex = 0; // No return address slot generated yet.
// LowerCallTo - Lower an abstract call: bracket it with ADJCALLSTACKDOWN /
// ADJCALLSTACKUP, store each argument to its [ESP+offset] slot (promoting
// sub-32-bit integers first), emit the call node, and return the pair
// (call result, output chain).
// NOTE(review): embedded numbering jumps (156 -> 160, 166 -> 179, ...) --
// the per-type NumBytes accounting and parts of the store-emission switch
// appear elided from this dump; verify against the full file.
151 std::pair<SDOperand, SDOperand>
152 X86TargetLowering::LowerCallTo(SDOperand Chain,
153 const Type *RetTy, SDOperand Callee,
154 ArgListTy &Args, SelectionDAG &DAG) {
155 // Count how many bytes are to be pushed on the stack.
156 unsigned NumBytes = 0;
// Empty-arg case: still emit a (zero-sized) stack adjustment for pairing
// with the ADJCALLSTACKUP below.
160 Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
161 DAG.getConstant(0, getPointerTy()));
163 for (unsigned i = 0, e = Args.size(); i != e; ++i)
164 switch (getValueType(Args[i].second)) {
165 default: assert(0 && "Unknown value type!");
179 Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
180 DAG.getConstant(NumBytes, getPointerTy()));
182 // Arguments go on the stack in reverse order, as specified by the ABI.
183 unsigned ArgOffset = 0;
184 SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32);
185 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
// PtrOff = ESP + running offset of this argument's slot.
187 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
188 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
190 switch (getValueType(Args[i].second)) {
191 default: assert(0 && "Unexpected ValueType for argument!");
195 // Promote the integer to 32 bits. If the input type is signed use a
196 // sign extend, otherwise use a zero extend.
197 if (Args[i].second->isSigned())
198 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
200 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
205 // FIXME: Note that all of these stores are independent of each other.
206 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
207 Args[i].first, PtrOff);
212 // FIXME: Note that all of these stores are independent of each other.
213 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
214 Args[i].first, PtrOff);
// Result value types for the call node: the return value (if non-void)
// followed by the output chain (MVT::Other).
221 std::vector<MVT::ValueType> RetVals;
222 MVT::ValueType RetTyVT = getValueType(RetTy);
223 if (RetTyVT != MVT::isVoid)
224 RetVals.push_back(RetTyVT);
225 RetVals.push_back(MVT::Other);
227 SDOperand TheCall = SDOperand(DAG.getCall(RetVals, Chain, Callee), 0);
// Chain is result #1 for value-returning calls, result #0 for void calls.
228 Chain = TheCall.getValue(RetTyVT != MVT::isVoid);
229 Chain = DAG.getNode(ISD::ADJCALLSTACKUP, MVT::Other, Chain,
230 DAG.getConstant(NumBytes, getPointerTy()));
231 return std::make_pair(TheCall, Chain);
// LowerVAStart - llvm.va_start lowers to the address of the
// VarArgsFrameIndex slot created by LowerArguments; the chain is unchanged.
234 std::pair<SDOperand, SDOperand>
235 X86TargetLowering::LowerVAStart(SDOperand Chain, SelectionDAG &DAG) {
236 // vastart just returns the address of the VarArgsFrameIndex slot.
237 return std::make_pair(DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32), Chain);
// LowerVAArgNext - Lower va_arg / va_next: load the argument from the VAList
// pointer, and (for the va_next form) advance VAList by the argument's size.
// NOTE(review): numbering jumps (243 -> 246, 249 -> 252, ...) -- the
// isVANext guard and the Amt computation appear elided from this dump;
// verify against the full file.
240 std::pair<SDOperand,SDOperand> X86TargetLowering::
241 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
242 const Type *ArgTy, SelectionDAG &DAG) {
243 MVT::ValueType ArgVT = getValueType(ArgTy);
246 Result = DAG.getLoad(ArgVT, DAG.getEntryNode(), VAList);
// Varargs smaller than i32 were promoted by the caller-side lowering;
// only i32 vs. i64/f64 determine the pointer increment here.
249 if (ArgVT == MVT::i32)
252 assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
253 "Other types should have been promoted for varargs!");
// Advance the VAList pointer past this argument.
256 Result = DAG.getNode(ISD::ADD, VAList.getValueType(), VAList,
257 DAG.getConstant(Amt, VAList.getValueType()));
259 return std::make_pair(Result, Chain);
// LowerFrameReturnAddress - Lower llvm.frameaddress / llvm.returnaddress.
// Depth > 0 is not supported and yields constant 0.  Otherwise a fixed
// 4-byte object at offset -4 (the return address slot) is created lazily
// and either loaded (return address) or address-adjusted (frame address).
// NOTE(review): numbering jumps (268 -> 270, 277 -> 279, ...) -- the
// else-branches and braces between the cases appear elided from this dump;
// verify against the full file.
263 std::pair<SDOperand, SDOperand> X86TargetLowering::
264 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
267 if (Depth) // Depths > 0 not supported yet!
268 Result = DAG.getConstant(0, getPointerTy())
270 if (ReturnAddrIndex == 0) {
271 // Set up a frame object for the return address.
272 MachineFunction &MF = DAG.getMachineFunction();
273 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
276 SDOperand RetAddrFI = DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
279 // Just load the return address
280 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI);
// Frame address = return-address slot address minus 4.
282 Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
283 DAG.getConstant(4, MVT::i32));
285 return std::make_pair(Result, Chain);
294 NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
296 //===--------------------------------------------------------------------===//
297 /// ISel - X86 specific code to select X86 machine instructions for
298 /// SelectionDAG operations.
// NOTE(review): numbering jumps (302 -> 305, 317 -> 320, ...) -- member
// declarations (e.g. the ContainsFPCode flag referenced by the comment at
// 301-302) appear elided from this dump; verify against the full file.
300 class ISel : public SelectionDAGISel {
301 /// ContainsFPCode - Every instruction we select that uses or defines a FP
302 /// register should set this to true.
305 /// X86Lowering - This object fully describes how to lower LLVM code to an
306 /// X86-specific SelectionDAG.
307 X86TargetLowering X86Lowering;
309 /// RegPressureMap - This keeps an approximate count of the number of
310 /// registers required to evaluate each node in the graph.
311 std::map<SDNode*, unsigned> RegPressureMap;
313 /// ExprMap - As shared expressions are codegen'd, we keep track of which
314 /// vreg the value is produced in, so we only emit one copy of each compiled
316 std::map<SDOperand, unsigned> ExprMap;
317 std::set<SDOperand> LoweredTokens;
320 ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
// getRegPressure - Look up the approximate register count for this node
// (0 if not yet computed).
323 unsigned getRegPressure(SDOperand O) {
324 return RegPressureMap[O.Val];
326 unsigned ComputeRegPressure(SDOperand O);
328 /// InstructionSelectBasicBlock - This callback is invoked by
329 /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
330 virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
// Load folding, comparison/branch/select emission, and the main
// expression/statement selection entry points (defined below).
332 bool isFoldableLoad(SDOperand Op);
333 void EmitFoldedLoad(SDOperand Op, X86AddressMode &AM);
336 void EmitCMP(SDOperand LHS, SDOperand RHS);
337 bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, SDOperand Cond);
338 void EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
339 unsigned RTrue, unsigned RFalse, unsigned RDest);
340 unsigned SelectExpr(SDOperand N);
341 bool SelectAddress(SDOperand N, X86AddressMode &AM);
342 void Select(SDOperand N);
346 /// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
347 /// when it has created a SelectionDAG for us to codegen.
// Drives selection for one block: scans existing PHIs for FP values,
// computes register pressure, selects the DAG, then decides whether an
// FP_REG_KILL must be inserted before the terminator.
// NOTE(review): numbering jumps (357 -> 359, 363 -> 368, ...) -- loop
// headers/closers appear elided from this dump; verify against the full file.
348 void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
349 // While we're doing this, keep track of whether we see any FP code for
350 // FP_REG_KILL insertion.
351 ContainsFPCode = false;
353 // Scan the PHI nodes that already are inserted into this basic block. If any
354 // of them is a PHI of a floating point value, we need to insert an
356 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
357 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
359 assert(I->getOpcode() == X86::PHI &&
360 "Isn't just PHI nodes?");
361 if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
362 X86::RFPRegisterClass) {
363 ContainsFPCode = true;
368 // Compute the RegPressureMap, which is an approximation for the number of
369 // registers required to compute each node.
370 ComputeRegPressure(DAG.getRoot());
372 // Codegen the basic block.
373 Select(DAG.getRoot());
375 // Finally, look at all of the successors of this block. If any contain a PHI
376 // node of FP type, we need to insert an FP_REG_KILL in this block.
377 for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
378 E = BB->succ_end(); SI != E && !ContainsFPCode; ++SI)
379 for (MachineBasicBlock::iterator I = (*SI)->begin(), E = (*SI)->end();
380 I != E && I->getOpcode() == X86::PHI; ++I) {
381 if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
382 X86::RFPRegisterClass) {
383 ContainsFPCode = true;
388 // Insert FP_REG_KILL instructions into basic blocks that need them. This
389 // only occurs due to the floating point stackifier not being aggressive
390 // enough to handle arbitrary global stackification.
392 // Currently we insert an FP_REG_KILL instruction into each block that uses or
393 // defines a floating point virtual register.
395 // When the global register allocators (like linear scan) finally update live
396 // variable analysis, we can keep floating point values in registers across
397 // basic blocks. This will be a huge win, but we are waiting on the global
398 // allocators before we can do this.
400 if (ContainsFPCode && BB->succ_size()) {
401 BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
405 // Clear state used for selection.
407 LoweredTokens.clear();
408 RegPressureMap.clear();
412 // ComputeRegPressure - Compute the RegPressureMap, which is an approximation
413 // for the number of registers required to compute each node. This is basically
414 // computing a generalized form of the Sethi-Ullman number for each node.
// NOTE(review): numbering jumps (418 -> 420, 423 -> 426, ...) -- the leaf
// base case and some braces appear elided from this dump; verify against
// the full file.
415 unsigned ISel::ComputeRegPressure(SDOperand O) {
// Memoized: a nonzero entry means this node was already visited.
417 unsigned &Result = RegPressureMap[N];
418 if (Result) return Result;
420 // FIXME: Should operations like CALL (which clobber lots o regs) have a
421 // higher fixed cost??
423 if (N->getNumOperands() == 0) {
426 unsigned MaxRegUse = 0;
427 unsigned NumExtraMaxRegUsers = 0;
428 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
// Constants are free -- they never occupy a register here.
430 if (N->getOperand(i).getOpcode() == ISD::Constant)
433 Regs = ComputeRegPressure(N->getOperand(i));
434 if (Regs > MaxRegUse) {
436 NumExtraMaxRegUsers = 0;
437 } else if (Regs == MaxRegUse &&
438 N->getOperand(i).getValueType() != MVT::Other) {
439 ++NumExtraMaxRegUsers;
// Sethi-Ullman style: ties at the max each add one extra live register.
443 Result = MaxRegUse+NumExtraMaxRegUsers;
446 //std::cerr << " WEIGHT: " << Result << " "; N->dump(); std::cerr << "\n";
450 /// SelectAddress - Add the specified node to the specified addressing mode,
451 /// returning true if it cannot be done.
// Pattern-matches frame indexes, globals, constants, shifts (scale 2/4/8),
// multiplies by 3/5/9 (base+index*[2,4,8]) and ADDs into an X86AddressMode.
// NOTE(review): numbering jumps (453 -> 455, 462 -> 464, ...) -- several
// case labels, guard conditions and returns appear elided from this dump;
// verify against the full file.
452 bool ISel::SelectAddress(SDOperand N, X86AddressMode &AM) {
453 switch (N.getOpcode()) {
455 case ISD::FrameIndex:
// FrameIndex can only go into an empty base slot.
456 if (AM.BaseType == X86AddressMode::RegBase && AM.Base.Reg == 0) {
457 AM.BaseType = X86AddressMode::FrameIndexBase;
458 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
462 case ISD::GlobalAddress:
464 AM.GV = cast<GlobalAddressSDNode>(N)->getGlobal();
// Plain constants fold straight into the displacement.
469 AM.Disp += cast<ConstantSDNode>(N)->getValue();
// Shift-left by 1/2/3 becomes a scaled-index of 2/4/8.
472 if (AM.IndexReg == 0 && AM.Scale == 1)
473 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
474 unsigned Val = CN->getValue();
475 if (Val == 1 || Val == 2 || Val == 3) {
477 SDOperand ShVal = N.Val->getOperand(0);
479 // Okay, we know that we have a scale by now. However, if the scaled
480 // value is an add of something and a constant, we can fold the
481 // constant into the disp field here.
482 if (ShVal.Val->getOpcode() == ISD::ADD &&
483 isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
484 AM.IndexReg = SelectExpr(ShVal.Val->getOperand(0));
485 ConstantSDNode *AddVal =
486 cast<ConstantSDNode>(ShVal.Val->getOperand(1));
// (X + C) << Val: the constant is pre-scaled into the displacement.
487 AM.Disp += AddVal->getValue() << Val;
489 AM.IndexReg = SelectExpr(ShVal);
496 // X*[3,5,9] -> X+X*[2,4,8]
497 if (AM.IndexReg == 0 && AM.BaseType == X86AddressMode::RegBase &&
499 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
500 if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
501 AM.Scale = unsigned(CN->getValue())-1;
503 SDOperand MulVal = N.Val->getOperand(0);
506 // Okay, we know that we have a scale by now. However, if the scaled
507 // value is an add of something and a constant, we can fold the
508 // constant into the disp field here.
509 if (MulVal.Val->getOpcode() == ISD::ADD &&
510 isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
511 Reg = SelectExpr(MulVal.Val->getOperand(0));
512 ConstantSDNode *AddVal =
513 cast<ConstantSDNode>(MulVal.Val->getOperand(1));
514 AM.Disp += AddVal->getValue() * CN->getValue();
516 Reg = SelectExpr(N.Val->getOperand(0));
// Same register serves as both base and index: base + reg*(scale).
519 AM.IndexReg = AM.Base.Reg = Reg;
// ADD: try folding both operands, in either order (Backup restores AM
// if the first ordering fails).
525 X86AddressMode Backup = AM;
526 if (!SelectAddress(N.Val->getOperand(0), AM) &&
527 !SelectAddress(N.Val->getOperand(1), AM))
530 if (!SelectAddress(N.Val->getOperand(1), AM) &&
531 !SelectAddress(N.Val->getOperand(0), AM))
538 // Is the base register already occupied?
539 if (AM.BaseType != X86AddressMode::RegBase || AM.Base.Reg) {
540 // If so, check to see if the scale index register is set.
541 if (AM.IndexReg == 0) {
542 AM.IndexReg = SelectExpr(N);
547 // Otherwise, we cannot select it.
551 // Default, generate it as a register.
552 AM.BaseType = X86AddressMode::RegBase;
553 AM.Base.Reg = SelectExpr(N);
557 /// Emit2SetCCsAndLogical - Emit the following sequence of instructions,
558 /// assuming that the temporary registers are in the 8-bit register class.
562 /// DestReg = logicalop Tmp1, Tmp2
// Combines two condition-flag tests (e.g. SETNP + SETE) with AND/OR for FP
// comparisons that need more than one x86 condition code.
// NOTE(review): numbering jumps (565 -> 567) -- the DestReg parameter line
// appears elided from this dump; verify against the full file.
564 static void Emit2SetCCsAndLogical(MachineBasicBlock *BB, unsigned SetCC1,
565 unsigned SetCC2, unsigned LogicalOp,
567 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
568 unsigned Tmp1 = RegMap->createVirtualRegister(X86::R8RegisterClass);
569 unsigned Tmp2 = RegMap->createVirtualRegister(X86::R8RegisterClass);
570 BuildMI(BB, SetCC1, 0, Tmp1);
571 BuildMI(BB, SetCC2, 0, Tmp2);
572 BuildMI(BB, LogicalOp, 2, DestReg).addReg(Tmp1).addReg(Tmp2);
575 /// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
576 /// condition codes match the specified SetCCOpcode. Note that some conditions
577 /// require multiple instructions to generate the correct value.
// NOTE(review): numbering jumps (579 -> 582, 593 -> 596, ...) -- the
// integer/FP branch structure, `break`s and fall-through `return`s between
// the case groups appear elided from this dump; verify against the full file.
578 static void EmitSetCC(MachineBasicBlock *BB, unsigned DestReg,
579 ISD::CondCode SetCCOpcode, bool isFP) {
// Integer conditions map 1:1 onto SETcc opcodes.
582 switch (SetCCOpcode) {
583 default: assert(0 && "Illegal integer SetCC!");
584 case ISD::SETEQ: Opc = X86::SETEr; break;
585 case ISD::SETGT: Opc = X86::SETGr; break;
586 case ISD::SETGE: Opc = X86::SETGEr; break;
587 case ISD::SETLT: Opc = X86::SETLr; break;
588 case ISD::SETLE: Opc = X86::SETLEr; break;
589 case ISD::SETNE: Opc = X86::SETNEr; break;
590 case ISD::SETULT: Opc = X86::SETBr; break;
591 case ISD::SETUGT: Opc = X86::SETAr; break;
592 case ISD::SETULE: Opc = X86::SETBEr; break;
593 case ISD::SETUGE: Opc = X86::SETAEr; break;
596 // On a floating point condition, the flags are set as follows:
600 // 1 | 0 | 0 | X == Y
601 // 1 | 1 | 1 | unordered
603 switch (SetCCOpcode) {
604 default: assert(0 && "Invalid FP setcc!");
607 Opc = X86::SETEr; // True if ZF = 1
611 Opc = X86::SETAr; // True if CF = 0 and ZF = 0
615 Opc = X86::SETAEr; // True if CF = 0
619 Opc = X86::SETBr; // True if CF = 1
623 Opc = X86::SETBEr; // True if CF = 1 or ZF = 1
627 Opc = X86::SETNEr; // True if ZF = 0
630 Opc = X86::SETPr; // True if PF = 1
633 Opc = X86::SETNPr; // True if PF = 0
// Ordered/unordered conditions need two SETcc's combined with AND/OR.
635 case ISD::SETOEQ: // !PF & ZF
636 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETEr, X86::AND8rr, DestReg);
638 case ISD::SETOLT: // !PF & CF
639 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBr, X86::AND8rr, DestReg);
641 case ISD::SETOLE: // !PF & (CF || ZF)
642 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBEr, X86::AND8rr, DestReg);
644 case ISD::SETUGT: // PF | (!ZF & !CF)
645 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAr, X86::OR8rr, DestReg);
647 case ISD::SETUGE: // PF | !CF
648 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAEr, X86::OR8rr, DestReg);
650 case ISD::SETUNE: // PF | !ZF
651 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETNEr, X86::OR8rr, DestReg);
655 BuildMI(BB, Opc, 0, DestReg);
659 /// EmitBranchCC - Emit code into BB that arranges for control to transfer to
660 /// the Dest block if the Cond condition is true. If we cannot fold this
661 /// condition into the branch, return true.
// Handles three shapes: flag-setting AND/OR feeding JNE, `xor C, -1`
// (boolean not) feeding JE, and a SetCC folded into a Jcc (with a second
// Jcc for FP unordered conditions).
// NOTE(review): numbering jumps (663 -> 665, 692 -> 696, ...) -- the Cond
// parameter line, several returns and `break`s appear elided from this
// dump; verify against the full file.
663 bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
665 // FIXME: Evaluate whether it would be good to emit code like (X < Y) | (A >
666 // B) using two conditional branches instead of one condbr, two setcc's, and
668 if ((Cond.getOpcode() == ISD::OR ||
669 Cond.getOpcode() == ISD::AND) && Cond.Val->hasOneUse()) {
670 // And and or set the flags for us, so there is no need to emit a TST of the
671 // result. It is only safe to do this if there is only a single use of the
672 // AND/OR though, otherwise we don't know it will be emitted here.
675 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
679 // Codegen br not C -> JE.
680 if (Cond.getOpcode() == ISD::XOR)
681 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(Cond.Val->getOperand(1)))
682 if (NC->isAllOnesValue()) {
// Order the chain/condition evaluation by register pressure.
684 if (getRegPressure(Chain) > getRegPressure(Cond)) {
686 CondR = SelectExpr(Cond.Val->getOperand(0));
688 CondR = SelectExpr(Cond.Val->getOperand(0));
691 BuildMI(BB, X86::TEST8rr, 2).addReg(CondR).addReg(CondR);
692 BuildMI(BB, X86::JE, 1).addMBB(Dest);
696 SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond);
698 return true; // Can only handle simple setcc's so far.
702 // Handle integer conditions first.
703 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
704 switch (SetCC->getCondition()) {
705 default: assert(0 && "Illegal integer SetCC!");
706 case ISD::SETEQ: Opc = X86::JE; break;
707 case ISD::SETGT: Opc = X86::JG; break;
708 case ISD::SETGE: Opc = X86::JGE; break;
709 case ISD::SETLT: Opc = X86::JL; break;
710 case ISD::SETLE: Opc = X86::JLE; break;
711 case ISD::SETNE: Opc = X86::JNE; break;
712 case ISD::SETULT: Opc = X86::JB; break;
713 case ISD::SETUGT: Opc = X86::JA; break;
714 case ISD::SETULE: Opc = X86::JBE; break;
715 case ISD::SETUGE: Opc = X86::JAE; break;
718 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
719 BuildMI(BB, Opc, 1).addMBB(Dest);
723 unsigned Opc2 = 0; // Second branch if needed.
725 // On a floating point condition, the flags are set as follows:
729 // 1 | 0 | 0 | X == Y
730 // 1 | 1 | 1 | unordered
732 switch (SetCC->getCondition()) {
733 default: assert(0 && "Invalid FP setcc!");
735 case ISD::SETEQ: Opc = X86::JE; break; // True if ZF = 1
737 case ISD::SETGT: Opc = X86::JA; break; // True if CF = 0 and ZF = 0
739 case ISD::SETGE: Opc = X86::JAE; break; // True if CF = 0
741 case ISD::SETLT: Opc = X86::JB; break; // True if CF = 1
743 case ISD::SETLE: Opc = X86::JBE; break; // True if CF = 1 or ZF = 1
745 case ISD::SETNE: Opc = X86::JNE; break; // True if ZF = 0
746 case ISD::SETUO: Opc = X86::JP; break; // True if PF = 1
747 case ISD::SETO: Opc = X86::JNP; break; // True if PF = 0
// Unordered conditions: two branches to the same destination.
748 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
749 Opc = X86::JA; // ZF = 0 & CF = 0
750 Opc2 = X86::JP; // PF = 1
752 case ISD::SETUGE: // PF = 1 | CF = 0
753 Opc = X86::JAE; // CF = 0
754 Opc2 = X86::JP; // PF = 1
756 case ISD::SETUNE: // PF = 1 | ZF = 0
757 Opc = X86::JNE; // ZF = 0
758 Opc2 = X86::JP; // PF = 1
// Ordered conditions that would need !PF are punted back to the caller.
760 case ISD::SETOEQ: // PF = 0 & ZF = 1
763 return true; // FIXME: Emit more efficient code for this branch.
764 case ISD::SETOLT: // PF = 0 & CF = 1
767 return true; // FIXME: Emit more efficient code for this branch.
768 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
771 return true; // FIXME: Emit more efficient code for this branch.
775 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
776 BuildMI(BB, Opc, 1).addMBB(Dest);
778 BuildMI(BB, Opc2, 1).addMBB(Dest);
782 /// EmitSelectCC - Emit code into BB that performs a select operation between
783 /// the two registers RTrue and RFalse, generating a result into RDest. Return
784 /// true if the fold cannot be performed.
// Maps the SetCC condition onto a CMOV (16/32-bit int or FP) when possible;
// otherwise materializes the condition into a register, TESTs it, and uses
// CMOVE with swapped operands.
// NOTE(review): numbering jumps (789 -> 791, 854 -> 861, ...) -- the enum
// head, `break`s and the fall-through path appear elided from this dump;
// verify against the full file.
786 void ISel::EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
787 unsigned RTrue, unsigned RFalse, unsigned RDest) {
789 EQ, NE, LT, LE, GT, GE, B, BE, A, AE, P, NP,
791 } CondCode = NOT_SET;
// Opcode tables indexed by CondCode (order must match the enum above).
793 static const unsigned CMOVTAB16[] = {
794 X86::CMOVE16rr, X86::CMOVNE16rr, X86::CMOVL16rr, X86::CMOVLE16rr,
795 X86::CMOVG16rr, X86::CMOVGE16rr, X86::CMOVB16rr, X86::CMOVBE16rr,
796 X86::CMOVA16rr, X86::CMOVAE16rr, X86::CMOVP16rr, X86::CMOVNP16rr,
798 static const unsigned CMOVTAB32[] = {
799 X86::CMOVE32rr, X86::CMOVNE32rr, X86::CMOVL32rr, X86::CMOVLE32rr,
800 X86::CMOVG32rr, X86::CMOVGE32rr, X86::CMOVB32rr, X86::CMOVBE32rr,
801 X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
803 static const unsigned CMOVTABFP[] = {
804 X86::FCMOVE , X86::FCMOVNE, /*missing*/0, /*missing*/0,
805 /*missing*/0, /*missing*/0, X86::FCMOVB , X86::FCMOVBE,
806 X86::FCMOVA , X86::FCMOVAE, X86::FCMOVP , X86::FCMOVNP
809 if (SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond)) {
810 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
811 switch (SetCC->getCondition()) {
812 default: assert(0 && "Unknown integer comparison!");
813 case ISD::SETEQ: CondCode = EQ; break;
814 case ISD::SETGT: CondCode = GT; break;
815 case ISD::SETGE: CondCode = GE; break;
816 case ISD::SETLT: CondCode = LT; break;
817 case ISD::SETLE: CondCode = LE; break;
818 case ISD::SETNE: CondCode = NE; break;
819 case ISD::SETULT: CondCode = B; break;
820 case ISD::SETUGT: CondCode = A; break;
821 case ISD::SETULE: CondCode = BE; break;
822 case ISD::SETUGE: CondCode = AE; break;
825 // On a floating point condition, the flags are set as follows:
829 // 1 | 0 | 0 | X == Y
830 // 1 | 1 | 1 | unordered
832 switch (SetCC->getCondition()) {
833 default: assert(0 && "Unknown FP comparison!");
835 case ISD::SETEQ: CondCode = EQ; break; // True if ZF = 1
837 case ISD::SETGT: CondCode = A; break; // True if CF = 0 and ZF = 0
839 case ISD::SETGE: CondCode = AE; break; // True if CF = 0
841 case ISD::SETLT: CondCode = B; break; // True if CF = 1
843 case ISD::SETLE: CondCode = BE; break; // True if CF = 1 or ZF = 1
845 case ISD::SETNE: CondCode = NE; break; // True if ZF = 0
846 case ISD::SETUO: CondCode = P; break; // True if PF = 1
847 case ISD::SETO: CondCode = NP; break; // True if PF = 0
848 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
849 case ISD::SETUGE: // PF = 1 | CF = 0
850 case ISD::SETUNE: // PF = 1 | ZF = 0
851 case ISD::SETOEQ: // PF = 0 & ZF = 1
852 case ISD::SETOLT: // PF = 0 & CF = 1
853 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
854 // We cannot emit this comparison as a single cmov.
// Direct-fold path: condition recognized, look up the CMOV opcode by type.
861 if (CondCode != NOT_SET) {
863 default: assert(0 && "Cannot select this type!");
864 case MVT::i16: Opc = CMOVTAB16[CondCode]; break;
865 case MVT::i32: Opc = CMOVTAB32[CondCode]; break;
867 case MVT::f64: Opc = CMOVTABFP[CondCode]; break;
871 // Finally, if we weren't able to fold this, just emit the condition and test
// (Opc == 0 covers CMOVTABFP holes marked /*missing*/ above.)
873 if (CondCode == NOT_SET || Opc == 0) {
874 // Get the condition into the zero flag.
875 unsigned CondReg = SelectExpr(Cond);
876 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
879 default: assert(0 && "Cannot select this type!");
880 case MVT::i16: Opc = X86::CMOVE16rr; break;
881 case MVT::i32: Opc = X86::CMOVE32rr; break;
883 case MVT::f64: Opc = X86::FCMOVE; break;
886 // FIXME: CMP R, 0 -> TEST R, R
887 EmitCMP(Cond.getOperand(0), Cond.getOperand(1));
888 std::swap(RTrue, RFalse);
890 BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse);
// EmitCMP - Emit a comparison of LHS against RHS that sets the x86 flags.
// Prefers memory-immediate (CMPmi) and memory-register (CMPmr) forms when
// the LHS is a foldable load, otherwise falls back to register forms;
// FP comparisons use FUCOMIr.
// NOTE(review): numbering jumps (893 -> 895, 903 -> 907, ...) -- local
// declarations, `return`s and closing braces appear elided from this dump;
// verify against the full file.
893 void ISel::EmitCMP(SDOperand LHS, SDOperand RHS) {
895 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
// Constant RHS + foldable load LHS -> compare-memory-with-immediate.
897 if (isFoldableLoad(LHS)) {
898 switch (RHS.getValueType()) {
901 case MVT::i8: Opc = X86::CMP8mi; break;
902 case MVT::i16: Opc = X86::CMP16mi; break;
903 case MVT::i32: Opc = X86::CMP32mi; break;
907 EmitFoldedLoad(LHS, AM);
908 addFullAddress(BuildMI(BB, Opc, 5), AM).addImm(CN->getValue());
// Constant RHS, LHS in a register -> compare-register-with-immediate.
913 switch (RHS.getValueType()) {
916 case MVT::i8: Opc = X86::CMP8ri; break;
917 case MVT::i16: Opc = X86::CMP16ri; break;
918 case MVT::i32: Opc = X86::CMP32ri; break;
921 unsigned Tmp1 = SelectExpr(LHS);
922 BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue());
// Non-constant RHS + foldable load LHS -> compare-memory-with-register.
928 if (isFoldableLoad(LHS)) {
929 switch (RHS.getValueType()) {
932 case MVT::i8: Opc = X86::CMP8mr; break;
933 case MVT::i16: Opc = X86::CMP16mr; break;
934 case MVT::i32: Opc = X86::CMP32mr; break;
// Evaluate the higher-pressure side first to minimize live registers.
939 if (getRegPressure(LHS) > getRegPressure(RHS)) {
940 EmitFoldedLoad(LHS, AM);
941 Reg = SelectExpr(RHS);
943 Reg = SelectExpr(RHS);
944 EmitFoldedLoad(LHS, AM);
946 addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(Reg);
// General register-register compare (FUCOMIr for FP).
951 switch (LHS.getValueType()) {
952 default: assert(0 && "Cannot compare this value!");
954 case MVT::i8: Opc = X86::CMP8rr; break;
955 case MVT::i16: Opc = X86::CMP16rr; break;
956 case MVT::i32: Opc = X86::CMP32rr; break;
958 case MVT::f64: Opc = X86::FUCOMIr; break;
961 if (getRegPressure(LHS) > getRegPressure(RHS)) {
962 Tmp1 = SelectExpr(LHS);
963 Tmp2 = SelectExpr(RHS);
965 Tmp2 = SelectExpr(RHS);
966 Tmp1 = SelectExpr(LHS);
968 BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
971 /// isFoldableLoad - Return true if this is a load instruction that can safely
972 /// be folded into an operation that uses it.
// NOTE(review): numbering jumps (976 -> 979) -- a line (presumably a
// `return false;` for the first check) appears elided from this dump;
// verify against the full file.
973 bool ISel::isFoldableLoad(SDOperand Op) {
974 if (Op.getOpcode() != ISD::LOAD ||
975 // FIXME: currently can't fold constant pool indexes.
976 isa<ConstantPoolSDNode>(Op.getOperand(1)))
979 // If this load has already been emitted, we clearly can't fold it.
980 if (ExprMap.count(Op)) return false;
// use_size() == 2: exactly one user of the value plus one user of the
// chain -- the single consumer the load will be folded into.
982 return Op.Val->use_size() == 2;
985 /// EmitFoldedLoad - Ensure that the arguments of the load are code generated,
986 /// and compute the address being loaded into AM.
// Selects the load's chain and address (higher register pressure first),
// then marks the load's chain result as already lowered so it is not
// emitted again as a standalone load.
// NOTE(review): numbering jumps (990 -> 992, 994 -> 998) -- the Select()
// calls on the chain appear elided from this dump; verify against the
// full file.
987 void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) {
988 SDOperand Chain = Op.getOperand(0);
989 SDOperand Address = Op.getOperand(1);
990 if (getRegPressure(Chain) > getRegPressure(Address)) {
992 SelectAddress(Address, AM);
994 SelectAddress(Address, AM);
998 // The chain for this load is now lowered.
999 LoweredTokens.insert(SDOperand(Op.Val, 1));
1000 ExprMap[SDOperand(Op.Val, 1)] = 1;
1003 unsigned ISel::SelectExpr(SDOperand N) {
1005 unsigned Tmp1, Tmp2, Tmp3;
1007 SDNode *Node = N.Val;
1010 if (Node->getOpcode() == ISD::CopyFromReg)
1011 // Just use the specified register as our input.
1012 return dyn_cast<CopyRegSDNode>(Node)->getReg();
1014 unsigned &Reg = ExprMap[N];
1015 if (Reg) return Reg;
1017 if (N.getOpcode() != ISD::CALL)
1018 Reg = Result = (N.getValueType() != MVT::Other) ?
1019 MakeReg(N.getValueType()) : 1;
1021 // If this is a call instruction, make sure to prepare ALL of the result
1022 // values as well as the chain.
1023 if (Node->getNumValues() == 1)
1024 Reg = Result = 1; // Void call, just a chain.
1026 Result = MakeReg(Node->getValueType(0));
1027 ExprMap[N.getValue(0)] = Result;
1028 for (unsigned i = 1, e = N.Val->getNumValues()-1; i != e; ++i)
1029 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
1030 ExprMap[SDOperand(Node, Node->getNumValues()-1)] = 1;
1034 switch (N.getOpcode()) {
1037 assert(0 && "Node not handled!\n");
1038 case ISD::FrameIndex:
1039 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
1040 addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
1042 case ISD::ConstantPool:
1043 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
1044 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
1046 case ISD::ConstantFP:
1047 ContainsFPCode = true;
1048 Tmp1 = Result; // Intermediate Register
1049 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
1050 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
1051 Tmp1 = MakeReg(MVT::f64);
1053 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
1054 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
1055 BuildMI(BB, X86::FLD0, 0, Tmp1);
1056 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
1057 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
1058 BuildMI(BB, X86::FLD1, 0, Tmp1);
1060 assert(0 && "Unexpected constant!");
1062 BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1);
1065 switch (N.getValueType()) {
1066 default: assert(0 && "Cannot use constants of this type!");
1068 case MVT::i8: Opc = X86::MOV8ri; break;
1069 case MVT::i16: Opc = X86::MOV16ri; break;
1070 case MVT::i32: Opc = X86::MOV32ri; break;
1072 BuildMI(BB, Opc, 1,Result).addImm(cast<ConstantSDNode>(N)->getValue());
1074 case ISD::GlobalAddress: {
1075 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
1076 BuildMI(BB, X86::MOV32ri, 1, Result).addGlobalAddress(GV);
1079 case ISD::ExternalSymbol: {
1080 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
1081 BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
1084 case ISD::FP_EXTEND:
1085 Tmp1 = SelectExpr(N.getOperand(0));
1086 BuildMI(BB, X86::FpMOV, 1, Result).addReg(Tmp1);
1088 case ISD::ZERO_EXTEND: {
1089 int DestIs16 = N.getValueType() == MVT::i16;
1090 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
1092 // FIXME: This hack is here for zero extension casts from bool to i8. This
1093 // would not be needed if bools were promoted by Legalize.
1094 if (N.getValueType() == MVT::i8) {
1095 Tmp1 = SelectExpr(N.getOperand(0));
1096 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
1100 if (isFoldableLoad(N.getOperand(0))) {
1101 static const unsigned Opc[3] = {
1102 X86::MOVZX32rm8, X86::MOVZX32rm16, X86::MOVZX16rm8
1106 EmitFoldedLoad(N.getOperand(0), AM);
1107 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
1112 static const unsigned Opc[3] = {
1113 X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8
1115 Tmp1 = SelectExpr(N.getOperand(0));
1116 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
1119 case ISD::SIGN_EXTEND: {
1120 int DestIs16 = N.getValueType() == MVT::i16;
1121 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
1123 // FIXME: Legalize should promote bools to i8!
1124 assert(N.getOperand(0).getValueType() != MVT::i1 &&
1125 "Sign extend from bool not implemented!");
1127 if (isFoldableLoad(N.getOperand(0))) {
1128 static const unsigned Opc[3] = {
1129 X86::MOVSX32rm8, X86::MOVSX32rm16, X86::MOVSX16rm8
1133 EmitFoldedLoad(N.getOperand(0), AM);
1134 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
1138 static const unsigned Opc[3] = {
1139 X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8
1141 Tmp1 = SelectExpr(N.getOperand(0));
1142 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
1146 // Fold TRUNCATE (LOAD P) into a smaller load from P.
1147 if (isFoldableLoad(N.getOperand(0))) {
1148 switch (N.getValueType()) {
1149 default: assert(0 && "Unknown truncate!");
1151 case MVT::i8: Opc = X86::MOV8rm; break;
1152 case MVT::i16: Opc = X86::MOV16rm; break;
1155 EmitFoldedLoad(N.getOperand(0), AM);
1156 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
1160 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
1161 // a move out of AX or AL.
1162 switch (N.getOperand(0).getValueType()) {
1163 default: assert(0 && "Unknown truncate!");
1164 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
1165 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
1166 case MVT::i32: Tmp2 = X86::EAX; Opc = X86::MOV32rr; break;
1168 Tmp1 = SelectExpr(N.getOperand(0));
1169 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
1171 switch (N.getValueType()) {
1172 default: assert(0 && "Unknown truncate!");
1174 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
1175 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
1177 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
1181 // Truncate from double to float by storing to memory as float,
1182 // then reading it back into a register.
1184 // Create as stack slot to use.
1185 // FIXME: This should automatically be made by the Legalizer!
1186 Tmp1 = TLI.getTargetData().getFloatAlignment();
1187 Tmp2 = BB->getParent()->getFrameInfo()->CreateStackObject(4, Tmp1);
1189 // Codegen the input.
1190 Tmp1 = SelectExpr(N.getOperand(0));
1192 // Emit the store, then the reload.
1193 addFrameReference(BuildMI(BB, X86::FST32m, 5), Tmp2).addReg(Tmp1);
1194 addFrameReference(BuildMI(BB, X86::FLD32m, 5, Result), Tmp2);
1197 case ISD::SINT_TO_FP:
1198 case ISD::UINT_TO_FP: {
1199 // FIXME: Most of this grunt work should be done by legalize!
1200 ContainsFPCode = true;
1202 // Promote the integer to a type supported by FLD. We do this because there
1203 // are no unsigned FLD instructions, so we must promote an unsigned value to
1204 // a larger signed value, then use FLD on the larger value.
1206 MVT::ValueType PromoteType = MVT::Other;
1207 MVT::ValueType SrcTy = N.getOperand(0).getValueType();
1208 unsigned PromoteOpcode = 0;
1209 unsigned RealDestReg = Result;
1213 // We don't have the facilities for directly loading byte sized data from
1214 // memory (even signed). Promote it to 16 bits.
1215 PromoteType = MVT::i16;
1216 PromoteOpcode = Node->getOpcode() == ISD::SINT_TO_FP ?
1217 X86::MOVSX16rr8 : X86::MOVZX16rr8;
1220 if (Node->getOpcode() == ISD::UINT_TO_FP) {
1221 PromoteType = MVT::i32;
1222 PromoteOpcode = X86::MOVZX32rr16;
1226 // Don't fild into the real destination.
1227 if (Node->getOpcode() == ISD::UINT_TO_FP)
1228 Result = MakeReg(Node->getValueType(0));
1232 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
1234 if (PromoteType != MVT::Other) {
1235 Tmp2 = MakeReg(PromoteType);
1236 BuildMI(BB, PromoteOpcode, 1, Tmp2).addReg(Tmp1);
1237 SrcTy = PromoteType;
1241 // Spill the integer to memory and reload it from there.
1242 unsigned Size = MVT::getSizeInBits(SrcTy)/8;
1243 MachineFunction *F = BB->getParent();
1244 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
1248 assert(0 && "Cast ulong to FP not implemented yet!");
1249 // FIXME: this won't work for cast [u]long to FP
1250 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1251 FrameIdx).addReg(Tmp1);
1252 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1253 FrameIdx, 4).addReg(Tmp1+1);
1254 addFrameReference(BuildMI(BB, X86::FILD64m, 5, Result), FrameIdx);
1257 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1258 FrameIdx).addReg(Tmp1);
1259 addFrameReference(BuildMI(BB, X86::FILD32m, 5, Result), FrameIdx);
1262 addFrameReference(BuildMI(BB, X86::MOV16mr, 5),
1263 FrameIdx).addReg(Tmp1);
1264 addFrameReference(BuildMI(BB, X86::FILD16m, 5, Result), FrameIdx);
1266 default: break; // No promotion required.
1269 if (Node->getOpcode() == ISD::UINT_TO_FP && Result != RealDestReg) {
1270 // If this is a cast from uint -> double, we need to be careful when if
1271 // the "sign" bit is set. If so, we don't want to make a negative number,
1272 // we want to make a positive number. Emit code to add an offset if the
1275 // Compute whether the sign bit is set by shifting the reg right 31 bits.
1276 unsigned IsNeg = MakeReg(MVT::i32);
1277 BuildMI(BB, X86::SHR32ri, 2, IsNeg).addReg(Tmp1).addImm(31);
1279 // Create a CP value that has the offset in one word and 0 in the other.
1280 static ConstantInt *TheOffset = ConstantUInt::get(Type::ULongTy,
1281 0x4f80000000000000ULL);
1282 unsigned CPI = F->getConstantPool()->getConstantPoolIndex(TheOffset);
1283 BuildMI(BB, X86::FADD32m, 5, RealDestReg).addReg(Result)
1284 .addConstantPoolIndex(CPI).addZImm(4).addReg(IsNeg).addSImm(0);
1286 } else if (Node->getOpcode() == ISD::UINT_TO_FP && SrcTy == MVT::i64) {
1287 // We need special handling for unsigned 64-bit integer sources. If the
1288 // input number has the "sign bit" set, then we loaded it incorrectly as a
1289 // negative 64-bit number. In this case, add an offset value.
1291 // Emit a test instruction to see if the dynamic input value was signed.
1292 BuildMI(BB, X86::TEST32rr, 2).addReg(Tmp1+1).addReg(Tmp1+1);
1294 // If the sign bit is set, get a pointer to an offset, otherwise get a
1295 // pointer to a zero.
1296 MachineConstantPool *CP = F->getConstantPool();
1297 unsigned Zero = MakeReg(MVT::i32);
1298 Constant *Null = Constant::getNullValue(Type::UIntTy);
1299 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 5, Zero),
1300 CP->getConstantPoolIndex(Null));
1301 unsigned Offset = MakeReg(MVT::i32);
1302 Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
1304 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 5, Offset),
1305 CP->getConstantPoolIndex(OffsetCst));
1306 unsigned Addr = MakeReg(MVT::i32);
1307 BuildMI(BB, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);
1309 // Load the constant for an add. FIXME: this could make an 'fadd' that
1310 // reads directly from memory, but we don't support these yet.
1311 unsigned ConstReg = MakeReg(MVT::f64);
1312 addDirectMem(BuildMI(BB, X86::FLD32m, 4, ConstReg), Addr);
1314 BuildMI(BB, X86::FpADD, 2, RealDestReg).addReg(ConstReg).addReg(Result);
1318 case ISD::FP_TO_SINT:
1319 case ISD::FP_TO_UINT: {
1320 // FIXME: Most of this grunt work should be done by legalize!
1321 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
1323 // Change the floating point control register to use "round towards zero"
1324 // mode when truncating to an integer value.
1326 MachineFunction *F = BB->getParent();
1327 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
1328 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
1330 // Load the old value of the high byte of the control word...
1331 unsigned HighPartOfCW = MakeReg(MVT::i8);
1332 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, HighPartOfCW),
1335 // Set the high part to be round to zero...
1336 addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
1337 CWFrameIdx, 1).addImm(12);
1339 // Reload the modified control word now...
1340 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1342 // Restore the memory image of control word to original value
1343 addFrameReference(BuildMI(BB, X86::MOV8mr, 5),
1344 CWFrameIdx, 1).addReg(HighPartOfCW);
1346 // We don't have the facilities for directly storing byte sized data to
1347 // memory. Promote it to 16 bits. We also must promote unsigned values to
1348 // larger classes because we only have signed FP stores.
1349 MVT::ValueType StoreClass = Node->getValueType(0);
1350 if (StoreClass == MVT::i8 || Node->getOpcode() == ISD::FP_TO_UINT)
1351 switch (StoreClass) {
1352 case MVT::i8: StoreClass = MVT::i16; break;
1353 case MVT::i16: StoreClass = MVT::i32; break;
1354 case MVT::i32: StoreClass = MVT::i64; break;
1355 // The following treatment of cLong may not be perfectly right,
1356 // but it survives chains of casts of the form
1357 // double->ulong->double.
1358 case MVT::i64: StoreClass = MVT::i64; break;
1359 default: assert(0 && "Unknown store class!");
1362 // Spill the integer to memory and reload it from there.
1363 unsigned Size = MVT::getSizeInBits(StoreClass)/8;
1364 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
1366 switch (StoreClass) {
1367 default: assert(0 && "Unknown store class!");
1369 addFrameReference(BuildMI(BB, X86::FIST16m, 5), FrameIdx).addReg(Tmp1);
1372 addFrameReference(BuildMI(BB, X86::FIST32m, 5), FrameIdx).addReg(Tmp1);
1375 addFrameReference(BuildMI(BB, X86::FISTP64m, 5), FrameIdx).addReg(Tmp1);
1379 switch (Node->getValueType(0)) {
1381 assert(0 && "Unknown integer type!");
1383 // FIXME: this isn't gunna work.
1384 assert(0 && "Cast FP to long not implemented yet!");
1385 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx);
1386 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result+1), FrameIdx, 4);
1388 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx);
1391 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Result), FrameIdx);
1394 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Result), FrameIdx);
1398 // Reload the original control word now.
1399 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1403 Op0 = N.getOperand(0);
1404 Op1 = N.getOperand(1);
1406 if (isFoldableLoad(Op0))
1407 std::swap(Op0, Op1);
1409 if (isFoldableLoad(Op1)) {
1410 switch (N.getValueType()) {
1411 default: assert(0 && "Cannot add this type!");
1413 case MVT::i8: Opc = X86::ADD8rm; break;
1414 case MVT::i16: Opc = X86::ADD16rm; break;
1415 case MVT::i32: Opc = X86::ADD32rm; break;
1416 case MVT::f32: Opc = X86::FADD32m; break;
1417 case MVT::f64: Opc = X86::FADD64m; break;
1420 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1421 Tmp1 = SelectExpr(Op0);
1422 EmitFoldedLoad(Op1, AM);
1424 EmitFoldedLoad(Op1, AM);
1425 Tmp1 = SelectExpr(Op0);
1427 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
1431 // See if we can codegen this as an LEA to fold operations together.
1432 if (N.getValueType() == MVT::i32) {
1434 if (!SelectAddress(Op0, AM) && !SelectAddress(Op1, AM)) {
1435 // If this is not just an add, emit the LEA. For a simple add (like
1436 // reg+reg or reg+imm), we just emit an add. It might be a good idea to
1437 // leave this as LEA, then peephole it to 'ADD' after two address elim
1439 if (AM.Scale != 1 || AM.BaseType == X86AddressMode::FrameIndexBase ||
1440 AM.GV || (AM.Base.Reg && AM.IndexReg && AM.Disp)) {
1441 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
1447 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
1449 if (CN->getValue() == 1) { // add X, 1 -> inc X
1450 switch (N.getValueType()) {
1451 default: assert(0 && "Cannot integer add this type!");
1452 case MVT::i8: Opc = X86::INC8r; break;
1453 case MVT::i16: Opc = X86::INC16r; break;
1454 case MVT::i32: Opc = X86::INC32r; break;
1456 } else if (CN->isAllOnesValue()) { // add X, -1 -> dec X
1457 switch (N.getValueType()) {
1458 default: assert(0 && "Cannot integer add this type!");
1459 case MVT::i8: Opc = X86::DEC8r; break;
1460 case MVT::i16: Opc = X86::DEC16r; break;
1461 case MVT::i32: Opc = X86::DEC32r; break;
1466 Tmp1 = SelectExpr(Op0);
1467 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1471 switch (N.getValueType()) {
1472 default: assert(0 && "Cannot add this type!");
1473 case MVT::i8: Opc = X86::ADD8ri; break;
1474 case MVT::i16: Opc = X86::ADD16ri; break;
1475 case MVT::i32: Opc = X86::ADD32ri; break;
1478 Tmp1 = SelectExpr(Op0);
1479 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1484 switch (N.getValueType()) {
1485 default: assert(0 && "Cannot add this type!");
1486 case MVT::i8: Opc = X86::ADD8rr; break;
1487 case MVT::i16: Opc = X86::ADD16rr; break;
1488 case MVT::i32: Opc = X86::ADD32rr; break;
1490 case MVT::f64: Opc = X86::FpADD; break;
1493 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1494 Tmp1 = SelectExpr(Op0);
1495 Tmp2 = SelectExpr(Op1);
1497 Tmp2 = SelectExpr(Op1);
1498 Tmp1 = SelectExpr(Op0);
1501 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1508 static const unsigned SUBTab[] = {
1509 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
1510 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FSUB32m, X86::FSUB64m,
1511 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB , X86::FpSUB,
1513 static const unsigned MULTab[] = {
1514 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
1515 0, X86::IMUL16rm , X86::IMUL32rm, X86::FMUL32m, X86::FMUL64m,
1516 0, X86::IMUL16rr , X86::IMUL32rr, X86::FpMUL , X86::FpMUL,
1518 static const unsigned ANDTab[] = {
1519 X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, 0,
1520 X86::AND8rm, X86::AND16rm, X86::AND32rm, 0, 0,
1521 X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, 0,
1523 static const unsigned ORTab[] = {
1524 X86::OR8ri, X86::OR16ri, X86::OR32ri, 0, 0,
1525 X86::OR8rm, X86::OR16rm, X86::OR32rm, 0, 0,
1526 X86::OR8rr, X86::OR16rr, X86::OR32rr, 0, 0,
1528 static const unsigned XORTab[] = {
1529 X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, 0,
1530 X86::XOR8rm, X86::XOR16rm, X86::XOR32rm, 0, 0,
1531 X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, 0,
1534 Op0 = Node->getOperand(0);
1535 Op1 = Node->getOperand(1);
1537 if (Node->getOpcode() == ISD::SUB && MVT::isInteger(N.getValueType()))
1538 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(0)))
1539 if (CN->isNullValue()) { // 0 - N -> neg N
1540 switch (N.getValueType()) {
1541 default: assert(0 && "Cannot sub this type!");
1543 case MVT::i8: Opc = X86::NEG8r; break;
1544 case MVT::i16: Opc = X86::NEG16r; break;
1545 case MVT::i32: Opc = X86::NEG32r; break;
1547 Tmp1 = SelectExpr(N.getOperand(1));
1548 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1552 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
1553 if (CN->isAllOnesValue() && Node->getOpcode() == ISD::XOR) {
1554 switch (N.getValueType()) {
1555 default: assert(0 && "Cannot add this type!");
1557 case MVT::i8: Opc = X86::NOT8r; break;
1558 case MVT::i16: Opc = X86::NOT16r; break;
1559 case MVT::i32: Opc = X86::NOT32r; break;
1561 Tmp1 = SelectExpr(Op0);
1562 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1566 switch (N.getValueType()) {
1567 default: assert(0 && "Cannot xor this type!");
1569 case MVT::i8: Opc = 0; break;
1570 case MVT::i16: Opc = 1; break;
1571 case MVT::i32: Opc = 2; break;
1573 switch (Node->getOpcode()) {
1574 default: assert(0 && "Unreachable!");
1575 case ISD::SUB: Opc = SUBTab[Opc]; break;
1576 case ISD::MUL: Opc = MULTab[Opc]; break;
1577 case ISD::AND: Opc = ANDTab[Opc]; break;
1578 case ISD::OR: Opc = ORTab[Opc]; break;
1579 case ISD::XOR: Opc = XORTab[Opc]; break;
1581 if (Opc) { // Can't fold MUL:i8 R, imm
1582 Tmp1 = SelectExpr(Op0);
1583 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1588 if (isFoldableLoad(Op0))
1589 if (Node->getOpcode() != ISD::SUB) {
1590 std::swap(Op0, Op1);
1592 // Emit 'reverse' subract, with a memory operand.
1593 switch (N.getValueType()) {
1594 default: Opc = 0; break;
1595 case MVT::f32: Opc = X86::FSUBR32m; break;
1596 case MVT::f64: Opc = X86::FSUBR64m; break;
1600 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1601 EmitFoldedLoad(Op0, AM);
1602 Tmp1 = SelectExpr(Op1);
1604 Tmp1 = SelectExpr(Op1);
1605 EmitFoldedLoad(Op0, AM);
1607 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
1612 if (isFoldableLoad(Op1)) {
1613 switch (N.getValueType()) {
1614 default: assert(0 && "Cannot operate on this type!");
1616 case MVT::i8: Opc = 5; break;
1617 case MVT::i16: Opc = 6; break;
1618 case MVT::i32: Opc = 7; break;
1619 case MVT::f32: Opc = 8; break;
1620 case MVT::f64: Opc = 9; break;
1622 switch (Node->getOpcode()) {
1623 default: assert(0 && "Unreachable!");
1624 case ISD::SUB: Opc = SUBTab[Opc]; break;
1625 case ISD::MUL: Opc = MULTab[Opc]; break;
1626 case ISD::AND: Opc = ANDTab[Opc]; break;
1627 case ISD::OR: Opc = ORTab[Opc]; break;
1628 case ISD::XOR: Opc = XORTab[Opc]; break;
1632 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1633 Tmp1 = SelectExpr(Op0);
1634 EmitFoldedLoad(Op1, AM);
1636 EmitFoldedLoad(Op1, AM);
1637 Tmp1 = SelectExpr(Op0);
1640 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
1642 assert(Node->getOpcode() == ISD::MUL &&
1643 N.getValueType() == MVT::i8 && "Unexpected situation!");
1644 // Must use the MUL instruction, which forces use of AL.
1645 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
1646 addFullAddress(BuildMI(BB, X86::MUL8m, 1), AM);
1647 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
1652 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1653 Tmp1 = SelectExpr(Op0);
1654 Tmp2 = SelectExpr(Op1);
1656 Tmp2 = SelectExpr(Op1);
1657 Tmp1 = SelectExpr(Op0);
1660 switch (N.getValueType()) {
1661 default: assert(0 && "Cannot add this type!");
1663 case MVT::i8: Opc = 10; break;
1664 case MVT::i16: Opc = 11; break;
1665 case MVT::i32: Opc = 12; break;
1666 case MVT::f32: Opc = 13; break;
1667 case MVT::f64: Opc = 14; break;
1669 switch (Node->getOpcode()) {
1670 default: assert(0 && "Unreachable!");
1671 case ISD::SUB: Opc = SUBTab[Opc]; break;
1672 case ISD::MUL: Opc = MULTab[Opc]; break;
1673 case ISD::AND: Opc = ANDTab[Opc]; break;
1674 case ISD::OR: Opc = ORTab[Opc]; break;
1675 case ISD::XOR: Opc = XORTab[Opc]; break;
1678 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1680 assert(Node->getOpcode() == ISD::MUL &&
1681 N.getValueType() == MVT::i8 && "Unexpected situation!");
1682 // Must use the MUL instruction, which forces use of AL.
1683 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
1684 BuildMI(BB, X86::MUL8r, 1).addReg(Tmp2);
1685 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
1690 if (N.getValueType() != MVT::i1 && N.getValueType() != MVT::i8) {
1691 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
1692 Tmp2 = SelectExpr(N.getOperand(1));
1693 Tmp3 = SelectExpr(N.getOperand(2));
1695 Tmp3 = SelectExpr(N.getOperand(2));
1696 Tmp2 = SelectExpr(N.getOperand(1));
1698 EmitSelectCC(N.getOperand(0), N.getValueType(), Tmp2, Tmp3, Result);
1701 // FIXME: This should not be implemented here, it should be in the generic
1703 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
1704 Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1706 Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1709 Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1711 Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1714 unsigned TmpReg = MakeReg(MVT::i16);
1715 EmitSelectCC(N.getOperand(0), MVT::i16, Tmp2, Tmp3, TmpReg);
1716 // FIXME: need subregs to do better than this!
1717 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(TmpReg);
1718 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
1726 if (N.getOpcode() == ISD::SDIV)
1727 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1728 // FIXME: These special cases should be handled by the lowering impl!
1729 unsigned RHS = CN->getValue();
1735 if (RHS && (RHS & (RHS-1)) == 0) { // Signed division by power of 2?
1736 unsigned Log = log2(RHS);
1737 unsigned TmpReg = MakeReg(N.getValueType());
1738 unsigned SAROpc, SHROpc, ADDOpc, NEGOpc;
1739 switch (N.getValueType()) {
1740 default: assert("Unknown type to signed divide!");
1742 SAROpc = X86::SAR8ri;
1743 SHROpc = X86::SHR8ri;
1744 ADDOpc = X86::ADD8rr;
1745 NEGOpc = X86::NEG8r;
1748 SAROpc = X86::SAR16ri;
1749 SHROpc = X86::SHR16ri;
1750 ADDOpc = X86::ADD16rr;
1751 NEGOpc = X86::NEG16r;
1754 SAROpc = X86::SAR32ri;
1755 SHROpc = X86::SHR32ri;
1756 ADDOpc = X86::ADD32rr;
1757 NEGOpc = X86::NEG32r;
1760 Tmp1 = SelectExpr(N.getOperand(0));
1761 BuildMI(BB, SAROpc, 2, TmpReg).addReg(Tmp1).addImm(Log-1);
1762 unsigned TmpReg2 = MakeReg(N.getValueType());
1763 BuildMI(BB, SHROpc, 2, TmpReg2).addReg(TmpReg).addImm(32-Log);
1764 unsigned TmpReg3 = MakeReg(N.getValueType());
1765 BuildMI(BB, ADDOpc, 2, TmpReg3).addReg(Tmp1).addReg(TmpReg2);
1767 unsigned TmpReg4 = isNeg ? MakeReg(N.getValueType()) : Result;
1768 BuildMI(BB, SAROpc, 2, TmpReg4).addReg(TmpReg3).addImm(Log);
1770 BuildMI(BB, NEGOpc, 1, Result).addReg(TmpReg4);
1775 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1776 Tmp1 = SelectExpr(N.getOperand(0));
1777 Tmp2 = SelectExpr(N.getOperand(1));
1779 Tmp2 = SelectExpr(N.getOperand(1));
1780 Tmp1 = SelectExpr(N.getOperand(0));
1783 bool isSigned = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::SREM;
1784 bool isDiv = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::UDIV;
1785 unsigned LoReg, HiReg, DivOpcode, MovOpcode, ClrOpcode, SExtOpcode;
1786 switch (N.getValueType()) {
1787 default: assert(0 && "Cannot sdiv this type!");
1789 DivOpcode = isSigned ? X86::IDIV8r : X86::DIV8r;
1792 MovOpcode = X86::MOV8rr;
1793 ClrOpcode = X86::MOV8ri;
1794 SExtOpcode = X86::CBW;
1797 DivOpcode = isSigned ? X86::IDIV16r : X86::DIV16r;
1800 MovOpcode = X86::MOV16rr;
1801 ClrOpcode = X86::MOV16ri;
1802 SExtOpcode = X86::CWD;
1805 DivOpcode = isSigned ? X86::IDIV32r : X86::DIV32r;
1808 MovOpcode = X86::MOV32rr;
1809 ClrOpcode = X86::MOV32ri;
1810 SExtOpcode = X86::CDQ;
1812 case MVT::i64: assert(0 && "FIXME: implement i64 DIV/REM libcalls!");
1815 if (N.getOpcode() == ISD::SDIV)
1816 BuildMI(BB, X86::FpDIV, 2, Result).addReg(Tmp1).addReg(Tmp2);
1818 assert(0 && "FIXME: Emit frem libcall to fmod!");
1822 // Set up the low part.
1823 BuildMI(BB, MovOpcode, 1, LoReg).addReg(Tmp1);
1826 // Sign extend the low part into the high part.
1827 BuildMI(BB, SExtOpcode, 0);
1829 // Zero out the high part, effectively zero extending the input.
1830 BuildMI(BB, ClrOpcode, 1, HiReg).addImm(0);
1833 // Emit the DIV/IDIV instruction.
1834 BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
1836 // Get the result of the divide or rem.
1837 BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
1842 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1843 if (CN->getValue() == 1) { // X = SHL Y, 1 -> X = ADD Y, Y
1844 switch (N.getValueType()) {
1845 default: assert(0 && "Cannot shift this type!");
1846 case MVT::i8: Opc = X86::ADD8rr; break;
1847 case MVT::i16: Opc = X86::ADD16rr; break;
1848 case MVT::i32: Opc = X86::ADD32rr; break;
1850 Tmp1 = SelectExpr(N.getOperand(0));
1851 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp1);
1855 switch (N.getValueType()) {
1856 default: assert(0 && "Cannot shift this type!");
1857 case MVT::i8: Opc = X86::SHL8ri; break;
1858 case MVT::i16: Opc = X86::SHL16ri; break;
1859 case MVT::i32: Opc = X86::SHL32ri; break;
1861 Tmp1 = SelectExpr(N.getOperand(0));
1862 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1866 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1867 Tmp1 = SelectExpr(N.getOperand(0));
1868 Tmp2 = SelectExpr(N.getOperand(1));
1870 Tmp2 = SelectExpr(N.getOperand(1));
1871 Tmp1 = SelectExpr(N.getOperand(0));
1874 switch (N.getValueType()) {
1875 default: assert(0 && "Cannot shift this type!");
1876 case MVT::i8 : Opc = X86::SHL8rCL; break;
1877 case MVT::i16: Opc = X86::SHL16rCL; break;
1878 case MVT::i32: Opc = X86::SHL32rCL; break;
1880 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1881 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1884 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1885 switch (N.getValueType()) {
1886 default: assert(0 && "Cannot shift this type!");
1887 case MVT::i8: Opc = X86::SHR8ri; break;
1888 case MVT::i16: Opc = X86::SHR16ri; break;
1889 case MVT::i32: Opc = X86::SHR32ri; break;
1891 Tmp1 = SelectExpr(N.getOperand(0));
1892 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1896 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1897 Tmp1 = SelectExpr(N.getOperand(0));
1898 Tmp2 = SelectExpr(N.getOperand(1));
1900 Tmp2 = SelectExpr(N.getOperand(1));
1901 Tmp1 = SelectExpr(N.getOperand(0));
1904 switch (N.getValueType()) {
1905 default: assert(0 && "Cannot shift this type!");
1906 case MVT::i8 : Opc = X86::SHR8rCL; break;
1907 case MVT::i16: Opc = X86::SHR16rCL; break;
1908 case MVT::i32: Opc = X86::SHR32rCL; break;
1910 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1911 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1914 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1915 switch (N.getValueType()) {
1916 default: assert(0 && "Cannot shift this type!");
1917 case MVT::i8: Opc = X86::SAR8ri; break;
1918 case MVT::i16: Opc = X86::SAR16ri; break;
1919 case MVT::i32: Opc = X86::SAR32ri; break;
1921 Tmp1 = SelectExpr(N.getOperand(0));
1922 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1926 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1927 Tmp1 = SelectExpr(N.getOperand(0));
1928 Tmp2 = SelectExpr(N.getOperand(1));
1930 Tmp2 = SelectExpr(N.getOperand(1));
1931 Tmp1 = SelectExpr(N.getOperand(0));
1934 switch (N.getValueType()) {
1935 default: assert(0 && "Cannot shift this type!");
1936 case MVT::i8 : Opc = X86::SAR8rCL; break;
1937 case MVT::i16: Opc = X86::SAR16rCL; break;
1938 case MVT::i32: Opc = X86::SAR32rCL; break;
1940 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1941 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1945 EmitCMP(N.getOperand(0), N.getOperand(1));
1946 EmitSetCC(BB, Result, cast<SetCCSDNode>(N)->getCondition(),
1947 MVT::isFloatingPoint(N.getOperand(1).getValueType()));
1950 // Make sure we generate both values.
1952 ExprMap[N.getValue(1)] = 1; // Generate the token
1954 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1956 switch (Node->getValueType(0)) {
1957 default: assert(0 && "Cannot load this type!");
1959 case MVT::i8: Opc = X86::MOV8rm; break;
1960 case MVT::i16: Opc = X86::MOV16rm; break;
1961 case MVT::i32: Opc = X86::MOV32rm; break;
1962 case MVT::f32: Opc = X86::FLD32m; ContainsFPCode = true; break;
1963 case MVT::f64: Opc = X86::FLD64m; ContainsFPCode = true; break;
1966 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))){
1967 Select(N.getOperand(0));
1968 addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex());
1971 EmitFoldedLoad(N, AM);
1972 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
1976 case ISD::DYNAMIC_STACKALLOC:
1977 // Generate both result values.
1979 ExprMap[N.getValue(1)] = 1; // Generate the token
1981 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1983 // FIXME: We are currently ignoring the requested alignment for handling
1984 // greater than the stack alignment. This will need to be revisited at some
1985 // point. Align = N.getOperand(2);
1987 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
1988 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
1989 std::cerr << "Cannot allocate stack object with greater alignment than"
1990 << " the stack alignment yet!";
1994 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1995 Select(N.getOperand(0));
1996 BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
1997 .addImm(CN->getValue());
1999 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2000 Select(N.getOperand(0));
2001 Tmp1 = SelectExpr(N.getOperand(1));
2003 Tmp1 = SelectExpr(N.getOperand(1));
2004 Select(N.getOperand(0));
2007 // Subtract size from stack pointer, thereby allocating some space.
2008 BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(Tmp1);
2011 // Put a pointer to the space into the result register, by copying the stack
2013 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::ESP);
2017 // The chain for this call is now lowered.
2018 LoweredTokens.insert(N.getValue(Node->getNumValues()-1));
2020 if (GlobalAddressSDNode *GASD =
2021 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
2022 Select(N.getOperand(0));
2023 BuildMI(BB, X86::CALLpcrel32, 1).addGlobalAddress(GASD->getGlobal(),true);
2024 } else if (ExternalSymbolSDNode *ESSDN =
2025 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1))) {
2026 Select(N.getOperand(0));
2027 BuildMI(BB, X86::CALLpcrel32,
2028 1).addExternalSymbol(ESSDN->getSymbol(), true);
2030 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2031 Select(N.getOperand(0));
2032 Tmp1 = SelectExpr(N.getOperand(1));
2034 Tmp1 = SelectExpr(N.getOperand(1));
2035 Select(N.getOperand(0));
2038 BuildMI(BB, X86::CALL32r, 1).addReg(Tmp1);
2040 switch (Node->getValueType(0)) {
2041 default: assert(0 && "Unknown value type for call result!");
2042 case MVT::Other: return 1;
2045 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
2048 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
2051 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
2052 if (Node->getValueType(1) == MVT::i32)
2053 BuildMI(BB, X86::MOV32rr, 1, Result+1).addReg(X86::EDX);
2056 case MVT::f64: // Floating-point return values live in %ST(0)
2057 ContainsFPCode = true;
2058 BuildMI(BB, X86::FpGETRESULT, 1, Result);
2061 return Result+N.ResNo;
// Select - Emit X86 machine instructions for a node that is executed for its
// side effects (chain operations: register copies, returns, branches, stores,
// call-stack adjustment, memset/memcpy).  Value computations are selected via
// SelectExpr; this routine handles the chain/control dimension.  Throughout,
// when a node has both a chain operand and a value operand, the operand with
// the higher getRegPressure estimate is selected first (Sethi-Ullman-style
// ordering) -- presumably to reduce spills; see the repeated comparisons.
//
// NOTE(review): this listing is elided -- several case labels, else-branches,
// and closing braces fall in gaps between the numbered lines.  Section
// comments below identify regions by the instructions they emit, not by the
// (unseen) case labels; confirm against the full file before relying on them.
2067 void ISel::Select(SDOperand N) {
2068 unsigned Tmp1, Tmp2, Opc;
2070 // FIXME: Disable for our current expansion model!
// Each token (chain value) is selected at most once; bail out on revisit.
2071 if (/*!N->hasOneUse() &&*/ !LoweredTokens.insert(N).second)
2072 return; // Already selected.
2074 SDNode *Node = N.Val;
2076 switch (Node->getOpcode()) {
// Unhandled opcodes dump the node and abort (default label elided above).
2078 Node->dump(); std::cerr << "\n";
2079 assert(0 && "Node not handled yet!");
2080 case ISD::EntryToken: return; // Noop
// CopyToReg: select the chain and the value (higher-pressure side first),
// then copy the value into the virtual register named by the CopyRegSDNode
// with a move opcode keyed off the value type.
2081 case ISD::CopyToReg:
2082 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2083 Select(N.getOperand(0));
2084 Tmp1 = SelectExpr(N.getOperand(1));
2086 Tmp1 = SelectExpr(N.getOperand(1));
2087 Select(N.getOperand(0));
2089 Tmp2 = cast<CopyRegSDNode>(N)->getReg();
2092 switch (N.getOperand(1).getValueType()) {
2093 default: assert(0 && "Invalid type for operation!");
2095 case MVT::i8: Opc = X86::MOV8rr; break;
2096 case MVT::i16: Opc = X86::MOV16rr; break;
2097 case MVT::i32: Opc = X86::MOV32rr; break;
// FP moves go through the pseudo FpMOV and flag the function as using FP.
2099 case MVT::f64: Opc = X86::FpMOV; ContainsFPCode = true; break;
2101 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
// Return handling (case label elided): dispatch on operand count.
2105 switch (N.getNumOperands()) {
2107 assert(0 && "Unknown return instruction!");
// Three operands: chain + two i32 halves, returned in EAX:EDX.
2109 assert(N.getOperand(1).getValueType() == MVT::i32 &&
2110 N.getOperand(2).getValueType() == MVT::i32 &&
2111 "Unknown two-register value!");
2112 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
2113 Tmp1 = SelectExpr(N.getOperand(1));
2114 Tmp2 = SelectExpr(N.getOperand(2));
2116 Tmp2 = SelectExpr(N.getOperand(2));
2117 Tmp1 = SelectExpr(N.getOperand(1));
2119 Select(N.getOperand(0));
2121 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
2122 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(Tmp2);
2123 // Declare that EAX & EDX are live on exit.
// NOTE(review): IMPLICIT_USE is built with 3 operands but only two addReg
// calls are visible here -- the third (ESP?) is on an elided continuation.
2124 BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
// Two operands: chain + single return value.
2128 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2129 Select(N.getOperand(0));
2130 Tmp1 = SelectExpr(N.getOperand(1));
2132 Tmp1 = SelectExpr(N.getOperand(1));
2133 Select(N.getOperand(0));
2135 switch (N.getOperand(1).getValueType()) {
2136 default: assert(0 && "All other types should have been promoted!!");
// FP result: push onto the x87 stack (FpSETRESULT), mark ST0 live-out.
2138 BuildMI(BB, X86::FpSETRESULT, 1).addReg(Tmp1);
2139 // Declare that top-of-stack is live on exit
2140 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
// Integer result: returned in EAX, marked live-out alongside ESP.
2143 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
2144 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
// One operand: void return -- just select the chain.
2149 Select(N.getOperand(0));
2152 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
// Unconditional branch (case label elided): chain first, then JMP to the
// destination MachineBasicBlock.
2155 Select(N.getOperand(0));
2156 MachineBasicBlock *Dest =
2157 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
2158 BuildMI(BB, X86::JMP, 1).addMBB(Dest);
// Conditional branch (case label elided): operand 1 is the condition,
// operand 2 the target block.
2163 MachineBasicBlock *Dest =
2164 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
2166 // Try to fold a setcc into the branch. If this fails, emit a test/jne
// Fallback path: materialize the boolean and branch on it being nonzero.
2168 if (EmitBranchCC(Dest, N.getOperand(0), N.getOperand(1))) {
2169 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2170 Select(N.getOperand(0));
2171 Tmp1 = SelectExpr(N.getOperand(1));
2173 Tmp1 = SelectExpr(N.getOperand(1));
2174 Select(N.getOperand(0));
2176 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
2177 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
2184 case ISD::DYNAMIC_STACKALLOC:
// Store of a constant value (store case label elided): emit [mem] = imm
// directly with a MOVxmi.  f64 sets no Opc here and (per the elided
// control flow) presumably falls through to the general store path below.
2190 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2192 switch (CN->getValueType(0)) {
2193 default: assert(0 && "Invalid type for operation!");
2195 case MVT::i8: Opc = X86::MOV8mi; break;
2196 case MVT::i16: Opc = X86::MOV16mi; break;
2197 case MVT::i32: Opc = X86::MOV32mi; break;
2199 case MVT::f64: break;
// Order chain vs. address computation by register pressure, then emit the
// 4-part address plus the immediate (4+1 operands).
2202 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
2203 Select(N.getOperand(0));
2204 SelectAddress(N.getOperand(2), AM);
2206 SelectAddress(N.getOperand(2), AM);
2207 Select(N.getOperand(0));
2209 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
2214 // Check to see if this is a load/op/store combination.
// Read-modify-write folding: if the stored value is a single-use binary op
// whose (integer) operand is a load from the same address being stored to,
// collapse load+op+store into one memory-operand instruction.
2215 if (N.getOperand(1).Val->hasOneUse() &&
2216 isFoldableLoad(N.getOperand(0).getValue(0)) &&
2217 !MVT::isFloatingPoint(N.getOperand(0).getValue(0).getValueType())) {
2218 SDOperand TheLoad = N.getOperand(0).getValue(0);
2219 // Check to see if we are loading the same pointer that we're storing to.
2220 if (TheLoad.getOperand(1) == N.getOperand(2)) {
2221 // See if the stored value is a simple binary operator that uses the
2222 // load as one of its operands.
2223 SDOperand Op = N.getOperand(1);
2224 if (Op.Val->getNumOperands() == 2 &&
2225 (Op.getOperand(0) == TheLoad || Op.getOperand(1) == TheLoad)) {
2226 // Finally, check to see if this is one of the ops we can handle!
// Opcode tables: slots [0..2] are mem,imm forms (i8/i16/i32), slots
// [3..5] are mem,reg forms.  Shifts have no mem,reg form usable here
// (the amount would have to be in CL), so those slots are 0.
2227 static const unsigned ADDTAB[] = {
2228 X86::ADD8mi, X86::ADD16mi, X86::ADD32mi,
2229 X86::ADD8mr, X86::ADD16mr, X86::ADD32mr,
2231 static const unsigned SUBTAB[] = {
2232 X86::SUB8mi, X86::SUB16mi, X86::SUB32mi,
2233 X86::SUB8mr, X86::SUB16mr, X86::SUB32mr,
2235 static const unsigned ANDTAB[] = {
2236 X86::AND8mi, X86::AND16mi, X86::AND32mi,
2237 X86::AND8mr, X86::AND16mr, X86::AND32mr,
2239 static const unsigned ORTAB[] = {
2240 X86::OR8mi, X86::OR16mi, X86::OR32mi,
2241 X86::OR8mr, X86::OR16mr, X86::OR32mr,
2243 static const unsigned XORTAB[] = {
2244 X86::XOR8mi, X86::XOR16mi, X86::XOR32mi,
2245 X86::XOR8mr, X86::XOR16mr, X86::XOR32mr,
2247 static const unsigned SHLTAB[] = {
2248 X86::SHL8mi, X86::SHL16mi, X86::SHL32mi,
2249 /*Have to put the reg in CL*/0, 0, 0,
2251 static const unsigned SARTAB[] = {
2252 X86::SAR8mi, X86::SAR16mi, X86::SAR32mi,
2253 /*Have to put the reg in CL*/0, 0, 0,
2255 static const unsigned SHRTAB[] = {
2256 X86::SHR8mi, X86::SHR16mi, X86::SHR32mi,
2257 /*Have to put the reg in CL*/0, 0, 0,
2260 const unsigned *TabPtr = 0;
2261 switch (Op.getOpcode()) {
2262 default: std::cerr << "CANNOT [mem] op= val: "; Op.Val->dump(); std::cerr << "\n"; break;
2263 case ISD::ADD: TabPtr = ADDTAB; break;
2264 case ISD::SUB: TabPtr = SUBTAB; break;
2265 case ISD::AND: TabPtr = ANDTAB; break;
2266 case ISD:: OR: TabPtr = ORTAB; break;
2267 case ISD::XOR: TabPtr = XORTAB; break;
2268 case ISD::SHL: TabPtr = SHLTAB; break;
2269 case ISD::SRA: TabPtr = SARTAB; break;
2270 case ISD::SRL: TabPtr = SHRTAB; break;
2274 // Handle: [mem] op= CST
2275 SDOperand Op0 = Op.getOperand(0);
2276 SDOperand Op1 = Op.getOperand(1);
2277 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2278 switch (Op0.getValueType()) { // Use Op0's type because of shifts.
2281 case MVT::i8: Opc = TabPtr[0]; break;
2282 case MVT::i16: Opc = TabPtr[1]; break;
2283 case MVT::i32: Opc = TabPtr[2]; break;
// Select the load's chain and address (higher pressure first), then
// emit the single mem,imm RMW instruction.
2287 if (getRegPressure(TheLoad.getOperand(0)) >
2288 getRegPressure(TheLoad.getOperand(1))) {
2289 Select(TheLoad.getOperand(0));
2290 SelectAddress(TheLoad.getOperand(1), AM);
2292 SelectAddress(TheLoad.getOperand(1), AM);
2293 Select(TheLoad.getOperand(0));
2296 addFullAddress(BuildMI(BB, Opc, 4+1),AM).addImm(CN->getValue());
2301 // If we have [mem] = V op [mem], try to turn it into:
2302 // [mem] = [mem] op V.
// Only commutative ops may be swapped; SUB and the shifts are excluded.
2303 if (Op1 == TheLoad && Op.getOpcode() != ISD::SUB &&
2304 Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRA &&
2305 Op.getOpcode() != ISD::SRL)
2306 std::swap(Op0, Op1);
2308 if (Op0 == TheLoad) {
2309 switch (Op0.getValueType()) {
2312 case MVT::i8: Opc = TabPtr[3]; break;
2313 case MVT::i16: Opc = TabPtr[4]; break;
2314 case MVT::i32: Opc = TabPtr[5]; break;
// Emit the mem,reg RMW form: [mem] op= reg.
2318 Select(TheLoad.getOperand(0));
2319 SelectAddress(TheLoad.getOperand(1), AM);
2320 unsigned Reg = SelectExpr(Op1);
2321 addFullAddress(BuildMI(BB, Opc, 4+1),AM).addReg(Reg);
// General store: pick a store opcode by value type.
2331 switch (N.getOperand(1).getValueType()) {
2332 default: assert(0 && "Cannot store this type!");
2334 case MVT::i8: Opc = X86::MOV8mr; break;
2335 case MVT::i16: Opc = X86::MOV16mr; break;
2336 case MVT::i32: Opc = X86::MOV32mr; break;
2337 case MVT::f32: Opc = X86::FST32m; break;
2338 case MVT::f64: Opc = X86::FST64m; break;
// Select all three operands (chain, value, address) in order of
// decreasing register pressure: sort ascending, then walk backwards.
2341 std::vector<std::pair<unsigned, unsigned> > RP;
2342 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
2343 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
2344 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
2345 std::sort(RP.begin(), RP.end());
2347 for (unsigned i = 0; i != 3; ++i)
2348 switch (RP[2-i].second) {
2349 default: assert(0 && "Unknown operand number!");
2350 case 0: Select(N.getOperand(0)); break;
2351 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
2352 case 2: SelectAddress(N.getOperand(2), AM); break;
2355 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
// Call-stack adjustment pseudos: operand 1 is the constant byte amount.
2358 case ISD::ADJCALLSTACKDOWN:
2359 case ISD::ADJCALLSTACKUP:
2360 Select(N.getOperand(0));
2361 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
2363 Opc = N.getOpcode() == ISD::ADJCALLSTACKDOWN ? X86::ADJCALLSTACKDOWN :
2364 X86::ADJCALLSTACKUP;
2365 BuildMI(BB, Opc, 1).addImm(Tmp1);
// memset lowering (case label elided): operands are (chain, dest, value,
// byte count, alignment).  Uses REP STOS with the widest element the
// alignment allows.
2368 Select(N.getOperand(0)); // Select the chain.
2370 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
2371 if (Align == 0) Align = 1;
2373 // Turn the byte code into # iterations
2376 if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
2377 unsigned Val = ValC->getValue() & 255;
2379 // If the value is a constant, then we can potentially use larger sets.
2380 switch (Align & 3) {
// WORD aligned: halve the byte count (imm /2 or SHR by 1) and replicate
// the byte into both halves of AX for REP STOSW.
// NOTE(review): no handling of an odd trailing byte is visible here --
// it may live on elided lines or in a guard before this switch; confirm.
2381 case 2: // WORD aligned
2382 CountReg = MakeReg(MVT::i32);
2383 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2384 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
2386 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2387 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
2389 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
2390 Opcode = X86::REP_STOSW;
// DWORD aligned: quarter the count and replicate the byte into all four
// bytes of EAX for REP STOSD.
2392 case 0: // DWORD aligned
2393 CountReg = MakeReg(MVT::i32);
2394 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2395 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
2397 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2398 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
2400 Val = (Val << 8) | Val;
2401 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
2402 Opcode = X86::REP_STOSD;
2404 default: // BYTE aligned
2405 CountReg = SelectExpr(Node->getOperand(3));
2406 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
2407 Opcode = X86::REP_STOSB;
2411 // If it's not a constant value we are storing, just fall back. We could
2412 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
2413 unsigned ValReg = SelectExpr(Node->getOperand(2));
2414 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
2415 CountReg = SelectExpr(Node->getOperand(3));
2416 Opcode = X86::REP_STOSB;
2419 // No matter what the alignment is, we put the source in ESI, the
2420 // destination in EDI, and the count in ECX.
2421 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
2422 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
2423 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
2424 BuildMI(BB, Opcode, 0);
// memcpy lowering (case label elided): same alignment-driven count scaling
// as memset, using REP MOVSB/MOVSW/MOVSD.
2428 Select(N.getOperand(0)); // Select the chain.
2430 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
2431 if (Align == 0) Align = 1;
2433 // Turn the byte code into # iterations
2436 switch (Align & 3) {
2437 case 2: // WORD aligned
2438 CountReg = MakeReg(MVT::i32);
2439 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2440 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
2442 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2443 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
2445 Opcode = X86::REP_MOVSW;
2447 case 0: // DWORD aligned
2448 CountReg = MakeReg(MVT::i32);
2449 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2450 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
2452 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2453 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
2455 Opcode = X86::REP_MOVSD;
2457 default: // BYTE aligned
2458 CountReg = SelectExpr(Node->getOperand(3));
2459 Opcode = X86::REP_MOVSB;
2463 // No matter what the alignment is, we put the source in ESI, the
2464 // destination in EDI, and the count in ECX.
// Pin the operands into the registers the string instruction requires:
// count -> ECX, destination -> EDI, source -> ESI.
2465 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
2466 unsigned TmpReg2 = SelectExpr(Node->getOperand(2));
2467 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
2468 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
2469 BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
2470 BuildMI(BB, Opcode, 0);
// Every case above returns or breaks out; falling out of the switch is a bug.
2473 assert(0 && "Should not be reached!");
2477 /// createX86PatternInstructionSelector - This pass converts an LLVM function
2478 /// into a machine code representation using pattern matching and a machine
2479 /// description file.
2481 FunctionPass *llvm::createX86PatternInstructionSelector(TargetMachine &TM) {
2482 return new ISel(TM);