//===-- X86ISelPattern.cpp - A pattern matching inst selector for X86 -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for X86.
//
//===----------------------------------------------------------------------===//
15 #include "X86InstrBuilder.h"
16 #include "X86RegisterInfo.h"
17 #include "X86Subtarget.h"
18 #include "X86ISelLowering.h"
19 #include "llvm/CallingConv.h"
20 #include "llvm/Constants.h"
21 #include "llvm/Instructions.h"
22 #include "llvm/Function.h"
23 #include "llvm/CodeGen/MachineConstantPool.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineFrameInfo.h"
26 #include "llvm/CodeGen/SelectionDAG.h"
27 #include "llvm/CodeGen/SelectionDAGISel.h"
28 #include "llvm/CodeGen/SSARegMap.h"
29 #include "llvm/Target/TargetData.h"
30 #include "llvm/Target/TargetLowering.h"
31 #include "llvm/Target/TargetMachine.h"
32 #include "llvm/Target/TargetOptions.h"
33 #include "llvm/Support/CFG.h"
34 #include "llvm/Support/MathExtras.h"
35 #include "llvm/ADT/Statistic.h"
//===----------------------------------------------------------------------===//
//  Pattern Matcher Implementation
//===----------------------------------------------------------------------===//
/// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
/// SDOperand's instead of register numbers for the leaves of the matched
/// tree.
struct X86ISelAddressMode {
  enum {
    RegBase,
    FrameIndexBase,
  } BaseType;

  struct {            // This is really a union, discriminated by BaseType!
    SDOperand Reg;
    int FrameIndex;
  } Base;

  unsigned Scale;
  SDOperand IndexReg;
  unsigned Disp;
  GlobalValue *GV;

  X86ISelAddressMode()
    : BaseType(RegBase), Scale(1), IndexReg(), Disp(0), GV(0) {
  }
};
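// For reference, the general x86 address form being matched here is
//     [Base.Reg + Scale*IndexReg + Disp]
// e.g. "movl 4(%ebp,%ecx,2), %eax" has Base.Reg = EBP, Scale = 2,
// IndexReg = ECX, and Disp = 4 (register choices illustrative).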
Statistic<>
NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
//===--------------------------------------------------------------------===//
/// ISel - X86 specific code to select X86 machine instructions for
/// SelectionDAG operations.
///
class ISel : public SelectionDAGISel {
  /// ContainsFPCode - Every instruction we select that uses or defines a FP
  /// register should set this to true.
  bool ContainsFPCode;

  /// X86Lowering - This object fully describes how to lower LLVM code to an
  /// X86-specific SelectionDAG.
  X86TargetLowering X86Lowering;

  /// RegPressureMap - This keeps an approximate count of the number of
  /// registers required to evaluate each node in the graph.
  std::map<SDNode*, unsigned> RegPressureMap;

  /// ExprMap - As shared expressions are codegen'd, we keep track of which
  /// vreg the value is produced in, so we only emit one copy of each compiled
  /// expression.
  std::map<SDOperand, unsigned> ExprMap;

  /// TheDAG - The DAG being selected during Select* operations.
  SelectionDAG *TheDAG;

  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;
public:
  ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
  }

  virtual const char *getPassName() const {
    return "X86 Pattern Instruction Selection";
  }

  unsigned getRegPressure(SDOperand O) {
    return RegPressureMap[O.Val];
  }
  unsigned ComputeRegPressure(SDOperand O);

  /// InstructionSelectBasicBlock - This callback is invoked by
  /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
  virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);

  virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

  bool isFoldableLoad(SDOperand Op, SDOperand OtherOp,
                      bool FloatPromoteOk = false);
  void EmitFoldedLoad(SDOperand Op, X86AddressMode &AM);
  bool TryToFoldLoadOpStore(SDNode *Node);
  bool EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg);
  void EmitCMP(SDOperand LHS, SDOperand RHS, bool isOnlyUse);
  bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, SDOperand Cond);
  void EmitSelectCC(SDOperand Cond, SDOperand True, SDOperand False,
                    MVT::ValueType SVT, unsigned RDest);
  unsigned SelectExpr(SDOperand N);

  X86AddressMode SelectAddrExprs(const X86ISelAddressMode &IAM);
  bool MatchAddress(SDOperand N, X86ISelAddressMode &AM);
  void SelectAddress(SDOperand N, X86AddressMode &AM);
  bool EmitPotentialTailCall(SDNode *Node);
  void EmitFastCCToFastCCTailCall(SDNode *TailCallNode);
  void Select(SDOperand N);
};
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
static void EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                   MachineFrameInfo *MFI) {
  // Switch the FPU to 64-bit precision mode for better compatibility and
  // speed.
  int CWFrameIdx = MFI->CreateStackObject(2, 2);
  addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);

  // Set the high part to be 64-bit precision.
  addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
                    CWFrameIdx, 1).addImm(2);
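  // For reference: the precision-control field lives in bits 8-9 of the x87
  // control word, so storing 2 (binary 10) into the high byte at offset 1
  // selects a 53-bit mantissa -- the precision of 64-bit doubles -- rather
  // than the default 64-bit extended precision (binary 11).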
  // Reload the modified control word now.
  addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
}
void ISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this is main, emit special code for main.
  MachineBasicBlock *BB = MF.begin();
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}
/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
  // While we're doing this, keep track of whether we see any FP code for
  // FP_REG_KILL insertion.
  ContainsFPCode = false;
  MachineFunction *MF = BB->getParent();

  // Scan the PHI nodes that already are inserted into this basic block.  If
  // any of them is a PHI of a floating point value, we need to insert an
  // FP_REG_KILL.
  SSARegMap *RegMap = MF->getSSARegMap();
  if (BB != MF->begin())
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      assert(I->getOpcode() == X86::PHI &&
             "Isn't just PHI nodes?");
      if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
          X86::RFPRegisterClass) {
        ContainsFPCode = true;
        break;
      }
    }

  // Compute the RegPressureMap, which is an approximation for the number of
  // registers required to compute each node.
  ComputeRegPressure(DAG.getRoot());

  // Codegen the basic block.
  Select(DAG.getRoot());

  // Finally, look at all of the successors of this block.  If any contain a
  // PHI node of FP type, we need to insert an FP_REG_KILL in this block.
  for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         E = BB->succ_end(); SI != E && !ContainsFPCode; ++SI)
    for (MachineBasicBlock::iterator I = (*SI)->begin(), E = (*SI)->end();
         I != E && I->getOpcode() == X86::PHI; ++I) {
      if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
          X86::RFPRegisterClass) {
        ContainsFPCode = true;
        break;
      }
    }

  // Final check, check LLVM BB's that are successors to the LLVM BB
  // corresponding to BB for FP PHI nodes.
  const BasicBlock *LLVMBB = BB->getBasicBlock();
  const PHINode *PN;
  if (!ContainsFPCode)
    for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
         SI != E && !ContainsFPCode; ++SI)
      for (BasicBlock::const_iterator II = SI->begin();
           (PN = dyn_cast<PHINode>(II)); ++II)
        if (PN->getType()->isFloatingPoint()) {
          ContainsFPCode = true;
          break;
        }

  // Insert FP_REG_KILL instructions into basic blocks that need them.  This
  // only occurs due to the floating point stackifier not being aggressive
  // enough to handle arbitrary global stackification.
  //
  // Currently we insert an FP_REG_KILL instruction into each block that uses
  // or defines a floating point virtual register.
  //
  // When the global register allocators (like linear scan) finally update live
  // variable analysis, we can keep floating point values in registers across
  // basic blocks.  This will be a huge win, but we are waiting on the global
  // allocators before we can do this.
  //
  if (ContainsFPCode) {
    BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
    ++NumFPKill;
  }

  // Clear state used for selection.
  ExprMap.clear();
  RegPressureMap.clear();
}
// ComputeRegPressure - Compute the RegPressureMap, which is an approximation
// for the number of registers required to compute each node.  This is
// basically computing a generalized form of the Sethi-Ullman number for each
// node.
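// For example (illustrative): in (add (load X), (load Y)) each load needs one
// register, but the first result must stay live while the second operand is
// computed, so the add gets pressure max(1,1)+1 = 2.  Emitting the
// higher-pressure operand first keeps the number of simultaneously live
// values as small as possible.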
unsigned ISel::ComputeRegPressure(SDOperand O) {
  SDNode *N = O.Val;
  unsigned &Result = RegPressureMap[N];
  if (Result) return Result;

  // FIXME: Should operations like CALL (which clobber lots o regs) have a
  // higher fixed cost??
  if (N->getNumOperands() == 0) {
    Result = 1;
    return 1;
  }

  unsigned MaxRegUse = 0;
  unsigned NumExtraMaxRegUsers = 0;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    unsigned Regs;
    if (N->getOperand(i).getOpcode() == ISD::Constant)
      Regs = 0;
    else
      Regs = ComputeRegPressure(N->getOperand(i));
    if (Regs > MaxRegUse) {
      MaxRegUse = Regs;
      NumExtraMaxRegUsers = 0;
    } else if (Regs == MaxRegUse &&
               N->getOperand(i).getValueType() != MVT::Other) {
      ++NumExtraMaxRegUsers;
    }
  }

  if (O.getOpcode() != ISD::TokenFactor)
    Result = MaxRegUse+NumExtraMaxRegUsers;
  else
    Result = MaxRegUse == 1 ? 0 : MaxRegUse-1;

  //std::cerr << " WEIGHT: " << Result << " ";  N->dump(); std::cerr << "\n";
  return Result;
}
/// NodeTransitivelyUsesValue - Return true if N or any of its uses uses Op.
/// The DAG cannot have cycles in it, by definition, so the visited set is not
/// needed to prevent infinite loops.  The DAG CAN, however, have unbounded
/// reuse, so it prevents exponential cases.
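/// (Without the visited set, a DAG of N reconverging diamonds could be
/// traversed up to 2^N times.)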
static bool NodeTransitivelyUsesValue(SDOperand N, SDOperand Op,
                                      std::set<SDNode*> &Visited) {
  if (N == Op) return true;                        // Found it.
  SDNode *Node = N.Val;
  if (Node->getNumOperands() == 0 ||      // Leaf?
      Node->getNodeDepth() <= Op.getNodeDepth()) return false; // Can't find it?
  if (!Visited.insert(Node).second) return false;  // Already visited?

  // Recurse for the last N-1 operands.
  for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
    if (NodeTransitivelyUsesValue(Node->getOperand(i), Op, Visited))
      return true;

  // Tail recurse for the first operand.
  return NodeTransitivelyUsesValue(Node->getOperand(0), Op, Visited);
}
X86AddressMode ISel::SelectAddrExprs(const X86ISelAddressMode &IAM) {
  X86AddressMode Result;

  // If we need to emit two register operands, emit the one with the highest
  // register pressure first.
  if (IAM.BaseType == X86ISelAddressMode::RegBase &&
      IAM.Base.Reg.Val && IAM.IndexReg.Val) {
    bool EmitBaseThenIndex;
    if (getRegPressure(IAM.Base.Reg) > getRegPressure(IAM.IndexReg)) {
      std::set<SDNode*> Visited;
      EmitBaseThenIndex = true;
      // If Base ends up pointing to Index, we must emit index first.  This is
      // because of the way we fold loads, we may end up doing bad things with
      // the base.
      if (NodeTransitivelyUsesValue(IAM.Base.Reg, IAM.IndexReg, Visited))
        EmitBaseThenIndex = false;
    } else {
      std::set<SDNode*> Visited;
      EmitBaseThenIndex = false;
      // If Index ends up pointing to Base, we must emit base first.  This is
      // because of the way we fold loads, we may end up doing bad things with
      // the index.
      if (NodeTransitivelyUsesValue(IAM.IndexReg, IAM.Base.Reg, Visited))
        EmitBaseThenIndex = true;
    }

    if (EmitBaseThenIndex) {
      Result.Base.Reg = SelectExpr(IAM.Base.Reg);
      Result.IndexReg = SelectExpr(IAM.IndexReg);
    } else {
      Result.IndexReg = SelectExpr(IAM.IndexReg);
      Result.Base.Reg = SelectExpr(IAM.Base.Reg);
    }
  } else if (IAM.BaseType == X86ISelAddressMode::RegBase && IAM.Base.Reg.Val) {
    Result.Base.Reg = SelectExpr(IAM.Base.Reg);
  } else if (IAM.IndexReg.Val) {
    Result.IndexReg = SelectExpr(IAM.IndexReg);
  }

  switch (IAM.BaseType) {
  case X86ISelAddressMode::RegBase:
    Result.BaseType = X86AddressMode::RegBase;
    break;
  case X86ISelAddressMode::FrameIndexBase:
    Result.BaseType = X86AddressMode::FrameIndexBase;
    Result.Base.FrameIndex = IAM.Base.FrameIndex;
    break;
  default:
    assert(0 && "Unknown base type!");
    break;
  }

  Result.Scale = IAM.Scale;
  Result.Disp = IAM.Disp;
  Result.GV = IAM.GV;
  return Result;
}
/// SelectAddress - Pattern match the maximal addressing mode for this node and
/// emit all of the leaf registers.
void ISel::SelectAddress(SDOperand N, X86AddressMode &AM) {
  X86ISelAddressMode IAM;
  MatchAddress(N, IAM);
  AM = SelectAddrExprs(IAM);
}
/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode, it does not cause any code to be emitted.  For that, use
/// SelectAddress.
bool ISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM) {
  switch (N.getOpcode()) {
  default: break;
  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;
  case ISD::GlobalAddress:
    if (AM.GV == 0) {
      GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
      // For Darwin, external and weak symbols are indirect, so we want to load
      // the value at address GV, not the value of GV itself.  This means that
      // the GlobalAddress must be in the base or index register of the
      // address, not the GV offset field.
      if (Subtarget->getIndirectExternAndWeakGlobals() &&
          (GV->hasWeakLinkage() || GV->isExternal())) {
        // Cannot fold the global into the displacement; it will be
        // materialized as a load in a register instead.
        break;
      } else {
        AM.GV = GV;
        return false;
      }
    }
    break;
  case ISD::Constant:
    AM.Disp += cast<ConstantSDNode>(N)->getValue();
    return false;
  case ISD::SHL:
    // We might have folded the load into this shift, so don't regen the value
    // if so.
    if (ExprMap.count(N)) break;

    if (AM.IndexReg.Val == 0 && AM.Scale == 1)
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
        unsigned Val = CN->getValue();
        if (Val == 1 || Val == 2 || Val == 3) {
          AM.Scale = 1 << Val;
          SDOperand ShVal = N.Val->getOperand(0);

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
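          // For example, matching (X + 4) << 1 here yields IndexReg = X,
          // Scale = 2, Disp += 8, with no separate add instruction emitted.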
          if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
              isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
            AM.IndexReg = ShVal.Val->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(ShVal.Val->getOperand(1));
            AM.Disp += AddVal->getValue() << Val;
          } else {
            AM.IndexReg = ShVal;
          }
          return false;
        }
      }
    break;
  case ISD::MUL:
    // We might have folded the load into this mul, so don't regen the value if
    // so.
    if (ExprMap.count(N)) break;

    // X*[3,5,9] -> X+X*[2,4,8]
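    // e.g. X*5 becomes Base = X, IndexReg = X, Scale = 4, which a single LEA
    // such as "leal (%eax,%eax,4), %ecx" can compute (registers illustrative).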
    if (AM.IndexReg.Val == 0 && AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.Val == 0)
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
        if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
          AM.Scale = unsigned(CN->getValue())-1;

          SDOperand MulVal = N.Val->getOperand(0);
          SDOperand Reg;

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
            Reg = MulVal.Val->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.Val->getOperand(1));
            AM.Disp += AddVal->getValue() * CN->getValue();
          } else {
            Reg = N.Val->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
          return false;
        }
    break;
  case ISD::ADD: {
    // We might have folded the load into this add, so don't regen the value if
    // so.
    if (ExprMap.count(N)) break;

    X86ISelAddressMode Backup = AM;
    if (!MatchAddress(N.Val->getOperand(0), AM) &&
        !MatchAddress(N.Val->getOperand(1), AM))
      return false;
    AM = Backup;
    if (!MatchAddress(N.Val->getOperand(1), AM) &&
        !MatchAddress(N.Val->getOperand(0), AM))
      return false;
    AM = Backup;
    break;
  }
  }

  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.Val == 0) {
      AM.IndexReg = N;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}
/// Emit2SetCCsAndLogical - Emit the following sequence of instructions,
/// assuming that the temporary registers are in the 8-bit register class.
///
///     Tmp1 = setcc1
///     Tmp2 = setcc2
///     DestReg = logicalop Tmp1, Tmp2
///
static void Emit2SetCCsAndLogical(MachineBasicBlock *BB, unsigned SetCC1,
                                  unsigned SetCC2, unsigned LogicalOp,
                                  unsigned DestReg) {
  SSARegMap *RegMap = BB->getParent()->getSSARegMap();
  unsigned Tmp1 = RegMap->createVirtualRegister(X86::R8RegisterClass);
  unsigned Tmp2 = RegMap->createVirtualRegister(X86::R8RegisterClass);
  BuildMI(BB, SetCC1, 0, Tmp1);
  BuildMI(BB, SetCC2, 0, Tmp2);
  BuildMI(BB, LogicalOp, 2, DestReg).addReg(Tmp1).addReg(Tmp2);
}
/// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
/// condition codes match the specified SetCCOpcode.  Note that some conditions
/// require multiple instructions to generate the correct value.
static void EmitSetCC(MachineBasicBlock *BB, unsigned DestReg,
                      ISD::CondCode SetCCOpcode, bool isFP) {
  unsigned Opc;
  if (!isFP) {
    switch (SetCCOpcode) {
    default: assert(0 && "Illegal integer SetCC!");
    case ISD::SETEQ:  Opc = X86::SETEr;  break;
    case ISD::SETGT:  Opc = X86::SETGr;  break;
    case ISD::SETGE:  Opc = X86::SETGEr; break;
    case ISD::SETLT:  Opc = X86::SETLr;  break;
    case ISD::SETLE:  Opc = X86::SETLEr; break;
    case ISD::SETNE:  Opc = X86::SETNEr; break;
    case ISD::SETULT: Opc = X86::SETBr;  break;
    case ISD::SETUGT: Opc = X86::SETAr;  break;
    case ISD::SETULE: Opc = X86::SETBEr; break;
    case ISD::SETUGE: Opc = X86::SETAEr; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    // ZF  PF  CF   op
    //  0 | 0 | 0 | X > Y
    //  0 | 0 | 1 | X < Y
    //  1 | 0 | 0 | X == Y
    //  1 | 1 | 1 | unordered
    //
    switch (SetCCOpcode) {
    default: assert(0 && "Invalid FP setcc!");
    case ISD::SETUEQ:
    case ISD::SETEQ:
      Opc = X86::SETEr;    // True if ZF = 1
      break;
    case ISD::SETOGT:
    case ISD::SETGT:
      Opc = X86::SETAr;    // True if CF = 0 and ZF = 0
      break;
    case ISD::SETOGE:
    case ISD::SETGE:
      Opc = X86::SETAEr;   // True if CF = 0
      break;
    case ISD::SETULT:
    case ISD::SETLT:
      Opc = X86::SETBr;    // True if CF = 1
      break;
    case ISD::SETULE:
    case ISD::SETLE:
      Opc = X86::SETBEr;   // True if CF = 1 or ZF = 1
      break;
    case ISD::SETONE:
    case ISD::SETNE:
      Opc = X86::SETNEr;   // True if ZF = 0
      break;
    case ISD::SETUO:
      Opc = X86::SETPr;    // True if PF = 1
      break;
    case ISD::SETO:
      Opc = X86::SETNPr;   // True if PF = 0
      break;
    case ISD::SETOEQ:      // !PF & ZF
      Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETEr, X86::AND8rr, DestReg);
      return;
    case ISD::SETOLT:      // !PF & CF
      Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBr, X86::AND8rr, DestReg);
      return;
    case ISD::SETOLE:      // !PF & (CF || ZF)
      Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBEr, X86::AND8rr, DestReg);
      return;
    case ISD::SETUGT:      // PF | (!ZF & !CF)
      Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAr, X86::OR8rr, DestReg);
      return;
    case ISD::SETUGE:      // PF | !CF
      Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAEr, X86::OR8rr, DestReg);
      return;
    case ISD::SETUNE:      // PF | !ZF
      Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETNEr, X86::OR8rr, DestReg);
      return;
    }
  }

  BuildMI(BB, Opc, 0, DestReg);
}
/// EmitBranchCC - Emit code into BB that arranges for control to transfer to
/// the Dest block if the Cond condition is true.  If we cannot fold this
/// condition into the branch, return true.
///
bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
                        SDOperand Cond) {
  // FIXME: Evaluate whether it would be good to emit code like (X < Y) | (A >
  // B) using two conditional branches instead of one condbr, two setcc's, and
  // an or.
  if ((Cond.getOpcode() == ISD::OR ||
       Cond.getOpcode() == ISD::AND) && Cond.Val->hasOneUse()) {
    // And and or set the flags for us, so there is no need to emit a TST of
    // the result.  It is only safe to do this if there is only a single use of
    // the AND/OR though, otherwise we don't know it will be emitted here.
    Select(Chain);
    SelectExpr(Cond);
    BuildMI(BB, X86::JNE, 1).addMBB(Dest);
    return false;
  }

  // Codegen br not C -> JE.
  if (Cond.getOpcode() == ISD::XOR)
    if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(Cond.Val->getOperand(1)))
      if (NC->isAllOnesValue()) {
        unsigned CondR;
        if (getRegPressure(Chain) > getRegPressure(Cond)) {
          Select(Chain);
          CondR = SelectExpr(Cond.Val->getOperand(0));
        } else {
          CondR = SelectExpr(Cond.Val->getOperand(0));
          Select(Chain);
        }
        BuildMI(BB, X86::TEST8rr, 2).addReg(CondR).addReg(CondR);
        BuildMI(BB, X86::JE, 1).addMBB(Dest);
        return false;
      }

  if (Cond.getOpcode() != ISD::SETCC)
    return true;                       // Can only handle simple setcc's so far.
  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  unsigned Opc;

  // Handle integer conditions first.
  if (MVT::isInteger(Cond.getOperand(0).getValueType())) {
    switch (CC) {
    default: assert(0 && "Illegal integer SetCC!");
    case ISD::SETEQ:  Opc = X86::JE;  break;
    case ISD::SETGT:  Opc = X86::JG;  break;
    case ISD::SETGE:  Opc = X86::JGE; break;
    case ISD::SETLT:  Opc = X86::JL;  break;
    case ISD::SETLE:  Opc = X86::JLE; break;
    case ISD::SETNE:  Opc = X86::JNE; break;
    case ISD::SETULT: Opc = X86::JB;  break;
    case ISD::SETUGT: Opc = X86::JA;  break;
    case ISD::SETULE: Opc = X86::JBE; break;
    case ISD::SETUGE: Opc = X86::JAE; break;
    }

    EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.hasOneUse());
    BuildMI(BB, Opc, 1).addMBB(Dest);
    return false;
  }

  unsigned Opc2 = 0;  // Second branch if needed.

  // On a floating point condition, the flags are set as follows:
  // ZF  PF  CF   op
  //  0 | 0 | 0 | X > Y
  //  0 | 0 | 1 | X < Y
  //  1 | 0 | 0 | X == Y
  //  1 | 1 | 1 | unordered
  //
  switch (CC) {
  default: assert(0 && "Invalid FP setcc!");
  case ISD::SETUEQ:
  case ISD::SETEQ:   Opc = X86::JE;  break;     // True if ZF = 1
  case ISD::SETOGT:
  case ISD::SETGT:   Opc = X86::JA;  break;     // True if CF = 0 and ZF = 0
  case ISD::SETOGE:
  case ISD::SETGE:   Opc = X86::JAE; break;     // True if CF = 0
  case ISD::SETULT:
  case ISD::SETLT:   Opc = X86::JB;  break;     // True if CF = 1
  case ISD::SETULE:
  case ISD::SETLE:   Opc = X86::JBE; break;     // True if CF = 1 or ZF = 1
  case ISD::SETONE:
  case ISD::SETNE:   Opc = X86::JNE; break;     // True if ZF = 0
  case ISD::SETUO:   Opc = X86::JP;  break;     // True if PF = 1
  case ISD::SETO:    Opc = X86::JNP; break;     // True if PF = 0
  case ISD::SETUGT:      // PF = 1 | (ZF = 0 & CF = 0)
    Opc  = X86::JA;      // ZF = 0 & CF = 0
    Opc2 = X86::JP;      // PF = 1
    break;
  case ISD::SETUGE:      // PF = 1 | CF = 0
    Opc  = X86::JAE;     // CF = 0
    Opc2 = X86::JP;      // PF = 1
    break;
  case ISD::SETUNE:      // PF = 1 | ZF = 0
    Opc  = X86::JNE;     // ZF = 0
    Opc2 = X86::JP;      // PF = 1
    break;
  case ISD::SETOEQ:      // PF = 0 & ZF = 1
    return true;    // FIXME: Emit more efficient code for this branch.
  case ISD::SETOLT:      // PF = 0 & CF = 1
    return true;    // FIXME: Emit more efficient code for this branch.
  case ISD::SETOLE:      // PF = 0 & (CF = 1 || ZF = 1)
    return true;    // FIXME: Emit more efficient code for this branch.
  }

  EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.hasOneUse());
  BuildMI(BB, Opc, 1).addMBB(Dest);
  if (Opc2)
    BuildMI(BB, Opc2, 1).addMBB(Dest);
  return false;
}
/// EmitSelectCC - Emit code into BB that performs a select operation between
/// the two registers RTrue and RFalse, generating a result into RDest.
///
void ISel::EmitSelectCC(SDOperand Cond, SDOperand True, SDOperand False,
                        MVT::ValueType SVT, unsigned RDest) {
  unsigned RTrue, RFalse;
  enum Condition {
    EQ, NE, LT, LE, GT, GE, B, BE, A, AE, P, NP,
    NOT_SET
  } CondCode = NOT_SET;
  static const unsigned CMOVTAB16[] = {
    X86::CMOVE16rr, X86::CMOVNE16rr, X86::CMOVL16rr, X86::CMOVLE16rr,
    X86::CMOVG16rr, X86::CMOVGE16rr, X86::CMOVB16rr, X86::CMOVBE16rr,
    X86::CMOVA16rr, X86::CMOVAE16rr, X86::CMOVP16rr, X86::CMOVNP16rr,
  };
  static const unsigned CMOVTAB32[] = {
    X86::CMOVE32rr, X86::CMOVNE32rr, X86::CMOVL32rr, X86::CMOVLE32rr,
    X86::CMOVG32rr, X86::CMOVGE32rr, X86::CMOVB32rr, X86::CMOVBE32rr,
    X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
  };
  static const unsigned CMOVTABFP[] = {
    X86::FpCMOVE,  X86::FpCMOVNE, /*missing*/0, /*missing*/0,
    /*missing*/0,  /*missing*/ 0, X86::FpCMOVB, X86::FpCMOVBE,
    X86::FpCMOVA,  X86::FpCMOVAE, X86::FpCMOVP, X86::FpCMOVNP
  };
  static const int SSE_CMOVTAB[] = {
    /*CMPEQ*/   0, /*CMPNEQ*/  4, /*missing*/ 0,  /*missing*/ 0,
    /*missing*/ 0, /*missing*/ 0, /*CMPLT*/   1,  /*CMPLE*/   2,
    /*CMPNLE*/  6, /*CMPNLT*/  5, /*CMPUNORD*/ 3, /*CMPORD*/  7
  };
  if (Cond.getOpcode() == ISD::SETCC) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    if (MVT::isInteger(Cond.getOperand(0).getValueType())) {
      switch (CC) {
      default: assert(0 && "Unknown integer comparison!");
      case ISD::SETEQ:  CondCode = EQ; break;
      case ISD::SETGT:  CondCode = GT; break;
      case ISD::SETGE:  CondCode = GE; break;
      case ISD::SETLT:  CondCode = LT; break;
      case ISD::SETLE:  CondCode = LE; break;
      case ISD::SETNE:  CondCode = NE; break;
      case ISD::SETULT: CondCode = B;  break;
      case ISD::SETUGT: CondCode = A;  break;
      case ISD::SETULE: CondCode = BE; break;
      case ISD::SETUGE: CondCode = AE; break;
      }
    } else {
      // On a floating point condition, the flags are set as follows:
      // ZF  PF  CF   op
      //  0 | 0 | 0 | X > Y
      //  0 | 0 | 1 | X < Y
      //  1 | 0 | 0 | X == Y
      //  1 | 1 | 1 | unordered
      //
      switch (CC) {
      default: assert(0 && "Unknown FP comparison!");
      case ISD::SETUEQ:
      case ISD::SETEQ:  CondCode = EQ; break;     // True if ZF = 1
      case ISD::SETOGT:
      case ISD::SETGT:  CondCode = A;  break;     // True if CF = 0 and ZF = 0
      case ISD::SETOGE:
      case ISD::SETGE:  CondCode = AE; break;     // True if CF = 0
      case ISD::SETULT:
      case ISD::SETLT:  CondCode = B;  break;     // True if CF = 1
      case ISD::SETULE:
      case ISD::SETLE:  CondCode = BE; break;     // True if CF = 1 or ZF = 1
      case ISD::SETONE:
      case ISD::SETNE:  CondCode = NE; break;     // True if ZF = 0
      case ISD::SETUO:  CondCode = P;  break;     // True if PF = 1
      case ISD::SETO:   CondCode = NP; break;     // True if PF = 0
      case ISD::SETUGT:      // PF = 1 | (ZF = 0 & CF = 0)
      case ISD::SETUGE:      // PF = 1 | CF = 0
      case ISD::SETUNE:      // PF = 1 | ZF = 0
      case ISD::SETOEQ:      // PF = 0 & ZF = 1
      case ISD::SETOLT:      // PF = 0 & CF = 1
      case ISD::SETOLE:      // PF = 0 & (CF = 1 || ZF = 1)
        // We cannot emit this comparison as a single cmov.
        break;
      }
    }
  }

  // There's no SSE equivalent of FCMOVE.  For cases where we set a condition
  // code above and one of the results of the select is +0.0, then we can fake
  // it up through a clever AND with mask.  Otherwise, we will fall through to
  // the code below that will use a PHI node to select the right value.
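  // For example (illustrative), select(setolt(A, B), X, +0.0) needs only a
  // "cmpltsd M, B" (with M seeded from A) followed by "andpd M, X": the
  // compare writes all-ones into M when the condition holds and all-zeros
  // otherwise, and because +0.0 is the all-zeros bit pattern the AND already
  // produces the correct false value without any extra constant.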
  if (X86ScalarSSE && (SVT == MVT::f32 || SVT == MVT::f64)) {
    if (Cond.getOperand(0).getValueType() == SVT &&
        NOT_SET != CondCode) {
      ConstantFPSDNode *CT = dyn_cast<ConstantFPSDNode>(True);
      ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(False);
      bool TrueZero  = CT && CT->isExactlyValue(0.0);
      bool FalseZero = CF && CF->isExactlyValue(0.0);
      if (TrueZero || FalseZero) {
        SDOperand LHS = Cond.getOperand(0);
        SDOperand RHS = Cond.getOperand(1);

        // Select the two halves of the condition
        unsigned RLHS, RRHS;
        if (getRegPressure(LHS) > getRegPressure(RHS)) {
          RLHS = SelectExpr(LHS);
          RRHS = SelectExpr(RHS);
        } else {
          RRHS = SelectExpr(RHS);
          RLHS = SelectExpr(LHS);
        }

        // Emit the comparison and generate a mask from it
        unsigned MaskReg = MakeReg(SVT);
        unsigned Opc = (SVT == MVT::f32) ? X86::CMPSSrr : X86::CMPSDrr;
        BuildMI(BB, Opc, 3, MaskReg).addReg(RLHS).addReg(RRHS)
          .addImm(SSE_CMOVTAB[CondCode]);

        if (TrueZero) {
          RFalse = SelectExpr(False);
          Opc = (SVT == MVT::f32) ? X86::ANDNPSrr : X86::ANDNPDrr;
          BuildMI(BB, Opc, 2, RDest).addReg(MaskReg).addReg(RFalse);
        } else {
          RTrue = SelectExpr(True);
          Opc = (SVT == MVT::f32) ? X86::ANDPSrr : X86::ANDPDrr;
          BuildMI(BB, Opc, 2, RDest).addReg(MaskReg).addReg(RTrue);
        }
        return;
      }
    }
  }
  // Select the true and false values for use in both the SSE PHI case, and the
  // integer or x87 cmov cases below.
  if (getRegPressure(True) > getRegPressure(False)) {
    RTrue = SelectExpr(True);
    RFalse = SelectExpr(False);
  } else {
    RFalse = SelectExpr(False);
    RTrue = SelectExpr(True);
  }

  // Since there's no SSE equivalent of FCMOVE, and we couldn't generate an
  // AND with mask, we'll have to do the normal RISC thing and generate a PHI
  // node to select between the true and false values.
  if (X86ScalarSSE && (SVT == MVT::f32 || SVT == MVT::f64)) {
    // FIXME: emit a direct compare and branch rather than setting a condition
    // register and testing it.
    unsigned CondReg = SelectExpr(Cond);
    BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);

    // Create an iterator with which to insert the MBB for copying the false
    // value and the MBB to hold the PHI instruction for this SetCC.
    MachineBasicBlock *thisMBB = BB;
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    ilist<MachineBasicBlock>::iterator It = BB;
    ++It;

    //  thisMBB:
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC sinkMBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
    BuildMI(BB, X86::JNE, 1).addMBB(sinkMBB);
    MachineFunction *F = BB->getParent();
    F->getBasicBlockList().insert(It, copy0MBB);
    F->getBasicBlockList().insert(It, sinkMBB);
    // Update machine-CFG edges
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;
    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    BB = sinkMBB;
    BuildMI(BB, X86::PHI, 4, RDest).addReg(RFalse)
      .addMBB(copy0MBB).addReg(RTrue).addMBB(thisMBB);
    return;
  }
  unsigned Opc = 0;
  if (CondCode != NOT_SET) {
    switch (SVT) {
    default: assert(0 && "Cannot select this type!");
    case MVT::i16: Opc = CMOVTAB16[CondCode]; break;
    case MVT::i32: Opc = CMOVTAB32[CondCode]; break;
    case MVT::f64: Opc = CMOVTABFP[CondCode]; break;
    }
  }

  // Finally, if we weren't able to fold this, just emit the condition and test
  // it.
  if (CondCode == NOT_SET || Opc == 0) {
    // Get the condition into the zero flag.
    unsigned CondReg = SelectExpr(Cond);
    BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);

    switch (SVT) {
    default: assert(0 && "Cannot select this type!");
    case MVT::i16: Opc = X86::CMOVE16rr; break;
    case MVT::i32: Opc = X86::CMOVE32rr; break;
    case MVT::f64: Opc = X86::FpCMOVE;   break;
    }
  } else {
    // FIXME: CMP R, 0 -> TEST R, R
    EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.Val->hasOneUse());
    std::swap(RTrue, RFalse);
  }
  BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse);
}
void ISel::EmitCMP(SDOperand LHS, SDOperand RHS, bool HasOneUse) {
  unsigned Opc;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
    // Fold the comparison into a load from memory if possible.
    if (HasOneUse && isFoldableLoad(LHS, RHS)) {
      switch (RHS.getValueType()) {
      default: Opc = 0; break;
      case MVT::i1:
      case MVT::i8:  Opc = X86::CMP8mi;  break;
      case MVT::i16: Opc = X86::CMP16mi; break;
      case MVT::i32: Opc = X86::CMP32mi; break;
      }
      if (Opc) {
        X86AddressMode AM;
        EmitFoldedLoad(LHS, AM);
        addFullAddress(BuildMI(BB, Opc, 5), AM).addImm(CN->getValue());
        return;
      }
    }

    switch (RHS.getValueType()) {
    default: Opc = 0; break;
    case MVT::i1:
    case MVT::i8:  Opc = X86::CMP8ri;  break;
    case MVT::i16: Opc = X86::CMP16ri; break;
    case MVT::i32: Opc = X86::CMP32ri; break;
    }
    if (Opc) {
      unsigned Tmp1 = SelectExpr(LHS);
      BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue());
      return;
    }
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(RHS)) {
    if (!X86ScalarSSE && (CN->isExactlyValue(+0.0) ||
                          CN->isExactlyValue(-0.0))) {
      unsigned Reg = SelectExpr(LHS);
      BuildMI(BB, X86::FpTST, 1).addReg(Reg);
      BuildMI(BB, X86::FNSTSW8r, 0);
      BuildMI(BB, X86::SAHF, 1);
      return;
    }
  }

  if (HasOneUse && isFoldableLoad(LHS, RHS)) {
    switch (RHS.getValueType()) {
    default: Opc = 0; break;
    case MVT::i1:
    case MVT::i8:  Opc = X86::CMP8mr;  break;
    case MVT::i16: Opc = X86::CMP16mr; break;
    case MVT::i32: Opc = X86::CMP32mr; break;
    }
    if (Opc) {
      X86AddressMode AM;
      EmitFoldedLoad(LHS, AM);
      unsigned Reg = SelectExpr(RHS);
      addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(Reg);
      return;
    }
  }

  switch (LHS.getValueType()) {
  default: assert(0 && "Cannot compare this value!");
  case MVT::i1:
  case MVT::i8:  Opc = X86::CMP8rr;  break;
  case MVT::i16: Opc = X86::CMP16rr; break;
  case MVT::i32: Opc = X86::CMP32rr; break;
  case MVT::f32: Opc = X86::UCOMISSrr; break;
  case MVT::f64: Opc = X86ScalarSSE ? X86::UCOMISDrr : X86::FpUCOMIr; break;
  }
  unsigned Tmp1, Tmp2;
  if (getRegPressure(LHS) > getRegPressure(RHS)) {
    Tmp1 = SelectExpr(LHS);
    Tmp2 = SelectExpr(RHS);
  } else {
    Tmp2 = SelectExpr(RHS);
    Tmp1 = SelectExpr(LHS);
  }
  BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
}
/// isFoldableLoad - Return true if this is a load instruction that can safely
/// be folded into an operation that uses it.
bool ISel::isFoldableLoad(SDOperand Op, SDOperand OtherOp, bool FloatPromoteOk){
  if (Op.getOpcode() == ISD::LOAD) {
    // FIXME: currently can't fold constant pool indexes.
    if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
      return false;
  } else if (FloatPromoteOk && Op.getOpcode() == ISD::EXTLOAD &&
             cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::f32) {
    // FIXME: currently can't fold constant pool indexes.
    if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
      return false;
  } else {
    return false;
  }

  // If this load has already been emitted, we clearly can't fold it.
  assert(Op.ResNo == 0 && "Not a use of the value of the load?");
  if (ExprMap.count(Op.getValue(1))) return false;
  assert(!ExprMap.count(Op.getValue(0)) && "Value in map but not token chain?");
  assert(!ExprMap.count(Op.getValue(1))&&"Token lowered but value not in map?");

  // If there is not just one use of its value, we cannot fold.
  if (!Op.Val->hasNUsesOfValue(1, 0)) return false;

  // Finally, we cannot fold the load into the operation if this would induce a
  // cycle into the resultant dag.  To check for this, see if OtherOp (the
  // other operand of the operation we are folding the load into) can possibly
  // use the chain node defined by the load.
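  // For example, folding the load in (add (load Ch, P), Y) is illegal if Y is
  // itself computed from the load's output chain: the folded instruction
  // would then have to execute both before and after Y.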
  if (OtherOp.Val && !Op.Val->hasNUsesOfValue(0, 1)) { // Has uses of chain?
    std::set<SDNode*> Visited;
    if (NodeTransitivelyUsesValue(OtherOp, Op.getValue(1), Visited))
      return false;
  }
  return true;
}
/// EmitFoldedLoad - Ensure that the arguments of the load are code generated,
/// and compute the address being loaded into AM.
void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) {
  SDOperand Chain   = Op.getOperand(0);
  SDOperand Address = Op.getOperand(1);

  if (getRegPressure(Chain) > getRegPressure(Address)) {
    Select(Chain);
    SelectAddress(Address, AM);
  } else {
    SelectAddress(Address, AM);
    Select(Chain);
  }

  // The chain for this load is now lowered.
  assert(ExprMap.count(SDOperand(Op.Val, 1)) == 0 &&
         "Load emitted more than once?");
  if (!ExprMap.insert(std::make_pair(Op.getValue(1), 1)).second)
    assert(0 && "Load emitted more than once!");
}
// EmitOrOpOp - Pattern match the expression (Op1|Op2), where we know that op1
// and op2 are i8/i16/i32 values with one use each (the or).  If we can form a
// SHLD or SHRD, emit the instruction (generating the value into DestReg) and
// return true.
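// For example, (X << 8) | (Y >> 24) on i32 values becomes SHLD X, Y, 8 (a
// funnel shift pulling the low 8 bits in from the top of Y), and
// (A << 5) | (A >> 27) becomes ROL A, 5.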
bool ISel::EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg) {
  if (Op1.getOpcode() == ISD::SHL && Op2.getOpcode() == ISD::SRL) {
    // good!
  } else if (Op2.getOpcode() == ISD::SHL && Op1.getOpcode() == ISD::SRL) {
    std::swap(Op1, Op2);     // Op1 is the SHL now.
  } else {
    return false;            // No match
  }

  SDOperand ShlVal = Op1.getOperand(0);
  SDOperand ShlAmt = Op1.getOperand(1);
  SDOperand ShrVal = Op2.getOperand(0);
  SDOperand ShrAmt = Op2.getOperand(1);

  unsigned RegSize = MVT::getSizeInBits(Op1.getValueType());

  // Find out if ShrAmt = 32-ShlAmt  or  ShlAmt = 32-ShrAmt.
  if (ShlAmt.getOpcode() == ISD::SUB && ShlAmt.getOperand(1) == ShrAmt)
    if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShlAmt.getOperand(0)))
      if (SubCST->getValue() == RegSize) {
        // (A >> ShrAmt) | (A << (32-ShrAmt)) ==> ROR A, ShrAmt
        // (A >> ShrAmt) | (B << (32-ShrAmt)) ==> SHRD A, B, ShrAmt
        if (ShrVal == ShlVal) {
          unsigned Reg, ShAmt;
          if (getRegPressure(ShrVal) > getRegPressure(ShrAmt)) {
            Reg = SelectExpr(ShrVal);
            ShAmt = SelectExpr(ShrAmt);
          } else {
            ShAmt = SelectExpr(ShrAmt);
            Reg = SelectExpr(ShrVal);
          }
          BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
          unsigned Opc = RegSize == 8 ? X86::ROR8rCL :
                         (RegSize == 16 ? X86::ROR16rCL : X86::ROR32rCL);
          BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
          return true;
        } else if (RegSize != 8) {
          unsigned AReg, BReg;
          if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
            BReg = SelectExpr(ShlVal);
            AReg = SelectExpr(ShrVal);
          } else {
            AReg = SelectExpr(ShrVal);
            BReg = SelectExpr(ShlVal);
          }
          unsigned ShAmt = SelectExpr(ShrAmt);
          BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
          unsigned Opc = RegSize == 16 ? X86::SHRD16rrCL : X86::SHRD32rrCL;
          BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
          return true;
        }
      }

  if (ShrAmt.getOpcode() == ISD::SUB && ShrAmt.getOperand(1) == ShlAmt)
    if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShrAmt.getOperand(0)))
      if (SubCST->getValue() == RegSize) {
        // (A << ShlAmt) | (A >> (32-ShlAmt)) ==> ROL A, ShlAmt
        // (A << ShlAmt) | (B >> (32-ShlAmt)) ==> SHLD A, B, ShlAmt
        if (ShrVal == ShlVal) {
          unsigned Reg, ShAmt;
          if (getRegPressure(ShrVal) > getRegPressure(ShlAmt)) {
            Reg = SelectExpr(ShrVal);
            ShAmt = SelectExpr(ShlAmt);
          } else {
            ShAmt = SelectExpr(ShlAmt);
            Reg = SelectExpr(ShrVal);
          }
          BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
          unsigned Opc = RegSize == 8 ? X86::ROL8rCL :
                         (RegSize == 16 ? X86::ROL16rCL : X86::ROL32rCL);
          BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
          return true;
        } else if (RegSize != 8) {
          unsigned AReg, BReg;
          if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
            AReg = SelectExpr(ShlVal);
            BReg = SelectExpr(ShrVal);
          } else {
            BReg = SelectExpr(ShrVal);
            AReg = SelectExpr(ShlVal);
          }
          unsigned ShAmt = SelectExpr(ShlAmt);
          BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
          unsigned Opc = RegSize == 16 ? X86::SHLD16rrCL : X86::SHLD32rrCL;
          BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
          return true;
        }
      }

  if (ConstantSDNode *ShrCst = dyn_cast<ConstantSDNode>(ShrAmt))
    if (ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(ShlAmt))
      if (ShrCst->getValue() < RegSize && ShlCst->getValue() < RegSize)
        if (ShrCst->getValue() == RegSize-ShlCst->getValue()) {
          // (A >> 5) | (A << 27) --> ROR A, 5
          // (A >> 5) | (B << 27) --> SHRD A, B, 5
          if (ShrVal == ShlVal) {
            unsigned Reg = SelectExpr(ShrVal);
            unsigned Opc = RegSize == 8 ? X86::ROR8ri :
                           (RegSize == 16 ? X86::ROR16ri : X86::ROR32ri);
            BuildMI(BB, Opc, 2, DestReg).addReg(Reg).addImm(ShrCst->getValue());
            return true;
          } else if (RegSize != 8) {
            unsigned AReg, BReg;
            if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
              BReg = SelectExpr(ShlVal);
              AReg = SelectExpr(ShrVal);
            } else {
              AReg = SelectExpr(ShrVal);
              BReg = SelectExpr(ShlVal);
            }
            unsigned Opc = RegSize == 16 ? X86::SHRD16rri8 : X86::SHRD32rri8;
            BuildMI(BB, Opc, 3, DestReg).addReg(AReg).addReg(BReg)
              .addImm(ShrCst->getValue());
            return true;
          }
        }

  return false;
}
unsigned ISel::SelectExpr(SDOperand N) {
  unsigned Result;
  unsigned Tmp1 = 0, Tmp2 = 0, Tmp3 = 0, Opc = 0;
  SDNode *Node = N.Val;
  SDOperand Op0, Op1;

  if (Node->getOpcode() == ISD::CopyFromReg) {
    unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    // Just use the specified register as our input if we can.
    if (MRegisterInfo::isVirtualRegister(Reg) || Reg == X86::ESP)
      return Reg;
  }

  unsigned &Reg = ExprMap[N];
  if (Reg) return Reg;

  switch (N.getOpcode()) {
  default:
    Reg = Result = (N.getValueType() != MVT::Other) ?
                            MakeReg(N.getValueType()) : 1;
    break;
  case X86ISD::TAILCALL:
  case ISD::CALL:
    // If this is a call instruction, make sure to prepare ALL of the result
    // values as well as the chain.
    ExprMap[N.getValue(0)] = 1;
    if (Node->getNumValues() > 1) {
      Result = MakeReg(Node->getValueType(1));
      ExprMap[N.getValue(1)] = Result;
      for (unsigned i = 2, e = Node->getNumValues(); i != e; ++i)
        ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
    } else {
      Result = 1;
    }
    break;
  case ISD::ADD_PARTS:
  case ISD::SUB_PARTS:
  case ISD::SHL_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SRA_PARTS:
    Result = MakeReg(Node->getValueType(0));
    ExprMap[N.getValue(0)] = Result;
    for (unsigned i = 1, e = N.Val->getNumValues(); i != e; ++i)
      ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
    break;
  }
  switch (N.getOpcode()) {
  default:
    Node->dump();
    assert(0 && "Node not handled!\n");
  case ISD::FP_EXTEND:
    assert(X86ScalarSSE && "Scalar SSE FP must be enabled to use f32");
    Tmp1 = SelectExpr(N.getOperand(0));
    BuildMI(BB, X86::CVTSS2SDrr, 1, Result).addReg(Tmp1);
    return Result;
  case ISD::FP_ROUND:
    assert(X86ScalarSSE && "Scalar SSE FP must be enabled to use f32");
    Tmp1 = SelectExpr(N.getOperand(0));
    BuildMI(BB, X86::CVTSD2SSrr, 1, Result).addReg(Tmp1);
    return Result;
  case ISD::CopyFromReg:
    Select(N.getOperand(0));
    if (Result == 1)
      Reg = Result = ExprMap[N.getValue(0)] =
        MakeReg(N.getValue(0).getValueType());
    Tmp1 = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    switch (Node->getValueType(0)) {
    default: assert(0 && "Cannot CopyFromReg this!");
    case MVT::i1:
    case MVT::i8:
      BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
      return Result;
    case MVT::i16:
      BuildMI(BB, X86::MOV16rr, 1, Result).addReg(Tmp1);
      return Result;
    case MVT::i32:
      BuildMI(BB, X86::MOV32rr, 1, Result).addReg(Tmp1);
      return Result;
    }

  case ISD::FrameIndex:
    Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
    addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
    return Result;
  case ISD::ConstantPool:
    Tmp1 = BB->getParent()->getConstantPool()->
         getConstantPoolIndex(cast<ConstantPoolSDNode>(N)->get());
    addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
    return Result;
  case ISD::ConstantFP:
    if (X86ScalarSSE) {
      assert(cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) &&
             "SSE only supports +0.0");
      Opc = (N.getValueType() == MVT::f32) ? X86::FLD0SS : X86::FLD0SD;
      BuildMI(BB, Opc, 0, Result);
      return Result;
    }
    ContainsFPCode = true;
    Tmp1 = Result;   // Intermediate Register
    if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
        cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
      Tmp1 = MakeReg(MVT::f64);

    if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
        cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
      BuildMI(BB, X86::FpLD0, 0, Tmp1);
    else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
             cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
      BuildMI(BB, X86::FpLD1, 0, Tmp1);
    else
      assert(0 && "Unexpected constant!");
    if (Tmp1 != Result)
      BuildMI(BB, X86::FpCHS, 1, Result).addReg(Tmp1);
    return Result;
  case ISD::Constant:
    switch (N.getValueType()) {
    default: assert(0 && "Cannot use constants of this type!");
    case MVT::i1:
    case MVT::i8:  Opc = X86::MOV8ri;  break;
    case MVT::i16: Opc = X86::MOV16ri; break;
    case MVT::i32: Opc = X86::MOV32ri; break;
    }
    BuildMI(BB, Opc, 1, Result).addImm(cast<ConstantSDNode>(N)->getValue());
    return Result;
  case ISD::UNDEF:
    if (Node->getValueType(0) == MVT::f64) {
      // FIXME: SHOULD TEACH STACKIFIER ABOUT UNDEF VALUES!
      BuildMI(BB, X86::FpLD0, 0, Result);
    } else {
      BuildMI(BB, X86::IMPLICIT_DEF, 0, Result);
    }
    return Result;
  case ISD::GlobalAddress: {
    GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
    // For Darwin, external and weak symbols are indirect, so we want to load
    // the value at address GV, not the value of GV itself.
    if (Subtarget->getIndirectExternAndWeakGlobals() &&
        (GV->hasWeakLinkage() || GV->isExternal())) {
      BuildMI(BB, X86::MOV32rm, 4, Result).addReg(0).addZImm(1).addReg(0)
        .addGlobalAddress(GV, false, 0);
    } else {
      BuildMI(BB, X86::MOV32ri, 1, Result).addGlobalAddress(GV);
    }
    return Result;
  }
  case ISD::ExternalSymbol: {
    const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
    BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
    return Result;
  }
  case ISD::ANY_EXTEND:   // treat any extend like zext
  case ISD::ZERO_EXTEND: {
    int DestIs16 = N.getValueType() == MVT::i16;
    int SrcIs16  = N.getOperand(0).getValueType() == MVT::i16;

    // FIXME: This hack is here for zero extension casts from bool to i8.  This
    // would not be needed if bools were promoted by Legalize.
    if (N.getValueType() == MVT::i8) {
      Tmp1 = SelectExpr(N.getOperand(0));
      BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
      return Result;
    }

    if (isFoldableLoad(N.getOperand(0), SDOperand())) {
      static const unsigned Opc[3] = {
        X86::MOVZX32rm8, X86::MOVZX32rm16, X86::MOVZX16rm8
      };

      X86AddressMode AM;
      EmitFoldedLoad(N.getOperand(0), AM);
      addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
      return Result;
    }

    static const unsigned Opc[3] = {
      X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8
    };
    Tmp1 = SelectExpr(N.getOperand(0));
    BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
    return Result;
  }
  case ISD::SIGN_EXTEND: {
    int DestIs16 = N.getValueType() == MVT::i16;
    int SrcIs16  = N.getOperand(0).getValueType() == MVT::i16;

    // FIXME: Legalize should promote bools to i8!
    assert(N.getOperand(0).getValueType() != MVT::i1 &&
           "Sign extend from bool not implemented!");

    if (isFoldableLoad(N.getOperand(0), SDOperand())) {
      static const unsigned Opc[3] = {
        X86::MOVSX32rm8, X86::MOVSX32rm16, X86::MOVSX16rm8
      };

      X86AddressMode AM;
      EmitFoldedLoad(N.getOperand(0), AM);
      addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
      return Result;
    }

    static const unsigned Opc[3] = {
      X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8
    };
    Tmp1 = SelectExpr(N.getOperand(0));
    BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
    return Result;
  }
  case ISD::TRUNCATE:
    // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
    // a move out of AX or AL.
    switch (N.getOperand(0).getValueType()) {
    default: assert(0 && "Unknown truncate!");
    case MVT::i8:  Tmp2 = X86::AL;  Opc = X86::MOV8rr;  break;
    case MVT::i16: Tmp2 = X86::AX;  Opc = X86::MOV16rr; break;
    case MVT::i32: Tmp2 = X86::EAX; Opc = X86::MOV32rr; break;
    }
    Tmp1 = SelectExpr(N.getOperand(0));
    BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);

    switch (N.getValueType()) {
    default: assert(0 && "Unknown truncate!");
    case MVT::i1:
    case MVT::i8:  Tmp2 = X86::AL;  Opc = X86::MOV8rr;  break;
    case MVT::i16: Tmp2 = X86::AX;  Opc = X86::MOV16rr; break;
    }
    BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
    return Result;
  case ISD::SINT_TO_FP: {
    Tmp1 = SelectExpr(N.getOperand(0));  // Get the operand register
    unsigned PromoteOpcode = 0;

    // We can handle any sint to fp with the direct sse conversion instructions.
    if (X86ScalarSSE) {
      Opc = (N.getValueType() == MVT::f64) ? X86::CVTSI2SDrr : X86::CVTSI2SSrr;
      BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
      return Result;
    }

    ContainsFPCode = true;

    // Spill the integer to memory and reload it from there.
    MVT::ValueType SrcTy = N.getOperand(0).getValueType();
    unsigned Size = MVT::getSizeInBits(SrcTy)/8;
    MachineFunction *F = BB->getParent();
    int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);

    switch (SrcTy) {
    case MVT::i32:
      addFrameReference(BuildMI(BB, X86::MOV32mr, 5), FrameIdx).addReg(Tmp1);
      addFrameReference(BuildMI(BB, X86::FpILD32m, 5, Result), FrameIdx);
      break;
    case MVT::i16:
      addFrameReference(BuildMI(BB, X86::MOV16mr, 5), FrameIdx).addReg(Tmp1);
      addFrameReference(BuildMI(BB, X86::FpILD16m, 5, Result), FrameIdx);
      break;
    default: break; // No promotion required.
    }
    return Result;
  }
  case ISD::FP_TO_SINT:
    Tmp1 = SelectExpr(N.getOperand(0));  // Get the operand register

    // If the target supports SSE2 and is performing FP operations in SSE regs
    // instead of the FP stack, then we can use the efficient CVTSS2SI and
    // CVTSD2SI instructions.
    assert(X86ScalarSSE);
    if (MVT::f32 == N.getOperand(0).getValueType()) {
      BuildMI(BB, X86::CVTTSS2SIrr, 1, Result).addReg(Tmp1);
    } else if (MVT::f64 == N.getOperand(0).getValueType()) {
      BuildMI(BB, X86::CVTTSD2SIrr, 1, Result).addReg(Tmp1);
    } else {
      assert(0 && "Not an f32 or f64?");
    }
    return Result;

  case ISD::ADD:
    Op0 = N.getOperand(0);
    Op1 = N.getOperand(1);

    if (isFoldableLoad(Op0, Op1, true)) {
      std::swap(Op0, Op1);
      goto FoldAdd;
    }

    if (isFoldableLoad(Op1, Op0, true)) {
    FoldAdd:
      switch (N.getValueType()) {
      default: assert(0 && "Cannot add this type!");
      case MVT::i1:
      case MVT::i8:  Opc = X86::ADD8rm;  break;
      case MVT::i16: Opc = X86::ADD16rm; break;
      case MVT::i32: Opc = X86::ADD32rm; break;
      case MVT::f32: Opc = X86::ADDSSrm; break;
      case MVT::f64:
        // For F64, handle promoted load operations (from F32) as well!
        if (X86ScalarSSE) {
          assert(Op1.getOpcode() == ISD::LOAD && "SSE load not promoted");
          Opc = X86::ADDSDrm;
        } else {
          Opc = Op1.getOpcode() == ISD::LOAD ? X86::FpADD64m : X86::FpADD32m;
        }
        break;
      }

      X86AddressMode AM;
      EmitFoldedLoad(Op1, AM);
      Tmp1 = SelectExpr(Op0);
      addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
      return Result;
    }

    // See if we can codegen this as an LEA to fold operations together.
    if (N.getValueType() == MVT::i32) {
      // Remove N from the exprmap so MatchAddress doesn't get confused.
      ExprMap.erase(N);
      X86ISelAddressMode AM;
      MatchAddress(N, AM);
      ExprMap[N] = Result;

      // If this is not just an add, emit the LEA.  For a simple add (like
      // reg+reg or reg+imm), we just emit an add.  It might be a good idea to
      // leave this as LEA, then peephole it to 'ADD' after two address elim
      // is done.
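      // e.g. (add (shl X, 2), 12) matches with Scale = 4 and Disp = 12 and
      // folds into one "leal 12(,%eax,4), %ecx" (registers illustrative).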
      if (AM.Scale != 1 || AM.BaseType == X86ISelAddressMode::FrameIndexBase ||
          AM.GV || (AM.Base.Reg.Val && AM.IndexReg.Val && AM.Disp)) {
        X86AddressMode XAM = SelectAddrExprs(AM);
        addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), XAM);
        return Result;
      }
    }

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
      Opc = 0;
      if (CN->getValue() == 1) {   // add X, 1 -> inc X
        switch (N.getValueType()) {
        default: assert(0 && "Cannot integer add this type!");
        case MVT::i8:  Opc = X86::INC8r;  break;
        case MVT::i16: Opc = X86::INC16r; break;
        case MVT::i32: Opc = X86::INC32r; break;
        }
      } else if (CN->isAllOnesValue()) { // add X, -1 -> dec X
        switch (N.getValueType()) {
        default: assert(0 && "Cannot integer add this type!");
        case MVT::i8:  Opc = X86::DEC8r;  break;
        case MVT::i16: Opc = X86::DEC16r; break;
        case MVT::i32: Opc = X86::DEC32r; break;
        }
      }

      if (Opc) {
        Tmp1 = SelectExpr(Op0);
        BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
        return Result;
      }

      switch (N.getValueType()) {
      default: assert(0 && "Cannot add this type!");
      case MVT::i8:  Opc = X86::ADD8ri;  break;
      case MVT::i16: Opc = X86::ADD16ri; break;
      case MVT::i32: Opc = X86::ADD32ri; break;
      }
      if (Opc) {
        Tmp1 = SelectExpr(Op0);
        BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
        return Result;
      }
    }

    switch (N.getValueType()) {
    default: assert(0 && "Cannot add this type!");
    case MVT::i8:  Opc = X86::ADD8rr;  break;
    case MVT::i16: Opc = X86::ADD16rr; break;
    case MVT::i32: Opc = X86::ADD32rr; break;
    case MVT::f32: Opc = X86::ADDSSrr; break;
    case MVT::f64: Opc = X86ScalarSSE ? X86::ADDSDrr : X86::FpADD; break;
    }

    if (getRegPressure(Op0) > getRegPressure(Op1)) {
      Tmp1 = SelectExpr(Op0);
      Tmp2 = SelectExpr(Op1);
    } else {
      Tmp2 = SelectExpr(Op1);
      Tmp1 = SelectExpr(Op0);
    }

    BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
    return Result;

  case ISD::FSQRT:
    Tmp1 = SelectExpr(Node->getOperand(0));
    if (X86ScalarSSE) {
      Opc = (N.getValueType() == MVT::f32) ? X86::SQRTSSrr : X86::SQRTSDrr;
      BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
    } else {
      BuildMI(BB, X86::FpSQRT, 1, Result).addReg(Tmp1);
    }
    return Result;

  // FIXME:
  // Once we can spill 16 byte constants into the constant pool, we can
  // implement SSE equivalents of FABS and FCHS.
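  // (Sketch, not yet implemented: FABS would be an ANDPS/ANDPD with a
  // constant that clears the sign bit, e.g. 0x7FFFFFFF for each f32 lane, and
  // FCHS an XORPS/XORPD with the matching sign-bit mask such as 0x80000000.)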
  case ISD::FABS:
  case ISD::FNEG:
  case ISD::FSIN:
  case ISD::FCOS:
    assert(N.getValueType()==MVT::f64 && "Illegal type for this operation");
    Tmp1 = SelectExpr(Node->getOperand(0));
    switch (N.getOpcode()) {
    default: assert(0 && "Unreachable!");
    case ISD::FABS: BuildMI(BB, X86::FpABS, 1, Result).addReg(Tmp1); break;
    case ISD::FNEG: BuildMI(BB, X86::FpCHS, 1, Result).addReg(Tmp1); break;
    case ISD::FSIN: BuildMI(BB, X86::FpSIN, 1, Result).addReg(Tmp1); break;
    case ISD::FCOS: BuildMI(BB, X86::FpCOS, 1, Result).addReg(Tmp1); break;
    }
    return Result;

  case ISD::MULHU:
    switch (N.getValueType()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:  Tmp2 = X86::MUL8r;  break;
    case MVT::i16: Tmp2 = X86::MUL16r; break;
    case MVT::i32: Tmp2 = X86::MUL32r; break;
    }
    // FALL THROUGH
  case ISD::MULHS: {
    unsigned MovOpc, LowReg, HiReg;
    switch (N.getValueType()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:
      MovOpc = X86::MOV8rr;
      LowReg = X86::AL;
      HiReg  = X86::AH;
      Opc    = X86::IMUL8r;
      break;
    case MVT::i16:
      MovOpc = X86::MOV16rr;
      LowReg = X86::AX;
      HiReg  = X86::DX;
      Opc    = X86::IMUL16r;
      break;
    case MVT::i32:
      MovOpc = X86::MOV32rr;
      LowReg = X86::EAX;
      HiReg  = X86::EDX;
      Opc    = X86::IMUL32r;
      break;
    }

    if (Node->getOpcode() != ISD::MULHS)
      Opc = Tmp2;  // Get the MULHU opcode.

    Op0 = Node->getOperand(0);
    Op1 = Node->getOperand(1);
    if (getRegPressure(Op0) > getRegPressure(Op1)) {
      Tmp1 = SelectExpr(Op0);
      Tmp2 = SelectExpr(Op1);
    } else {
      Tmp2 = SelectExpr(Op1);
      Tmp1 = SelectExpr(Op0);
    }

    // FIXME: Implement folding of loads into the memory operands here!
    BuildMI(BB, MovOpc, 1, LowReg).addReg(Tmp1);
    BuildMI(BB, Opc, 1).addReg(Tmp2);
    BuildMI(BB, MovOpc, 1, Result).addReg(HiReg);
    return Result;
  }

  case ISD::SUB:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    static const unsigned SUBTab[] = {
      X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
      X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FpSUB32m, X86::FpSUB64m,
      X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB   , X86::FpSUB,
    };
    static const unsigned SSE_SUBTab[] = {
      X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
      X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::SUBSSrm, X86::SUBSDrm,
      X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::SUBSSrr, X86::SUBSDrr,
    };
    static const unsigned MULTab[] = {
      0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
      0, X86::IMUL16rm , X86::IMUL32rm, X86::FpMUL32m, X86::FpMUL64m,
      0, X86::IMUL16rr , X86::IMUL32rr, X86::FpMUL   , X86::FpMUL,
    };
    static const unsigned SSE_MULTab[] = {
      0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
      0, X86::IMUL16rm , X86::IMUL32rm, X86::MULSSrm, X86::MULSDrm,
      0, X86::IMUL16rr , X86::IMUL32rr, X86::MULSSrr, X86::MULSDrr,
    };
    static const unsigned ANDTab[] = {
      X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, 0,
      X86::AND8rm, X86::AND16rm, X86::AND32rm, 0, 0,
      X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, 0,
    };
    static const unsigned ORTab[] = {
      X86::OR8ri, X86::OR16ri, X86::OR32ri, 0, 0,
      X86::OR8rm, X86::OR16rm, X86::OR32rm, 0, 0,
      X86::OR8rr, X86::OR16rr, X86::OR32rr, 0, 0,
    };
    static const unsigned XORTab[] = {
      X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, 0,
      X86::XOR8rm, X86::XOR16rm, X86::XOR32rm, 0, 0,
      X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, 0,
    };
    Op0 = Node->getOperand(0);
    Op1 = Node->getOperand(1);

    if (Node->getOpcode() == ISD::OR && Op0.hasOneUse() && Op1.hasOneUse())
      if (EmitOrOpOp(Op0, Op1, Result)) // Match SHLD, SHRD, and rotates.
        return Result;

    if (Node->getOpcode() == ISD::SUB)
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(0)))
        if (CN->isNullValue()) {   // 0 - N -> neg N
          switch (N.getValueType()) {
          default: assert(0 && "Cannot sub this type!");
          case MVT::i1:
          case MVT::i8:  Opc = X86::NEG8r;  break;
          case MVT::i16: Opc = X86::NEG16r; break;
          case MVT::i32: Opc = X86::NEG32r; break;
          }
          Tmp1 = SelectExpr(N.getOperand(1));
          BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
          return Result;
        }

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
      if (CN->isAllOnesValue() && Node->getOpcode() == ISD::XOR) {
        Opc = 0;
        switch (N.getValueType()) {
        default: assert(0 && "Cannot add this type!");
        case MVT::i1:  break;  // Not supported, don't invert upper bits!
        case MVT::i8:  Opc = X86::NOT8r;  break;
        case MVT::i16: Opc = X86::NOT16r; break;
        case MVT::i32: Opc = X86::NOT32r; break;
        }
        if (Opc) {
          Tmp1 = SelectExpr(Op0);
          BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
          return Result;
        }
      }
      // Fold common multiplies into LEA instructions.
      if (Node->getOpcode() == ISD::MUL && N.getValueType() == MVT::i32) {
        switch ((int)CN->getValue()) {
        default: break;
        case 3:
        case 5:
        case 9:
          // Remove N from exprmap so SelectAddress doesn't get confused.
          ExprMap.erase(N);
          X86AddressMode AM;
          SelectAddress(N, AM);
          // Restore it to the map.
          ExprMap[N] = Result;
          addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
          return Result;
        }
      }
      switch (N.getValueType()) {
      default: assert(0 && "Cannot xor this type!");
      case MVT::i1:
      case MVT::i8:  Opc = 0; break;
      case MVT::i16: Opc = 1; break;
      case MVT::i32: Opc = 2; break;
      }
      switch (Node->getOpcode()) {
      default: assert(0 && "Unreachable!");
      case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
      case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
      case ISD::AND: Opc = ANDTab[Opc]; break;
      case ISD::OR:  Opc =  ORTab[Opc]; break;
      case ISD::XOR: Opc = XORTab[Opc]; break;
      }
      if (Opc) {  // Can't fold MUL:i8 R, imm
        Tmp1 = SelectExpr(Op0);
        BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
        return Result;
      }
    }
    if (isFoldableLoad(Op0, Op1, true))
      if (Node->getOpcode() != ISD::SUB && Node->getOpcode() != ISD::FSUB) {
        std::swap(Op0, Op1);
        goto FoldOps;
      } else {
        // For FP, emit 'reverse' subtract, with a memory operand.
        if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
          if (Op0.getOpcode() == ISD::EXTLOAD)
            Opc = X86::FpSUBR32m;
          else
            Opc = X86::FpSUBR64m;

          X86AddressMode AM;
          EmitFoldedLoad(Op0, AM);
          Tmp1 = SelectExpr(Op1);
          addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
          return Result;
        }
      }
    if (isFoldableLoad(Op1, Op0, true)) {
    FoldOps:
      switch (N.getValueType()) {
      default: assert(0 && "Cannot operate on this type!");
      case MVT::i1:
      case MVT::i8:  Opc = 5; break;
      case MVT::i16: Opc = 6; break;
      case MVT::i32: Opc = 7; break;
      case MVT::f32: Opc = 8; break;
      case MVT::f64:
        // For F64, handle promoted load operations (from F32) as well!
        assert((!X86ScalarSSE || Op1.getOpcode() == ISD::LOAD) &&
               "SSE load should have been promoted");
        Opc = Op1.getOpcode() == ISD::LOAD ? 9 : 8;
        break;
      }
      switch (Node->getOpcode()) {
      default: assert(0 && "Unreachable!");
      case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
      case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
      case ISD::AND: Opc = ANDTab[Opc]; break;
      case ISD::OR:  Opc =  ORTab[Opc]; break;
      case ISD::XOR: Opc = XORTab[Opc]; break;
      }

      X86AddressMode AM;
      EmitFoldedLoad(Op1, AM);
      Tmp1 = SelectExpr(Op0);
      if (Opc) {
        addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
      } else {
        assert(Node->getOpcode() == ISD::MUL &&
               N.getValueType() == MVT::i8 && "Unexpected situation!");
        // Must use the MUL instruction, which forces use of AL.
        BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
        addFullAddress(BuildMI(BB, X86::MUL8m, 1), AM);
        BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
      }
      return Result;
    }
    if (getRegPressure(Op0) > getRegPressure(Op1)) {
      Tmp1 = SelectExpr(Op0);
      Tmp2 = SelectExpr(Op1);
    } else {
      Tmp2 = SelectExpr(Op1);
      Tmp1 = SelectExpr(Op0);
    }

    switch (N.getValueType()) {
    default: assert(0 && "Cannot add this type!");
    case MVT::i1:
    case MVT::i8:  Opc = 10; break;
    case MVT::i16: Opc = 11; break;
    case MVT::i32: Opc = 12; break;
    case MVT::f32: Opc = 13; break;
    case MVT::f64: Opc = 14; break;
    }
    switch (Node->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case ISD::FSUB:
    case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
    case ISD::FMUL:
    case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
    case ISD::AND: Opc = ANDTab[Opc]; break;
    case ISD::OR:  Opc =  ORTab[Opc]; break;
    case ISD::XOR: Opc = XORTab[Opc]; break;
    }

    if (Opc) {
      BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
    } else {
      assert(Node->getOpcode() == ISD::MUL &&
             N.getValueType() == MVT::i8 && "Unexpected situation!");
      // Must use the MUL instruction, which forces use of AL.
      BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
      BuildMI(BB, X86::MUL8r, 1).addReg(Tmp2);
      BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
    }
    return Result;
  }
  case ISD::ADD_PARTS:
  case ISD::SUB_PARTS: {
    assert(N.getNumOperands() == 4 && N.getValueType() == MVT::i32 &&
           "Not an i64 add/sub!");
    // Emit all of the operands.
    std::vector<unsigned> InVals;
    for (unsigned i = 0, e = N.getNumOperands(); i != e; ++i)
      InVals.push_back(SelectExpr(N.getOperand(i)));
    if (N.getOpcode() == ISD::ADD_PARTS) {
      BuildMI(BB, X86::ADD32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
      BuildMI(BB, X86::ADC32rr, 2, Result+1).addReg(InVals[1]).addReg(InVals[3]);
    } else {
      BuildMI(BB, X86::SUB32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
      BuildMI(BB, X86::SBB32rr, 2, Result+1).addReg(InVals[1]).addReg(InVals[3]);
    }
    return Result+N.ResNo;
  }
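  // Note: the i64 add/sub above uses the classic widening idiom: the plain
  // ADD/SUB on the low words sets the carry flag, and ADC/SBB then folds
  // that carry (or borrow) into the high-word operation.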
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS: {
    assert(N.getNumOperands() == 3 && N.getValueType() == MVT::i32 &&
           "Not an i64 shift!");
    unsigned ShiftOpLo = SelectExpr(N.getOperand(0));
    unsigned ShiftOpHi = SelectExpr(N.getOperand(1));
    unsigned TmpReg = MakeReg(MVT::i32);
    if (N.getOpcode() == ISD::SRA_PARTS) {
      // If this is a SHR of a Long, then we need to do funny sign extension
      // stuff.  TmpReg gets the value to use as the high-part if we are
      // shifting more than 32 bits.
      BuildMI(BB, X86::SAR32ri, 2, TmpReg).addReg(ShiftOpHi).addImm(31);
    } else {
      // Other shifts use a fixed zero value if the shift is more than 32 bits.
      BuildMI(BB, X86::MOV32ri, 1, TmpReg).addImm(0);
    }

    // Initialize CL with the shift amount.
    unsigned ShiftAmountReg = SelectExpr(N.getOperand(2));
    BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
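    // What follows is the standard double-word shift idiom: SHLD/SHRD form
    // the "shift by less than 32" result, a plain SHL/SHR/SAR computes what
    // the other half would receive, and since x86 masks shift counts to 5
    // bits, TEST CL,32 plus CMOVNE select the right values when the amount
    // is in the 32..63 range.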
    unsigned TmpReg2 = MakeReg(MVT::i32);
    unsigned TmpReg3 = MakeReg(MVT::i32);
    if (N.getOpcode() == ISD::SHL_PARTS) {
      // TmpReg2 = shld inHi, inLo
      BuildMI(BB, X86::SHLD32rrCL, 2, TmpReg2).addReg(ShiftOpHi)
        .addReg(ShiftOpLo);
      // TmpReg3 = shl  inLo, CL
      BuildMI(BB, X86::SHL32rCL, 1, TmpReg3).addReg(ShiftOpLo);

      // Set the flags to indicate whether the shift was by more than 32 bits.
      BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);

      // DestHi = (>32) ? TmpReg3 : TmpReg2;
      BuildMI(BB, X86::CMOVNE32rr, 2,
              Result+1).addReg(TmpReg2).addReg(TmpReg3);
      // DestLo = (>32) ? TmpReg : TmpReg3;
      BuildMI(BB, X86::CMOVNE32rr, 2,
              Result).addReg(TmpReg3).addReg(TmpReg);
    } else {
      // TmpReg2 = shrd inLo, inHi
      BuildMI(BB, X86::SHRD32rrCL, 2, TmpReg2).addReg(ShiftOpLo)
        .addReg(ShiftOpHi);
      // TmpReg3 = s[ah]r  inHi, CL
      BuildMI(BB, N.getOpcode() == ISD::SRA_PARTS ? X86::SAR32rCL
                                                  : X86::SHR32rCL, 1, TmpReg3)
        .addReg(ShiftOpHi);

      // Set the flags to indicate whether the shift was by more than 32 bits.
      BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);

      // DestLo = (>32) ? TmpReg3 : TmpReg2;
      BuildMI(BB, X86::CMOVNE32rr, 2,
              Result).addReg(TmpReg2).addReg(TmpReg3);

      // DestHi = (>32) ? TmpReg : TmpReg3;
      BuildMI(BB, X86::CMOVNE32rr, 2,
              Result+1).addReg(TmpReg3).addReg(TmpReg);
    }
    return Result+N.ResNo;
  }
  case ISD::SELECT:
    EmitSelectCC(N.getOperand(0), N.getOperand(1), N.getOperand(2),
                 N.getValueType(), Result);
    return Result;

  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::FDIV:
  case ISD::SREM:
  case ISD::UREM: {
    assert((N.getOpcode() != ISD::SREM || MVT::isInteger(N.getValueType())) &&
           "We don't support this operator!");
    if (N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::FDIV) {
      // We can fold loads into FpDIVs, but not really into any others.
      if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
        // Check for reversed and unreversed DIV.
        if (isFoldableLoad(N.getOperand(0), N.getOperand(1), true)) {
          if (N.getOperand(0).getOpcode() == ISD::EXTLOAD)
            Opc = X86::FpDIVR32m;
          else
            Opc = X86::FpDIVR64m;
          X86ISelAddressMode AM;
          EmitFoldedLoad(N.getOperand(0), AM);
          Tmp1 = SelectExpr(N.getOperand(1));
          addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
          return Result;
        } else if (isFoldableLoad(N.getOperand(1), N.getOperand(0), true) &&
                   N.getOperand(1).getOpcode() == ISD::LOAD) {
          if (N.getOperand(1).getOpcode() == ISD::EXTLOAD)
            Opc = X86::FpDIV32m;
          else
            Opc = X86::FpDIV64m;
          X86ISelAddressMode AM;
          EmitFoldedLoad(N.getOperand(1), AM);
          Tmp1 = SelectExpr(N.getOperand(0));
          addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
          return Result;
        }
      }
    }
    if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
      Tmp1 = SelectExpr(N.getOperand(0));
      Tmp2 = SelectExpr(N.getOperand(1));
    } else {
      Tmp2 = SelectExpr(N.getOperand(1));
      Tmp1 = SelectExpr(N.getOperand(0));
    }

    bool isSigned = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::SREM;
    bool isDiv    = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::UDIV;
    unsigned LoReg, HiReg, DivOpcode, MovOpcode, ClrOpcode, SExtOpcode;
    switch (N.getValueType()) {
    default: assert(0 && "Cannot sdiv this type!");
    case MVT::i8:
      DivOpcode = isSigned ? X86::IDIV8r : X86::DIV8r;
      LoReg = X86::AL;
      HiReg = X86::AH;
      MovOpcode = X86::MOV8rr;
      ClrOpcode = X86::MOV8ri;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      DivOpcode = isSigned ? X86::IDIV16r : X86::DIV16r;
      LoReg = X86::AX;
      HiReg = X86::DX;
      MovOpcode = X86::MOV16rr;
      ClrOpcode = X86::MOV16ri;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      DivOpcode = isSigned ? X86::IDIV32r : X86::DIV32r;
      LoReg = X86::EAX;
      HiReg = X86::EDX;
      MovOpcode = X86::MOV32rr;
      ClrOpcode = X86::MOV32ri;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::f32:
      BuildMI(BB, X86::DIVSSrr, 2, Result).addReg(Tmp1).addReg(Tmp2);
      return Result;
    case MVT::f64:
      Opc = X86ScalarSSE ? X86::DIVSDrr : X86::FpDIV;
      BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
      return Result;
    }
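    // Integer DIV/IDIV take a double-width dividend in {AH:AL, DX:AX,
    // EDX:EAX} and leave the quotient in the low half and the remainder in
    // the high half.  That is why the high half must be prepared below
    // (CBW/CWD/CDQ for signed, a zeroing move for unsigned), and why the
    // final copy just picks LoReg or HiReg.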
    // Set up the low part.
    BuildMI(BB, MovOpcode, 1, LoReg).addReg(Tmp1);

    if (isSigned) {
      // Sign extend the low part into the high part.
      BuildMI(BB, SExtOpcode, 0);
    } else {
      // Zero out the high part, effectively zero extending the input.
      BuildMI(BB, ClrOpcode, 1, HiReg).addImm(0);
    }

    // Emit the DIV/IDIV instruction.
    BuildMI(BB, DivOpcode, 1).addReg(Tmp2);

    // Get the result of the divide or rem.
    BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
    return Result;
  }
  case ISD::SHL:
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      if (CN->getValue() == 1) {   // X = SHL Y, 1 -> X = ADD Y, Y
        switch (N.getValueType()) {
        default: assert(0 && "Cannot shift this type!");
        case MVT::i8:  Opc = X86::ADD8rr;  break;
        case MVT::i16: Opc = X86::ADD16rr; break;
        case MVT::i32: Opc = X86::ADD32rr; break;
        }
        Tmp1 = SelectExpr(N.getOperand(0));
        BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp1);
        return Result;
      }

      switch (N.getValueType()) {
      default: assert(0 && "Cannot shift this type!");
      case MVT::i8:  Opc = X86::SHL8ri;  break;
      case MVT::i16: Opc = X86::SHL16ri; break;
      case MVT::i32: Opc = X86::SHL32ri; break;
      }
      Tmp1 = SelectExpr(N.getOperand(0));
      BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
      return Result;
    }

    if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
      Tmp1 = SelectExpr(N.getOperand(0));
      Tmp2 = SelectExpr(N.getOperand(1));
    } else {
      Tmp2 = SelectExpr(N.getOperand(1));
      Tmp1 = SelectExpr(N.getOperand(0));
    }

    switch (N.getValueType()) {
    default: assert(0 && "Cannot shift this type!");
    case MVT::i8 : Opc = X86::SHL8rCL;  break;
    case MVT::i16: Opc = X86::SHL16rCL; break;
    case MVT::i32: Opc = X86::SHL32rCL; break;
    }
    BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
    BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
    return Result;
  case ISD::SRL:
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      switch (N.getValueType()) {
      default: assert(0 && "Cannot shift this type!");
      case MVT::i8:  Opc = X86::SHR8ri;  break;
      case MVT::i16: Opc = X86::SHR16ri; break;
      case MVT::i32: Opc = X86::SHR32ri; break;
      }
      Tmp1 = SelectExpr(N.getOperand(0));
      BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
      return Result;
    }

    if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
      Tmp1 = SelectExpr(N.getOperand(0));
      Tmp2 = SelectExpr(N.getOperand(1));
    } else {
      Tmp2 = SelectExpr(N.getOperand(1));
      Tmp1 = SelectExpr(N.getOperand(0));
    }

    switch (N.getValueType()) {
    default: assert(0 && "Cannot shift this type!");
    case MVT::i8 : Opc = X86::SHR8rCL;  break;
    case MVT::i16: Opc = X86::SHR16rCL; break;
    case MVT::i32: Opc = X86::SHR32rCL; break;
    }
    BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
    BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
    return Result;
  case ISD::SRA:
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      switch (N.getValueType()) {
      default: assert(0 && "Cannot shift this type!");
      case MVT::i8:  Opc = X86::SAR8ri;  break;
      case MVT::i16: Opc = X86::SAR16ri; break;
      case MVT::i32: Opc = X86::SAR32ri; break;
      }
      Tmp1 = SelectExpr(N.getOperand(0));
      BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
      return Result;
    }

    if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
      Tmp1 = SelectExpr(N.getOperand(0));
      Tmp2 = SelectExpr(N.getOperand(1));
    } else {
      Tmp2 = SelectExpr(N.getOperand(1));
      Tmp1 = SelectExpr(N.getOperand(0));
    }

    switch (N.getValueType()) {
    default: assert(0 && "Cannot shift this type!");
    case MVT::i8 : Opc = X86::SAR8rCL;  break;
    case MVT::i16: Opc = X86::SAR16rCL; break;
    case MVT::i32: Opc = X86::SAR32rCL; break;
    }
    BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
    BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
    return Result;
  case ISD::SETCC:
    EmitCMP(N.getOperand(0), N.getOperand(1), Node->hasOneUse());
    EmitSetCC(BB, Result, cast<CondCodeSDNode>(N.getOperand(2))->get(),
              MVT::isFloatingPoint(N.getOperand(1).getValueType()));
    return Result;

  case ISD::LOAD:
    // Make sure we generate both values.
    if (Result != 1) {  // Generate the token
      if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
        assert(0 && "Load already emitted!?");
    } else
      Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());

    switch (Node->getValueType(0)) {
    default: assert(0 && "Cannot load this type!");
    case MVT::i1:
    case MVT::i8:  Opc = X86::MOV8rm;  break;
    case MVT::i16: Opc = X86::MOV16rm; break;
    case MVT::i32: Opc = X86::MOV32rm; break;
    case MVT::f32: Opc = X86::MOVSSrm; break;
    case MVT::f64:
      if (X86ScalarSSE) {
        Opc = X86::MOVSDrm;
      } else {
        Opc = X86::FpLD64m;
        ContainsFPCode = true;
      }
      break;
    }

    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))) {
      unsigned CPIdx = BB->getParent()->getConstantPool()->
          getConstantPoolIndex(CP->get());
      Select(N.getOperand(0));
      addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CPIdx);
    } else {
      X86ISelAddressMode AM;

      SDOperand Chain   = N.getOperand(0);
      SDOperand Address = N.getOperand(1);
      if (getRegPressure(Chain) > getRegPressure(Address)) {
        Select(Chain);
        SelectAddress(Address, AM);
      } else {
        SelectAddress(Address, AM);
        Select(Chain);
      }

      addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
    }
    return Result;
  case X86ISD::FILD64m: {
    // Make sure we generate both values.
    assert(Result != 1 && N.getValueType() == MVT::f64);
    if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
      assert(0 && "Load already emitted!?");

    X86ISelAddressMode AM;

    SDOperand Chain   = N.getOperand(0);
    SDOperand Address = N.getOperand(1);
    if (getRegPressure(Chain) > getRegPressure(Address)) {
      Select(Chain);
      SelectAddress(Address, AM);
    } else {
      SelectAddress(Address, AM);
      Select(Chain);
    }

    addFullAddress(BuildMI(BB, X86::FpILD64m, 4, Result), AM);
    ContainsFPCode = true;
    return Result;
  }
  case ISD::EXTLOAD:          // Arbitrarily codegen extloads as MOVZX*
  case ISD::ZEXTLOAD: {
    // Make sure we generate both values.
    if (Result != 1)
      ExprMap[N.getValue(1)] = 1;   // Generate the token
    else
      Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());

    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1)))
      if (Node->getValueType(0) == MVT::f64) {
        assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::f32 &&
               "Bad EXTLOAD!");
        unsigned CPIdx = BB->getParent()->getConstantPool()->
            getConstantPoolIndex(CP->get());
        Select(N.getOperand(0));
        addConstantPoolReference(BuildMI(BB, X86::FpLD32m, 4, Result), CPIdx);
        ContainsFPCode = true;
        return Result;
      }

    X86ISelAddressMode AM;
    if (getRegPressure(Node->getOperand(0)) >
        getRegPressure(Node->getOperand(1))) {
      Select(Node->getOperand(0)); // chain
      SelectAddress(Node->getOperand(1), AM);
    } else {
      SelectAddress(Node->getOperand(1), AM);
      Select(Node->getOperand(0)); // chain
    }

    switch (Node->getValueType(0)) {
    default: assert(0 && "Unknown type to extend to.");
    case MVT::f64:
      assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::f32 &&
             "Bad EXTLOAD!");
      addFullAddress(BuildMI(BB, X86::FpLD32m, 5, Result), AM);
      ContainsFPCode = true;
      break;
    case MVT::i32:
      switch (cast<VTSDNode>(Node->getOperand(3))->getVT()) {
      default:
        assert(0 && "Bad zero extend!");
      case MVT::i1:
      case MVT::i8:
        addFullAddress(BuildMI(BB, X86::MOVZX32rm8, 5, Result), AM);
        break;
      case MVT::i16:
        addFullAddress(BuildMI(BB, X86::MOVZX32rm16, 5, Result), AM);
        break;
      }
      break;
    case MVT::i16:
      assert(cast<VTSDNode>(Node->getOperand(3))->getVT() <= MVT::i8 &&
             "Bad zero extend!");
      addFullAddress(BuildMI(BB, X86::MOVZX16rm8, 5, Result), AM);
      break;
    case MVT::i8:
      assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::i1 &&
             "Bad zero extend!");
      addFullAddress(BuildMI(BB, X86::MOV8rm, 5, Result), AM);
      break;
    }
    return Result;
  }
  case ISD::SEXTLOAD: {
    // Make sure we generate both values.
    if (Result != 1)
      ExprMap[N.getValue(1)] = 1;   // Generate the token
    else
      Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());

    X86ISelAddressMode AM;
    if (getRegPressure(Node->getOperand(0)) >
        getRegPressure(Node->getOperand(1))) {
      Select(Node->getOperand(0)); // chain
      SelectAddress(Node->getOperand(1), AM);
    } else {
      SelectAddress(Node->getOperand(1), AM);
      Select(Node->getOperand(0)); // chain
    }

    switch (Node->getValueType(0)) {
    case MVT::i8: assert(0 && "Cannot sign extend from bool!");
    default: assert(0 && "Unknown type to sign extend to.");
    case MVT::i32:
      switch (cast<VTSDNode>(Node->getOperand(3))->getVT()) {
      default: assert(0 && "Bad sign extend!");
      case MVT::i1: assert(0 && "Cannot sign extend from bool!");
      case MVT::i8:
        addFullAddress(BuildMI(BB, X86::MOVSX32rm8, 5, Result), AM);
        break;
      case MVT::i16:
        addFullAddress(BuildMI(BB, X86::MOVSX32rm16, 5, Result), AM);
        break;
      }
      break;
    case MVT::i16:
      assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::i8 &&
             "Cannot sign extend from bool!");
      addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM);
      break;
    }
    return Result;
  }
  case ISD::DYNAMIC_STACKALLOC:
    // Generate both result values.
    if (Result != 1)
      ExprMap[N.getValue(1)] = 1;   // Generate the token
    else
      Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());

    // FIXME: We are currently ignoring the requested alignment for handling
    // greater than the stack alignment.  This will need to be revisited at
    // some point.  Align = N.getOperand(2);
    if (!isa<ConstantSDNode>(N.getOperand(2)) ||
        cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
      std::cerr << "Cannot allocate stack object with greater alignment than"
                << " the stack alignment yet!";
      abort();
    }

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      Select(N.getOperand(0));
      BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
        .addImm(CN->getValue());
    } else {
      if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
        Select(N.getOperand(0));
        Tmp1 = SelectExpr(N.getOperand(1));
      } else {
        Tmp1 = SelectExpr(N.getOperand(1));
        Select(N.getOperand(0));
      }

      // Subtract size from stack pointer, thereby allocating some space.
      BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(Tmp1);
    }
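    // (The x86 stack grows down, so "allocate" is just a subtract from ESP;
    // after the subtract, ESP itself points at the newly reserved space.)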
    // Put a pointer to the space into the result register, by copying the
    // stack pointer.
    BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::ESP);
    return Result;
  case X86ISD::TAILCALL:
  case X86ISD::CALL: {
    // The chain for this call is now lowered.
    ExprMap.insert(std::make_pair(N.getValue(0), 1));

    bool isDirect = isa<GlobalAddressSDNode>(N.getOperand(1)) ||
                    isa<ExternalSymbolSDNode>(N.getOperand(1));
    unsigned Callee = 0;
    if (isDirect) {
      Select(N.getOperand(0));
    } else {
      if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
        Select(N.getOperand(0));
        Callee = SelectExpr(N.getOperand(1));
      } else {
        Callee = SelectExpr(N.getOperand(1));
        Select(N.getOperand(0));
      }
    }

    // If this call has values to pass in registers, do so now.
    if (Node->getNumOperands() > 4) {
      // The first value is passed in (a part of) EAX, the second in EDX.
      unsigned RegOp1 = SelectExpr(N.getOperand(4));
      unsigned RegOp2 =
        Node->getNumOperands() > 5 ? SelectExpr(N.getOperand(5)) : 0;

      switch (N.getOperand(4).getValueType()) {
      default: assert(0 && "Bad thing to pass in regs");
      case MVT::i1:
      case MVT::i8:  BuildMI(BB, X86::MOV8rr , 1,X86::AL).addReg(RegOp1); break;
      case MVT::i16: BuildMI(BB, X86::MOV16rr, 1,X86::AX).addReg(RegOp1); break;
      case MVT::i32: BuildMI(BB, X86::MOV32rr, 1,X86::EAX).addReg(RegOp1);break;
      }
      if (RegOp2) {
        switch (N.getOperand(5).getValueType()) {
        default: assert(0 && "Bad thing to pass in regs");
        case MVT::i1:
        case MVT::i8:
          BuildMI(BB, X86::MOV8rr , 1, X86::DL).addReg(RegOp2);
          break;
        case MVT::i16:
          BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
          break;
        case MVT::i32:
          BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
          break;
        }
      }
    }
    if (GlobalAddressSDNode *GASD =
           dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
      BuildMI(BB, X86::CALLpcrel32, 1).addGlobalAddress(GASD->getGlobal(),true);
    } else if (ExternalSymbolSDNode *ESSDN =
                  dyn_cast<ExternalSymbolSDNode>(N.getOperand(1))) {
      BuildMI(BB, X86::CALLpcrel32,
              1).addExternalSymbol(ESSDN->getSymbol(), true);
    } else {
      if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
        Select(N.getOperand(0));
        Tmp1 = SelectExpr(N.getOperand(1));
      } else {
        Tmp1 = SelectExpr(N.getOperand(1));
        Select(N.getOperand(0));
      }

      BuildMI(BB, X86::CALL32r, 1).addReg(Tmp1);
    }

    // Get caller stack amount and amount the callee added to the stack pointer.
    Tmp1 = cast<ConstantSDNode>(N.getOperand(2))->getValue();
    Tmp2 = cast<ConstantSDNode>(N.getOperand(3))->getValue();
    BuildMI(BB, X86::ADJCALLSTACKUP, 2).addImm(Tmp1).addImm(Tmp2);

    if (Node->getNumValues() != 1)
      switch (Node->getValueType(1)) {
      default: assert(0 && "Unknown value type for call result!");
      case MVT::Other: return 1;
      case MVT::i1:
      case MVT::i8:
        BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
        break;
      case MVT::i16:
        BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
        break;
      case MVT::i32:
        BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
        if (Node->getNumValues() == 3 && Node->getValueType(2) == MVT::i32)
          BuildMI(BB, X86::MOV32rr, 1, Result+1).addReg(X86::EDX);
        break;
      case MVT::f64:     // Floating-point return values live in %ST(0)
        if (X86ScalarSSE) {
          ContainsFPCode = true;
          BuildMI(BB, X86::FpGETRESULT, 1, X86::FP0);

          unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
          MachineFunction *F = BB->getParent();
          int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
          addFrameReference(BuildMI(BB, X86::FpST64m, 5), FrameIdx).addReg(X86::FP0);
          addFrameReference(BuildMI(BB, X86::MOVSDrm, 4, Result), FrameIdx);
        } else {
          ContainsFPCode = true;
          BuildMI(BB, X86::FpGETRESULT, 1, Result);
        }
        break;
      }
    return Result+N.ResNo-1;
  }
  case ISD::READPORT:
    // First, determine that the size of the operand falls within the
    // acceptable range for this architecture.
    if (Node->getOperand(1).getValueType() != MVT::i16) {
      std::cerr << "llvm.readport: Address size is not 16 bits\n";
      exit(1);
    }

    // Make sure we generate both values.
    if (Result != 1) {  // Generate the token
      if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
        assert(0 && "readport already emitted!?");
    } else
      Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());

    Select(Node->getOperand(0));  // Select the chain.

    // If the port is a single-byte constant, use the immediate form.
    if (ConstantSDNode *Port = dyn_cast<ConstantSDNode>(Node->getOperand(1)))
      if ((Port->getValue() & 255) == Port->getValue()) {
        switch (Node->getValueType(0)) {
        case MVT::i8:
          BuildMI(BB, X86::IN8ri, 1).addImm(Port->getValue());
          BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
          return Result;
        case MVT::i16:
          BuildMI(BB, X86::IN16ri, 1).addImm(Port->getValue());
          BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
          return Result;
        case MVT::i32:
          BuildMI(BB, X86::IN32ri, 1).addImm(Port->getValue());
          BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
          return Result;
        default: break;
        }
      }
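    // Note: the immediate form of IN only encodes an 8-bit port number,
    // which is why it is used just for ports 0-255; larger port numbers go
    // through the DX-register form below.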
    // Now, move the I/O port address into the DX register and use the IN
    // instruction to get the input data.
    Tmp1 = SelectExpr(Node->getOperand(1));
    BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Tmp1);
    switch (Node->getValueType(0)) {
    case MVT::i8:
      BuildMI(BB, X86::IN8rr, 0);
      BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
      return Result;
    case MVT::i16:
      BuildMI(BB, X86::IN16rr, 0);
      BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
      return Result;
    case MVT::i32:
      BuildMI(BB, X86::IN32rr, 0);
      BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
      return Result;
    default:
      std::cerr << "Cannot do input on this data type";
      exit(1);
    }
  }

  return 0;  // Not reached.
}

/// TryToFoldLoadOpStore - Given a store node, try to fold together a
/// load/op/store instruction.  If successful return true.
bool ISel::TryToFoldLoadOpStore(SDNode *Node) {
  assert(Node->getOpcode() == ISD::STORE && "Can only do this for stores!");
  SDOperand Chain = Node->getOperand(0);
  SDOperand StVal = Node->getOperand(1);
  SDOperand StPtr = Node->getOperand(2);
  // The chain has to be a load, the stored value must be an integer binary
  // operation with one use.
  if (!StVal.Val->hasOneUse() || StVal.Val->getNumOperands() != 2 ||
      MVT::isFloatingPoint(StVal.getValueType()))
    return false;
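  // A typical pattern that folds here is
  //   t = load [P];  t2 = t + 4;  store t2, [P]
  // which can be emitted as a single "add dword ptr [P], 4".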
  // Token chain must either be a factor node or the load to fold.
  if (Chain.getOpcode() != ISD::LOAD && Chain.getOpcode() != ISD::TokenFactor)
    return false;

  SDOperand TheLoad;

  // Check to see if there is a load from the same pointer that we're storing
  // to in either operand of the binop.
  if (StVal.getOperand(0).getOpcode() == ISD::LOAD &&
      StVal.getOperand(0).getOperand(1) == StPtr)
    TheLoad = StVal.getOperand(0);
  else if (StVal.getOperand(1).getOpcode() == ISD::LOAD &&
           StVal.getOperand(1).getOperand(1) == StPtr)
    TheLoad = StVal.getOperand(1);
  else
    return false;  // No matching load operand.

  // We can only fold the load if there are no intervening side-effecting
  // operations.  This means that the store uses the load as its token chain,
  // or there are only token factor nodes in between the store and load.
  if (Chain != TheLoad.getValue(1)) {
    // Okay, the other option is that we have a store referring to (possibly
    // nested) token factor nodes.  For now, just try peeking through one level
    // of token factors to see if this is the case.
    bool ChainOk = false;
    if (Chain.getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
        if (Chain.getOperand(i) == TheLoad.getValue(1)) {
          ChainOk = true;
          break;
        }
    }

    if (!ChainOk) return false;
  }

  if (TheLoad.getOperand(1) != StPtr)
    return false;

  // Make sure that one of the operands of the binop is the load, and that the
  // load folds into the binop.
  if (((StVal.getOperand(0) != TheLoad ||
        !isFoldableLoad(TheLoad, StVal.getOperand(1))) &&
       (StVal.getOperand(1) != TheLoad ||
        !isFoldableLoad(TheLoad, StVal.getOperand(0)))))
    return false;
  // Finally, check to see if this is one of the ops we can handle!
  static const unsigned ADDTAB[] = {
    X86::ADD8mi, X86::ADD16mi, X86::ADD32mi,
    X86::ADD8mr, X86::ADD16mr, X86::ADD32mr,
  };
  static const unsigned SUBTAB[] = {
    X86::SUB8mi, X86::SUB16mi, X86::SUB32mi,
    X86::SUB8mr, X86::SUB16mr, X86::SUB32mr,
  };
  static const unsigned ANDTAB[] = {
    X86::AND8mi, X86::AND16mi, X86::AND32mi,
    X86::AND8mr, X86::AND16mr, X86::AND32mr,
  };
  static const unsigned ORTAB[] = {
    X86::OR8mi,  X86::OR16mi,  X86::OR32mi,
    X86::OR8mr,  X86::OR16mr,  X86::OR32mr,
  };
  static const unsigned XORTAB[] = {
    X86::XOR8mi, X86::XOR16mi, X86::XOR32mi,
    X86::XOR8mr, X86::XOR16mr, X86::XOR32mr,
  };
  static const unsigned SHLTAB[] = {
    X86::SHL8mi, X86::SHL16mi, X86::SHL32mi,
    /*Have to put the reg in CL*/0, 0, 0,
  };
  static const unsigned SARTAB[] = {
    X86::SAR8mi, X86::SAR16mi, X86::SAR32mi,
    /*Have to put the reg in CL*/0, 0, 0,
  };
  static const unsigned SHRTAB[] = {
    X86::SHR8mi, X86::SHR16mi, X86::SHR32mi,
    /*Have to put the reg in CL*/0, 0, 0,
  };

  const unsigned *TabPtr = 0;
  switch (StVal.getOpcode()) {
  default:
    std::cerr << "CANNOT [mem] op= val: ";
    StVal.Val->dump(); std::cerr << "\n";
    abort();

  case ISD::MUL:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM: return false;

  case ISD::ADD: TabPtr = ADDTAB; break;
  case ISD::SUB: TabPtr = SUBTAB; break;
  case ISD::AND: TabPtr = ANDTAB; break;
  case ISD:: OR: TabPtr =  ORTAB; break;
  case ISD::XOR: TabPtr = XORTAB; break;
  case ISD::SHL: TabPtr = SHLTAB; break;
  case ISD::SRA: TabPtr = SARTAB; break;
  case ISD::SRL: TabPtr = SHRTAB; break;
  }
  // Handle: [mem] op= CST
  SDOperand Op0 = StVal.getOperand(0);
  SDOperand Op1 = StVal.getOperand(1);
  unsigned Opc = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
    switch (Op0.getValueType()) { // Use Op0's type because of shifts.
    default: break;
    case MVT::i1:
    case MVT::i8:  Opc = TabPtr[0]; break;
    case MVT::i16: Opc = TabPtr[1]; break;
    case MVT::i32: Opc = TabPtr[2]; break;
    }

    if (Opc) {
      if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
        assert(0 && "Already emitted?");

      X86ISelAddressMode AM;
      if (getRegPressure(TheLoad.getOperand(0)) >
          getRegPressure(TheLoad.getOperand(1))) {
        Select(TheLoad.getOperand(0));
        SelectAddress(TheLoad.getOperand(1), AM);
      } else {
        SelectAddress(TheLoad.getOperand(1), AM);
        Select(TheLoad.getOperand(0));
      }

      if (StVal.getOpcode() == ISD::ADD) {
        if (CN->getValue() == 1) {   // [X] += 1 -> INC [X]
          switch (Op0.getValueType()) {
          default: break;
          case MVT::i8:  Opc = TabPtr[0];
            addFullAddress(BuildMI(BB, X86::INC8m, 4), AM);
            return true;
          case MVT::i16: Opc = TabPtr[1];
            addFullAddress(BuildMI(BB, X86::INC16m, 4), AM);
            return true;
          case MVT::i32: Opc = TabPtr[2];
            addFullAddress(BuildMI(BB, X86::INC32m, 4), AM);
            return true;
          }
        } else if (CN->getValue()+1 == 0) {   // [X] += -1 -> DEC [X]
          switch (Op0.getValueType()) {
          default: break;
          case MVT::i8:  Opc = TabPtr[0];
            addFullAddress(BuildMI(BB, X86::DEC8m, 4), AM);
            return true;
          case MVT::i16: Opc = TabPtr[1];
            addFullAddress(BuildMI(BB, X86::DEC16m, 4), AM);
            return true;
          case MVT::i32: Opc = TabPtr[2];
            addFullAddress(BuildMI(BB, X86::DEC32m, 4), AM);
            return true;
          }
        }
      }

      addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
      return true;
    }
  }
  // If we have [mem] = V op [mem], try to turn it into:
  //   [mem] = [mem] op V.
  if (Op1 == TheLoad &&
      StVal.getOpcode() != ISD::SUB && StVal.getOpcode() != ISD::FSUB &&
      StVal.getOpcode() != ISD::SHL && StVal.getOpcode() != ISD::SRA &&
      StVal.getOpcode() != ISD::SRL)
    std::swap(Op0, Op1);

  if (Op0 != TheLoad) return false;

  switch (Op0.getValueType()) {
  default: return false;
  case MVT::i1:
  case MVT::i8:  Opc = TabPtr[3]; break;
  case MVT::i16: Opc = TabPtr[4]; break;
  case MVT::i32: Opc = TabPtr[5]; break;
  }

  // Table entry doesn't exist?
  if (Opc == 0) return false;

  if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
    assert(0 && "Already emitted?");
  Select(TheLoad.getOperand(0));

  X86ISelAddressMode AM;
  SelectAddress(TheLoad.getOperand(1), AM);
  unsigned Reg = SelectExpr(Op1);
  addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Reg);
  return true;
}
/// If node is a ret(tailcall) node, emit the specified tail call and return
/// true, otherwise return false.
///
/// FIXME: This whole thing should be a post-legalize optimization pass which
/// recognizes and transforms the dag.  We don't want the selection phase doing
/// this stuff!!
bool ISel::EmitPotentialTailCall(SDNode *RetNode) {
  assert(RetNode->getOpcode() == ISD::RET && "Not a return");
  SDOperand Chain = RetNode->getOperand(0);
  // If this is a token factor node where one operand is a call, dig into it.
  SDOperand TokFactor;
  unsigned TokFactorOperand = 0;
  if (Chain.getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getOpcode() == ISD::CALLSEQ_END ||
          Chain.getOperand(i).getOpcode() == X86ISD::TAILCALL) {
        TokFactorOperand = i;
        TokFactor = Chain;
        Chain = Chain.getOperand(i);
        break;
      }
    if (TokFactor.Val == 0) return false;   // No call operand.
  }

  // Skip the CALLSEQ_END node if present.
  if (Chain.getOpcode() == ISD::CALLSEQ_END)
    Chain = Chain.getOperand(0);
  // Is a tailcall the last control operation that occurs before the return?
  if (Chain.getOpcode() != X86ISD::TAILCALL)
    return false;

  // If we return a value, is it the value produced by the call?
  if (RetNode->getNumOperands() > 1) {
    // Not returning the ret val of the call?
    if (Chain.Val->getNumValues() == 1 ||
        RetNode->getOperand(1) != Chain.getValue(1))
      return false;

    if (RetNode->getNumOperands() > 2) {
      if (Chain.Val->getNumValues() == 2 ||
          RetNode->getOperand(2) != Chain.getValue(2))
        return false;
    }
    assert(RetNode->getNumOperands() <= 3);
  }

  // CalleeCallArgAmt - The total number of bytes used for the callee arg area.
  // For FastCC, this will always be > 0.
  unsigned CalleeCallArgAmt =
    cast<ConstantSDNode>(Chain.getOperand(2))->getValue();

  // CalleeCallArgPopAmt - The number of bytes in the call area popped by the
  // callee.  For FastCC this will always be > 0, for CCC this is always 0.
  unsigned CalleeCallArgPopAmt =
    cast<ConstantSDNode>(Chain.getOperand(3))->getValue();

  // There are several cases we can handle here.  First, if the caller and
  // callee are both CCC functions, we can tailcall if the callee takes <= the
  // number of argument bytes that the caller does.
  if (CalleeCallArgPopAmt == 0 &&                 // Callee is C CallingConv?
      X86Lowering.getBytesToPopOnReturn() == 0) { // Caller is C CallingConv?
    // Check to see if caller arg area size >= callee arg area size.
    if (X86Lowering.getBytesCallerReserves() >= CalleeCallArgAmt) {
      //std::cerr << "CCC TAILCALL UNIMP!\n";
      // If TokFactor is non-null, emit all operands.

      //EmitCCCToCCCTailCall(Chain.Val);
      //return true;
    }
    return false;
  }

  // Second, if both are FastCC functions, we can always perform the tail call.
  if (CalleeCallArgPopAmt && X86Lowering.getBytesToPopOnReturn()) {
    // If TokFactor is non-null, emit all operands before the call.
    if (TokFactor.Val) {
      for (unsigned i = 0, e = TokFactor.getNumOperands(); i != e; ++i)
        if (i != TokFactorOperand)
          Select(TokFactor.getOperand(i));
    }

    EmitFastCCToFastCCTailCall(Chain.Val);
    return true;
  }

  // We don't support mixed calls, due to issues with alignment.  We could in
  // theory handle some mixed calls from CCC -> FastCC if the stack is properly
  // aligned (which depends on the number of arguments to the callee).  TODO.
  return false;
}
static SDOperand GetAdjustedArgumentStores(SDOperand Chain, int Offset,
                                           SelectionDAG &DAG) {
  MVT::ValueType StoreVT;
  switch (Chain.getOpcode()) {
  default: assert(0 && "Unexpected node!");
  case ISD::CALLSEQ_START:
    // If we found the start of the call sequence, we're done.  We actually
    // strip off the CALLSEQ_START node, to avoid generating the
    // ADJCALLSTACKDOWN marker for the tail call.
    return Chain.getOperand(0);
  case ISD::TokenFactor: {
    std::vector<SDOperand> Ops;
    Ops.reserve(Chain.getNumOperands());
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      Ops.push_back(GetAdjustedArgumentStores(Chain.getOperand(i), Offset,DAG));
    return DAG.getNode(ISD::TokenFactor, MVT::Other, Ops);
  }
  case ISD::STORE:       // Normal store
    StoreVT = Chain.getOperand(1).getValueType();
    break;
  case ISD::TRUNCSTORE:  // FLOAT store
    StoreVT = cast<VTSDNode>(Chain.getOperand(4))->getVT();
    break;
  }

  SDOperand OrigDest = Chain.getOperand(2);
  unsigned OrigOffset;

  if (OrigDest.getOpcode() == ISD::CopyFromReg) {
    OrigOffset = 0;
    assert(cast<RegisterSDNode>(OrigDest.getOperand(1))->getReg() == X86::ESP);
  } else {
    // We expect only (ESP+C)
    assert(OrigDest.getOpcode() == ISD::ADD &&
           isa<ConstantSDNode>(OrigDest.getOperand(1)) &&
           OrigDest.getOperand(0).getOpcode() == ISD::CopyFromReg &&
           cast<RegisterSDNode>(OrigDest.getOperand(0).getOperand(1))->getReg()
               == X86::ESP);
    OrigOffset = cast<ConstantSDNode>(OrigDest.getOperand(1))->getValue();
  }

  // Compute the new offset from the incoming ESP value we wish to use.
  unsigned NewOffset = OrigOffset + Offset;

  unsigned OpSize = (MVT::getSizeInBits(StoreVT)+7)/8;  // Bits -> Bytes
  MachineFunction &MF = DAG.getMachineFunction();
  int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, NewOffset);
  SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
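  // Using a fixed frame object pins each store to "incoming ESP + NewOffset",
  // so the addresses remain correct after ESP is adjusted for the tail call.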
  SDOperand InChain = GetAdjustedArgumentStores(Chain.getOperand(0), Offset,
                                                DAG);
  if (Chain.getOpcode() == ISD::STORE)
    return DAG.getNode(ISD::STORE, MVT::Other, InChain, Chain.getOperand(1),
                       FIN, DAG.getSrcValue(NULL));
  assert(Chain.getOpcode() == ISD::TRUNCSTORE);
  return DAG.getNode(ISD::TRUNCSTORE, MVT::Other, InChain, Chain.getOperand(1),
                     FIN, DAG.getSrcValue(NULL), DAG.getValueType(StoreVT));
}
/// EmitFastCCToFastCCTailCall - Given a tailcall in the tail position to a
/// fastcc function from a fastcc function, emit the code to emit a 'proper'
/// tail call.
void ISel::EmitFastCCToFastCCTailCall(SDNode *TailCallNode) {
  unsigned CalleeCallArgSize =
    cast<ConstantSDNode>(TailCallNode->getOperand(2))->getValue();
  unsigned CallerArgSize = X86Lowering.getBytesToPopOnReturn();

  //std::cerr << "****\n*** EMITTING TAIL CALL!\n****\n";

  // Adjust argument stores.  Instead of storing to [ESP], f.e., store to frame
  // indexes that are relative to the incoming ESP.  If the incoming and
  // outgoing arg sizes are the same we will store to [InESP] instead of
  // [CurESP] and the ESP referenced will be relative to the incoming function
  // frame.
  int ESPOffset = CallerArgSize-CalleeCallArgSize;
  SDOperand AdjustedArgStores =
    GetAdjustedArgumentStores(TailCallNode->getOperand(0), ESPOffset, *TheDAG);

  // Copy the return address of the caller into a virtual register so we don't
  // clobber it when the argument stores are emitted.
  SDOperand RetVal;
  if (ESPOffset) {
    SDOperand RetValAddr = X86Lowering.getReturnAddressFrameIndex(*TheDAG);
    RetVal = TheDAG->getLoad(MVT::i32, TheDAG->getEntryNode(),
                             RetValAddr, TheDAG->getSrcValue(NULL));
    Select(RetVal);
  }

  // Codegen all of the argument stores.
  Select(AdjustedArgStores);

  if (RetVal.Val) {
    // Emit a store of the saved ret value to the new location.
    MachineFunction &MF = TheDAG->getMachineFunction();
    int ReturnAddrFI = MF.getFrameInfo()->CreateFixedObject(4, ESPOffset-4);
    SDOperand RetValAddr = TheDAG->getFrameIndex(ReturnAddrFI, MVT::i32);
    Select(TheDAG->getNode(ISD::STORE, MVT::Other, TheDAG->getEntryNode(),
                           RetVal, RetValAddr));
  }
  // Get the destination value.
  SDOperand Callee = TailCallNode->getOperand(1);
  bool isDirect = isa<GlobalAddressSDNode>(Callee) ||
                  isa<ExternalSymbolSDNode>(Callee);
  unsigned CalleeReg = 0;
  if (!isDirect)
    // If this is not a direct tail call, evaluate the callee's address.
    CalleeReg = SelectExpr(Callee);

  unsigned RegOp1 = 0;
  unsigned RegOp2 = 0;

  if (TailCallNode->getNumOperands() > 4) {
    // The first value is passed in (a part of) EAX, the second in EDX.
    RegOp1 = SelectExpr(TailCallNode->getOperand(4));
    if (TailCallNode->getNumOperands() > 5)
      RegOp2 = SelectExpr(TailCallNode->getOperand(5));

    switch (TailCallNode->getOperand(4).getValueType()) {
    default: assert(0 && "Bad thing to pass in regs");
    case MVT::i1:
    case MVT::i8:
      BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(RegOp1);
      break;
    case MVT::i16:
      BuildMI(BB, X86::MOV16rr, 1,X86::AX).addReg(RegOp1);
      break;
    case MVT::i32:
      BuildMI(BB, X86::MOV32rr, 1,X86::EAX).addReg(RegOp1);
      break;
    }
    if (RegOp2) {
      switch (TailCallNode->getOperand(5).getValueType()) {
      default: assert(0 && "Bad thing to pass in regs");
      case MVT::i1:
      case MVT::i8:
        BuildMI(BB, X86::MOV8rr, 1, X86::DL).addReg(RegOp2);
        break;
      case MVT::i16:
        BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
        break;
      case MVT::i32:
        BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
        break;
      }
    }
  }

  // If this is not a direct tail call, put the callee's address into ECX.
  // The address has to be evaluated into a non-callee save register that is
  // not used for arguments.  This means ECX, as EAX and EDX may be
  // used for argument passing.  We do this here to make sure that the
  // expressions for arguments and callee are all evaluated before the copies
  // into physical registers.
  if (!isDirect)
    BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CalleeReg);

  // Adjust ESP.
  BuildMI(BB, X86::ADJSTACKPTRri, 2,
          X86::ESP).addReg(X86::ESP).addImm(ESPOffset);
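  // After this adjustment ESP points at the relocated return address; the
  // stack looks exactly as if our caller had directly CALLed the new callee,
  // so a plain JMP below completes the tail call.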
  // TODO: handle jmp [mem]
  if (!isDirect) {
    BuildMI(BB, X86::TAILJMPr, 1).addReg(X86::ECX);
  } else if (GlobalAddressSDNode *GASD = dyn_cast<GlobalAddressSDNode>(Callee)){
    BuildMI(BB, X86::TAILJMPd, 1).addGlobalAddress(GASD->getGlobal(), true);
  } else {
    ExternalSymbolSDNode *ESSDN = cast<ExternalSymbolSDNode>(Callee);
    BuildMI(BB, X86::TAILJMPd, 1).addExternalSymbol(ESSDN->getSymbol(), true);
  }
  // ADD IMPLICIT USE RegOp1/RegOp2's
}
void ISel::Select(SDOperand N) {
  unsigned Tmp1 = 0, Tmp2 = 0, Opc = 0;

  if (!ExprMap.insert(std::make_pair(N, 1)).second)
    return;  // Already selected.

  SDNode *Node = N.Val;

  switch (Node->getOpcode()) {
  default:
    Node->dump(); std::cerr << "\n";
    assert(0 && "Node not handled yet!");
  case X86ISD::RDTSC_DAG:
    Select(Node->getOperand(0)); //Chain
    BuildMI(BB, X86::RDTSC, 0);
    return;

  case ISD::EntryToken: return;  // Noop
  case ISD::TokenFactor:
    if (Node->getNumOperands() == 2) {
      bool OneFirst =
        getRegPressure(Node->getOperand(1))>getRegPressure(Node->getOperand(0));
      Select(Node->getOperand(OneFirst));
      Select(Node->getOperand(!OneFirst));
    } else {
      std::vector<std::pair<unsigned, unsigned> > OpsP;
      for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
        OpsP.push_back(std::make_pair(getRegPressure(Node->getOperand(i)), i));
      std::sort(OpsP.begin(), OpsP.end());
      std::reverse(OpsP.begin(), OpsP.end());
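      // Visit operands in decreasing register-pressure order: evaluating the
      // "heaviest" subtree first frees its temporaries before the cheaper
      // subtrees are computed (a Sethi-Ullman style heuristic).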
      for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
        Select(Node->getOperand(OpsP[i].second));
    }
    return;
  case ISD::CopyToReg:
    if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
      Select(N.getOperand(0));
      Tmp1 = SelectExpr(N.getOperand(2));
    } else {
      Tmp1 = SelectExpr(N.getOperand(2));
      Select(N.getOperand(0));
    }
    Tmp2 = cast<RegisterSDNode>(N.getOperand(1))->getReg();

    switch (N.getOperand(2).getValueType()) {
    default: assert(0 && "Invalid type for operation!");
    case MVT::i1:
    case MVT::i8:  Opc = X86::MOV8rr;  break;
    case MVT::i16: Opc = X86::MOV16rr; break;
    case MVT::i32: Opc = X86::MOV32rr; break;
    case MVT::f32: Opc = X86::MOVSSrr; break;
    case MVT::f64:
      if (X86ScalarSSE) {
        Opc = X86::MOVSDrr;
      } else {
        Opc = X86::FpMOV;
        ContainsFPCode = true;
      }
      break;
    }
    BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
    return;
  case ISD::RET:
    if (N.getOperand(0).getOpcode() == ISD::CALLSEQ_END ||
        N.getOperand(0).getOpcode() == X86ISD::TAILCALL ||
        N.getOperand(0).getOpcode() == ISD::TokenFactor)
      if (EmitPotentialTailCall(Node))
        return;

    switch (N.getNumOperands()) {
    default:
      assert(0 && "Unknown return instruction!");
    case 3:
      assert(N.getOperand(1).getValueType() == MVT::i32 &&
             N.getOperand(2).getValueType() == MVT::i32 &&
             "Unknown two-register value!");
      if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
        Tmp1 = SelectExpr(N.getOperand(1));
        Tmp2 = SelectExpr(N.getOperand(2));
      } else {
        Tmp2 = SelectExpr(N.getOperand(2));
        Tmp1 = SelectExpr(N.getOperand(1));
      }
      Select(N.getOperand(0));

      BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
      BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(Tmp2);
      break;
    case 2:
      if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
        Select(N.getOperand(0));
        Tmp1 = SelectExpr(N.getOperand(1));
      } else {
        Tmp1 = SelectExpr(N.getOperand(1));
        Select(N.getOperand(0));
      }
      switch (N.getOperand(1).getValueType()) {
      default: assert(0 && "All other types should have been promoted!!");
      case MVT::f32:
        if (X86ScalarSSE) {
          // Spill the value to memory and reload it into top of stack.
          unsigned Size = MVT::getSizeInBits(MVT::f32)/8;
          MachineFunction *F = BB->getParent();
          int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
          addFrameReference(BuildMI(BB, X86::MOVSSmr, 5), FrameIdx).addReg(Tmp1);
          addFrameReference(BuildMI(BB, X86::FpLD32m, 4, X86::FP0), FrameIdx);
          BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
          ContainsFPCode = true;
        } else {
          assert(0 && "MVT::f32 only legal with scalar sse fp");
          abort();
        }
        break;
      case MVT::f64:
        if (X86ScalarSSE) {
          // Spill the value to memory and reload it into top of stack.
          unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
          MachineFunction *F = BB->getParent();
          int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
          addFrameReference(BuildMI(BB, X86::MOVSDmr, 5), FrameIdx).addReg(Tmp1);
          addFrameReference(BuildMI(BB, X86::FpLD64m, 4, X86::FP0), FrameIdx);
          BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
          ContainsFPCode = true;
        } else {
          BuildMI(BB, X86::FpSETRESULT, 1).addReg(Tmp1);
          ContainsFPCode = true;
        }
        break;
      case MVT::i32:
        BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
        break;
      }
      break;
    case 1:
      Select(N.getOperand(0));
      break;
    }

    if (X86Lowering.getBytesToPopOnReturn() == 0)
      BuildMI(BB, X86::RET, 0);  // Just emit a 'ret' instruction
    else
      BuildMI(BB, X86::RETI, 1).addImm(X86Lowering.getBytesToPopOnReturn());
    return;
  case ISD::BR: {
    Select(N.getOperand(0));
    MachineBasicBlock *Dest =
      cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
    BuildMI(BB, X86::JMP, 1).addMBB(Dest);
    return;
  }

  case ISD::BRCOND: {
    MachineBasicBlock *Dest =
      cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();

    // Try to fold a setcc into the branch.  If this fails, emit a test/jne
    // pair.
    if (EmitBranchCC(Dest, N.getOperand(0), N.getOperand(1))) {
      if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
        Select(N.getOperand(0));
        Tmp1 = SelectExpr(N.getOperand(1));
      } else {
        Tmp1 = SelectExpr(N.getOperand(1));
        Select(N.getOperand(0));
      }
      BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
      BuildMI(BB, X86::JNE, 1).addMBB(Dest);
    }
    return;
  }
  case ISD::LOAD:
    // If this load could be folded into the only using instruction, and if it
    // is safe to emit the instruction here, try to do so now.
    if (Node->hasNUsesOfValue(1, 0)) {
      SDOperand TheVal = N.getValue(0);
      SDNode *User = 0;
      for (SDNode::use_iterator UI = Node->use_begin(); ; ++UI) {
        assert(UI != Node->use_end() && "Didn't find use!");
        SDNode *UN = *UI;
        for (unsigned i = 0, e = UN->getNumOperands(); i != e; ++i)
          if (UN->getOperand(i) == TheVal) {
            User = UN;
            break;
          }
        if (User) break;
      }

      // Only handle unary operators right now.
      if (User->getNumOperands() == 1) {
        ExprMap.erase(N);
        SelectExpr(SDOperand(User, 0));
        return;
      }
    }
    ExprMap.erase(N);
    SelectExpr(N);
    return;

  case ISD::READPORT:
  case ISD::EXTLOAD:
  case ISD::SEXTLOAD:
  case ISD::ZEXTLOAD:
  case ISD::DYNAMIC_STACKALLOC:
  case X86ISD::TAILCALL:
  case X86ISD::CALL:
    ExprMap.erase(N);
    SelectExpr(N);
    return;

  case ISD::CopyFromReg:
  case X86ISD::FILD64m:
    ExprMap.erase(N);
    SelectExpr(N.getValue(0));
    return;
  case X86ISD::FP_TO_INT16_IN_MEM:
  case X86ISD::FP_TO_INT32_IN_MEM:
  case X86ISD::FP_TO_INT64_IN_MEM: {
    assert(N.getOperand(1).getValueType() == MVT::f64);

    Select(N.getOperand(0));  // Select the token chain

    unsigned ValReg;
    X86ISelAddressMode AM;
    if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
      ValReg = SelectExpr(N.getOperand(1));
      SelectAddress(N.getOperand(2), AM);
    } else {
      SelectAddress(N.getOperand(2), AM);
      ValReg = SelectExpr(N.getOperand(1));
    }

    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    //
    MachineFunction *F = BB->getParent();
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned OldCW = MakeReg(MVT::i16);
    addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);
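    // (In 0xC7F the rounding-control field, bits 10-11 of the FP control
    // word, is 11b = "round toward zero", which is the truncation behavior
    // FP-to-integer conversion requires.)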
    // Reload the modified control word now...
    addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);

    // Get the X86 opcode to use.
    switch (N.getOpcode()) {
    case X86ISD::FP_TO_INT16_IN_MEM: Tmp1 = X86::FpIST16m; break;
    case X86ISD::FP_TO_INT32_IN_MEM: Tmp1 = X86::FpIST32m; break;
    case X86ISD::FP_TO_INT64_IN_MEM: Tmp1 = X86::FpIST64m; break;
    }

    addFullAddress(BuildMI(BB, Tmp1, 5), AM).addReg(ValReg);

    // Reload the original control word now.
    addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
    return;
  }
  case ISD::TRUNCSTORE: {  // truncstore chain, val, ptr, SRCVALUE, storety
    X86ISelAddressMode AM;
    MVT::ValueType StoredTy = cast<VTSDNode>(N.getOperand(4))->getVT();
    assert((StoredTy == MVT::i1 || StoredTy == MVT::f32 ||
            StoredTy == MVT::i16 /*FIXME: THIS IS JUST FOR TESTING!*/)
           && "Unsupported TRUNCSTORE for this target!");

    if (StoredTy == MVT::i16) {
      // FIXME: This is here just to allow testing.  X86 doesn't really have a
      // TRUNCSTORE i16 operation, but this is required for targets that do not
      // have 16-bit integer registers.  We occasionally disable 16-bit integer
      // registers to test the promotion code.
      Select(N.getOperand(0));
      Tmp1 = SelectExpr(N.getOperand(1));
      SelectAddress(N.getOperand(2), AM);

      BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
      addFullAddress(BuildMI(BB, X86::MOV16mr, 5), AM).addReg(X86::AX);
      return;
    }

    // Store of constant bool?
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
        Select(N.getOperand(0));
        SelectAddress(N.getOperand(2), AM);
      } else {
        SelectAddress(N.getOperand(2), AM);
        Select(N.getOperand(0));
      }
      addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CN->getValue());
      return;
    }

    switch (StoredTy) {
    default: assert(0 && "Cannot truncstore this type!");
    case MVT::i1: Opc = X86::MOV8mr; break;
    case MVT::f32:
      assert(!X86ScalarSSE && "Cannot truncstore scalar SSE regs");
      Opc = X86::FpST32m; break;
    }

    std::vector<std::pair<unsigned, unsigned> > RP;
    RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
    RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
    RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
    std::sort(RP.begin(), RP.end());

    Tmp1 = 0;   // Silence a warning.
    for (unsigned i = 0; i != 3; ++i)
      switch (RP[2-i].second) {
      default: assert(0 && "Unknown operand number!");
      case 0: Select(N.getOperand(0)); break;
      case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
      case 2: SelectAddress(N.getOperand(2), AM); break;
      }

    addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
    return;
  }
  case ISD::STORE: {
    X86ISelAddressMode AM;

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      switch (CN->getValueType(0)) {
      default: assert(0 && "Invalid type for operation!");
      case MVT::i1:
      case MVT::i8:  Opc = X86::MOV8mi;  break;
      case MVT::i16: Opc = X86::MOV16mi; break;
      case MVT::i32: Opc = X86::MOV32mi; break;
      }

      if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
        Select(N.getOperand(0));
        SelectAddress(N.getOperand(2), AM);
      } else {
        SelectAddress(N.getOperand(2), AM);
        Select(N.getOperand(0));
      }
      addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
      return;
    } else if (GlobalAddressSDNode *GA =
                  dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
      assert(GA->getValueType(0) == MVT::i32 && "Bad pointer operand");

      if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
        Select(N.getOperand(0));
        SelectAddress(N.getOperand(2), AM);
      } else {
        SelectAddress(N.getOperand(2), AM);
        Select(N.getOperand(0));
      }

      GlobalValue *GV = GA->getGlobal();
      // For Darwin, external and weak symbols are indirect, so we want to load
      // the value at address GV, not the value of GV itself.
      if (Subtarget->getIndirectExternAndWeakGlobals() &&
          (GV->hasWeakLinkage() || GV->isExternal())) {
        Tmp1 = MakeReg(MVT::i32);
        BuildMI(BB, X86::MOV32rm, 4, Tmp1).addReg(0).addZImm(1).addReg(0)
          .addGlobalAddress(GV, false, 0);
        addFullAddress(BuildMI(BB, X86::MOV32mr, 4+1),AM).addReg(Tmp1);
      } else {
        addFullAddress(BuildMI(BB, X86::MOV32mi, 4+1),AM).addGlobalAddress(GV);
      }
      return;
    }

    // Check to see if this is a load/op/store combination.
    if (TryToFoldLoadOpStore(Node))
      return;

    switch (N.getOperand(1).getValueType()) {
    default: assert(0 && "Cannot store this type!");
    case MVT::i1:
    case MVT::i8:  Opc = X86::MOV8mr;  break;
    case MVT::i16: Opc = X86::MOV16mr; break;
    case MVT::i32: Opc = X86::MOV32mr; break;
    case MVT::f32: Opc = X86::MOVSSmr; break;
    case MVT::f64: Opc = X86ScalarSSE ? X86::MOVSDmr : X86::FpST64m; break;
    }

    std::vector<std::pair<unsigned, unsigned> > RP;
    RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
    RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
    RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
    std::sort(RP.begin(), RP.end());

    Tmp1 = 0;   // Silence a warning.
    for (unsigned i = 0; i != 3; ++i)
      switch (RP[2-i].second) {
      default: assert(0 && "Unknown operand number!");
      case 0: Select(N.getOperand(0)); break;
      case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
      case 2: SelectAddress(N.getOperand(2), AM); break;
      }

    addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
    return;
  }
  case ISD::CALLSEQ_START:
    Select(N.getOperand(0));

    Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
    BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(Tmp1);
    return;
  case ISD::CALLSEQ_END:
    Select(N.getOperand(0));
    return;
  case ISD::MEMSET: {
    Select(N.getOperand(0));  // Select the chain.
    unsigned Align =
      (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
    if (Align == 0) Align = 1;

    // Turn the byte count into # iterations
    unsigned CountReg;
    unsigned Opcode;
    if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
      unsigned Val = ValC->getValue() & 255;

      // If the value is a constant, then we can potentially use larger sets.
      switch (Align & 3) {
      case 2:   // WORD aligned
        CountReg = MakeReg(MVT::i32);
        if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
          BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
        } else {
          unsigned ByteReg = SelectExpr(Node->getOperand(3));
          BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
        }
        BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
        Opcode = X86::REP_STOSW;
        break;
      case 0:   // DWORD aligned
        CountReg = MakeReg(MVT::i32);
        if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
          BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
        } else {
          unsigned ByteReg = SelectExpr(Node->getOperand(3));
          BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
        }
        Val = (Val << 8) | Val;
        BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
        Opcode = X86::REP_STOSD;
        break;
      default:  // BYTE aligned
        CountReg = SelectExpr(Node->getOperand(3));
        BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
        Opcode = X86::REP_STOSB;
        break;
      }
    } else {
      // If it's not a constant value we are storing, just fall back.  We could
      // try to be clever to form 16 bit and 32 bit values, but we don't yet.
      unsigned ValReg = SelectExpr(Node->getOperand(2));
      BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
      CountReg = SelectExpr(Node->getOperand(3));
      Opcode = X86::REP_STOSB;
    }
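    // Note the byte replication above: a constant fill byte like 0x42 is
    // widened to 0x4242 for REP_STOSW and 0x42424242 for REP_STOSD, so each
    // word or dword store writes the same one-byte pattern.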
    // No matter what the alignment is, we put the destination in EDI and the
    // count in ECX.
    unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
    BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
    BuildMI(BB, Opcode, 0);
    return;
  }
  case ISD::MEMCPY: {
    Select(N.getOperand(0));  // Select the chain.
    unsigned Align =
      (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
    if (Align == 0) Align = 1;

    // Turn the byte count into # iterations
    unsigned CountReg;
    unsigned Opcode;
    switch (Align & 3) {
    case 2:   // WORD aligned
      CountReg = MakeReg(MVT::i32);
      if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
        BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
      } else {
        unsigned ByteReg = SelectExpr(Node->getOperand(3));
        BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
      }
      Opcode = X86::REP_MOVSW;
      break;
    case 0:   // DWORD aligned
      CountReg = MakeReg(MVT::i32);
      if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
        BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
      } else {
        unsigned ByteReg = SelectExpr(Node->getOperand(3));
        BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
      }
      Opcode = X86::REP_MOVSD;
      break;
    default:  // BYTE aligned
      CountReg = SelectExpr(Node->getOperand(3));
      Opcode = X86::REP_MOVSB;
      break;
    }

    // No matter what the alignment is, we put the source in ESI, the
    // destination in EDI, and the count in ECX.
    unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
    unsigned TmpReg2 = SelectExpr(Node->getOperand(2));
    BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
    BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
    BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
    BuildMI(BB, Opcode, 0);
    return;
  }
  case ISD::WRITEPORT:
    if (Node->getOperand(2).getValueType() != MVT::i16) {
      std::cerr << "llvm.writeport: Address size is not 16 bits\n";
      exit(1);
    }
    Select(Node->getOperand(0));  // Emit the chain.

    Tmp1 = SelectExpr(Node->getOperand(1));
    switch (Node->getOperand(1).getValueType()) {
    case MVT::i8:
      BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
      Tmp2 = X86::OUT8ir;  Opc = X86::OUT8rr;
      break;
    case MVT::i16:
      BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(Tmp1);
      Tmp2 = X86::OUT16ir; Opc = X86::OUT16rr;
      break;
    case MVT::i32:
      BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
      Tmp2 = X86::OUT32ir; Opc = X86::OUT32rr;
      break;
    default:
      std::cerr << "llvm.writeport: invalid data type for X86 target";
      exit(1);
    }

    // If the port is a single-byte constant, use the immediate form.
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Node->getOperand(2)))
      if ((CN->getValue() & 255) == CN->getValue()) {
        BuildMI(BB, Tmp2, 1).addImm(CN->getValue());
        return;
      }
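    // As with IN, the immediate form of OUT only encodes an 8-bit port
    // number; larger ports take the register form with the port in DX.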
    // Otherwise, move the I/O port address into the DX register.
    unsigned Reg = SelectExpr(Node->getOperand(2));
    BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
    BuildMI(BB, Opc, 0);
    return;
  }
  assert(0 && "Should not be reached!");
}
/// createX86ISelPattern - This pass converts an LLVM function
/// into a machine code representation using pattern matching and a machine
/// description file.
///
FunctionPass *llvm::createX86ISelPattern(TargetMachine &TM) {
  return new ISel(TM);
}