1 //===-- IA64ISelPattern.cpp - A pattern matching inst selector for IA64 ---===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Duraid Madina and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a pattern matching instruction selector for IA64.
12 //===----------------------------------------------------------------------===//
15 #include "IA64InstrBuilder.h"
16 #include "IA64RegisterInfo.h"
17 #include "IA64MachineFunctionInfo.h"
18 #include "llvm/Constants.h" // FIXME: REMOVE
19 #include "llvm/Function.h"
20 #include "llvm/CodeGen/MachineConstantPool.h" // FIXME: REMOVE
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/SelectionDAG.h"
24 #include "llvm/CodeGen/SelectionDAGISel.h"
25 #include "llvm/CodeGen/SSARegMap.h"
26 #include "llvm/Target/TargetData.h"
27 #include "llvm/Target/TargetLowering.h"
28 #include "llvm/Support/MathExtras.h"
29 #include "llvm/ADT/Statistic.h"
34 //===----------------------------------------------------------------------===//
35 // IA64TargetLowering - IA64 Implementation of the TargetLowering interface
37 class IA64TargetLowering : public TargetLowering {
// FrameIndex of the first varargs stack slot; set up in LowerArguments and
// consumed by LowerVAStart.
38 int VarArgsFrameIndex; // FrameIndex for start of varargs area.
40 //int ReturnAddrIndex; // FrameIndex for return slot.
// Virtual registers holding copies of the incoming GP (r1), SP (r12) and
// RP (return pointer) on function entry, so they can be restored around
// calls (see the restore* helpers below).
41 unsigned GP, SP, RP; // FIXME - clean this mess up
// Virtual GR that receives the output of the PSEUDO_ALLOC instruction
// emitted in LowerArguments.
44 unsigned VirtGPR; // this is public so it can be accessed in the selector
45 // for ISD::RET down below. add an accessor instead? FIXME
// Constructor: registers the IA64 register classes and tells the
// legalizer which generic DAG operations must be expanded/promoted
// because IA64 has no direct instruction for them.
47 IA64TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
49 // register class for general registers
50 addRegisterClass(MVT::i64, IA64::GRRegisterClass);
52 // register class for FP registers
53 addRegisterClass(MVT::f64, IA64::FPRegisterClass);
55 // register class for predicate registers
56 addRegisterClass(MVT::i1, IA64::PRRegisterClass);
58 setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
59 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
// setcc produces a predicate register (i1); shift amounts are full i64.
61 setSetCCResultType(MVT::i1);
62 setShiftAmountType(MVT::i64);
// i1 extending loads are promoted; the remaining sext/zext loads are
// expanded since IA64 loads zero-extend naturally.
64 setOperationAction(ISD::EXTLOAD , MVT::i1 , Promote);
66 setOperationAction(ISD::ZEXTLOAD , MVT::i1 , Expand);
68 setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
69 setOperationAction(ISD::SEXTLOAD , MVT::i8 , Expand);
70 setOperationAction(ISD::SEXTLOAD , MVT::i16 , Expand);
71 setOperationAction(ISD::SEXTLOAD , MVT::i32 , Expand);
// no hardware FP remainder: expand to library/compound sequences.
73 setOperationAction(ISD::SREM , MVT::f32 , Expand);
74 setOperationAction(ISD::SREM , MVT::f64 , Expand);
76 setOperationAction(ISD::UREM , MVT::f32 , Expand);
77 setOperationAction(ISD::UREM , MVT::f64 , Expand);
// memory intrinsics become libcalls rather than inline code.
79 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
80 setOperationAction(ISD::MEMSET , MVT::Other, Expand);
81 setOperationAction(ISD::MEMCPY , MVT::Other, Expand);
83 computeRegisterProperties();
// +-0.0 and +-1.0 are free on IA64 (f0/f1 registers, possibly negated),
// so advertise them as legal FP immediates.
85 addLegalFPImmediate(+0.0);
86 addLegalFPImmediate(+1.0);
87 addLegalFPImmediate(-0.0);
88 addLegalFPImmediate(-1.0);
91 /// LowerArguments - This hook must be implemented to indicate how we should
92 /// lower the arguments for the specified function, into the specified DAG.
93 virtual std::vector<SDOperand>
94 LowerArguments(Function &F, SelectionDAG &DAG);
96 /// LowerCallTo - This hook lowers an abstract call to a function into an
98 virtual std::pair<SDOperand, SDOperand>
99 LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
100 SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
// Lowers llvm.va_start: returns the address of the varargs area.
102 virtual std::pair<SDOperand, SDOperand>
103 LowerVAStart(SDOperand Chain, SelectionDAG &DAG);
// Lowers va_arg / va_next: loads the current argument and/or advances
// the va_list pointer.
105 virtual std::pair<SDOperand,SDOperand>
106 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
107 const Type *ArgTy, SelectionDAG &DAG);
// Lowers llvm.frameaddress / llvm.returnaddress (currently unimplemented,
// see the assert in the definition below).
109 virtual std::pair<SDOperand, SDOperand>
110 LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
// Re-materialize the global pointer, stack pointer and return pointer
// from the virtual-register copies saved on entry (used after calls,
// which may clobber the physical registers).
113 void restoreGP_SP_RP(MachineBasicBlock* BB)
115 BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(GP);
116 BuildMI(BB, IA64::MOV, 1, IA64::r12).addReg(SP);
117 BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);
// Restore only SP (r12) and RP.
120 void restoreSP_RP(MachineBasicBlock* BB)
122 BuildMI(BB, IA64::MOV, 1, IA64::r12).addReg(SP);
123 BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);
// Restore only the return pointer.
126 void restoreRP(MachineBasicBlock* BB)
128 BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);
// Restore only the global pointer (r1).
131 void restoreGP(MachineBasicBlock* BB)
133 BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(GP);
// LowerArguments - Set up the incoming-argument DAG nodes and entry-block
// machine code for an IA64 function: the first 8 arguments arrive in stacked
// GRs r32-r39 (FP args in f8-f15), further arguments are loaded from fixed
// stack slots at 16 + 8*(n-8) above the incoming SP. Also emits the
// PSEUDO_ALLOC and saves GP/SP/RP into virtual registers.
// NOTE(review): several interior lines (case labels, braces, some
// declarations such as 'count'/'argVreg'/'argPreg'/'argOpc') are not visible
// in this view; comments below describe only what the visible code shows.
140 std::vector<SDOperand>
141 IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
142 std::vector<SDOperand> ArgValues;
145 // add beautiful description of IA64 stack frame format
146 // here (from intel 24535803.pdf most likely)
148 MachineFunction &MF = DAG.getMachineFunction();
149 MachineFrameInfo *MFI = MF.getFrameInfo();
// Virtual registers that will hold entry-time copies of GP/SP/RP.
151 GP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
152 SP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
153 RP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
155 MachineBasicBlock& BB = MF.front();
// IA64 calling convention: integer args land in stacked registers r32-r39,
157 unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
158 IA64::r36, IA64::r37, IA64::r38, IA64::r39};
// ...and FP args in f8-f15.
160 unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
161 IA64::F12,IA64::F13,IA64::F14, IA64::F15};
167 unsigned used_FPArgs = 0; // how many FP args have been used so far?
169 unsigned ArgOffset = 0;
// Walk the formal arguments, producing a CopyFromReg or stack load for each.
172 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
174 SDOperand newroot, argt;
175 if(count < 8) { // need to fix this logic? maybe.
177 switch (getValueType(I->getType())) {
// Unknown value type: report and (presumably) fall through/abort.
179 std::cerr << "ERROR in LowerArgs: unknown type "
180 << getValueType(I->getType()) << "\n";
183 // fixme? (well, will need to for weird FP structy stuff,
184 // see intel ABI docs)
186 //XXX BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
187 MF.addLiveIn(args_FP[used_FPArgs]); // mark this reg as liveIn
188 // floating point args go into f8..f15 as-needed, the increment
189 argVreg[count] = // is below..:
190 MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::f64));
191 // FP args go into f8..f15 as needed: (hence the ++)
192 argPreg[count] = args_FP[used_FPArgs++];
193 argOpc[count] = IA64::FMOV;
194 argt = newroot = DAG.getCopyFromReg(argVreg[count],
195 getValueType(I->getType()), DAG.getRoot());
197 case MVT::i1: // NOTE: as far as C abi stuff goes,
198 // bools are just boring old ints
203 //XXX BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
204 MF.addLiveIn(args_int[count]); // mark this register as liveIn
// Integer args are always copied out of their register as i64...
206 MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
207 argPreg[count] = args_int[count];
208 argOpc[count] = IA64::MOV;
210 DAG.getCopyFromReg(argVreg[count], MVT::i64, DAG.getRoot());
// ...then truncated back down to the declared type if narrower.
211 if ( getValueType(I->getType()) != MVT::i64)
212 argt = DAG.getNode(ISD::TRUNCATE, getValueType(I->getType()),
216 } else { // more than 8 args go into the frame
217 // Create the frame index object for this incoming parameter...
// ABI: memory args start 16 bytes above SP, one 8-byte slot each.
218 ArgOffset = 16 + 8 * (count - 8);
219 int FI = MFI->CreateFixedObject(8, ArgOffset);
221 // Create the SelectionDAG nodes corresponding to a load
222 //from this parameter
223 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
224 argt = newroot = DAG.getLoad(getValueType(I->getType()),
225 DAG.getEntryNode(), FIN);
// Chain the new node in and record the argument value.
228 DAG.setRoot(newroot.getValue(1));
229 ArgValues.push_back(argt);
233 // Create a vreg to hold the output of (what will become)
234 // the "alloc" instruction
235 VirtGPR = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
236 BuildMI(&BB, IA64::PSEUDO_ALLOC, 0, VirtGPR);
237 // we create a PSEUDO_ALLOC (pseudo)instruction for now
// IDEF marks r1/r12/rp as implicitly defined on entry so the copies
// below aren't treated as reads of undefined registers.
239 BuildMI(&BB, IA64::IDEF, 0, IA64::r1);
242 BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
243 BuildMI(&BB, IA64::IDEF, 0, IA64::rp);
// Save GP/SP/RP into their virtual-register homes (restored after calls).
246 BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);
249 BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
250 BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);
253 unsigned tempOffset=0;
255 // if this is a varargs function, we simply lower llvm.va_start by
256 // pointing to the first entry
259 VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);
262 // here we actually do the moving of args, and store them to the stack
263 // too if this is a varargs function:
264 for (int i = 0; i < count && i < 8; ++i) {
// Copy each incoming physical arg register into its virtual register.
265 BuildMI(&BB, argOpc[i], 1, argVreg[i]).addReg(argPreg[i]);
267 // if this is a varargs function, we copy the input registers to the stack
268 int FI = MFI->CreateFixedObject(8, tempOffset);
269 tempOffset+=8; //XXX: is it safe to use r22 like this?
// r22 is used as a scratch register for the slot address.
270 BuildMI(&BB, IA64::MOV, 1, IA64::r22).addFrameIndex(FI);
271 // FIXME: we should use st8.spill here, one day
272 BuildMI(&BB, IA64::ST8, 1, IA64::r22).addReg(argPreg[i]);
276 // Finally, inform the code generator which regs we return values in.
277 // (see the ISD::RET: case down below)
// Integer results return in r8, FP results in f8.
278 switch (getValueType(F.getReturnType())) {
279 default: assert(0 && "i have no idea where to return this type!");
280 case MVT::isVoid: break;
286 MF.addLiveOut(IA64::r8);
290 MF.addLiveOut(IA64::F8);
// LowerCallTo - Lower an abstract call into DAG nodes: reserves the 16-byte
// scratch area (plus 8 bytes per argument beyond the eighth), records how
// many 'out' registers this function needs, promotes sub-64-bit integer args
// and f32 args to i64/f64, and wraps the call in ADJCALLSTACKDOWN/UP nodes.
// Returns the (result value, chain) pair.
// NOTE(review): some case labels and the else-branch of the Args.size()
// check are elided from this view.
297 std::pair<SDOperand, SDOperand>
298 IA64TargetLowering::LowerCallTo(SDOperand Chain,
299 const Type *RetTy, bool isVarArg,
300 SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG) {
302 MachineFunction &MF = DAG.getMachineFunction();
// ABI-mandated 16-byte scratch area is always allocated...
304 unsigned NumBytes = 16;
305 unsigned outRegsUsed = 0;
// ...plus one 8-byte outgoing stack slot per argument past the eighth.
307 if (Args.size() > 8) {
308 NumBytes += (Args.size() - 8) * 8;
311 outRegsUsed = Args.size();
314 // FIXME? this WILL fail if we ever try to pass around an arg that
315 // consumes more than a single output slot (a 'real' double, int128
316 // some sort of aggregate etc.), as we'll underestimate how many 'outX'
317 // registers we use. Hopefully, the assembler will notice.
// Keep the per-function maximum of out-registers used by any call site.
318 MF.getInfo<IA64FunctionInfo>()->outRegsUsed=
319 std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);
321 Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
322 DAG.getConstant(NumBytes, getPointerTy()));
// Promote each argument to a register-sized type before the call.
324 std::vector<SDOperand> args_to_use;
325 for (unsigned i = 0, e = Args.size(); i != e; ++i)
327 switch (getValueType(Args[i].second)) {
328 default: assert(0 && "unexpected argument type!");
333 //promote to 64-bits, sign/zero extending based on type
335 if(Args[i].second->isSigned())
336 Args[i].first = DAG.getNode(ISD::SIGN_EXTEND, MVT::i64,
339 Args[i].first = DAG.getNode(ISD::ZERO_EXTEND, MVT::i64,
// f32 arguments are widened to f64.
344 Args[i].first = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Args[i].first);
349 args_to_use.push_back(Args[i].first);
// Result types of the call node: the return value (if any) plus the chain.
352 std::vector<MVT::ValueType> RetVals;
353 MVT::ValueType RetTyVT = getValueType(RetTy);
354 if (RetTyVT != MVT::isVoid)
355 RetVals.push_back(RetTyVT);
356 RetVals.push_back(MVT::Other);
358 SDOperand TheCall = SDOperand(DAG.getCall(RetVals, Chain,
359 Callee, args_to_use), 0);
// The chain is result #1 for value-returning calls, #0 for void calls.
360 Chain = TheCall.getValue(RetTyVT != MVT::isVoid);
361 Chain = DAG.getNode(ISD::ADJCALLSTACKUP, MVT::Other, Chain,
362 DAG.getConstant(NumBytes, getPointerTy()));
363 return std::make_pair(TheCall, Chain);
// LowerVAStart - Lower llvm.va_start. The varargs area was laid out in
// LowerArguments (VarArgsFrameIndex points at its first 8-byte slot), so
// va_start is just the address of that frame slot.
366 std::pair<SDOperand, SDOperand>
367 IA64TargetLowering::LowerVAStart(SDOperand Chain, SelectionDAG &DAG) {
368 // vastart just returns the address of the VarArgsFrameIndex slot.
369 return std::make_pair(DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64), Chain);
// LowerVAArgNext - Lower va_arg (isVANext == false: load the current
// argument from the va_list pointer) and va_next (isVANext == true: advance
// the va_list pointer past one argument). Only i64/f64 (and, per the visible
// size check, i32/f32) are expected; narrower types must have been promoted.
// NOTE(review): the declarations of 'Result'/'Amt' and the isVANext
// branching are elided from this view — the visible lines show the load on
// one path and the pointer increment on the other.
372 std::pair<SDOperand,SDOperand> IA64TargetLowering::
373 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
374 const Type *ArgTy, SelectionDAG &DAG) {
376 MVT::ValueType ArgVT = getValueType(ArgTy);
// va_arg path: load the value of type ArgVT from the va_list address.
379 Result = DAG.getLoad(ArgVT, DAG.getEntryNode(), VAList);
// va_next path: step by 4 or 8 bytes depending on the argument size
// (Amt presumably set in the elided lines — TODO confirm).
382 if (ArgVT == MVT::i32 || ArgVT == MVT::f32)
385 assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
386 "Other types should have been promoted for varargs!");
389 Result = DAG.getNode(ISD::ADD, VAList.getValueType(), VAList,
390 DAG.getConstant(Amt, VAList.getValueType()));
392 return std::make_pair(Result, Chain);
// LowerFrameReturnAddress - Lower llvm.frameaddress / llvm.returnaddress.
// Not implemented for IA64: unconditionally asserts at runtime if reached.
395 std::pair<SDOperand, SDOperand> IA64TargetLowering::
396 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
399 assert(0 && "LowerFrameReturnAddress not done yet\n");
406 //===--------------------------------------------------------------------===//
407 /// ISel - IA64 specific code to select IA64 machine instructions for
408 /// SelectionDAG operations.
410 class ISel : public SelectionDAGISel {
411 /// IA64Lowering - This object fully describes how to lower LLVM code to an
412 /// IA64-specific SelectionDAG.
413 IA64TargetLowering IA64Lowering;
415 /// ExprMap - As shared expressions are codegen'd, we keep track of which
416 /// vreg the value is produced in, so we only emit one copy of each compiled
// ...expression (CSE over the selected machine code).
418 std::map<SDOperand, unsigned> ExprMap;
// LoweredTokens - side-effecting (chain/token) nodes already selected, so
// each is emitted at most once.
419 std::set<SDOperand> LoweredTokens;
// Note: IA64Lowering is passed to the base class before it is constructed
// here; this relies on SelectionDAGISel only storing the reference.
422 ISel(TargetMachine &TM) : SelectionDAGISel(IA64Lowering), IA64Lowering(TM) {
425 /// InstructionSelectBasicBlock - This callback is invoked by
426 /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
427 virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
// SelectExpr - select a value-producing node, returning the vreg holding
// its result; Select - select a chain (side-effecting) node.
429 unsigned SelectExpr(SDOperand N);
430 void Select(SDOperand N);
434 /// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
435 /// when it has created a SelectionDAG for us to codegen.
// Entry point per basic block: walk the DAG from its root, emitting machine
// instructions, then reset the per-block selection caches.
436 void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
438 // Codegen the basic block.
439 Select(DAG.getRoot());
441 // Clear state used for selection.
// (ExprMap is presumably cleared on the elided line above this one.)
443 LoweredTokens.clear();
446 /// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N. It
447 /// returns zero when the input is not exactly a power of two.
448 static unsigned ExactLog2(uint64_t Val) {
// Reject 0 and any value with more than one bit set; the elided remainder
// of the body counts the position of the single set bit.
449 if (Val == 0 || (Val & (Val-1))) return 0;
458 /// ExactLog2sub1 - This function solves for (Val == (1 << (N-1))-1)
459 /// and returns N. It returns 666 if Val is not 2^n -1 for some n.
460 static unsigned ExactLog2sub1(uint64_t Val) {
// Linear scan over all 64 candidate widths, testing Val == 2^n - 1.
// (The declaration of 'n' and the return statements are elided here;
// per the function's header comment, 666 is the not-found sentinel.)
462 for(n=0; n<64; n++) {
463 if(Val==(uint64_t)((1LL<<n)-1))
469 /// ponderIntegerDivisionBy - When handling integer divides, if the divide
470 /// is by a constant such that we can efficiently codegen it, this
471 /// function says what to do. Currently, it returns 0 if the division must
472 /// become a genuine divide, and 1 if the division can be turned into a
// ponderIntegerDivisionBy - Decide whether a divide by operand N can be
// strength-reduced: returns nonzero (with Imm set to log2 of the divisor)
// for a power-of-two constant, 0 when a genuine divide is required.
// (The 'return 1' path and the Imm parameter declaration are elided here.)
474 static unsigned ponderIntegerDivisionBy(SDOperand N, bool isSigned,
476 if (N.getOpcode() != ISD::Constant) return 0; // if not a divide by
477 // a constant, give up.
479 int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();
481 if ((Imm = ExactLog2(v))) { // if a division by a power of two, say so
485 return 0; // fallthrough
// ponderIntegerAndWith - Decide whether an AND with operand N can use a
// zero-extend or deposit instruction: if N is the constant (2^n)-1, Imm is
// set to n and (per the elided line) nonzero is returned; otherwise 0.
488 static unsigned ponderIntegerAndWith(SDOperand N, unsigned& Imm) {
489 if (N.getOpcode() != ISD::Constant) return 0; // if not ANDing with
490 // a constant, give up.
492 int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();
// 666 is ExactLog2sub1's not-found sentinel.
494 if ((Imm = ExactLog2sub1(v))!=666) { // if ANDing with ((2^n)-1) for some n
498 return 0; // fallthrough
// ponderIntegerAdditionWith - Decide whether an ADD with operand N fits the
// 14-bit immediate form (adds): if N is a constant in [-8192, 8191], Imm is
// set to its low 14 bits and (per the elided line) nonzero is returned;
// otherwise 0 and a reg+reg add is needed.
501 static unsigned ponderIntegerAdditionWith(SDOperand N, unsigned& Imm) {
502 if (N.getOpcode() != ISD::Constant) return 0; // if not adding a
503 // constant, give up.
504 int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();
506 if (v <= 8191 && v >= -8192) { // if this constants fits in 14 bits, say so
507 Imm = v & 0x3FFF; // 14 bits
510 return 0; // fallthrough
// ponderIntegerSubtractionFrom - Decide whether a subtraction *from* operand
// N fits the 8-bit immediate form (sub imm8, r): if N is a constant in
// [-128, 127], Imm is set to its low 8 bits and (per the elided line)
// nonzero is returned; otherwise 0.
513 static unsigned ponderIntegerSubtractionFrom(SDOperand N, unsigned& Imm) {
514 if (N.getOpcode() != ISD::Constant) return 0; // if not subtracting a
515 // constant, give up.
516 int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();
518 if (v <= 127 && v >= -128) { // if this constants fits in 8 bits, say so
519 Imm = v & 0xFF; // 8 bits
522 return 0; // fallthrough
525 unsigned ISel::SelectExpr(SDOperand N) {
527 unsigned Tmp1, Tmp2, Tmp3;
529 MVT::ValueType DestType = N.getValueType();
531 unsigned opcode = N.getOpcode();
533 SDNode *Node = N.Val;
536 if (Node->getOpcode() == ISD::CopyFromReg)
537 // Just use the specified register as our input.
538 return dyn_cast<RegSDNode>(Node)->getReg();
540 unsigned &Reg = ExprMap[N];
543 if (N.getOpcode() != ISD::CALL)
544 Reg = Result = (N.getValueType() != MVT::Other) ?
545 MakeReg(N.getValueType()) : 1;
547 // If this is a call instruction, make sure to prepare ALL of the result
548 // values as well as the chain.
549 if (Node->getNumValues() == 1)
550 Reg = Result = 1; // Void call, just a chain.
552 Result = MakeReg(Node->getValueType(0));
553 ExprMap[N.getValue(0)] = Result;
554 for (unsigned i = 1, e = N.Val->getNumValues()-1; i != e; ++i)
555 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
556 ExprMap[SDOperand(Node, Node->getNumValues()-1)] = 1;
560 switch (N.getOpcode()) {
563 assert(0 && "Node not handled!\n");
565 case ISD::FrameIndex: {
566 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
567 BuildMI(BB, IA64::MOV, 1, Result).addFrameIndex(Tmp1);
571 case ISD::ConstantPool: {
572 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
573 IA64Lowering.restoreGP(BB); // FIXME: do i really need this?
574 BuildMI(BB, IA64::ADD, 2, Result).addConstantPoolIndex(Tmp1)
579 case ISD::ConstantFP: {
580 Tmp1 = Result; // Intermediate Register
581 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
582 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
583 Tmp1 = MakeReg(MVT::f64);
585 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
586 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
587 BuildMI(BB, IA64::FMOV, 1, Tmp1).addReg(IA64::F0); // load 0.0
588 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
589 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
590 BuildMI(BB, IA64::FMOV, 1, Tmp1).addReg(IA64::F1); // load 1.0
592 assert(0 && "Unexpected FP constant!");
594 // we multiply by +1.0, negate (this is FNMA), and then add 0.0
595 BuildMI(BB, IA64::FNMA, 3, Result).addReg(Tmp1).addReg(IA64::F1)
600 case ISD::DYNAMIC_STACKALLOC: {
601 // Generate both result values.
603 ExprMap[N.getValue(1)] = 1; // Generate the token
605 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
607 // FIXME: We are currently ignoring the requested alignment for handling
608 // greater than the stack alignment. This will need to be revisited at some
609 // point. Align = N.getOperand(2);
611 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
612 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
613 std::cerr << "Cannot allocate stack object with greater alignment than"
614 << " the stack alignment yet!";
619 Select(N.getOperand(0));
620 if (ConstantSDNode* CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
622 if (CN->getValue() < 32000)
624 BuildMI(BB, IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
625 .addImm(-CN->getValue());
627 Tmp1 = SelectExpr(N.getOperand(1));
628 // Subtract size from stack pointer, thereby allocating some space.
629 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
632 Tmp1 = SelectExpr(N.getOperand(1));
633 // Subtract size from stack pointer, thereby allocating some space.
634 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
637 Select(N.getOperand(0));
638 Tmp1 = SelectExpr(N.getOperand(1));
639 // Subtract size from stack pointer, thereby allocating some space.
640 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
641 // Put a pointer to the space into the result register, by copying the
643 BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r12);
648 Tmp1 = SelectExpr(N.getOperand(0)); //Cond
649 Tmp2 = SelectExpr(N.getOperand(1)); //Use if TRUE
650 Tmp3 = SelectExpr(N.getOperand(2)); //Use if FALSE
654 switch (N.getOperand(1).getValueType()) {
656 "ISD::SELECT: 'select'ing something other than i64 or f64!\n");
658 bogoResult=MakeReg(MVT::i64);
661 bogoResult=MakeReg(MVT::f64);
665 BuildMI(BB, IA64::MOV, 1, bogoResult).addReg(Tmp3);
666 BuildMI(BB, IA64::CMOV, 2, Result).addReg(bogoResult).addReg(Tmp2)
667 .addReg(Tmp1); // FIXME: should be FMOV/FCMOV sometimes,
668 // though this will work for now (no JIT)
672 case ISD::Constant: {
673 unsigned depositPos=0;
674 unsigned depositLen=0;
675 switch (N.getValueType()) {
676 default: assert(0 && "Cannot use constants of this type!");
677 case MVT::i1: { // if a bool, we don't 'load' so much as generate
679 if(cast<ConstantSDNode>(N)->getValue()) // true:
680 BuildMI(BB, IA64::CMPEQ, 2, Result)
681 .addReg(IA64::r0).addReg(IA64::r0);
683 BuildMI(BB, IA64::CMPNE, 2, Result)
684 .addReg(IA64::r0).addReg(IA64::r0);
685 return Result; // early exit
687 case MVT::i64: break;
690 int64_t immediate = cast<ConstantSDNode>(N)->getValue();
692 if(immediate==0) { // if the constant is just zero,
693 BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r0); // just copy r0
694 return Result; // early exit
697 if (immediate <= 8191 && immediate >= -8192) {
698 // if this constants fits in 14 bits, we use a mov the assembler will
699 // turn into: "adds rDest=imm,r0" (and _not_ "andl"...)
700 BuildMI(BB, IA64::MOVSIMM14, 1, Result).addSImm(immediate);
701 return Result; // early exit
704 if (immediate <= 2097151 && immediate >= -2097152) {
705 // if this constants fits in 22 bits, we use a mov the assembler will
706 // turn into: "addl rDest=imm,r0"
707 BuildMI(BB, IA64::MOVSIMM22, 1, Result).addSImm(immediate);
708 return Result; // early exit
711 /* otherwise, our immediate is big, so we use movl */
712 uint64_t Imm = immediate;
713 BuildMI(BB, IA64::MOVLIMM64, 1, Result).addImm64(Imm);
718 BuildMI(BB, IA64::IDEF, 0, Result);
722 case ISD::GlobalAddress: {
723 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
724 unsigned Tmp1 = MakeReg(MVT::i64);
726 BuildMI(BB, IA64::ADD, 2, Tmp1).addGlobalAddress(GV).addReg(IA64::r1);
727 BuildMI(BB, IA64::LD8, 1, Result).addReg(Tmp1);
732 case ISD::ExternalSymbol: {
733 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
734 // assert(0 && "sorry, but what did you want an ExternalSymbol for again?");
735 BuildMI(BB, IA64::MOV, 1, Result).addExternalSymbol(Sym); // XXX
739 case ISD::FP_EXTEND: {
740 Tmp1 = SelectExpr(N.getOperand(0));
741 BuildMI(BB, IA64::FMOV, 1, Result).addReg(Tmp1);
745 case ISD::ZERO_EXTEND: {
746 Tmp1 = SelectExpr(N.getOperand(0)); // value
748 switch (N.getOperand(0).getValueType()) {
749 default: assert(0 && "Cannot zero-extend this type!");
750 case MVT::i8: Opc = IA64::ZXT1; break;
751 case MVT::i16: Opc = IA64::ZXT2; break;
752 case MVT::i32: Opc = IA64::ZXT4; break;
754 // we handle bools differently! :
755 case MVT::i1: { // if the predicate reg has 1, we want a '1' in our GR.
756 unsigned dummy = MakeReg(MVT::i64);
758 BuildMI(BB, IA64::MOV, 1, dummy).addReg(IA64::r0);
759 // ...then conditionally (PR:Tmp1) add 1:
760 BuildMI(BB, IA64::TPCADDIMM22, 2, Result).addReg(dummy)
761 .addImm(1).addReg(Tmp1);
762 return Result; // XXX early exit!
766 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
770 case ISD::SIGN_EXTEND: { // we should only have to handle i1 -> i64 here!!!
772 assert(0 && "hmm, ISD::SIGN_EXTEND: shouldn't ever be reached. bad luck!\n");
774 Tmp1 = SelectExpr(N.getOperand(0)); // value
776 switch (N.getOperand(0).getValueType()) {
777 default: assert(0 && "Cannot sign-extend this type!");
778 case MVT::i1: assert(0 && "trying to sign extend a bool? ow.\n");
779 Opc = IA64::SXT1; break;
780 // FIXME: for now, we treat bools the same as i8s
781 case MVT::i8: Opc = IA64::SXT1; break;
782 case MVT::i16: Opc = IA64::SXT2; break;
783 case MVT::i32: Opc = IA64::SXT4; break;
786 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
790 case ISD::TRUNCATE: {
791 // we use the funky dep.z (deposit (zero)) instruction to deposit bits
792 // of R0 appropriately.
793 switch (N.getOperand(0).getValueType()) {
794 default: assert(0 && "Unknown truncate!");
795 case MVT::i64: break;
797 Tmp1 = SelectExpr(N.getOperand(0));
798 unsigned depositPos, depositLen;
800 switch (N.getValueType()) {
801 default: assert(0 && "Unknown truncate!");
803 // if input (normal reg) is 0, 0!=0 -> false (0), if 1, 1!=0 ->true (1):
804 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(Tmp1)
806 return Result; // XXX early exit!
808 case MVT::i8: depositPos=0; depositLen=8; break;
809 case MVT::i16: depositPos=0; depositLen=16; break;
810 case MVT::i32: depositPos=0; depositLen=32; break;
812 BuildMI(BB, IA64::DEPZ, 1, Result).addReg(Tmp1)
813 .addImm(depositPos).addImm(depositLen);
818 case ISD::FP_ROUND: {
819 assert (DestType == MVT::f32 && N.getOperand(0).getValueType() == MVT::f64 &&
820 "error: trying to FP_ROUND something other than f64 -> f32!\n");
821 Tmp1 = SelectExpr(N.getOperand(0));
822 BuildMI(BB, IA64::FADDS, 2, Result).addReg(Tmp1).addReg(IA64::F0);
823 // we add 0.0 using a single precision add to do rounding
828 // FIXME: the following 4 cases need cleaning
829 case ISD::SINT_TO_FP: {
830 Tmp1 = SelectExpr(N.getOperand(0));
831 Tmp2 = MakeReg(MVT::f64);
832 unsigned dummy = MakeReg(MVT::f64);
833 BuildMI(BB, IA64::SETFSIG, 1, Tmp2).addReg(Tmp1);
834 BuildMI(BB, IA64::FCVTXF, 1, dummy).addReg(Tmp2);
835 BuildMI(BB, IA64::FNORMD, 1, Result).addReg(dummy);
839 case ISD::UINT_TO_FP: {
840 Tmp1 = SelectExpr(N.getOperand(0));
841 Tmp2 = MakeReg(MVT::f64);
842 unsigned dummy = MakeReg(MVT::f64);
843 BuildMI(BB, IA64::SETFSIG, 1, Tmp2).addReg(Tmp1);
844 BuildMI(BB, IA64::FCVTXUF, 1, dummy).addReg(Tmp2);
845 BuildMI(BB, IA64::FNORMD, 1, Result).addReg(dummy);
849 case ISD::FP_TO_SINT: {
850 Tmp1 = SelectExpr(N.getOperand(0));
851 Tmp2 = MakeReg(MVT::f64);
852 BuildMI(BB, IA64::FCVTFXTRUNC, 1, Tmp2).addReg(Tmp1);
853 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(Tmp2);
857 case ISD::FP_TO_UINT: {
858 Tmp1 = SelectExpr(N.getOperand(0));
859 Tmp2 = MakeReg(MVT::f64);
860 BuildMI(BB, IA64::FCVTFXUTRUNC, 1, Tmp2).addReg(Tmp1);
861 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(Tmp2);
866 if(DestType == MVT::f64 && N.getOperand(0).getOpcode() == ISD::MUL &&
867 N.getOperand(0).Val->hasOneUse()) { // if we can fold this add
868 // into an fma, do so:
869 // ++FusedFP; // Statistic
870 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
871 Tmp2 = SelectExpr(N.getOperand(0).getOperand(1));
872 Tmp3 = SelectExpr(N.getOperand(1));
873 BuildMI(BB, IA64::FMA, 3, Result).addReg(Tmp1).addReg(Tmp2).addReg(Tmp3);
874 return Result; // early exit
877 if(DestType != MVT::f64 && N.getOperand(0).getOpcode() == ISD::SHL &&
878 N.getOperand(0).Val->hasOneUse()) { // if we might be able to fold
879 // this add into a shladd, try:
880 ConstantSDNode *CSD = NULL;
881 if((CSD = dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) &&
882 (CSD->getValue() >= 1) && (CSD->getValue() <= 4) ) { // we can:
884 // ++FusedSHLADD; // Statistic
885 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
886 int shl_amt = CSD->getValue();
887 Tmp3 = SelectExpr(N.getOperand(1));
889 BuildMI(BB, IA64::SHLADD, 3, Result)
890 .addReg(Tmp1).addImm(shl_amt).addReg(Tmp3);
891 return Result; // early exit
896 Tmp1 = SelectExpr(N.getOperand(0));
897 if(DestType != MVT::f64) { // integer addition:
898 switch (ponderIntegerAdditionWith(N.getOperand(1), Tmp3)) {
899 case 1: // adding a constant that's 14 bits
900 BuildMI(BB, IA64::ADDIMM14, 2, Result).addReg(Tmp1).addSImm(Tmp3);
901 return Result; // early exit
902 } // fallthrough and emit a reg+reg ADD:
903 Tmp2 = SelectExpr(N.getOperand(1));
904 BuildMI(BB, IA64::ADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
905 } else { // this is a floating point addition
906 Tmp2 = SelectExpr(N.getOperand(1));
907 BuildMI(BB, IA64::FADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
913 Tmp1 = SelectExpr(N.getOperand(0));
914 Tmp2 = SelectExpr(N.getOperand(1));
916 if(DestType != MVT::f64) { // TODO: speed!
917 // boring old integer multiply with xma
918 unsigned TempFR1=MakeReg(MVT::f64);
919 unsigned TempFR2=MakeReg(MVT::f64);
920 unsigned TempFR3=MakeReg(MVT::f64);
921 BuildMI(BB, IA64::SETFSIG, 1, TempFR1).addReg(Tmp1);
922 BuildMI(BB, IA64::SETFSIG, 1, TempFR2).addReg(Tmp2);
923 BuildMI(BB, IA64::XMAL, 1, TempFR3).addReg(TempFR1).addReg(TempFR2)
925 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(TempFR3);
927 else // floating point multiply
928 BuildMI(BB, IA64::FMPY, 2, Result).addReg(Tmp1).addReg(Tmp2);
933 if(DestType == MVT::f64 && N.getOperand(0).getOpcode() == ISD::MUL &&
934 N.getOperand(0).Val->hasOneUse()) { // if we can fold this sub
935 // into an fms, do so:
936 // ++FusedFP; // Statistic
937 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
938 Tmp2 = SelectExpr(N.getOperand(0).getOperand(1));
939 Tmp3 = SelectExpr(N.getOperand(1));
940 BuildMI(BB, IA64::FMS, 3, Result).addReg(Tmp1).addReg(Tmp2).addReg(Tmp3);
941 return Result; // early exit
943 Tmp2 = SelectExpr(N.getOperand(1));
944 if(DestType != MVT::f64) { // integer subtraction:
945 switch (ponderIntegerSubtractionFrom(N.getOperand(0), Tmp3)) {
946 case 1: // subtracting *from* an 8 bit constant:
947 BuildMI(BB, IA64::SUBIMM8, 2, Result).addSImm(Tmp3).addReg(Tmp2);
948 return Result; // early exit
949 } // fallthrough and emit a reg+reg SUB:
950 Tmp1 = SelectExpr(N.getOperand(0));
951 BuildMI(BB, IA64::SUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
952 } else { // this is a floating point subtraction
953 Tmp1 = SelectExpr(N.getOperand(0));
954 BuildMI(BB, IA64::FSUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
960 Tmp1 = SelectExpr(N.getOperand(0));
961 assert(DestType == MVT::f64 && "trying to fabs something other than f64?");
962 BuildMI(BB, IA64::FABS, 1, Result).addReg(Tmp1);
967 assert(DestType == MVT::f64 && "trying to fneg something other than f64?");
969 if (ISD::FABS == N.getOperand(0).getOpcode()) { // && hasOneUse()?
970 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
971 BuildMI(BB, IA64::FNEGABS, 1, Result).addReg(Tmp1); // fold in abs
973 Tmp1 = SelectExpr(N.getOperand(0));
974 BuildMI(BB, IA64::FNEG, 1, Result).addReg(Tmp1); // plain old fneg
981 switch (N.getValueType()) {
982 default: assert(0 && "Cannot AND this type!");
983 case MVT::i1: { // if a bool, we emit a pseudocode AND
984 unsigned pA = SelectExpr(N.getOperand(0));
985 unsigned pB = SelectExpr(N.getOperand(1));
987 /* our pseudocode for AND is:
989 (pA) cmp.eq.unc pC,p0 = r0,r0 // pC = pA
990 cmp.eq pTemp,p0 = r0,r0 // pTemp = NOT pB
992 (pB) cmp.ne pTemp,p0 = r0,r0
994 (pTemp)cmp.ne pC,p0 = r0,r0 // if (NOT pB) pC = 0
997 unsigned pTemp = MakeReg(MVT::i1);
999 unsigned bogusTemp1 = MakeReg(MVT::i1);
1000 unsigned bogusTemp2 = MakeReg(MVT::i1);
1001 unsigned bogusTemp3 = MakeReg(MVT::i1);
1002 unsigned bogusTemp4 = MakeReg(MVT::i1);
1004 BuildMI(BB, IA64::PCMPEQUNC, 3, bogusTemp1)
1005 .addReg(IA64::r0).addReg(IA64::r0).addReg(pA);
1006 BuildMI(BB, IA64::CMPEQ, 2, bogusTemp2)
1007 .addReg(IA64::r0).addReg(IA64::r0);
1008 BuildMI(BB, IA64::TPCMPNE, 3, pTemp)
1009 .addReg(bogusTemp2).addReg(IA64::r0).addReg(IA64::r0).addReg(pB);
1010 BuildMI(BB, IA64::TPCMPNE, 3, Result)
1011 .addReg(bogusTemp1).addReg(IA64::r0).addReg(IA64::r0).addReg(pTemp);
1015 // if not a bool, we just AND away:
1020 Tmp1 = SelectExpr(N.getOperand(0));
1021 switch (ponderIntegerAndWith(N.getOperand(1), Tmp3)) {
1022 case 1: // ANDing a constant that is 2^n-1 for some n
1024 case 8: // if AND 0x00000000000000FF, be quaint and use zxt1
1025 BuildMI(BB, IA64::ZXT1, 1, Result).addReg(Tmp1);
1027 case 16: // if AND 0x000000000000FFFF, be quaint and use zxt2
1028 BuildMI(BB, IA64::ZXT2, 1, Result).addReg(Tmp1);
1030 case 32: // if AND 0x00000000FFFFFFFF, be quaint and use zxt4
1031 BuildMI(BB, IA64::ZXT4, 1, Result).addReg(Tmp1);
1033 default: // otherwise, use dep.z to paste zeros
1034 BuildMI(BB, IA64::DEPZ, 3, Result).addReg(Tmp1)
1035 .addImm(0).addImm(Tmp3);
1038 return Result; // early exit
1039 } // fallthrough and emit a simple AND:
1040 Tmp2 = SelectExpr(N.getOperand(1));
1041 BuildMI(BB, IA64::AND, 2, Result).addReg(Tmp1).addReg(Tmp2);
1048 switch (N.getValueType()) {
1049 default: assert(0 && "Cannot OR this type!");
1050 case MVT::i1: { // if a bool, we emit a pseudocode OR
1051 unsigned pA = SelectExpr(N.getOperand(0));
1052 unsigned pB = SelectExpr(N.getOperand(1));
1054 unsigned pTemp1 = MakeReg(MVT::i1);
1056 /* our pseudocode for OR is:
1062 (pA) cmp.eq.unc pC,p0 = r0,r0 // pC = pA
1064 (pB) cmp.eq pC,p0 = r0,r0 // if (pB) pC = 1
1067 BuildMI(BB, IA64::PCMPEQUNC, 3, pTemp1)
1068 .addReg(IA64::r0).addReg(IA64::r0).addReg(pA);
1069 BuildMI(BB, IA64::TPCMPEQ, 3, Result)
1070 .addReg(pTemp1).addReg(IA64::r0).addReg(IA64::r0).addReg(pB);
1073 // if not a bool, we just OR away:
1078 Tmp1 = SelectExpr(N.getOperand(0));
1079 Tmp2 = SelectExpr(N.getOperand(1));
1080 BuildMI(BB, IA64::OR, 2, Result).addReg(Tmp1).addReg(Tmp2);
1088 switch (N.getValueType()) {
1089 default: assert(0 && "Cannot XOR this type!");
1090 case MVT::i1: { // if a bool, we emit a pseudocode XOR
1091 unsigned pY = SelectExpr(N.getOperand(0));
1092 unsigned pZ = SelectExpr(N.getOperand(1));
1094 /* one possible routine for XOR is:
1096 // Compute px = py ^ pz
1097 // using sum of products: px = (py & !pz) | (pz & !py)
1098 // Uses 5 instructions in 3 cycles.
1100 (pz) cmp.eq.unc px = r0, r0 // px = pz
1101 (py) cmp.eq.unc pt = r0, r0 // pt = py
1104 (pt) cmp.ne.and px = r0, r0 // px = px & !pt (px = pz & !pt)
1105 (pz) cmp.ne.and pt = r0, r0 // pt = pt & !pz
1109 (pt) cmp.eq.or px = r0, r0 // px = px | pt
1111 *** Another, which we use here, requires one scratch GR. it is:
1113 mov rt = 0 // initialize rt off critical path
1117 (pz) cmp.eq.unc px = r0, r0 // px = pz
1118 (pz) mov rt = 1 // rt = pz
1121 (py) cmp.ne px = 1, rt // if (py) px = !pz
1123 .. these routines kindly provided by Jim Hull
1125 unsigned rt = MakeReg(MVT::i64);
1127 // these two temporaries will never actually appear,
1128 // due to the two-address form of some of the instructions below
1129 unsigned bogoPR = MakeReg(MVT::i1); // becomes Result
1130 unsigned bogoGR = MakeReg(MVT::i64); // becomes rt
1132 BuildMI(BB, IA64::MOV, 1, bogoGR).addReg(IA64::r0);
1133 BuildMI(BB, IA64::PCMPEQUNC, 3, bogoPR)
1134 .addReg(IA64::r0).addReg(IA64::r0).addReg(pZ);
1135 BuildMI(BB, IA64::TPCADDIMM22, 2, rt)
1136 .addReg(bogoGR).addImm(1).addReg(pZ);
1137 BuildMI(BB, IA64::TPCMPIMM8NE, 3, Result)
1138 .addReg(bogoPR).addImm(1).addReg(rt).addReg(pY);
1141 // if not a bool, we just XOR away:
1146 Tmp1 = SelectExpr(N.getOperand(0));
1147 Tmp2 = SelectExpr(N.getOperand(1));
1148 BuildMI(BB, IA64::XOR, 2, Result).addReg(Tmp1).addReg(Tmp2);
1156 Tmp1 = SelectExpr(N.getOperand(0));
1157 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1158 Tmp2 = CN->getValue();
1159 BuildMI(BB, IA64::SHLI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1161 Tmp2 = SelectExpr(N.getOperand(1));
1162 BuildMI(BB, IA64::SHL, 2, Result).addReg(Tmp1).addReg(Tmp2);
1168 Tmp1 = SelectExpr(N.getOperand(0));
1169 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1170 Tmp2 = CN->getValue();
1171 BuildMI(BB, IA64::SHRUI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1173 Tmp2 = SelectExpr(N.getOperand(1));
1174 BuildMI(BB, IA64::SHRU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1180 Tmp1 = SelectExpr(N.getOperand(0));
1181 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1182 Tmp2 = CN->getValue();
1183 BuildMI(BB, IA64::SHRSI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1185 Tmp2 = SelectExpr(N.getOperand(1));
1186 BuildMI(BB, IA64::SHRS, 2, Result).addReg(Tmp1).addReg(Tmp2);
1196 Tmp1 = SelectExpr(N.getOperand(0));
1197 Tmp2 = SelectExpr(N.getOperand(1));
1201 if(DestType == MVT::f64) // XXX: we're not gonna be fed MVT::f32, are we?
1204 bool isModulus=false; // is it a division or a modulus?
1205 bool isSigned=false;
1207 switch(N.getOpcode()) {
1208 case ISD::SDIV: isModulus=false; isSigned=true; break;
1209 case ISD::UDIV: isModulus=false; isSigned=false; break;
1210 case ISD::SREM: isModulus=true; isSigned=true; break;
1211 case ISD::UREM: isModulus=true; isSigned=false; break;
1214 if(!isModulus && !isFP) { // if this is an integer divide,
1215 switch (ponderIntegerDivisionBy(N.getOperand(1), isSigned, Tmp3)) {
1216 case 1: // division by a constant that's a power of 2
1217 Tmp1 = SelectExpr(N.getOperand(0));
1218 if(isSigned) { // argument could be negative, so emit some code:
1219 unsigned divAmt=Tmp3;
1220 unsigned tempGR1=MakeReg(MVT::i64);
1221 unsigned tempGR2=MakeReg(MVT::i64);
1222 unsigned tempGR3=MakeReg(MVT::i64);
1223 BuildMI(BB, IA64::SHRS, 2, tempGR1)
1224 .addReg(Tmp1).addImm(divAmt-1);
1225 BuildMI(BB, IA64::EXTRU, 3, tempGR2)
1226 .addReg(tempGR1).addImm(64-divAmt).addImm(divAmt);
1227 BuildMI(BB, IA64::ADD, 2, tempGR3)
1228 .addReg(Tmp1).addReg(tempGR2);
1229 BuildMI(BB, IA64::SHRS, 2, Result)
1230 .addReg(tempGR3).addImm(divAmt);
1232 else // unsigned div-by-power-of-2 becomes a simple shift right:
1233 BuildMI(BB, IA64::SHRU, 2, Result).addReg(Tmp1).addImm(Tmp3);
1234 return Result; // early exit
1238 unsigned TmpPR=MakeReg(MVT::i1); // we need two scratch
1239 unsigned TmpPR2=MakeReg(MVT::i1); // predicate registers,
1240 unsigned TmpF1=MakeReg(MVT::f64); // and one metric truckload of FP regs.
1241 unsigned TmpF2=MakeReg(MVT::f64); // lucky we have IA64?
1242 unsigned TmpF3=MakeReg(MVT::f64); // well, the real FIXME is to have
1243 unsigned TmpF4=MakeReg(MVT::f64); // isTwoAddress forms of these
1244 unsigned TmpF5=MakeReg(MVT::f64); // FP instructions so we can end up with
1245 unsigned TmpF6=MakeReg(MVT::f64); // stuff like setf.sig f10=f10 etc.
1246 unsigned TmpF7=MakeReg(MVT::f64);
1247 unsigned TmpF8=MakeReg(MVT::f64);
1248 unsigned TmpF9=MakeReg(MVT::f64);
1249 unsigned TmpF10=MakeReg(MVT::f64);
1250 unsigned TmpF11=MakeReg(MVT::f64);
1251 unsigned TmpF12=MakeReg(MVT::f64);
1252 unsigned TmpF13=MakeReg(MVT::f64);
1253 unsigned TmpF14=MakeReg(MVT::f64);
1254 unsigned TmpF15=MakeReg(MVT::f64);
1256 // OK, emit some code:
1259 // first, load the inputs into FP regs.
1260 BuildMI(BB, IA64::SETFSIG, 1, TmpF1).addReg(Tmp1);
1261 BuildMI(BB, IA64::SETFSIG, 1, TmpF2).addReg(Tmp2);
1263 // next, convert the inputs to FP
1265 BuildMI(BB, IA64::FCVTXF, 1, TmpF3).addReg(TmpF1);
1266 BuildMI(BB, IA64::FCVTXF, 1, TmpF4).addReg(TmpF2);
1268 BuildMI(BB, IA64::FCVTXUFS1, 1, TmpF3).addReg(TmpF1);
1269 BuildMI(BB, IA64::FCVTXUFS1, 1, TmpF4).addReg(TmpF2);
1272 } else { // this is an FP divide/remainder, so we 'leak' some temp
1273 // regs and assign TmpF3=Tmp1, TmpF4=Tmp2
1278 // we start by computing an approximate reciprocal (good to 9 bits?)
1279 // note, this instruction writes _both_ TmpF5 (answer) and TmpPR (predicate)
1280 BuildMI(BB, IA64::FRCPAS1, 4)
1281 .addReg(TmpF5, MachineOperand::Def)
1282 .addReg(TmpPR, MachineOperand::Def)
1283 .addReg(TmpF3).addReg(TmpF4);
1285 if(!isModulus) { // if this is a divide, we worry about div-by-zero
1286 unsigned bogusPR=MakeReg(MVT::i1); // won't appear, due to twoAddress
1288 BuildMI(BB, IA64::CMPEQ, 2, bogusPR).addReg(IA64::r0).addReg(IA64::r0);
1289 BuildMI(BB, IA64::TPCMPNE, 3, TmpPR2).addReg(bogusPR)
1290 .addReg(IA64::r0).addReg(IA64::r0).addReg(TmpPR);
1293 // now we apply newton's method, thrice! (FIXME: this is ~72 bits of
1294 // precision, don't need this much for f32/i32)
1295 BuildMI(BB, IA64::CFNMAS1, 4, TmpF6)
1296 .addReg(TmpF4).addReg(TmpF5).addReg(IA64::F1).addReg(TmpPR);
1297 BuildMI(BB, IA64::CFMAS1, 4, TmpF7)
1298 .addReg(TmpF3).addReg(TmpF5).addReg(IA64::F0).addReg(TmpPR);
1299 BuildMI(BB, IA64::CFMAS1, 4, TmpF8)
1300 .addReg(TmpF6).addReg(TmpF6).addReg(IA64::F0).addReg(TmpPR);
1301 BuildMI(BB, IA64::CFMAS1, 4, TmpF9)
1302 .addReg(TmpF6).addReg(TmpF7).addReg(TmpF7).addReg(TmpPR);
1303 BuildMI(BB, IA64::CFMAS1, 4,TmpF10)
1304 .addReg(TmpF6).addReg(TmpF5).addReg(TmpF5).addReg(TmpPR);
1305 BuildMI(BB, IA64::CFMAS1, 4,TmpF11)
1306 .addReg(TmpF8).addReg(TmpF9).addReg(TmpF9).addReg(TmpPR);
1307 BuildMI(BB, IA64::CFMAS1, 4,TmpF12)
1308 .addReg(TmpF8).addReg(TmpF10).addReg(TmpF10).addReg(TmpPR);
1309 BuildMI(BB, IA64::CFNMAS1, 4,TmpF13)
1310 .addReg(TmpF4).addReg(TmpF11).addReg(TmpF3).addReg(TmpPR);
1312 // FIXME: this is unfortunate :(
1313 // the story is that the dest reg of the fnma above and the fma below
1314 // (and therefore possibly the src of the fcvt.fx[u] as well) cannot
1315 // be the same register, or this code breaks if the first argument is
1316 // zero. (e.g. without this hack, 0%8 yields -64, not 0.)
1317 BuildMI(BB, IA64::CFMAS1, 4,TmpF14)
1318 .addReg(TmpF13).addReg(TmpF12).addReg(TmpF11).addReg(TmpPR);
1320 if(isModulus) { // XXX: fragile! fixes _only_ mod, *breaks* div! !
1321 BuildMI(BB, IA64::IUSE, 1).addReg(TmpF13); // hack :(
1325 // round to an integer
1327 BuildMI(BB, IA64::FCVTFXTRUNCS1, 1, TmpF15).addReg(TmpF14);
1329 BuildMI(BB, IA64::FCVTFXUTRUNCS1, 1, TmpF15).addReg(TmpF14);
1331 BuildMI(BB, IA64::FMOV, 1, TmpF15).addReg(TmpF14);
1332 // EXERCISE: can you see why TmpF15=TmpF14 does not work here, and
1333 // we really do need the above FMOV? ;)
1337 if(isFP) { // extra worrying about div-by-zero
1338 unsigned bogoResult=MakeReg(MVT::f64);
1340 // we do a 'conditional fmov' (of the correct result, depending
1341 // on how the frcpa predicate turned out)
1342 BuildMI(BB, IA64::PFMOV, 2, bogoResult)
1343 .addReg(TmpF12).addReg(TmpPR2);
1344 BuildMI(BB, IA64::CFMOV, 2, Result)
1345 .addReg(bogoResult).addReg(TmpF15).addReg(TmpPR);
1348 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(TmpF15);
1350 } else { // this is a modulus
1352 // answer = q * (-b) + a
1353 unsigned ModulusResult = MakeReg(MVT::f64);
1354 unsigned TmpF = MakeReg(MVT::f64);
1355 unsigned TmpI = MakeReg(MVT::i64);
1357 BuildMI(BB, IA64::SUB, 2, TmpI).addReg(IA64::r0).addReg(Tmp2);
1358 BuildMI(BB, IA64::SETFSIG, 1, TmpF).addReg(TmpI);
1359 BuildMI(BB, IA64::XMAL, 3, ModulusResult)
1360 .addReg(TmpF15).addReg(TmpF).addReg(TmpF1);
1361 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(ModulusResult);
1362 } else { // FP modulus! The horror... the horror....
1363 assert(0 && "sorry, no FP modulus just yet!\n!\n");
1370 case ISD::SIGN_EXTEND_INREG: {
1371 Tmp1 = SelectExpr(N.getOperand(0));
1372 MVTSDNode* MVN = dyn_cast<MVTSDNode>(Node);
1373 switch(MVN->getExtraValueType())
1377 assert(0 && "don't know how to sign extend this type");
1379 case MVT::i8: Opc = IA64::SXT1; break;
1380 case MVT::i16: Opc = IA64::SXT2; break;
1381 case MVT::i32: Opc = IA64::SXT4; break;
1383 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1388 Tmp1 = SelectExpr(N.getOperand(0));
1390 if (SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Node)) {
1391 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
1393 if(ConstantSDNode *CSDN =
1394 dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1395 // if we are comparing against a constant zero
1396 if(CSDN->getValue()==0)
1397 Tmp2 = IA64::r0; // then we can just compare against r0
1399 Tmp2 = SelectExpr(N.getOperand(1));
1400 } else // not comparing against a constant
1401 Tmp2 = SelectExpr(N.getOperand(1));
1403 switch (SetCC->getCondition()) {
1404 default: assert(0 && "Unknown integer comparison!");
1406 BuildMI(BB, IA64::CMPEQ, 2, Result).addReg(Tmp1).addReg(Tmp2);
1409 BuildMI(BB, IA64::CMPGT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1412 BuildMI(BB, IA64::CMPGE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1415 BuildMI(BB, IA64::CMPLT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1418 BuildMI(BB, IA64::CMPLE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1421 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1424 BuildMI(BB, IA64::CMPLTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1427 BuildMI(BB, IA64::CMPGTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1430 BuildMI(BB, IA64::CMPLEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1433 BuildMI(BB, IA64::CMPGEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1437 else { // if not integer, should be FP. FIXME: what about bools? ;)
1438 assert(SetCC->getOperand(0).getValueType() != MVT::f32 &&
1439 "error: SETCC should have had incoming f32 promoted to f64!\n");
1441 if(ConstantFPSDNode *CFPSDN =
1442 dyn_cast<ConstantFPSDNode>(N.getOperand(1))) {
1444 // if we are comparing against a constant +0.0 or +1.0
1445 if(CFPSDN->isExactlyValue(+0.0))
1446 Tmp2 = IA64::F0; // then we can just compare against f0
1447 else if(CFPSDN->isExactlyValue(+1.0))
1448 Tmp2 = IA64::F1; // or f1
1450 Tmp2 = SelectExpr(N.getOperand(1));
1451 } else // not comparing against a constant
1452 Tmp2 = SelectExpr(N.getOperand(1));
1454 switch (SetCC->getCondition()) {
1455 default: assert(0 && "Unknown FP comparison!");
1457 BuildMI(BB, IA64::FCMPEQ, 2, Result).addReg(Tmp1).addReg(Tmp2);
1460 BuildMI(BB, IA64::FCMPGT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1463 BuildMI(BB, IA64::FCMPGE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1466 BuildMI(BB, IA64::FCMPLT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1469 BuildMI(BB, IA64::FCMPLE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1472 BuildMI(BB, IA64::FCMPNE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1475 BuildMI(BB, IA64::FCMPLTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1478 BuildMI(BB, IA64::FCMPGTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1481 BuildMI(BB, IA64::FCMPLEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1484 BuildMI(BB, IA64::FCMPGEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1490 assert(0 && "this setcc not implemented yet");
1498 // Make sure we generate both values.
1500 ExprMap[N.getValue(1)] = 1; // Generate the token
1502 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1506 if(opcode == ISD::LOAD) { // this is a LOAD
1507 switch (Node->getValueType(0)) {
1508 default: assert(0 && "Cannot load this type!");
1509 case MVT::i1: Opc = IA64::LD1; isBool=true; break;
1510 // FIXME: for now, we treat bool loads the same as i8 loads */
1511 case MVT::i8: Opc = IA64::LD1; break;
1512 case MVT::i16: Opc = IA64::LD2; break;
1513 case MVT::i32: Opc = IA64::LD4; break;
1514 case MVT::i64: Opc = IA64::LD8; break;
1516 case MVT::f32: Opc = IA64::LDF4; break;
1517 case MVT::f64: Opc = IA64::LDF8; break;
1519 } else { // this is an EXTLOAD or ZEXTLOAD
1520 MVT::ValueType TypeBeingLoaded = cast<MVTSDNode>(Node)->getExtraValueType();
1521 switch (TypeBeingLoaded) {
1522 default: assert(0 && "Cannot extload/zextload this type!");
1524 case MVT::i8: Opc = IA64::LD1; break;
1525 case MVT::i16: Opc = IA64::LD2; break;
1526 case MVT::i32: Opc = IA64::LD4; break;
1527 case MVT::f32: Opc = IA64::LDF4; break;
1531 SDOperand Chain = N.getOperand(0);
1532 SDOperand Address = N.getOperand(1);
1534 if(Address.getOpcode() == ISD::GlobalAddress) {
1536 unsigned dummy = MakeReg(MVT::i64);
1537 unsigned dummy2 = MakeReg(MVT::i64);
1538 BuildMI(BB, IA64::ADD, 2, dummy)
1539 .addGlobalAddress(cast<GlobalAddressSDNode>(Address)->getGlobal())
1541 BuildMI(BB, IA64::LD8, 1, dummy2).addReg(dummy);
1543 BuildMI(BB, Opc, 1, Result).addReg(dummy2);
1544 else { // emit a little pseudocode to load a bool (stored in one byte)
1545 // into a predicate register
1546 assert(Opc==IA64::LD1 && "problem loading a bool");
1547 unsigned dummy3 = MakeReg(MVT::i64);
1548 BuildMI(BB, Opc, 1, dummy3).addReg(dummy2);
1549 // we compare to 0. true? 0. false? 1.
1550 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
1552 } else if(ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Address)) {
1554 IA64Lowering.restoreGP(BB);
1555 unsigned dummy = MakeReg(MVT::i64);
1556 BuildMI(BB, IA64::ADD, 2, dummy).addConstantPoolIndex(CP->getIndex())
1557 .addReg(IA64::r1); // CPI+GP
1559 BuildMI(BB, Opc, 1, Result).addReg(dummy);
1560 else { // emit a little pseudocode to load a bool (stored in one byte)
1561 // into a predicate register
1562 assert(Opc==IA64::LD1 && "problem loading a bool");
1563 unsigned dummy3 = MakeReg(MVT::i64);
1564 BuildMI(BB, Opc, 1, dummy3).addReg(dummy);
1565 // we compare to 0. true? 0. false? 1.
1566 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
1568 } else if(Address.getOpcode() == ISD::FrameIndex) {
1569 Select(Chain); // FIXME ? what about bools?
1570 unsigned dummy = MakeReg(MVT::i64);
1571 BuildMI(BB, IA64::MOV, 1, dummy)
1572 .addFrameIndex(cast<FrameIndexSDNode>(Address)->getIndex());
1574 BuildMI(BB, Opc, 1, Result).addReg(dummy);
1575 else { // emit a little pseudocode to load a bool (stored in one byte)
1576 // into a predicate register
1577 assert(Opc==IA64::LD1 && "problem loading a bool");
1578 unsigned dummy3 = MakeReg(MVT::i64);
1579 BuildMI(BB, Opc, 1, dummy3).addReg(dummy);
1580 // we compare to 0. true? 0. false? 1.
1581 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
1583 } else { // none of the above...
1585 Tmp2 = SelectExpr(Address);
1587 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
1588 else { // emit a little pseudocode to load a bool (stored in one byte)
1589 // into a predicate register
1590 assert(Opc==IA64::LD1 && "problem loading a bool");
1591 unsigned dummy = MakeReg(MVT::i64);
1592 BuildMI(BB, Opc, 1, dummy).addReg(Tmp2);
1593 // we compare to 0. true? 0. false? 1.
1594 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy).addReg(IA64::r0);
1601 case ISD::CopyFromReg: {
1603 Result = ExprMap[N.getValue(0)] =
1604 MakeReg(N.getValue(0).getValueType());
1606 SDOperand Chain = N.getOperand(0);
1609 unsigned r = dyn_cast<RegSDNode>(Node)->getReg();
1611 if(N.getValueType() == MVT::i1) // if a bool, we use pseudocode
1612 BuildMI(BB, IA64::PCMPEQUNC, 3, Result)
1613 .addReg(IA64::r0).addReg(IA64::r0).addReg(r);
1614 // (r) Result =cmp.eq.unc(r0,r0)
1616 BuildMI(BB, IA64::MOV, 1, Result).addReg(r); // otherwise MOV
1621 Select(N.getOperand(0));
1623 // The chain for this call is now lowered.
1624 ExprMap.insert(std::make_pair(N.getValue(Node->getNumValues()-1), 1));
1626 //grab the arguments
1627 std::vector<unsigned> argvregs;
1629 for(int i = 2, e = Node->getNumOperands(); i < e; ++i)
1630 argvregs.push_back(SelectExpr(N.getOperand(i)));
1632 // see section 8.5.8 of "Itanium Software Conventions and
1633 // Runtime Architecture Guide to see some examples of what's going
1634 // on here. (in short: int args get mapped 1:1 'slot-wise' to out0->out7,
1635 // while FP args get mapped to F8->F15 as needed)
1637 unsigned used_FPArgs=0; // how many FP Args have been used so far?
1640 for(int i = 0, e = std::min(8, (int)argvregs.size()); i < e; ++i)
1642 unsigned intArgs[] = {IA64::out0, IA64::out1, IA64::out2, IA64::out3,
1643 IA64::out4, IA64::out5, IA64::out6, IA64::out7 };
1644 unsigned FPArgs[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
1645 IA64::F12, IA64::F13, IA64::F14, IA64::F15 };
1647 switch(N.getOperand(i+2).getValueType())
1649 default: // XXX do we need to support MVT::i1 here?
1651 N.getOperand(i).Val->dump();
1652 std::cerr << "Type for " << i << " is: " <<
1653 N.getOperand(i+2).getValueType() << std::endl;
1654 assert(0 && "Unknown value type for call");
1656 BuildMI(BB, IA64::MOV, 1, intArgs[i]).addReg(argvregs[i]);
1659 BuildMI(BB, IA64::FMOV, 1, FPArgs[used_FPArgs++])
1660 .addReg(argvregs[i]);
1661 // FIXME: we don't need to do this _all_ the time:
1662 BuildMI(BB, IA64::GETFD, 1, intArgs[i]).addReg(argvregs[i]);
1668 for (int i = 8, e = argvregs.size(); i < e; ++i)
1670 unsigned tempAddr = MakeReg(MVT::i64);
1672 switch(N.getOperand(i+2).getValueType()) {
1675 N.getOperand(i).Val->dump();
1676 std::cerr << "Type for " << i << " is: " <<
1677 N.getOperand(i+2).getValueType() << "\n";
1678 assert(0 && "Unknown value type for call");
1679 case MVT::i1: // FIXME?
1684 BuildMI(BB, IA64::ADDIMM22, 2, tempAddr)
1685 .addReg(IA64::r12).addImm(16 + (i - 8) * 8); // r12 is SP
1686 BuildMI(BB, IA64::ST8, 2).addReg(tempAddr).addReg(argvregs[i]);
1690 BuildMI(BB, IA64::ADDIMM22, 2, tempAddr)
1691 .addReg(IA64::r12).addImm(16 + (i - 8) * 8); // r12 is SP
1692 BuildMI(BB, IA64::STF8, 2).addReg(tempAddr).addReg(argvregs[i]);
1697 /* XXX we want to re-enable direct branches! crippling them now
1698 * to stress-test indirect branches.:
1699 //build the right kind of call
1700 if (GlobalAddressSDNode *GASD =
1701 dyn_cast<GlobalAddressSDNode>(N.getOperand(1)))
1703 BuildMI(BB, IA64::BRCALL, 1).addGlobalAddress(GASD->getGlobal(),true);
1704 IA64Lowering.restoreGP_SP_RP(BB);
1706 ^^^^^^^^^^^^^ we want this code one day XXX */
1707 if (ExternalSymbolSDNode *ESSDN =
1708 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1)))
1709 { // FIXME : currently need this case for correctness, to avoid
1710 // "non-pic code with imm relocation against dynamic symbol" errors
1711 BuildMI(BB, IA64::BRCALL, 1)
1712 .addExternalSymbol(ESSDN->getSymbol(), true);
1713 IA64Lowering.restoreGP_SP_RP(BB);
1716 Tmp1 = SelectExpr(N.getOperand(1));
1718 unsigned targetEntryPoint=MakeReg(MVT::i64);
1719 unsigned targetGPAddr=MakeReg(MVT::i64);
1720 unsigned currentGP=MakeReg(MVT::i64);
1722 // b6 is a scratch branch register, we load the target entry point
1723 // from the base of the function descriptor
1724 BuildMI(BB, IA64::LD8, 1, targetEntryPoint).addReg(Tmp1);
1725 BuildMI(BB, IA64::MOV, 1, IA64::B6).addReg(targetEntryPoint);
1727 // save the current GP:
1728 BuildMI(BB, IA64::MOV, 1, currentGP).addReg(IA64::r1);
1730 /* TODO: we need to make sure doing this never, ever loads a
1731 * bogus value into r1 (GP). */
1732 // load the target GP (which is at mem[functiondescriptor+8])
1733 BuildMI(BB, IA64::ADDIMM22, 2, targetGPAddr)
1734 .addReg(Tmp1).addImm(8); // FIXME: addimm22? why not postincrement ld
1735 BuildMI(BB, IA64::LD8, 1, IA64::r1).addReg(targetGPAddr);
1737 // and then jump: (well, call)
1738 BuildMI(BB, IA64::BRCALL, 1).addReg(IA64::B6);
1739 // and finally restore the old GP
1740 BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(currentGP);
1741 IA64Lowering.restoreSP_RP(BB);
1744 switch (Node->getValueType(0)) {
1745 default: assert(0 && "Unknown value type for call result!");
1746 case MVT::Other: return 1;
1748 BuildMI(BB, IA64::CMPNE, 2, Result)
1749 .addReg(IA64::r8).addReg(IA64::r0);
1755 BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r8);
1758 BuildMI(BB, IA64::FMOV, 1, Result).addReg(IA64::F8);
1761 return Result+N.ResNo;
/// Select - Emit IA64 machine instructions for DAG nodes that exist for
/// their side effects / token chain (branches, stores, returns, register
/// copies, call-stack adjustment), as opposed to SelectExpr, which handles
/// nodes that compute a value.
///
/// NOTE(review): this listing is elided -- a number of case labels, 'break'
/// statements, 'else' keywords and closing braces are not visible here.
/// Annotations below that attribute a fragment to a particular ISD opcode
/// are inferences from the visible code and are marked as such.
void ISel::Select(SDOperand N) {
  unsigned Tmp1, Tmp2, Opc;
  unsigned opcode = N.getOpcode();

  // Each side-effecting node must be emitted exactly once; LoweredTokens is
  // the set of nodes already selected.
  if (!LoweredTokens.insert(N).second)
    return; // Already selected.

  SDNode *Node = N.Val;

  switch (Node->getOpcode()) {
  // default (label elided in this listing): unhandled node -- dump it and
  // abort so the missing case is obvious.
  Node->dump(); std::cerr << "\n";
  assert(0 && "Node not handled yet!");

  case ISD::EntryToken: return; // Noop

  case ISD::TokenFactor: {
    // TokenFactor merges several chains: just select every input chain.
    for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
      Select(Node->getOperand(i));

  case ISD::CopyToReg: {
    Select(N.getOperand(0));             // chain first
    Tmp1 = SelectExpr(N.getOperand(1));  // then the value being copied
    Tmp2 = cast<RegSDNode>(N)->getReg(); // destination register

    // A predicate (i1) register cannot be the target of a plain MOV, so the
    // copy is synthesized with a predicated compare: under predicate Tmp1,
    // cmp.eq(r0,r0) is true; the .unc form leaves Tmp2 cleared when Tmp1 is
    // false (see the Itanium cmp.unc semantics).
    if(N.getValueType() == MVT::i1) // if a bool, we use pseudocode
      BuildMI(BB, IA64::PCMPEQUNC, 3, Tmp2)
        .addReg(IA64::r0).addReg(IA64::r0).addReg(Tmp1);
      // (Tmp1) Tmp2 = cmp.eq.unc(r0,r0)
    // else (keyword elided in this listing): ordinary register move.
      BuildMI(BB, IA64::MOV, 1, Tmp2).addReg(Tmp1);
      // XXX is this the right way 'round? ;)

  // NOTE(review): the fragment below appears to handle ISD::RET (case label
  // elided in this listing) -- see the r8/F8 return-register moves and the
  // final 'ret' below.
  /* what the heck is going on here:
  <_sabre_> ret with two operands is obvious: chain and value
  <_sabre_> ret with 3 values happens when 'expansion' occurs
  <_sabre_> e.g. i64 gets split into 2x i32
  <_sabre_> you don't have this case on ia64
  <_sabre_> so the two returned values go into EAX/EDX on ia32
  <camel_> ahhh *memories*
  <camel_> ok, thanks :)
  <_sabre_> so yeah, everything that has a side effect takes a 'token chain'
  <_sabre_> this is the first operand always
  <_sabre_> these operand often define chains, they are the last operand
  <_sabre_> they are printed as 'ch' if you do DAG.dump()

  switch (N.getNumOperands()) {
  // default (label elided in this listing): unknown ret arity.
  assert(0 && "Unknown return instruction!");
  // two-operand ret (case label elided): <chain, value>.
  Select(N.getOperand(0));
  Tmp1 = SelectExpr(N.getOperand(1));
  switch (N.getOperand(1).getValueType()) {
  default: assert(0 && "All other types should have been promoted!!");
  // FIXME: do I need to add support for bools here?
  // (return '0' or '1' r8, basically...)
  // FIXME: need to round floats - 80 bits is bad, the tester
  // integer return value (case label elided): goes in r8.
  // we mark r8 as live on exit up above in LowerArguments()
  BuildMI(BB, IA64::MOV, 1, IA64::r8).addReg(Tmp1);
  // FP return value (case label elided): goes in F8.
  // we mark F8 as live on exit up above in LowerArguments()
  BuildMI(BB, IA64::FMOV, 1, IA64::F8).addReg(Tmp1);
  // one-operand ret (case label elided): void return, select the chain only.
  Select(N.getOperand(0));

  // before returning, restore the ar.pfs register (set by the 'alloc' up top)
  BuildMI(BB, IA64::MOV, 1).addReg(IA64::AR_PFS).addReg(IA64Lowering.VirtGPR);
  BuildMI(BB, IA64::RET, 0); // and then just emit a 'ret' instruction

  // NOTE(review): unconditional branch, presumably ISD::BR (case label
  // elided) -- it branches on p0, the always-true predicate.
  Select(N.getOperand(0));
  MachineBasicBlock *Dest =
    cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
  BuildMI(BB, IA64::BRLCOND_NOTCALL, 1).addReg(IA64::p0).addMBB(Dest);
  // XXX HACK! we do _not_ need long branches all the time

  case ISD::ImplicitDef: {
    Select(N.getOperand(0));
    // Mark the register as defined without emitting a real instruction.
    BuildMI(BB, IA64::IDEF, 0, cast<RegSDNode>(N)->getReg());

  // NOTE(review): conditional branch, presumably ISD::BRCOND (case label
  // elided): operands are <chain, condition, destination MBB>.
    MachineBasicBlock *Dest =
      cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
    Select(N.getOperand(0));
    Tmp1 = SelectExpr(N.getOperand(1)); // the condition predicate
    BuildMI(BB, IA64::BRLCOND_NOTCALL, 1).addReg(Tmp1).addMBB(Dest);
    // XXX HACK! we do _not_ need long branches all the time

  // Value-producing nodes that can appear on a chain; the code forwarding
  // them (presumably to SelectExpr) is elided in this listing.
  case ISD::CopyFromReg:
  case ISD::DYNAMIC_STACKALLOC:

  // ISD::STORE (case label elided in this listing) and:
  case ISD::TRUNCSTORE:
    Select(N.getOperand(0));            // chain
    Tmp1 = SelectExpr(N.getOperand(1)); // value to store

    // Pick the store opcode from the value type. Bools are stored as single
    // bytes via ST1. (isBool's declaration is elided from this listing.)
    if(opcode == ISD::STORE) {
      switch (N.getOperand(1).getValueType()) {
      default: assert(0 && "Cannot store this type!");
      case MVT::i1: Opc = IA64::ST1; isBool=true; break;
      // FIXME?: for now, we treat bool loads the same as i8 stores */
      case MVT::i8: Opc = IA64::ST1; break;
      case MVT::i16: Opc = IA64::ST2; break;
      case MVT::i32: Opc = IA64::ST4; break;
      case MVT::i64: Opc = IA64::ST8; break;
      case MVT::f32: Opc = IA64::STF4; break;
      case MVT::f64: Opc = IA64::STF8; break;
    } else { // truncstore
      // For a truncating store the width comes from the extra value type,
      // not from the stored operand's type.
      switch(cast<MVTSDNode>(Node)->getExtraValueType()) {
      default: assert(0 && "unknown type in truncstore");
      case MVT::i1: Opc = IA64::ST1; isBool=true; break;
      //FIXME: DAG does not promote this load?
      case MVT::i8: Opc = IA64::ST1; break;
      case MVT::i16: Opc = IA64::ST2; break;
      case MVT::i32: Opc = IA64::ST4; break;
      case MVT::f32: Opc = IA64::STF4; break;

    // Address is a global: add the global's offset to r1 (GP), then LD8
    // through it to fetch the actual address to store through -- TODO
    // confirm this is the GOT-indirection scheme; matches the load path.
    if(N.getOperand(2).getOpcode() == ISD::GlobalAddress) {
      unsigned dummy = MakeReg(MVT::i64);
      unsigned dummy2 = MakeReg(MVT::i64);
      BuildMI(BB, IA64::ADD, 2, dummy)
        .addGlobalAddress(cast<GlobalAddressSDNode>
          (N.getOperand(2))->getGlobal()).addReg(IA64::r1);
      BuildMI(BB, IA64::LD8, 1, dummy2).addReg(dummy);
      // (if-condition elided in this listing: non-bool stores take this
      // simple path)
      BuildMI(BB, Opc, 2).addReg(dummy2).addReg(Tmp1);
      else { // we are storing a bool, so emit a little pseudocode
             // to store a predicate register as one byte
        assert(Opc==IA64::ST1);
        unsigned dummy3 = MakeReg(MVT::i64);
        unsigned dummy4 = MakeReg(MVT::i64);
        // Materialize 0 in a GR, then conditionally add 1 under predicate
        // Tmp1, yielding 0/1 which can be stored with st1.
        BuildMI(BB, IA64::MOV, 1, dummy3).addReg(IA64::r0);
        BuildMI(BB, IA64::TPCADDIMM22, 2, dummy4)
          .addReg(dummy3).addImm(1).addReg(Tmp1); // if(Tmp1) dummy=0+1;
        BuildMI(BB, Opc, 2).addReg(dummy2).addReg(dummy4);
    } else if(N.getOperand(2).getOpcode() == ISD::FrameIndex) {
      // Stack-slot address: move the frame index into a GR, then store.
      // FIXME? (what about bools?)
      unsigned dummy = MakeReg(MVT::i64);
      BuildMI(BB, IA64::MOV, 1, dummy)
        .addFrameIndex(cast<FrameIndexSDNode>(N.getOperand(2))->getIndex());
      BuildMI(BB, Opc, 2).addReg(dummy).addReg(Tmp1);
    } else { // otherwise
      Tmp2 = SelectExpr(N.getOperand(2)); //address
      // (if-condition elided in this listing: non-bool stores take this
      // simple path)
      BuildMI(BB, Opc, 2).addReg(Tmp2).addReg(Tmp1);
      else { // we are storing a bool, so emit a little pseudocode
             // to store a predicate register as one byte
        assert(Opc==IA64::ST1);
        unsigned dummy3 = MakeReg(MVT::i64);
        unsigned dummy4 = MakeReg(MVT::i64);
        BuildMI(BB, IA64::MOV, 1, dummy3).addReg(IA64::r0);
        BuildMI(BB, IA64::TPCADDIMM22, 2, dummy4)
          .addReg(dummy3).addImm(1).addReg(Tmp1); // if(Tmp1) dummy=0+1;
        BuildMI(BB, Opc, 2).addReg(Tmp2).addReg(dummy4);

  case ISD::ADJCALLSTACKDOWN:
  case ISD::ADJCALLSTACKUP: {
    Select(N.getOperand(0));
    Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
    // Emit the matching pseudo-instruction carrying the byte count; its
    // expansion into real SP adjustment happens elsewhere (not visible here).
    Opc = N.getOpcode() == ISD::ADJCALLSTACKDOWN ? IA64::ADJUSTCALLSTACKDOWN :
      IA64::ADJUSTCALLSTACKUP;
    BuildMI(BB, Opc, 1).addImm(Tmp1);

  // Unreachable guard after the switch (enclosing braces elided in this
  // listing).
  assert(0 && "GAME OVER. INSERT COIN?");
/// createIA64PatternInstructionSelector - This pass converts an LLVM function
/// into a machine code representation using pattern matching and a machine
/// description file.
FunctionPass *llvm::createIA64PatternInstructionSelector(TargetMachine &TM) {
  // ISel is the selector implemented earlier in this file; ownership of the
  // returned pass transfers to the caller (the pass manager).
  return new ISel(TM);