1 //===-- IA64ISelPattern.cpp - A pattern matching inst selector for IA64 ---===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Duraid Madina and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a pattern matching instruction selector for IA64.
12 //===----------------------------------------------------------------------===//
15 #include "IA64InstrBuilder.h"
16 #include "IA64RegisterInfo.h"
17 #include "IA64MachineFunctionInfo.h"
18 #include "llvm/Constants.h" // FIXME: REMOVE
19 #include "llvm/Function.h"
20 #include "llvm/CodeGen/MachineConstantPool.h" // FIXME: REMOVE
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/SelectionDAG.h"
24 #include "llvm/CodeGen/SelectionDAGISel.h"
25 #include "llvm/CodeGen/SSARegMap.h"
26 #include "llvm/Target/TargetData.h"
27 #include "llvm/Target/TargetLowering.h"
28 #include "llvm/Support/MathExtras.h"
29 #include "llvm/ADT/Statistic.h"
34 //===----------------------------------------------------------------------===//
35 // IA64TargetLowering - IA64 Implementation of the TargetLowering interface
/// IA64TargetLowering - IA64 implementation of the TargetLowering interface.
/// Configures register classes / legal operations for the IA64 backend and
/// declares the calling-convention lowering hooks (arguments, calls, varargs).
/// NOTE(review): several lines (access specifiers, closing braces) appear to
/// be missing from this view of the file — confirm against the full source.
class IA64TargetLowering : public TargetLowering {
  int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
  //int ReturnAddrIndex;            // FrameIndex for return slot.
  unsigned GP, SP, RP; // FIXME - clean this mess up
  // (GP/SP/RP are virtual registers created in LowerArguments to hold
  // copies of the physical r1/r12/rp registers; see restore* helpers below)

  unsigned VirtGPR; // this is public so it can be accessed in the selector
                    // for ISD::RET down below. add an accessor instead? FIXME
  // (VirtGPR receives the result of the PSEUDO_ALLOC emitted in
  //  LowerArguments)

  IA64TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
    // register class for general registers
    addRegisterClass(MVT::i64, IA64::GRRegisterClass);
    // register class for FP registers
    addRegisterClass(MVT::f64, IA64::FPRegisterClass);
    // register class for predicate registers
    addRegisterClass(MVT::i1, IA64::PRRegisterClass);

    // Operations with no direct IA64 equivalent get expanded by the
    // legalizer; some narrow loads are promoted to wider types instead.
    setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
    setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);

    setSetCCResultType(MVT::i1); // setcc results live in predicate registers
    setShiftAmountType(MVT::i64);

    setOperationAction(ISD::EXTLOAD , MVT::i1 , Promote);
    setOperationAction(ISD::EXTLOAD , MVT::f32 , Promote);

    setOperationAction(ISD::ZEXTLOAD , MVT::i1 , Expand);
    setOperationAction(ISD::ZEXTLOAD , MVT::i32 , Expand);

    setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
    setOperationAction(ISD::SEXTLOAD , MVT::i8 , Expand);
    setOperationAction(ISD::SEXTLOAD , MVT::i16 , Expand);

    // no hardware FP remainder: expand to library calls
    setOperationAction(ISD::SREM , MVT::f32 , Expand);
    setOperationAction(ISD::SREM , MVT::f64 , Expand);

    setOperationAction(ISD::UREM , MVT::f32 , Expand);
    setOperationAction(ISD::UREM , MVT::f64 , Expand);

    setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
    setOperationAction(ISD::MEMSET , MVT::Other, Expand);
    setOperationAction(ISD::MEMCPY , MVT::Other, Expand);

    computeRegisterProperties();

    // These FP immediates are cheap to materialize (see the ConstantFP
    // handling in SelectExpr, which builds them from f0/f1).
    addLegalFPImmediate(+0.0);
    addLegalFPImmediate(+1.0);
    addLegalFPImmediate(-0.0);
    addLegalFPImmediate(-1.0);

  /// LowerArguments - This hook must be implemented to indicate how we should
  /// lower the arguments for the specified function, into the specified DAG.
  virtual std::vector<SDOperand>
  LowerArguments(Function &F, SelectionDAG &DAG);

  /// LowerCallTo - This hook lowers an abstract call to a function into an
  /// actual call, returning the <result, chain> pair.
  virtual std::pair<SDOperand, SDOperand>
  LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
              SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);

  /// LowerVAStart - lower llvm.va_start; yields the varargs frame address.
  virtual std::pair<SDOperand, SDOperand>
  LowerVAStart(SDOperand Chain, SelectionDAG &DAG);

  /// LowerVAArgNext - lower va_arg / va_next for the given argument type.
  virtual std::pair<SDOperand,SDOperand>
  LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
                 const Type *ArgTy, SelectionDAG &DAG);

  // NOTE(review): the tail of this declaration is not visible here.
  virtual std::pair<SDOperand, SDOperand>
  LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,

  // restoreGP_SP_RP - reload the physical gp (r1), sp (r12) and rp registers
  // from the virtual-register copies saved on function entry.
  void restoreGP_SP_RP(MachineBasicBlock* BB)
    BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(GP);
    BuildMI(BB, IA64::MOV, 1, IA64::r12).addReg(SP);
    BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);

  // restoreSP_RP - reload only sp (r12) and rp.
  void restoreSP_RP(MachineBasicBlock* BB)
    BuildMI(BB, IA64::MOV, 1, IA64::r12).addReg(SP);
    BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);

  // restoreRP - reload only the return-pointer register rp.
  void restoreRP(MachineBasicBlock* BB)
    BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);

  // restoreGP - reload only the global pointer r1 (needed around calls and
  // constant-pool/global accesses; see uses in SelectExpr).
  void restoreGP(MachineBasicBlock* BB)
    BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(GP);
// LowerArguments - Lower F's incoming arguments into the DAG.  The first 8
// arguments arrive in registers (integer/pointer args in r32..r39, FP args in
// f8..f15 as needed); arguments beyond the eighth live in fixed stack slots
// at offset 16 + 8*(argnum-8).  Also creates virtual-register copies of
// gp/sp/rp and emits the PSEUDO_ALLOC marker for the eventual 'alloc'.
// NOTE(review): 'count' and the argVreg/argPreg/argOpc arrays used below are
// declared on lines not visible in this view — confirm against full source.
std::vector<SDOperand>
IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  // add beautiful description of IA64 stack frame format
  // here (from intel 24535803.pdf most likely)
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Virtual registers that will hold the entry values of gp/sp/rp.
  GP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  SP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  RP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));

  MachineBasicBlock& BB = MF.front();

  // The IA64 incoming-argument registers: integers in r32..r39,
  unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
                         IA64::r36, IA64::r37, IA64::r38, IA64::r39};
  // ...and floating-point values in f8..f15.
  unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
                        IA64::F12,IA64::F13,IA64::F14, IA64::F15};

  unsigned used_FPArgs = 0; // how many FP args have been used so far?

  unsigned ArgOffset = 0;

  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
    SDOperand newroot, argt;
    if(count < 8) { // need to fix this logic? maybe.

      // Dispatch on the LLVM type of this argument.
      switch (getValueType(I->getType())) {
        std::cerr << "ERROR in LowerArgs: unknown type "
          << getValueType(I->getType()) << "\n";

      // fixme? (well, will need to for weird FP structy stuff,
      // see intel ABI docs)
      BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
      // floating point args go into f8..f15 as-needed, the increment
      argVreg[count] =                                 // is below..:
        MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::f64));
      // FP args go into f8..f15 as needed: (hence the ++)
      argPreg[count] = args_FP[used_FPArgs++];
      argOpc[count] = IA64::FMOV;
      argt = newroot = DAG.getCopyFromReg(argVreg[count],
          getValueType(I->getType()), DAG.getRoot());

      case MVT::i1: // NOTE: as far as C abi stuff goes,
                    // bools are just boring old ints
      // Integer args: mark the physreg live-in and copy to a fresh vreg.
      BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
        MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
      argPreg[count] = args_int[count];
      argOpc[count] = IA64::MOV;
        DAG.getCopyFromReg(argVreg[count], MVT::i64, DAG.getRoot());
      // Narrow integer types are passed widened; truncate back down.
      if ( getValueType(I->getType()) != MVT::i64)
        argt = DAG.getNode(ISD::TRUNCATE, getValueType(I->getType()),

    } else { // more than 8 args go into the frame
      // Create the frame index object for this incoming parameter...
      ArgOffset = 16 + 8 * (count - 8);
      int FI = MFI->CreateFixedObject(8, ArgOffset);

      // Create the SelectionDAG nodes corresponding to a load
      //from this parameter
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
      argt = newroot = DAG.getLoad(getValueType(I->getType()),
                                   DAG.getEntryNode(), FIN);

    DAG.setRoot(newroot.getValue(1));
    ArgValues.push_back(argt);

  // Create a vreg to hold the output of (what will become)
  // the "alloc" instruction
  VirtGPR = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  BuildMI(&BB, IA64::PSEUDO_ALLOC, 0, VirtGPR);
  // we create a PSEUDO_ALLOC (pseudo)instruction for now

  // Mark gp/sp/rp as implicitly defined on entry...
  BuildMI(&BB, IA64::IDEF, 0, IA64::r1);
  BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
  BuildMI(&BB, IA64::IDEF, 0, IA64::rp);

  // ...and stash them in the GP/SP/RP virtual registers so the restore*
  // helpers can recover them later.
  BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);
  BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
  BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);

  unsigned tempOffset=0;

  // if this is a varargs function, we simply lower llvm.va_start by
  // pointing to the first entry
  VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);

  // here we actually do the moving of args, and store them to the stack
  // too if this is a varargs function:
  for (int i = 0; i < count && i < 8; ++i) {
    BuildMI(&BB, argOpc[i], 1, argVreg[i]).addReg(argPreg[i]);
    // if this is a varargs function, we copy the input registers to the stack
    int FI = MFI->CreateFixedObject(8, tempOffset);
    tempOffset+=8; //XXX: is it safe to use r22 like this?
    BuildMI(&BB, IA64::MOV, 1, IA64::r22).addFrameIndex(FI);
    // FIXME: we should use st8.spill here, one day
    BuildMI(&BB, IA64::ST8, 1, IA64::r22).addReg(argPreg[i]);
// LowerCallTo - Lower an abstract call to a real one.  Reserves 16 bytes of
// outgoing stack (the IA64 "scratch area") plus 8 bytes per argument beyond
// the eighth, promotes narrow integer args to i64 (sign- or zero-extended by
// signedness) and f32 args to f64, brackets the call with
// ADJCALLSTACKDOWN/ADJCALLSTACKUP nodes, and returns the <result, chain> pair.
std::pair<SDOperand, SDOperand>
IA64TargetLowering::LowerCallTo(SDOperand Chain,
                                const Type *RetTy, bool isVarArg,
                                SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG) {

  MachineFunction &MF = DAG.getMachineFunction();

  unsigned NumBytes = 16;     // always reserve 16 bytes of outgoing stack
  unsigned outRegsUsed = 0;

  if (Args.size() > 8) {
    NumBytes += (Args.size() - 8) * 8;  // 8 bytes per stack-passed arg
    outRegsUsed = Args.size();
  // NOTE(review): an else branch setting outRegsUsed for the <=8 case is
  // not visible here — confirm against the full source.

  // FIXME? this WILL fail if we ever try to pass around an arg that
  // consumes more than a single output slot (a 'real' double, int128
  // some sort of aggregate etc.), as we'll underestimate how many 'outX'
  // registers we use. Hopefully, the assembler will notice.
  // Record the high-water mark of out-registers used by any call in this
  // function (the 'alloc' needs the maximum).
  MF.getInfo<IA64FunctionInfo>()->outRegsUsed=
    std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);

  Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));

  // Promote each argument to a legal (64-bit) type before the call.
  std::vector<SDOperand> args_to_use;
  for (unsigned i = 0, e = Args.size(); i != e; ++i)
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "unexpected argument type!");
      //promote to 64-bits, sign/zero extending based on type
      if(Args[i].second->isSigned())
        Args[i].first = DAG.getNode(ISD::SIGN_EXTEND, MVT::i64,
        Args[i].first = DAG.getNode(ISD::ZERO_EXTEND, MVT::i64,
      // f32 args are widened to f64:
      Args[i].first = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Args[i].first);
    args_to_use.push_back(Args[i].first);

  // Result types of the call node: the (possibly absent) return value,
  // then the output chain.
  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);
  if (RetTyVT != MVT::isVoid)
    RetVals.push_back(RetTyVT);
  RetVals.push_back(MVT::Other);

  SDOperand TheCall = SDOperand(DAG.getCall(RetVals, Chain,
                                            Callee, args_to_use), 0);
  // The chain is value 1 if there is a return value, else value 0.
  Chain = TheCall.getValue(RetTyVT != MVT::isVoid);
  Chain = DAG.getNode(ISD::ADJCALLSTACKUP, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));
  return std::make_pair(TheCall, Chain);
// LowerVAStart - Lower llvm.va_start: return the address of the
// VarArgsFrameIndex slot (created in LowerArguments), plus the input chain.
std::pair<SDOperand, SDOperand>
IA64TargetLowering::LowerVAStart(SDOperand Chain, SelectionDAG &DAG) {
  // vastart just returns the address of the VarArgsFrameIndex slot.
  return std::make_pair(DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64), Chain);
// LowerVAArgNext - Lower va_arg (load the current argument from VAList)
// and va_next (advance VAList past one argument slot).  Only i64/f64 are
// expected here; smaller types should have been promoted already.
// NOTE(review): the declarations of 'Result' and 'Amt' (the advance amount)
// are on lines not visible in this view — confirm against the full source.
std::pair<SDOperand,SDOperand> IA64TargetLowering::
LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
               const Type *ArgTy, SelectionDAG &DAG) {

  MVT::ValueType ArgVT = getValueType(ArgTy);
    // va_arg: load the argument value from the current list pointer.
    Result = DAG.getLoad(ArgVT, DAG.getEntryNode(), VAList);

    if (ArgVT == MVT::i32 || ArgVT == MVT::f32)
      assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
             "Other types should have been promoted for varargs!");
    // va_next: bump the list pointer by the argument size.
    Result = DAG.getNode(ISD::ADD, VAList.getValueType(), VAList,
                         DAG.getConstant(Amt, VAList.getValueType()));

  return std::make_pair(Result, Chain);
// LowerFrameReturnAddress - Lower llvm.frameaddress / llvm.returnaddress.
// Not implemented for IA64 yet; aborts if reached.
// NOTE(review): the tail of this parameter list is not visible in this view.
std::pair<SDOperand, SDOperand> IA64TargetLowering::
LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
  assert(0 && "LowerFrameReturnAddress not done yet\n");
  //===--------------------------------------------------------------------===//
  /// ISel - IA64 specific code to select IA64 machine instructions for
  /// SelectionDAG operations.
  class ISel : public SelectionDAGISel {
    /// IA64Lowering - This object fully describes how to lower LLVM code to an
    /// IA64-specific SelectionDAG.
    IA64TargetLowering IA64Lowering;

    /// ExprMap - As shared expressions are codegen'd, we keep track of which
    /// vreg the value is produced in, so we only emit one copy of each compiled
    /// tree (keyed by SDOperand).
    std::map<SDOperand, unsigned> ExprMap;
    // LoweredTokens - set of chain/token values already selected, so side
    // effects are emitted only once.
    std::set<SDOperand> LoweredTokens;

    ISel(TargetMachine &TM) : SelectionDAGISel(IA64Lowering), IA64Lowering(TM) {

    /// InstructionSelectBasicBlock - This callback is invoked by
    /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
    virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);

    // SelectExpr - select code for a value-producing node; returns the vreg
    // holding the result.  Select - select code for a side-effecting node.
    unsigned SelectExpr(SDOperand N);
    void Select(SDOperand N);
/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.  Selects the whole
/// DAG starting from its root, then clears per-block selection state.
void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {

  // Codegen the basic block.
  Select(DAG.getRoot());

  // Clear state used for selection.
  LoweredTokens.clear();
/// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N. It
/// returns zero when the input is not exactly a power of two.
static uint64_t ExactLog2(uint64_t Val) {
  // Reject 0 and any value with more than one bit set.
  if (Val == 0 || (Val & (Val-1))) return 0;
/// ponderIntegerDivisionBy - When handling integer divides, if the divide
/// is by a constant such that we can efficiently codegen it, this
/// function says what to do. Currently, it returns 0 if the division must
/// become a genuine divide, and 1 if the division can be turned into a
/// shift (power-of-two divisor; the shift amount comes back via Imm).
/// NOTE(review): the tail of this parameter list (the Imm out-parameter)
/// is not visible in this view — confirm against the full source.
static unsigned ponderIntegerDivisionBy(SDOperand N, bool isSigned,
  if (N.getOpcode() != ISD::Constant) return 0; // if not a divide by
                                                // a constant, give up.

  int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();

  if ((Imm = ExactLog2(v))) { // if a division by a power of two, say so

  return 0; // fallthrough
// ponderIntegerAdditionWith - Returns nonzero (with the encoded value in
// Imm) if N is a constant that fits in IA64's 14-bit signed add-immediate
// field, so the caller can emit ADDIMM14 instead of a reg+reg ADD.
static unsigned ponderIntegerAdditionWith(SDOperand N, unsigned& Imm) {
  if (N.getOpcode() != ISD::Constant) return 0; // if not adding a
                                                // constant, give up.

  int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();

  if (v <= 8191 && v >= -8192) { // if this constants fits in 14 bits, say so
    Imm = v & 0x3FFF; // 14 bits

  return 0; // fallthrough
// ponderIntegerSubtractionFrom - Returns nonzero (with the encoded value in
// Imm) if N is a constant that fits in IA64's 8-bit "subtract from immediate"
// field, so the caller can emit SUBIMM8 instead of a reg+reg SUB.
static unsigned ponderIntegerSubtractionFrom(SDOperand N, unsigned& Imm) {
  if (N.getOpcode() != ISD::Constant) return 0; // if not subtracting a
                                                // constant, give up.

  int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();

  if (v <= 127 && v >= -128) { // if this constants fits in 8 bits, say so
    Imm = v & 0xFF; // 8 bits

  return 0; // fallthrough
482 unsigned ISel::SelectExpr(SDOperand N) {
484 unsigned Tmp1, Tmp2, Tmp3;
486 MVT::ValueType DestType = N.getValueType();
488 unsigned opcode = N.getOpcode();
490 SDNode *Node = N.Val;
493 if (Node->getOpcode() == ISD::CopyFromReg)
494 // Just use the specified register as our input.
495 return dyn_cast<RegSDNode>(Node)->getReg();
497 unsigned &Reg = ExprMap[N];
500 if (N.getOpcode() != ISD::CALL)
501 Reg = Result = (N.getValueType() != MVT::Other) ?
502 MakeReg(N.getValueType()) : 1;
504 // If this is a call instruction, make sure to prepare ALL of the result
505 // values as well as the chain.
506 if (Node->getNumValues() == 1)
507 Reg = Result = 1; // Void call, just a chain.
509 Result = MakeReg(Node->getValueType(0));
510 ExprMap[N.getValue(0)] = Result;
511 for (unsigned i = 1, e = N.Val->getNumValues()-1; i != e; ++i)
512 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
513 ExprMap[SDOperand(Node, Node->getNumValues()-1)] = 1;
517 switch (N.getOpcode()) {
520 assert(0 && "Node not handled!\n");
522 case ISD::FrameIndex: {
523 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
524 BuildMI(BB, IA64::MOV, 1, Result).addFrameIndex(Tmp1);
528 case ISD::ConstantPool: {
529 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
530 IA64Lowering.restoreGP(BB); // FIXME: do i really need this?
531 BuildMI(BB, IA64::ADD, 2, Result).addConstantPoolIndex(Tmp1)
536 case ISD::ConstantFP: {
537 Tmp1 = Result; // Intermediate Register
538 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
539 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
540 Tmp1 = MakeReg(MVT::f64);
542 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
543 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
544 BuildMI(BB, IA64::FMOV, 1, Tmp1).addReg(IA64::F0); // load 0.0
545 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
546 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
547 BuildMI(BB, IA64::FMOV, 1, Tmp1).addReg(IA64::F1); // load 1.0
549 assert(0 && "Unexpected FP constant!");
551 // we multiply by +1.0, negate (this is FNMA), and then add 0.0
552 BuildMI(BB, IA64::FNMA, 3, Result).addReg(Tmp1).addReg(IA64::F1)
557 case ISD::DYNAMIC_STACKALLOC: {
558 // Generate both result values.
560 ExprMap[N.getValue(1)] = 1; // Generate the token
562 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
564 // FIXME: We are currently ignoring the requested alignment for handling
565 // greater than the stack alignment. This will need to be revisited at some
566 // point. Align = N.getOperand(2);
568 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
569 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
570 std::cerr << "Cannot allocate stack object with greater alignment than"
571 << " the stack alignment yet!";
576 Select(N.getOperand(0));
577 if (ConstantSDNode* CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
579 if (CN->getValue() < 32000)
581 BuildMI(BB, IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
582 .addImm(-CN->getValue());
584 Tmp1 = SelectExpr(N.getOperand(1));
585 // Subtract size from stack pointer, thereby allocating some space.
586 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
589 Tmp1 = SelectExpr(N.getOperand(1));
590 // Subtract size from stack pointer, thereby allocating some space.
591 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
594 Select(N.getOperand(0));
595 Tmp1 = SelectExpr(N.getOperand(1));
596 // Subtract size from stack pointer, thereby allocating some space.
597 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
598 // Put a pointer to the space into the result register, by copying the
600 BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r12);
605 Tmp1 = SelectExpr(N.getOperand(0)); //Cond
606 Tmp2 = SelectExpr(N.getOperand(1)); //Use if TRUE
607 Tmp3 = SelectExpr(N.getOperand(2)); //Use if FALSE
611 switch (N.getOperand(1).getValueType()) {
613 "ISD::SELECT: 'select'ing something other than i64 or f64!\n");
615 bogoResult=MakeReg(MVT::i64);
618 bogoResult=MakeReg(MVT::f64);
622 BuildMI(BB, IA64::MOV, 1, bogoResult).addReg(Tmp3);
623 BuildMI(BB, IA64::CMOV, 2, Result).addReg(bogoResult).addReg(Tmp2)
624 .addReg(Tmp1); // FIXME: should be FMOV/FCMOV sometimes,
625 // though this will work for now (no JIT)
629 case ISD::Constant: {
630 unsigned depositPos=0;
631 unsigned depositLen=0;
632 switch (N.getValueType()) {
633 default: assert(0 && "Cannot use constants of this type!");
634 case MVT::i1: { // if a bool, we don't 'load' so much as generate
636 if(cast<ConstantSDNode>(N)->getValue()) // true:
637 BuildMI(BB, IA64::CMPEQ, 2, Result)
638 .addReg(IA64::r0).addReg(IA64::r0);
640 BuildMI(BB, IA64::CMPNE, 2, Result)
641 .addReg(IA64::r0).addReg(IA64::r0);
644 case MVT::i64: Opc = IA64::MOVLI32; break;
647 int64_t immediate = cast<ConstantSDNode>(N)->getValue();
648 if(immediate>>32) { // if our immediate really is big:
649 int highPart = immediate>>32;
650 int lowPart = immediate&0xFFFFFFFF;
651 unsigned dummy = MakeReg(MVT::i64);
652 unsigned dummy2 = MakeReg(MVT::i64);
653 unsigned dummy3 = MakeReg(MVT::i64);
655 BuildMI(BB, IA64::MOVLI32, 1, dummy).addImm(highPart);
656 BuildMI(BB, IA64::SHLI, 2, dummy2).addReg(dummy).addImm(32);
657 BuildMI(BB, IA64::MOVLI32, 1, dummy3).addImm(lowPart);
658 BuildMI(BB, IA64::ADD, 2, Result).addReg(dummy2).addReg(dummy3);
660 BuildMI(BB, IA64::MOVLI32, 1, Result).addImm(immediate);
667 BuildMI(BB, IA64::IDEF, 0, Result);
671 case ISD::GlobalAddress: {
672 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
673 unsigned Tmp1 = MakeReg(MVT::i64);
675 BuildMI(BB, IA64::ADD, 2, Tmp1).addGlobalAddress(GV).addReg(IA64::r1);
676 BuildMI(BB, IA64::LD8, 1, Result).addReg(Tmp1);
681 case ISD::ExternalSymbol: {
682 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
683 // assert(0 && "sorry, but what did you want an ExternalSymbol for again?");
684 BuildMI(BB, IA64::MOV, 1, Result).addExternalSymbol(Sym); // XXX
688 case ISD::FP_EXTEND: {
689 Tmp1 = SelectExpr(N.getOperand(0));
690 BuildMI(BB, IA64::FMOV, 1, Result).addReg(Tmp1);
694 case ISD::ZERO_EXTEND: {
695 Tmp1 = SelectExpr(N.getOperand(0)); // value
697 switch (N.getOperand(0).getValueType()) {
698 default: assert(0 && "Cannot zero-extend this type!");
699 case MVT::i8: Opc = IA64::ZXT1; break;
700 case MVT::i16: Opc = IA64::ZXT2; break;
701 case MVT::i32: Opc = IA64::ZXT4; break;
703 // we handle bools differently! :
704 case MVT::i1: { // if the predicate reg has 1, we want a '1' in our GR.
705 unsigned dummy = MakeReg(MVT::i64);
707 BuildMI(BB, IA64::MOV, 1, dummy).addReg(IA64::r0);
708 // ...then conditionally (PR:Tmp1) add 1:
709 BuildMI(BB, IA64::CADDIMM22, 3, Result).addReg(dummy)
710 .addImm(1).addReg(Tmp1);
711 return Result; // XXX early exit!
715 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
719 case ISD::SIGN_EXTEND: { // we should only have to handle i1 -> i64 here!!!
721 assert(0 && "hmm, ISD::SIGN_EXTEND: shouldn't ever be reached. bad luck!\n");
723 Tmp1 = SelectExpr(N.getOperand(0)); // value
725 switch (N.getOperand(0).getValueType()) {
726 default: assert(0 && "Cannot sign-extend this type!");
727 case MVT::i1: assert(0 && "trying to sign extend a bool? ow.\n");
728 Opc = IA64::SXT1; break;
729 // FIXME: for now, we treat bools the same as i8s
730 case MVT::i8: Opc = IA64::SXT1; break;
731 case MVT::i16: Opc = IA64::SXT2; break;
732 case MVT::i32: Opc = IA64::SXT4; break;
735 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
739 case ISD::TRUNCATE: {
740 // we use the funky dep.z (deposit (zero)) instruction to deposit bits
741 // of R0 appropriately.
742 switch (N.getOperand(0).getValueType()) {
743 default: assert(0 && "Unknown truncate!");
744 case MVT::i64: break;
746 Tmp1 = SelectExpr(N.getOperand(0));
747 unsigned depositPos, depositLen;
749 switch (N.getValueType()) {
750 default: assert(0 && "Unknown truncate!");
752 // if input (normal reg) is 0, 0!=0 -> false (0), if 1, 1!=0 ->true (1):
753 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(Tmp1)
755 return Result; // XXX early exit!
757 case MVT::i8: depositPos=0; depositLen=8; break;
758 case MVT::i16: depositPos=0; depositLen=16; break;
759 case MVT::i32: depositPos=0; depositLen=32; break;
761 BuildMI(BB, IA64::DEPZ, 1, Result).addReg(Tmp1)
762 .addImm(depositPos).addImm(depositLen);
767 case ISD::FP_ROUND: {
768 assert (DestType == MVT::f32 && N.getOperand(0).getValueType() == MVT::f64 &&
769 "error: trying to FP_ROUND something other than f64 -> f32!\n");
770 Tmp1 = SelectExpr(N.getOperand(0));
771 BuildMI(BB, IA64::FADDS, 2, Result).addReg(Tmp1).addReg(IA64::F0);
772 // we add 0.0 using a single precision add to do rounding
777 // FIXME: the following 4 cases need cleaning
778 case ISD::SINT_TO_FP: {
779 Tmp1 = SelectExpr(N.getOperand(0));
780 Tmp2 = MakeReg(MVT::f64);
781 unsigned dummy = MakeReg(MVT::f64);
782 BuildMI(BB, IA64::SETFSIG, 1, Tmp2).addReg(Tmp1);
783 BuildMI(BB, IA64::FCVTXF, 1, dummy).addReg(Tmp2);
784 BuildMI(BB, IA64::FNORMD, 1, Result).addReg(dummy);
788 case ISD::UINT_TO_FP: {
789 Tmp1 = SelectExpr(N.getOperand(0));
790 Tmp2 = MakeReg(MVT::f64);
791 unsigned dummy = MakeReg(MVT::f64);
792 BuildMI(BB, IA64::SETFSIG, 1, Tmp2).addReg(Tmp1);
793 BuildMI(BB, IA64::FCVTXUF, 1, dummy).addReg(Tmp2);
794 BuildMI(BB, IA64::FNORMD, 1, Result).addReg(dummy);
798 case ISD::FP_TO_SINT: {
799 Tmp1 = SelectExpr(N.getOperand(0));
800 Tmp2 = MakeReg(MVT::f64);
801 BuildMI(BB, IA64::FCVTFXTRUNC, 1, Tmp2).addReg(Tmp1);
802 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(Tmp2);
806 case ISD::FP_TO_UINT: {
807 Tmp1 = SelectExpr(N.getOperand(0));
808 Tmp2 = MakeReg(MVT::f64);
809 BuildMI(BB, IA64::FCVTFXUTRUNC, 1, Tmp2).addReg(Tmp1);
810 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(Tmp2);
815 if(DestType == MVT::f64 && N.getOperand(0).getOpcode() == ISD::MUL &&
816 N.getOperand(0).Val->hasOneUse()) { // if we can fold this add
817 // into an fma, do so:
818 // ++FusedFP; // Statistic
819 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
820 Tmp2 = SelectExpr(N.getOperand(0).getOperand(1));
821 Tmp3 = SelectExpr(N.getOperand(1));
822 BuildMI(BB, IA64::FMA, 3, Result).addReg(Tmp1).addReg(Tmp2).addReg(Tmp3);
823 return Result; // early exit
825 Tmp1 = SelectExpr(N.getOperand(0));
826 Tmp2 = SelectExpr(N.getOperand(1));
827 if(DestType != MVT::f64) { // integer addition:
828 switch (ponderIntegerAdditionWith(N.getOperand(1), Tmp3)) {
829 case 1: // adding a constant that's 14 bits
830 BuildMI(BB, IA64::ADDIMM14, 2, Result).addReg(Tmp1).addSImm(Tmp3);
831 return Result; // early exit
832 } // fallthrough and emit a reg+reg ADD:
833 BuildMI(BB, IA64::ADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
834 } else { // this is a floating point addition
835 BuildMI(BB, IA64::FADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
841 Tmp1 = SelectExpr(N.getOperand(0));
842 Tmp2 = SelectExpr(N.getOperand(1));
844 if(DestType != MVT::f64) { // TODO: speed!
845 // boring old integer multiply with xma
846 unsigned TempFR1=MakeReg(MVT::f64);
847 unsigned TempFR2=MakeReg(MVT::f64);
848 unsigned TempFR3=MakeReg(MVT::f64);
849 BuildMI(BB, IA64::SETFSIG, 1, TempFR1).addReg(Tmp1);
850 BuildMI(BB, IA64::SETFSIG, 1, TempFR2).addReg(Tmp2);
851 BuildMI(BB, IA64::XMAL, 1, TempFR3).addReg(TempFR1).addReg(TempFR2)
853 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(TempFR3);
855 else // floating point multiply
856 BuildMI(BB, IA64::FMPY, 2, Result).addReg(Tmp1).addReg(Tmp2);
861 if(DestType == MVT::f64 && N.getOperand(0).getOpcode() == ISD::MUL &&
862 N.getOperand(0).Val->hasOneUse()) { // if we can fold this sub
863 // into an fms, do so:
864 // ++FusedFP; // Statistic
865 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
866 Tmp2 = SelectExpr(N.getOperand(0).getOperand(1));
867 Tmp3 = SelectExpr(N.getOperand(1));
868 BuildMI(BB, IA64::FMS, 3, Result).addReg(Tmp1).addReg(Tmp2).addReg(Tmp3);
869 return Result; // early exit
871 Tmp1 = SelectExpr(N.getOperand(0));
872 Tmp2 = SelectExpr(N.getOperand(1));
873 if(DestType != MVT::f64) { // integer subtraction:
874 switch (ponderIntegerSubtractionFrom(N.getOperand(0), Tmp3)) {
875 case 1: // subtracting *from* an 8 bit constant:
876 BuildMI(BB, IA64::SUBIMM8, 2, Result).addSImm(Tmp3).addReg(Tmp2);
877 return Result; // early exit
878 } // fallthrough and emit a reg+reg SUB:
879 BuildMI(BB, IA64::SUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
880 } else { // this is a floating point subtraction
881 BuildMI(BB, IA64::FSUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
887 Tmp1 = SelectExpr(N.getOperand(0));
888 assert(DestType == MVT::f64 && "trying to fabs something other than f64?");
889 BuildMI(BB, IA64::FABS, 1, Result).addReg(Tmp1);
894 assert(DestType == MVT::f64 && "trying to fneg something other than f64?");
896 if (ISD::FABS == N.getOperand(0).getOpcode()) { // && hasOneUse()?
897 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
898 BuildMI(BB, IA64::FNEGABS, 1, Result).addReg(Tmp1); // fold in abs
900 Tmp1 = SelectExpr(N.getOperand(0));
901 BuildMI(BB, IA64::FNEG, 1, Result).addReg(Tmp1); // plain old fneg
908 switch (N.getValueType()) {
909 default: assert(0 && "Cannot AND this type!");
910 case MVT::i1: { // if a bool, we emit a pseudocode AND
911 unsigned pA = SelectExpr(N.getOperand(0));
912 unsigned pB = SelectExpr(N.getOperand(1));
914 /* our pseudocode for AND is:
916 (pA) cmp.eq.unc pC,p0 = r0,r0 // pC = pA
917 cmp.eq pTemp,p0 = r0,r0 // pTemp = NOT pB
919 (pB) cmp.ne pTemp,p0 = r0,r0
921 (pTemp)cmp.ne pC,p0 = r0,r0 // if (NOT pB) pC = 0
924 unsigned pTemp = MakeReg(MVT::i1);
926 unsigned bogusTemp1 = MakeReg(MVT::i1);
927 unsigned bogusTemp2 = MakeReg(MVT::i1);
928 unsigned bogusTemp3 = MakeReg(MVT::i1);
929 unsigned bogusTemp4 = MakeReg(MVT::i1);
931 BuildMI(BB, IA64::PCMPEQUNC, 3, bogusTemp1)
932 .addReg(IA64::r0).addReg(IA64::r0).addReg(pA);
933 BuildMI(BB, IA64::CMPEQ, 2, bogusTemp2)
934 .addReg(IA64::r0).addReg(IA64::r0);
935 BuildMI(BB, IA64::TPCMPNE, 3, pTemp)
936 .addReg(bogusTemp2).addReg(IA64::r0).addReg(IA64::r0).addReg(pB);
937 BuildMI(BB, IA64::TPCMPNE, 3, Result)
938 .addReg(bogusTemp1).addReg(IA64::r0).addReg(IA64::r0).addReg(pTemp);
941 // if not a bool, we just AND away:
946 Tmp1 = SelectExpr(N.getOperand(0));
947 Tmp2 = SelectExpr(N.getOperand(1));
948 BuildMI(BB, IA64::AND, 2, Result).addReg(Tmp1).addReg(Tmp2);
956 switch (N.getValueType()) {
957 default: assert(0 && "Cannot OR this type!");
958 case MVT::i1: { // if a bool, we emit a pseudocode OR
959 unsigned pA = SelectExpr(N.getOperand(0));
960 unsigned pB = SelectExpr(N.getOperand(1));
962 unsigned pTemp1 = MakeReg(MVT::i1);
964 /* our pseudocode for OR is:
970 (pA) cmp.eq.unc pC,p0 = r0,r0 // pC = pA
972 (pB) cmp.eq pC,p0 = r0,r0 // if (pB) pC = 1
975 BuildMI(BB, IA64::PCMPEQUNC, 3, pTemp1)
976 .addReg(IA64::r0).addReg(IA64::r0).addReg(pA);
977 BuildMI(BB, IA64::TPCMPEQ, 3, Result)
978 .addReg(pTemp1).addReg(IA64::r0).addReg(IA64::r0).addReg(pB);
981 // if not a bool, we just OR away:
986 Tmp1 = SelectExpr(N.getOperand(0));
987 Tmp2 = SelectExpr(N.getOperand(1));
988 BuildMI(BB, IA64::OR, 2, Result).addReg(Tmp1).addReg(Tmp2);
996 switch (N.getValueType()) {
997 default: assert(0 && "Cannot XOR this type!");
998 case MVT::i1: { // if a bool, we emit a pseudocode XOR
999 unsigned pY = SelectExpr(N.getOperand(0));
1000 unsigned pZ = SelectExpr(N.getOperand(1));
1002 /* one possible routine for XOR is:
1004 // Compute px = py ^ pz
1005 // using sum of products: px = (py & !pz) | (pz & !py)
1006 // Uses 5 instructions in 3 cycles.
1008 (pz) cmp.eq.unc px = r0, r0 // px = pz
1009 (py) cmp.eq.unc pt = r0, r0 // pt = py
1012 (pt) cmp.ne.and px = r0, r0 // px = px & !pt (px = pz & !pt)
1013 (pz) cmp.ne.and pt = r0, r0 // pt = pt & !pz
1017 (pt) cmp.eq.or px = r0, r0 // px = px | pt
1019 *** Another, which we use here, requires one scratch GR. it is:
1021 mov rt = 0 // initialize rt off critical path
1025 (pz) cmp.eq.unc px = r0, r0 // px = pz
1026 (pz) mov rt = 1 // rt = pz
1029 (py) cmp.ne px = 1, rt // if (py) px = !pz
1031 .. these routines kindly provided by Jim Hull
1033 unsigned rt = MakeReg(MVT::i64);
1035 // these two temporaries will never actually appear,
1036 // due to the two-address form of some of the instructions below
1037 unsigned bogoPR = MakeReg(MVT::i1); // becomes Result
1038 unsigned bogoGR = MakeReg(MVT::i64); // becomes rt
1040 BuildMI(BB, IA64::MOV, 1, bogoGR).addReg(IA64::r0);
1041 BuildMI(BB, IA64::PCMPEQUNC, 3, bogoPR)
1042 .addReg(IA64::r0).addReg(IA64::r0).addReg(pZ);
1043 BuildMI(BB, IA64::TPCADDIMM22, 2, rt)
1044 .addReg(bogoGR).addImm(1).addReg(pZ);
1045 BuildMI(BB, IA64::TPCMPIMM8NE, 3, Result)
1046 .addReg(bogoPR).addImm(1).addReg(rt).addReg(pY);
1049 // if not a bool, we just XOR away:
1054 Tmp1 = SelectExpr(N.getOperand(0));
1055 Tmp2 = SelectExpr(N.getOperand(1));
1056 BuildMI(BB, IA64::XOR, 2, Result).addReg(Tmp1).addReg(Tmp2);
// Shift left: use the immediate form (SHLI) when the shift amount is a
// compile-time constant, otherwise a register-register shift (SHL).
1064 Tmp1 = SelectExpr(N.getOperand(0));
1065 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1066 Tmp2 = CN->getValue();
1067 BuildMI(BB, IA64::SHLI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1069 Tmp2 = SelectExpr(N.getOperand(1));
1070 BuildMI(BB, IA64::SHL, 2, Result).addReg(Tmp1).addReg(Tmp2);
// Logical (unsigned) shift right: SHRUI for constant amounts, SHRU otherwise.
1076 Tmp1 = SelectExpr(N.getOperand(0));
1077 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1078 Tmp2 = CN->getValue();
1079 BuildMI(BB, IA64::SHRUI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1081 Tmp2 = SelectExpr(N.getOperand(1));
1082 BuildMI(BB, IA64::SHRU, 2, Result).addReg(Tmp1).addReg(Tmp2);
// Arithmetic (signed) shift right: SHRSI for constant amounts, SHRS otherwise.
1088 Tmp1 = SelectExpr(N.getOperand(0));
1089 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1090 Tmp2 = CN->getValue();
1091 BuildMI(BB, IA64::SHRSI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1093 Tmp2 = SelectExpr(N.getOperand(1));
1094 BuildMI(BB, IA64::SHRS, 2, Result).addReg(Tmp1).addReg(Tmp2);
// SDIV/UDIV/SREM/UREM lowering. IA64 has no integer divide instruction;
// division is done in floating point via frcpa + Newton-Raphson refinement.
1104 Tmp1 = SelectExpr(N.getOperand(0));
1105 Tmp2 = SelectExpr(N.getOperand(1));
1109 if(DestType == MVT::f64) // XXX: we're not gonna be fed MVT::f32, are we?
1112 bool isModulus=false; // is it a division or a modulus?
1113 bool isSigned=false;
1115 switch(N.getOpcode()) {
1116 case ISD::SDIV: isModulus=false; isSigned=true; break;
1117 case ISD::UDIV: isModulus=false; isSigned=false; break;
1118 case ISD::SREM: isModulus=true; isSigned=true; break;
1119 case ISD::UREM: isModulus=true; isSigned=false; break;
// Fast path: integer division by a constant power of two becomes shifts.
1122 if(!isModulus && !isFP) { // if this is an integer divide,
1123 switch (ponderIntegerDivisionBy(N.getOperand(1), isSigned, Tmp3)) {
1124 case 1: // division by a constant that's a power of 2
1125 Tmp1 = SelectExpr(N.getOperand(0));
1126 if(isSigned) { // argument could be negative, so emit some code:
// Signed power-of-two divide: add (divisor-1) to negative dividends
// (via sign-smear + extract) so the final arithmetic shift rounds
// toward zero instead of toward negative infinity.
1127 unsigned divAmt=Tmp3;
1128 unsigned tempGR1=MakeReg(MVT::i64);
1129 unsigned tempGR2=MakeReg(MVT::i64);
1130 unsigned tempGR3=MakeReg(MVT::i64);
1131 BuildMI(BB, IA64::SHRS, 2, tempGR1)
1132 .addReg(Tmp1).addImm(divAmt-1);
1133 BuildMI(BB, IA64::EXTRU, 3, tempGR2)
1134 .addReg(tempGR1).addImm(64-divAmt).addImm(divAmt);
1135 BuildMI(BB, IA64::ADD, 2, tempGR3)
1136 .addReg(Tmp1).addReg(tempGR2);
1137 BuildMI(BB, IA64::SHRS, 2, Result)
1138 .addReg(tempGR3).addImm(divAmt);
1140 else // unsigned div-by-power-of-2 becomes a simple shift right:
1141 BuildMI(BB, IA64::SHRU, 2, Result).addReg(Tmp1).addImm(Tmp3);
1142 return Result; // early exit
// General path: scratch registers for the frcpa/Newton-Raphson sequence.
1146 unsigned TmpPR=MakeReg(MVT::i1); // we need two scratch
1147 unsigned TmpPR2=MakeReg(MVT::i1); // predicate registers,
1148 unsigned TmpF1=MakeReg(MVT::f64); // and one metric truckload of FP regs.
1149 unsigned TmpF2=MakeReg(MVT::f64); // lucky we have IA64?
1150 unsigned TmpF3=MakeReg(MVT::f64); // well, the real FIXME is to have
1151 unsigned TmpF4=MakeReg(MVT::f64); // isTwoAddress forms of these
1152 unsigned TmpF5=MakeReg(MVT::f64); // FP instructions so we can end up with
1153 unsigned TmpF6=MakeReg(MVT::f64); // stuff like setf.sig f10=f10 etc.
1154 unsigned TmpF7=MakeReg(MVT::f64);
1155 unsigned TmpF8=MakeReg(MVT::f64);
1156 unsigned TmpF9=MakeReg(MVT::f64);
1157 unsigned TmpF10=MakeReg(MVT::f64);
1158 unsigned TmpF11=MakeReg(MVT::f64);
1159 unsigned TmpF12=MakeReg(MVT::f64);
1160 unsigned TmpF13=MakeReg(MVT::f64);
1161 unsigned TmpF14=MakeReg(MVT::f64);
1162 unsigned TmpF15=MakeReg(MVT::f64);
1164 // OK, emit some code:
1167 // first, load the inputs into FP regs.
1168 BuildMI(BB, IA64::SETFSIG, 1, TmpF1).addReg(Tmp1);
1169 BuildMI(BB, IA64::SETFSIG, 1, TmpF2).addReg(Tmp2);
1171 // next, convert the inputs to FP
// (signed vs. unsigned integer-to-FP conversion, per isSigned)
1173 BuildMI(BB, IA64::FCVTXF, 1, TmpF3).addReg(TmpF1);
1174 BuildMI(BB, IA64::FCVTXF, 1, TmpF4).addReg(TmpF2);
1176 BuildMI(BB, IA64::FCVTXUFS1, 1, TmpF3).addReg(TmpF1);
1177 BuildMI(BB, IA64::FCVTXUFS1, 1, TmpF4).addReg(TmpF2);
1180 } else { // this is an FP divide/remainder, so we 'leak' some temp
1181 // regs and assign TmpF3=Tmp1, TmpF4=Tmp2
1186 // we start by computing an approximate reciprocal (good to 9 bits?)
1187 // note, this instruction writes _both_ TmpF5 (answer) and TmpPR (predicate)
1188 BuildMI(BB, IA64::FRCPAS1, 4)
1189 .addReg(TmpF5, MachineOperand::Def)
1190 .addReg(TmpPR, MachineOperand::Def)
1191 .addReg(TmpF3).addReg(TmpF4);
1193 if(!isModulus) { // if this is a divide, we worry about div-by-zero
// TmpPR2 = !TmpPR, used below to select the frcpa result when the
// refinement sequence was predicated off (special-case inputs).
1194 unsigned bogusPR=MakeReg(MVT::i1); // won't appear, due to twoAddress
1196 BuildMI(BB, IA64::CMPEQ, 2, bogusPR).addReg(IA64::r0).addReg(IA64::r0);
1197 BuildMI(BB, IA64::TPCMPNE, 3, TmpPR2).addReg(bogusPR)
1198 .addReg(IA64::r0).addReg(IA64::r0).addReg(TmpPR);
1201 // now we apply newton's method, thrice! (FIXME: this is ~72 bits of
1202 // precision, don't need this much for f32/i32)
// All refinement steps are predicated on TmpPR (frcpa's predicate output).
1203 BuildMI(BB, IA64::CFNMAS1, 4, TmpF6)
1204 .addReg(TmpF4).addReg(TmpF5).addReg(IA64::F1).addReg(TmpPR);
1205 BuildMI(BB, IA64::CFMAS1, 4, TmpF7)
1206 .addReg(TmpF3).addReg(TmpF5).addReg(IA64::F0).addReg(TmpPR);
1207 BuildMI(BB, IA64::CFMAS1, 4, TmpF8)
1208 .addReg(TmpF6).addReg(TmpF6).addReg(IA64::F0).addReg(TmpPR);
1209 BuildMI(BB, IA64::CFMAS1, 4, TmpF9)
1210 .addReg(TmpF6).addReg(TmpF7).addReg(TmpF7).addReg(TmpPR);
1211 BuildMI(BB, IA64::CFMAS1, 4,TmpF10)
1212 .addReg(TmpF6).addReg(TmpF5).addReg(TmpF5).addReg(TmpPR);
1213 BuildMI(BB, IA64::CFMAS1, 4,TmpF11)
1214 .addReg(TmpF8).addReg(TmpF9).addReg(TmpF9).addReg(TmpPR);
1215 BuildMI(BB, IA64::CFMAS1, 4,TmpF12)
1216 .addReg(TmpF8).addReg(TmpF10).addReg(TmpF10).addReg(TmpPR);
1217 BuildMI(BB, IA64::CFNMAS1, 4,TmpF13)
1218 .addReg(TmpF4).addReg(TmpF11).addReg(TmpF3).addReg(TmpPR);
1220 // FIXME: this is unfortunate :(
1221 // the story is that the dest reg of the fnma above and the fma below
1222 // (and therefore possibly the src of the fcvt.fx[u] as well) cannot
1223 // be the same register, or this code breaks if the first argument is
1224 // zero. (e.g. without this hack, 0%8 yields -64, not 0.)
1225 BuildMI(BB, IA64::CFMAS1, 4,TmpF14)
1226 .addReg(TmpF13).addReg(TmpF12).addReg(TmpF11).addReg(TmpPR);
1228 if(isModulus) { // XXX: fragile! fixes _only_ mod, *breaks* div! !
// IUSE keeps TmpF13 alive so the register allocator cannot coalesce
// it with TmpF14 (see the FIXME above).
1229 BuildMI(BB, IA64::IUSE, 1).addReg(TmpF13); // hack :(
1233 // round to an integer
// (truncating FP-to-integer conversion, signed or unsigned; for a
// pure FP divide we just FMOV the refined quotient instead)
1235 BuildMI(BB, IA64::FCVTFXTRUNCS1, 1, TmpF15).addReg(TmpF14);
1237 BuildMI(BB, IA64::FCVTFXUTRUNCS1, 1, TmpF15).addReg(TmpF14);
1239 BuildMI(BB, IA64::FMOV, 1, TmpF15).addReg(TmpF14);
1240 // EXERCISE: can you see why TmpF15=TmpF14 does not work here, and
1241 // we really do need the above FMOV? ;)
1245 if(isFP) { // extra worrying about div-by-zero
1246 unsigned bogoResult=MakeReg(MVT::f64);
1248 // we do a 'conditional fmov' (of the correct result, depending
1249 // on how the frcpa predicate turned out)
1250 BuildMI(BB, IA64::PFMOV, 2, bogoResult)
1251 .addReg(TmpF12).addReg(TmpPR2);
1252 BuildMI(BB, IA64::CFMOV, 2, Result)
1253 .addReg(bogoResult).addReg(TmpF15).addReg(TmpPR);
// Integer divide: move the converted quotient back to a GR.
1256 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(TmpF15);
1258 } else { // this is a modulus
// Integer remainder: reconstruct a - q*b as q*(-b) + a with xma.l.
1260 // answer = q * (-b) + a
1261 unsigned ModulusResult = MakeReg(MVT::f64);
1262 unsigned TmpF = MakeReg(MVT::f64);
1263 unsigned TmpI = MakeReg(MVT::i64);
1265 BuildMI(BB, IA64::SUB, 2, TmpI).addReg(IA64::r0).addReg(Tmp2);
1266 BuildMI(BB, IA64::SETFSIG, 1, TmpF).addReg(TmpI);
1267 BuildMI(BB, IA64::XMAL, 3, ModulusResult)
1268 .addReg(TmpF15).addReg(TmpF).addReg(TmpF1);
1269 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(ModulusResult);
1270 } else { // FP modulus! The horror... the horror....
1271 assert(0 && "sorry, no FP modulus just yet!\n!\n");
// In-register zero extension: pick the zxt instruction matching the
// sub-i64 type recorded on the MVTSDNode.
1278 case ISD::ZERO_EXTEND_INREG: {
1279 Tmp1 = SelectExpr(N.getOperand(0));
1280 MVTSDNode* MVN = dyn_cast<MVTSDNode>(Node);
1281 switch(MVN->getExtraValueType())
1285 assert(0 && "don't know how to zero extend this type");
1287 case MVT::i8: Opc = IA64::ZXT1; break;
1288 case MVT::i16: Opc = IA64::ZXT2; break;
1289 case MVT::i32: Opc = IA64::ZXT4; break;
1291 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
// In-register sign extension: same scheme with the sxt family.
1295 case ISD::SIGN_EXTEND_INREG: {
1296 Tmp1 = SelectExpr(N.getOperand(0));
1297 MVTSDNode* MVN = dyn_cast<MVTSDNode>(Node);
1298 switch(MVN->getExtraValueType())
1302 assert(0 && "don't know how to sign extend this type");
1304 case MVT::i8: Opc = IA64::SXT1; break;
1305 case MVT::i16: Opc = IA64::SXT2; break;
1306 case MVT::i32: Opc = IA64::SXT4; break;
1308 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
// SETCC: compare the two selected operands and write the outcome into a
// predicate register (Result). Integer and FP comparisons use separate
// compare opcodes; the condition code picks which one.
1313 Tmp1 = SelectExpr(N.getOperand(0));
1314 Tmp2 = SelectExpr(N.getOperand(1));
1315 if (SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Node)) {
1316 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
1317 switch (SetCC->getCondition()) {
1318 default: assert(0 && "Unknown integer comparison!");
1320 BuildMI(BB, IA64::CMPEQ, 2, Result).addReg(Tmp1).addReg(Tmp2);
1323 BuildMI(BB, IA64::CMPGT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1326 BuildMI(BB, IA64::CMPGE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1329 BuildMI(BB, IA64::CMPLT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1332 BuildMI(BB, IA64::CMPLE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1335 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1338 BuildMI(BB, IA64::CMPLTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1341 BuildMI(BB, IA64::CMPGTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1344 BuildMI(BB, IA64::CMPLEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1347 BuildMI(BB, IA64::CMPGEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1351 else { // if not integer, should be FP. FIXME: what about bools? ;)
1352 assert(SetCC->getOperand(0).getValueType() != MVT::f32 &&
1353 "error: SETCC should have had incoming f32 promoted to f64!\n");
1354 switch (SetCC->getCondition()) {
1355 default: assert(0 && "Unknown FP comparison!");
1357 BuildMI(BB, IA64::FCMPEQ, 2, Result).addReg(Tmp1).addReg(Tmp2);
1360 BuildMI(BB, IA64::FCMPGT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1363 BuildMI(BB, IA64::FCMPGE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1366 BuildMI(BB, IA64::FCMPLT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1369 BuildMI(BB, IA64::FCMPLE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1372 BuildMI(BB, IA64::FCMPNE, 2, Result).addReg(Tmp1).addReg(Tmp2);
// "U" variants are the unordered FP comparisons (true if a NaN is present).
1375 BuildMI(BB, IA64::FCMPLTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1378 BuildMI(BB, IA64::FCMPGTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1381 BuildMI(BB, IA64::FCMPLEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1384 BuildMI(BB, IA64::FCMPGEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1390 assert(0 && "this setcc not implemented yet");
// LOAD / EXTLOAD / ZEXTLOAD: pick a load opcode from the (extra) value
// type, then emit the address computation for whichever addressing form
// the address operand takes (global, constant pool, frame index, or a
// general selected address). Bools (i1) are stored as one byte; loading
// one emits an LD1 into a scratch GR followed by cmp.ne against r0 to
// produce the predicate register.
1398 // Make sure we generate both values.
1400 ExprMap[N.getValue(1)] = 1; // Generate the token
1402 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1406 if(opcode == ISD::LOAD) { // this is a LOAD
1407 switch (Node->getValueType(0)) {
1408 default: assert(0 && "Cannot load this type!");
1409 case MVT::i1: Opc = IA64::LD1; isBool=true; break;
1410 // FIXME: for now, we treat bool loads the same as i8 loads */
1411 case MVT::i8: Opc = IA64::LD1; break;
1412 case MVT::i16: Opc = IA64::LD2; break;
1413 case MVT::i32: Opc = IA64::LD4; break;
1414 case MVT::i64: Opc = IA64::LD8; break;
1416 case MVT::f32: Opc = IA64::LDF4; break;
1417 case MVT::f64: Opc = IA64::LDF8; break;
1419 } else { // this is an EXTLOAD or ZEXTLOAD
1420 MVT::ValueType TypeBeingLoaded = cast<MVTSDNode>(Node)->getExtraValueType();
1421 switch (TypeBeingLoaded) {
1422 default: assert(0 && "Cannot extload/zextload this type!");
1424 case MVT::i8: Opc = IA64::LD1; break;
1425 case MVT::i16: Opc = IA64::LD2; break;
1426 case MVT::i32: Opc = IA64::LD4; break;
1427 case MVT::f32: Opc = IA64::LDF4; break;
1431 SDOperand Chain = N.getOperand(0);
1432 SDOperand Address = N.getOperand(1);
// Global address: materialize the GOT entry address (GA + GP), then load
// the actual symbol address from it before doing the real load.
1434 if(Address.getOpcode() == ISD::GlobalAddress) {
1436 unsigned dummy = MakeReg(MVT::i64);
1437 unsigned dummy2 = MakeReg(MVT::i64);
1438 BuildMI(BB, IA64::ADD, 2, dummy)
1439 .addGlobalAddress(cast<GlobalAddressSDNode>(Address)->getGlobal())
1441 BuildMI(BB, IA64::LD8, 1, dummy2).addReg(dummy);
1443 BuildMI(BB, Opc, 1, Result).addReg(dummy2);
1444 else { // emit a little pseudocode to load a bool (stored in one byte)
1445 // into a predicate register
1446 assert(Opc==IA64::LD1 && "problem loading a bool");
1447 unsigned dummy3 = MakeReg(MVT::i64);
1448 BuildMI(BB, Opc, 1, dummy3).addReg(dummy2);
1449 // we compare to 0. true? 0. false? 1.
1450 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
// Constant pool entry: address is CP index + GP (r1).
1452 } else if(ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Address)) {
1454 IA64Lowering.restoreGP(BB);
1455 unsigned dummy = MakeReg(MVT::i64);
1456 BuildMI(BB, IA64::ADD, 2, dummy).addConstantPoolIndex(CP->getIndex())
1457 .addReg(IA64::r1); // CPI+GP
1459 BuildMI(BB, Opc, 1, Result).addReg(dummy);
1460 else { // emit a little pseudocode to load a bool (stored in one byte)
1461 // into a predicate register
1462 assert(Opc==IA64::LD1 && "problem loading a bool");
1463 unsigned dummy3 = MakeReg(MVT::i64);
1464 BuildMI(BB, Opc, 1, dummy3).addReg(dummy);
1465 // we compare to 0. true? 0. false? 1.
1466 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
// Frame index: move the (later-resolved) stack slot address into a GR.
1468 } else if(Address.getOpcode() == ISD::FrameIndex) {
1469 Select(Chain); // FIXME ? what about bools?
1470 unsigned dummy = MakeReg(MVT::i64);
1471 BuildMI(BB, IA64::MOV, 1, dummy)
1472 .addFrameIndex(cast<FrameIndexSDNode>(Address)->getIndex());
1474 BuildMI(BB, Opc, 1, Result).addReg(dummy);
1475 else { // emit a little pseudocode to load a bool (stored in one byte)
1476 // into a predicate register
1477 assert(Opc==IA64::LD1 && "problem loading a bool");
1478 unsigned dummy3 = MakeReg(MVT::i64);
1479 BuildMI(BB, Opc, 1, dummy3).addReg(dummy);
1480 // we compare to 0. true? 0. false? 1.
1481 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
// General case: select the address expression into a register.
1483 } else { // none of the above...
1485 Tmp2 = SelectExpr(Address);
1487 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
1488 else { // emit a little pseudocode to load a bool (stored in one byte)
1489 // into a predicate register
1490 assert(Opc==IA64::LD1 && "problem loading a bool");
1491 unsigned dummy = MakeReg(MVT::i64);
1492 BuildMI(BB, Opc, 1, dummy).addReg(Tmp2);
1493 // we compare to 0. true? 0. false? 1.
1494 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy).addReg(IA64::r0);
// CopyFromReg: copy the incoming physical/virtual register into a fresh
// virtual register. Predicate (i1) values cannot be MOVed directly, so a
// predicated compare is used instead.
1501 case ISD::CopyFromReg: {
1503 Result = ExprMap[N.getValue(0)] =
1504 MakeReg(N.getValue(0).getValueType());
1506 SDOperand Chain = N.getOperand(0);
1509 unsigned r = dyn_cast<RegSDNode>(Node)->getReg();
1511 if(N.getValueType() == MVT::i1) // if a bool, we use pseudocode
1512 BuildMI(BB, IA64::PCMPEQUNC, 3, Result)
1513 .addReg(IA64::r0).addReg(IA64::r0).addReg(r);
1514 // (r) Result =cmp.eq.unc(r0,r0)
1516 BuildMI(BB, IA64::MOV, 1, Result).addReg(r); // otherwise MOV
// Call lowering: select all arguments, marshal the first eight into the
// out0-out7 / F8-F15 argument registers, spill the rest to the memory
// argument area at SP+16, emit the (currently always indirect) br.call,
// and finally copy the return value out of r8 / F8.
1521 Select(N.getOperand(0));
1523 // The chain for this call is now lowered.
1524 ExprMap.insert(std::make_pair(N.getValue(Node->getNumValues()-1), 1));
1526 //grab the arguments
1527 std::vector<unsigned> argvregs;
1529 for(int i = 2, e = Node->getNumOperands(); i < e; ++i)
1530 argvregs.push_back(SelectExpr(N.getOperand(i)));
1532 // see section 8.5.8 of "Itanium Software Conventions and
1533 // Runtime Architecture Guide to see some examples of what's going
1534 // on here. (in short: int args get mapped 1:1 'slot-wise' to out0->out7,
1535 // while FP args get mapped to F8->F15 as needed)
1537 unsigned used_FPArgs=0; // how many FP Args have been used so far?
1540 for(int i = 0, e = std::min(8, (int)argvregs.size()); i < e; ++i)
1542 unsigned intArgs[] = {IA64::out0, IA64::out1, IA64::out2, IA64::out3,
1543 IA64::out4, IA64::out5, IA64::out6, IA64::out7 };
1544 unsigned FPArgs[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
1545 IA64::F12, IA64::F13, IA64::F14, IA64::F15 };
1547 switch(N.getOperand(i+2).getValueType())
1549 default: // XXX do we need to support MVT::i1 here?
// NOTE(review): this dumps operand i, but the switch above inspects
// operand i+2 — the diagnostic likely prints the wrong operand; confirm.
1551 N.getOperand(i).Val->dump();
1552 std::cerr << "Type for " << i << " is: " <<
1553 N.getOperand(i+2).getValueType() << std::endl;
1554 assert(0 && "Unknown value type for call");
1556 BuildMI(BB, IA64::MOV, 1, intArgs[i]).addReg(argvregs[i]);
1559 BuildMI(BB, IA64::FMOV, 1, FPArgs[used_FPArgs++])
1560 .addReg(argvregs[i]);
1561 // FIXME: we don't need to do this _all_ the time:
// FP args are mirrored into the slot-wise int arg register too.
1562 BuildMI(BB, IA64::GETFD, 1, intArgs[i]).addReg(argvregs[i]);
// Arguments beyond the first eight go to the memory argument area,
// starting 16 bytes above SP (r12), 8 bytes per slot.
1568 for (int i = 8, e = argvregs.size(); i < e; ++i)
1570 unsigned tempAddr = MakeReg(MVT::i64);
1572 switch(N.getOperand(i+2).getValueType()) {
// NOTE(review): same i vs. i+2 mismatch as the diagnostic above — confirm.
1575 N.getOperand(i).Val->dump();
1576 std::cerr << "Type for " << i << " is: " <<
1577 N.getOperand(i+2).getValueType() << "\n";
1578 assert(0 && "Unknown value type for call");
1579 case MVT::i1: // FIXME?
1584 BuildMI(BB, IA64::ADDIMM22, 2, tempAddr)
1585 .addReg(IA64::r12).addImm(16 + (i - 8) * 8); // r12 is SP
1586 BuildMI(BB, IA64::ST8, 2).addReg(tempAddr).addReg(argvregs[i]);
1590 BuildMI(BB, IA64::ADDIMM22, 2, tempAddr)
1591 .addReg(IA64::r12).addImm(16 + (i - 8) * 8); // r12 is SP
1592 BuildMI(BB, IA64::STF8, 2).addReg(tempAddr).addReg(argvregs[i]);
1597 /* XXX we want to re-enable direct branches! crippling them now
1598 * to stress-test indirect branches.:
1599 //build the right kind of call
1600 if (GlobalAddressSDNode *GASD =
1601 dyn_cast<GlobalAddressSDNode>(N.getOperand(1)))
1603 BuildMI(BB, IA64::BRCALL, 1).addGlobalAddress(GASD->getGlobal(),true);
1604 IA64Lowering.restoreGP_SP_RP(BB);
1606 ^^^^^^^^^^^^^ we want this code one day XXX */
1607 if (ExternalSymbolSDNode *ESSDN =
1608 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1)))
1609 { // FIXME : currently need this case for correctness, to avoid
1610 // "non-pic code with imm relocation against dynamic symbol" errors
1611 BuildMI(BB, IA64::BRCALL, 1)
1612 .addExternalSymbol(ESSDN->getSymbol(), true);
1613 IA64Lowering.restoreGP_SP_RP(BB);
// Indirect call through a function descriptor: Tmp1 points at the
// descriptor, which holds {entry point, GP} as two 8-byte words.
1616 Tmp1 = SelectExpr(N.getOperand(1));
1618 unsigned targetEntryPoint=MakeReg(MVT::i64);
1619 unsigned targetGPAddr=MakeReg(MVT::i64);
1620 unsigned currentGP=MakeReg(MVT::i64);
1622 // b6 is a scratch branch register, we load the target entry point
1623 // from the base of the function descriptor
1624 BuildMI(BB, IA64::LD8, 1, targetEntryPoint).addReg(Tmp1);
1625 BuildMI(BB, IA64::MOV, 1, IA64::B6).addReg(targetEntryPoint);
1627 // save the current GP:
1628 BuildMI(BB, IA64::MOV, 1, currentGP).addReg(IA64::r1);
1630 /* TODO: we need to make sure doing this never, ever loads a
1631 * bogus value into r1 (GP). */
1632 // load the target GP (which is at mem[functiondescriptor+8])
1633 BuildMI(BB, IA64::ADDIMM22, 2, targetGPAddr)
1634 .addReg(Tmp1).addImm(8); // FIXME: addimm22? why not postincrement ld
1635 BuildMI(BB, IA64::LD8, 1, IA64::r1).addReg(targetGPAddr);
1637 // and then jump: (well, call)
1638 BuildMI(BB, IA64::BRCALL, 1).addReg(IA64::B6);
1639 // and finally restore the old GP
1640 BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(currentGP);
1641 IA64Lowering.restoreSP_RP(BB);
// Copy the return value out of the ABI return registers: bools via
// cmp.ne(r8, r0), integers from r8, FP values from F8.
1644 switch (Node->getValueType(0)) {
1645 default: assert(0 && "Unknown value type for call result!");
1646 case MVT::Other: return 1;
1648 BuildMI(BB, IA64::CMPNE, 2, Result)
1649 .addReg(IA64::r8).addReg(IA64::r0);
1655 BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r8);
1658 BuildMI(BB, IA64::FMOV, 1, Result).addReg(IA64::F8);
1661 return Result+N.ResNo;
// Select - Lower a chain/side-effecting SDOperand (stores, branches,
// returns, copies, stack adjustments) to machine instructions, emitting
// each node at most once via the LoweredTokens set.
1668 void ISel::Select(SDOperand N) {
1669 unsigned Tmp1, Tmp2, Opc;
1670 unsigned opcode = N.getOpcode();
1672 if (!LoweredTokens.insert(N).second)
1673 return; // Already selected.
1675 SDNode *Node = N.Val;
1677 switch (Node->getOpcode()) {
1679 Node->dump(); std::cerr << "\n";
1680 assert(0 && "Node not handled yet!");
1682 case ISD::EntryToken: return; // Noop
// TokenFactor: just select all the chains feeding into it.
1684 case ISD::TokenFactor: {
1685 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
1686 Select(Node->getOperand(i));
// CopyToReg: bools need the predicated-compare pseudocode (predicate
// registers can't be MOVed); everything else is a plain MOV.
1690 case ISD::CopyToReg: {
1691 Select(N.getOperand(0));
1692 Tmp1 = SelectExpr(N.getOperand(1));
1693 Tmp2 = cast<RegSDNode>(N)->getReg();
1696 if(N.getValueType() == MVT::i1) // if a bool, we use pseudocode
1697 BuildMI(BB, IA64::PCMPEQUNC, 3, Tmp2)
1698 .addReg(IA64::r0).addReg(IA64::r0).addReg(Tmp1);
1699 // (Tmp1) Tmp2 = cmp.eq.unc(r0,r0)
1701 BuildMI(BB, IA64::MOV, 1, Tmp2).addReg(Tmp1);
1702 // XXX is this the right way 'round? ;)
1709 /* what the heck is going on here:
1711 <_sabre_> ret with two operands is obvious: chain and value
1713 <_sabre_> ret with 3 values happens when 'expansion' occurs
1714 <_sabre_> e.g. i64 gets split into 2x i32
1716 <_sabre_> you don't have this case on ia64
1718 <_sabre_> so the two returned values go into EAX/EDX on ia32
1719 <camel_> ahhh *memories*
1721 <camel_> ok, thanks :)
1722 <_sabre_> so yeah, everything that has a side effect takes a 'token chain'
1723 <_sabre_> this is the first operand always
1724 <_sabre_> these operand often define chains, they are the last operand
1725 <_sabre_> they are printed as 'ch' if you do DAG.dump()
// RET: move the return value into r8 (int) or F8 (FP) per the IA64 ABI,
// restore ar.pfs, then emit the ret.
1728 switch (N.getNumOperands()) {
1730 assert(0 && "Unknown return instruction!");
1732 Select(N.getOperand(0));
1733 Tmp1 = SelectExpr(N.getOperand(1));
1734 switch (N.getOperand(1).getValueType()) {
1735 default: assert(0 && "All other types should have been promoted!!");
1736 // FIXME: do I need to add support for bools here?
1737 // (return '0' or '1' r8, basically...)
1739 BuildMI(BB, IA64::MOV, 1, IA64::r8).addReg(Tmp1);
1742 BuildMI(BB, IA64::FMOV, 1, IA64::F8).addReg(Tmp1);
1746 Select(N.getOperand(0));
1749 // before returning, restore the ar.pfs register (set by the 'alloc' up top)
1750 BuildMI(BB, IA64::MOV, 1).addReg(IA64::AR_PFS).addReg(IA64Lowering.VirtGPR);
1751 BuildMI(BB, IA64::RET, 0); // and then just emit a 'ret' instruction
// Unconditional branch: emitted predicated on p0 (always true).
1756 Select(N.getOperand(0));
1757 MachineBasicBlock *Dest =
1758 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
1759 BuildMI(BB, IA64::BRLCOND_NOTCALL, 1).addReg(IA64::p0).addMBB(Dest);
1760 // XXX HACK! we do _not_ need long branches all the time
1764 case ISD::ImplicitDef: {
1765 Select(N.getOperand(0));
1766 BuildMI(BB, IA64::IDEF, 0, cast<RegSDNode>(N)->getReg());
// Conditional branch: branch on the selected predicate register.
1771 MachineBasicBlock *Dest =
1772 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
1774 Select(N.getOperand(0));
1775 Tmp1 = SelectExpr(N.getOperand(1));
1776 BuildMI(BB, IA64::BRLCOND_NOTCALL, 1).addReg(Tmp1).addMBB(Dest);
1777 // XXX HACK! we do _not_ need long branches all the time
1786 case ISD::CopyFromReg:
1787 case ISD::DYNAMIC_STACKALLOC:
// STORE / TRUNCSTORE: pick a store opcode from the stored (or truncated)
// type, then emit the address the same way the LOAD cases do. Storing a
// bool converts the predicate to 0/1 in a GR first (conditional add).
1791 case ISD::TRUNCSTORE:
1793 Select(N.getOperand(0));
1794 Tmp1 = SelectExpr(N.getOperand(1)); // value
1798 if(opcode == ISD::STORE) {
1799 switch (N.getOperand(1).getValueType()) {
1800 default: assert(0 && "Cannot store this type!");
1801 case MVT::i1: Opc = IA64::ST1; isBool=true; break;
1802 // FIXME?: for now, we treat bool loads the same as i8 stores */
1803 case MVT::i8: Opc = IA64::ST1; break;
1804 case MVT::i16: Opc = IA64::ST2; break;
1805 case MVT::i32: Opc = IA64::ST4; break;
1806 case MVT::i64: Opc = IA64::ST8; break;
1808 case MVT::f32: Opc = IA64::STF4; break;
1809 case MVT::f64: Opc = IA64::STF8; break;
1811 } else { // truncstore
1812 switch(cast<MVTSDNode>(Node)->getExtraValueType()) {
1813 default: assert(0 && "unknown type in truncstore");
1814 case MVT::i1: Opc = IA64::ST1; isBool=true; break;
1815 //FIXME: DAG does not promote this load?
1816 case MVT::i8: Opc = IA64::ST1; break;
1817 case MVT::i16: Opc = IA64::ST2; break;
1818 case MVT::i32: Opc = IA64::ST4; break;
1819 case MVT::f32: Opc = IA64::STF4; break;
// Store to a global: resolve the address through the GOT, as for loads.
1823 if(N.getOperand(2).getOpcode() == ISD::GlobalAddress) {
1824 unsigned dummy = MakeReg(MVT::i64);
1825 unsigned dummy2 = MakeReg(MVT::i64);
1826 BuildMI(BB, IA64::ADD, 2, dummy)
1827 .addGlobalAddress(cast<GlobalAddressSDNode>
1828 (N.getOperand(2))->getGlobal()).addReg(IA64::r1);
1829 BuildMI(BB, IA64::LD8, 1, dummy2).addReg(dummy);
1832 BuildMI(BB, Opc, 2).addReg(dummy2).addReg(Tmp1);
1833 else { // we are storing a bool, so emit a little pseudocode
1834 // to store a predicate register as one byte
1835 assert(Opc==IA64::ST1);
1836 unsigned dummy3 = MakeReg(MVT::i64);
1837 unsigned dummy4 = MakeReg(MVT::i64);
1838 BuildMI(BB, IA64::MOV, 1, dummy3).addReg(IA64::r0);
1839 BuildMI(BB, IA64::CADDIMM22, 3, dummy4)
1840 .addReg(dummy3).addImm(1).addReg(Tmp1); // if(Tmp1) dummy=0+1;
1841 BuildMI(BB, Opc, 2).addReg(dummy2).addReg(dummy4);
// Store to a stack slot: materialize the frame index address.
1843 } else if(N.getOperand(2).getOpcode() == ISD::FrameIndex) {
1845 // FIXME? (what about bools?)
1847 unsigned dummy = MakeReg(MVT::i64);
1848 BuildMI(BB, IA64::MOV, 1, dummy)
1849 .addFrameIndex(cast<FrameIndexSDNode>(N.getOperand(2))->getIndex());
1850 BuildMI(BB, Opc, 2).addReg(dummy).addReg(Tmp1);
1851 } else { // otherwise
1852 Tmp2 = SelectExpr(N.getOperand(2)); //address
1854 BuildMI(BB, Opc, 2).addReg(Tmp2).addReg(Tmp1);
1855 else { // we are storing a bool, so emit a little pseudocode
1856 // to store a predicate register as one byte
1857 assert(Opc==IA64::ST1);
1858 unsigned dummy3 = MakeReg(MVT::i64);
1859 unsigned dummy4 = MakeReg(MVT::i64);
1860 BuildMI(BB, IA64::MOV, 1, dummy3).addReg(IA64::r0);
1861 BuildMI(BB, IA64::CADDIMM22, 3, dummy4)
1862 .addReg(dummy3).addImm(1).addReg(Tmp1); // if(Tmp1) dummy=0+1;
1863 BuildMI(BB, Opc, 2).addReg(Tmp2).addReg(dummy4);
// Call-frame setup/teardown markers around calls; eliminated later by
// the frame lowering code.
1869 case ISD::ADJCALLSTACKDOWN:
1870 case ISD::ADJCALLSTACKUP: {
1871 Select(N.getOperand(0));
1872 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
1874 Opc = N.getOpcode() == ISD::ADJCALLSTACKDOWN ? IA64::ADJUSTCALLSTACKDOWN :
1875 IA64::ADJUSTCALLSTACKUP;
1876 BuildMI(BB, Opc, 1).addImm(Tmp1);
1882 assert(0 && "GAME OVER. INSERT COIN?");
1886 /// createIA64PatternInstructionSelector - This pass converts an LLVM function
1887 /// into a machine code representation using pattern matching and a machine
1888 /// description file.
1890 FunctionPass *llvm::createIA64PatternInstructionSelector(TargetMachine &TM) {
// ISel is the SelectionDAG-based instruction selector defined above.
1891 return new ISel(TM);