1 //===-- IA64ISelPattern.cpp - A pattern matching inst selector for IA64 ---===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Duraid Madina and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a pattern matching instruction selector for IA64.
12 //===----------------------------------------------------------------------===//
15 #include "IA64InstrBuilder.h"
16 #include "IA64RegisterInfo.h"
17 #include "IA64MachineFunctionInfo.h"
18 #include "llvm/Constants.h" // FIXME: REMOVE
19 #include "llvm/Function.h"
20 #include "llvm/CodeGen/MachineConstantPool.h" // FIXME: REMOVE
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/SelectionDAG.h"
24 #include "llvm/CodeGen/SelectionDAGISel.h"
25 #include "llvm/CodeGen/SSARegMap.h"
26 #include "llvm/Target/TargetData.h"
27 #include "llvm/Target/TargetLowering.h"
28 #include "llvm/Support/MathExtras.h"
29 #include "llvm/ADT/Statistic.h"
35 //===----------------------------------------------------------------------===//
36 // IA64TargetLowering - IA64 Implementation of the TargetLowering interface
// NOTE(review): this listing has elided lines (access specifiers and some
// closing braces are not visible); the comments below describe only the
// code that is visible here.
class IA64TargetLowering : public TargetLowering {
  int VarArgsFrameIndex; // FrameIndex for start of varargs area.

  //int ReturnAddrIndex; // FrameIndex for return slot.
  // Virtual registers caching the incoming GP (global pointer), SP (stack
  // pointer) and RP (return pointer); written in LowerArguments and copied
  // back to r1/r12/rp by the restore* helpers below.
  unsigned GP, SP, RP; // FIXME - clean this mess up

  // Virtual register holding the result of the function-entry 'alloc'.
  unsigned VirtGPR; // this is public so it can be accessed in the selector
                    // for ISD::RET down below. add an accessor instead? FIXME

  // Constructor: register the IA64 register classes and tell the legalizer
  // which operations must be expanded or promoted.
  IA64TargetLowering(TargetMachine &TM) : TargetLowering(TM) {

    // register class for general registers
    addRegisterClass(MVT::i64, IA64::GRRegisterClass);

    // register class for FP registers
    addRegisterClass(MVT::f64, IA64::FPRegisterClass);

    // register class for predicate registers
    addRegisterClass(MVT::i1, IA64::PRRegisterClass);

    // operations with no native IA64 equivalent get expanded:
    setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
    setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);

    // setcc results live in predicate registers (i1); shifts take i64 amounts.
    setSetCCResultType(MVT::i1);
    setShiftAmountType(MVT::i64);

    setOperationAction(ISD::EXTLOAD , MVT::i1 , Promote);

    setOperationAction(ISD::ZEXTLOAD , MVT::i1 , Expand);

    // no sign-extending loads narrower than i64:
    setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
    setOperationAction(ISD::SEXTLOAD , MVT::i8 , Expand);
    setOperationAction(ISD::SEXTLOAD , MVT::i16 , Expand);
    setOperationAction(ISD::SEXTLOAD , MVT::i32 , Expand);

    // FP remainder is not a native op:
    setOperationAction(ISD::SREM , MVT::f32 , Expand);
    setOperationAction(ISD::SREM , MVT::f64 , Expand);

    setOperationAction(ISD::UREM , MVT::f32 , Expand);
    setOperationAction(ISD::UREM , MVT::f64 , Expand);

    // memory intrinsics are expanded (library calls):
    setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
    setOperationAction(ISD::MEMSET , MVT::Other, Expand);
    setOperationAction(ISD::MEMCPY , MVT::Other, Expand);

    computeRegisterProperties();

    // FP constants materializable from f0/f1 (see ISD::ConstantFP in the
    // selector below):
    addLegalFPImmediate(+0.0);
    addLegalFPImmediate(+1.0);
    addLegalFPImmediate(-0.0);
    addLegalFPImmediate(-1.0);

  /// LowerArguments - This hook must be implemented to indicate how we should
  /// lower the arguments for the specified function, into the specified DAG.
  virtual std::vector<SDOperand>
  LowerArguments(Function &F, SelectionDAG &DAG);

  /// LowerCallTo - This hook lowers an abstract call to a function into an
  virtual std::pair<SDOperand, SDOperand>
  LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
              SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);

  /// LowerVAStart - lowers llvm.va_start; returns (va_list pointer, chain).
  virtual std::pair<SDOperand, SDOperand>
  LowerVAStart(SDOperand Chain, SelectionDAG &DAG);

  /// LowerVAArgNext - lowers va_arg/va_next accesses on a va_list.
  virtual std::pair<SDOperand,SDOperand>
  LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
                 const Type *ArgTy, SelectionDAG &DAG);

  // NOTE(review): trailing parameter line(s) of this declaration are not
  // visible in this listing.
  virtual std::pair<SDOperand, SDOperand>
  LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,

  // The helpers below copy the cached GP/SP/RP virtual registers back into
  // their fixed physical registers (r1, r12, rp), e.g. after calls.
  void restoreGP_SP_RP(MachineBasicBlock* BB)
    BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(GP);
    BuildMI(BB, IA64::MOV, 1, IA64::r12).addReg(SP);
    BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);

  void restoreSP_RP(MachineBasicBlock* BB)
    BuildMI(BB, IA64::MOV, 1, IA64::r12).addReg(SP);
    BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);

  void restoreRP(MachineBasicBlock* BB)
    BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);

  void restoreGP(MachineBasicBlock* BB)
    BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(GP);
/// LowerArguments - Lower F's formal arguments for the IA64 convention:
/// the first 8 arguments arrive in registers (integers in the stacked
/// registers r32-r39, FP values in f8-f15 as needed), later arguments in
/// the memory argument area at sp+16, 8 bytes each.  Also emits the
/// function-entry 'alloc' pseudo-op, caches GP/SP/RP in virtual registers,
/// and records which physical registers carry the return value.
/// NOTE(review): this listing has elided lines - the declarations of
/// 'count', 'argVreg'/'argPreg'/'argOpc', several case labels and closing
/// braces are not visible; comments below annotate only visible code.
std::vector<SDOperand>
IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  // add beautiful description of IA64 stack frame format
  // here (from intel 24535803.pdf most likely)

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // fresh virtual registers that will cache the incoming GP/SP/RP:
  GP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  SP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  RP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));

  MachineBasicBlock& BB = MF.front(); // the entry block

  // physical registers carrying the first 8 integer and FP arguments:
  unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
                         IA64::r36, IA64::r37, IA64::r38, IA64::r39};

  unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
                        IA64::F12,IA64::F13,IA64::F14, IA64::F15};

  unsigned used_FPArgs = 0; // how many FP args have been used so far?

  unsigned ArgOffset = 0;

  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
    SDOperand newroot, argt;
    if(count < 8) { // need to fix this logic? maybe.

      switch (getValueType(I->getType())) {
        // (default/error arm - its case label is elided in this listing)
        std::cerr << "ERROR in LowerArgs: unknown type "
                  << getValueType(I->getType()) << "\n";

        // FP-argument arm:
        // fixme? (well, will need to for weird FP structy stuff,
        // see intel ABI docs)
        //XXX BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
        MF.addLiveIn(args_FP[used_FPArgs]); // mark this reg as liveIn
        // floating point args go into f8..f15 as-needed, the increment
        argVreg[count] = // is below..:
          MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::f64));
        // FP args go into f8..f15 as needed: (hence the ++)
        argPreg[count] = args_FP[used_FPArgs++];
        argOpc[count] = IA64::FMOV;
        argt = newroot = DAG.getCopyFromReg(argVreg[count],
                           getValueType(I->getType()), DAG.getRoot());

      // integer-argument arm:
      case MVT::i1: // NOTE: as far as C abi stuff goes,
                    // bools are just boring old ints
        //XXX BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
        MF.addLiveIn(args_int[count]); // mark this register as liveIn
          MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
        argPreg[count] = args_int[count];
        argOpc[count] = IA64::MOV;
          DAG.getCopyFromReg(argVreg[count], MVT::i64, DAG.getRoot());
        // narrow integer args are copied in as i64 and truncated back down:
        if ( getValueType(I->getType()) != MVT::i64)
          argt = DAG.getNode(ISD::TRUNCATE, getValueType(I->getType()),

    } else { // more than 8 args go into the frame
      // Create the frame index object for this incoming parameter...
      ArgOffset = 16 + 8 * (count - 8);
      int FI = MFI->CreateFixedObject(8, ArgOffset);

      // Create the SelectionDAG nodes corresponding to a load
      //from this parameter
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
      argt = newroot = DAG.getLoad(getValueType(I->getType()),
                                   DAG.getEntryNode(), FIN, DAG.getSrcValue(NULL));

    DAG.setRoot(newroot.getValue(1));
    ArgValues.push_back(argt);

  // Create a vreg to hold the output of (what will become)
  // the "alloc" instruction
  VirtGPR = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  BuildMI(&BB, IA64::PSEUDO_ALLOC, 0, VirtGPR);
  // we create a PSEUDO_ALLOC (pseudo)instruction for now

  // IDEFs mark r1/r12/rp as implicitly defined at function entry, so the
  // copies below do not read "undefined" registers:
  BuildMI(&BB, IA64::IDEF, 0, IA64::r1);

  BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
  BuildMI(&BB, IA64::IDEF, 0, IA64::rp);

  // cache the incoming GP, SP and RP in virtual registers:
  BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);

  BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
  BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);

  unsigned tempOffset=0;

  // if this is a varargs function, we simply lower llvm.va_start by
  // pointing to the first entry
  VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);

  // here we actually do the moving of args, and store them to the stack
  // too if this is a varargs function:
  for (int i = 0; i < count && i < 8; ++i) {
    BuildMI(&BB, argOpc[i], 1, argVreg[i]).addReg(argPreg[i]);
    // if this is a varargs function, we copy the input registers to the stack
    int FI = MFI->CreateFixedObject(8, tempOffset);
    tempOffset+=8; //XXX: is it safe to use r22 like this?
    BuildMI(&BB, IA64::MOV, 1, IA64::r22).addFrameIndex(FI);
    // FIXME: we should use st8.spill here, one day
    BuildMI(&BB, IA64::ST8, 1, IA64::r22).addReg(argPreg[i]);

  // Finally, inform the code generator which regs we return values in.
  // (see the ISD::RET: case down below)
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "i have no idea where to return this type!");
  case MVT::isVoid: break;
    // (case labels elided) integer results are returned in r8:
    MF.addLiveOut(IA64::r8);
    // (case label elided) FP results are returned in f8:
    MF.addLiveOut(IA64::F8);
/// LowerCallTo - Lower an abstract call into target form: reserve the
/// 16-byte scratch area (plus 8 bytes per argument past the eighth),
/// record the high-water mark of 'out' registers this function uses,
/// promote small integer/FP arguments to i64/f64, and wrap the call in
/// ADJCALLSTACKDOWN / ADJCALLSTACKUP nodes.  Returns (result, chain).
/// NOTE(review): the final 'SelectionDAG &DAG' parameter line and several
/// case labels/braces of the body are elided in this listing.
std::pair<SDOperand, SDOperand>
IA64TargetLowering::LowerCallTo(SDOperand Chain,
                                const Type *RetTy, bool isVarArg,
                                SDOperand Callee, ArgListTy &Args,

  MachineFunction &MF = DAG.getMachineFunction();

  unsigned NumBytes = 16; // always reserve the 16-byte scratch area
  unsigned outRegsUsed = 0;

  if (Args.size() > 8) {
    NumBytes += (Args.size() - 8) * 8; // stack space for args past the 8th

    outRegsUsed = Args.size();

  // FIXME? this WILL fail if we ever try to pass around an arg that
  // consumes more than a single output slot (a 'real' double, int128
  // some sort of aggregate etc.), as we'll underestimate how many 'outX'
  // registers we use. Hopefully, the assembler will notice.
  MF.getInfo<IA64FunctionInfo>()->outRegsUsed=
    std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);

  Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));

  std::vector<SDOperand> args_to_use;
  for (unsigned i = 0, e = Args.size(); i != e; ++i)
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "unexpected argument type!");

      // (integer case labels elided)
      //promote to 64-bits, sign/zero extending based on type
      if(Args[i].second->isSigned())
        Args[i].first = DAG.getNode(ISD::SIGN_EXTEND, MVT::i64,
        Args[i].first = DAG.getNode(ISD::ZERO_EXTEND, MVT::i64,
      // (f32 case label elided) promote single-precision FP to f64:
      Args[i].first = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Args[i].first);

    args_to_use.push_back(Args[i].first);

  // result types of the call node: the value (if any), then the chain.
  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);
  if (RetTyVT != MVT::isVoid)
    RetVals.push_back(RetTyVT);
  RetVals.push_back(MVT::Other);

  SDOperand TheCall = SDOperand(DAG.getCall(RetVals, Chain,
                                            Callee, args_to_use), 0);
  // the chain is result 0 for a void call, result 1 otherwise:
  Chain = TheCall.getValue(RetTyVT != MVT::isVoid);
  Chain = DAG.getNode(ISD::ADJCALLSTACKUP, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));
  return std::make_pair(TheCall, Chain);
/// LowerVAStart - llvm.va_start is lowered to a pointer to the
/// VarArgsFrameIndex slot set up in LowerArguments; the chain is
/// passed through unchanged.
std::pair<SDOperand, SDOperand>
IA64TargetLowering::LowerVAStart(SDOperand Chain, SelectionDAG &DAG) {
  // vastart just returns the address of the VarArgsFrameIndex slot.
  return std::make_pair(DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64), Chain);
/// LowerVAArgNext - Lower llvm.va_arg (load the value VAList points at)
/// and va_next (advance VAList by the promoted argument size).  Only
/// i64/f64 are expected here; smaller types are promoted beforehand.
/// NOTE(review): the declarations of 'Result' and 'Amt' and the
/// isVANext/else branch structure are elided in this listing.
std::pair<SDOperand,SDOperand> IA64TargetLowering::
LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
               const Type *ArgTy, SelectionDAG &DAG) {

  MVT::ValueType ArgVT = getValueType(ArgTy);

    // va_arg path: load the argument value from the va_list pointer
    Result = DAG.getLoad(ArgVT, DAG.getEntryNode(), VAList, DAG.getSrcValue(NULL));

  if (ArgVT == MVT::i32 || ArgVT == MVT::f32)

    assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
           "Other types should have been promoted for varargs!");

    // va_next path: step the va_list pointer forward by Amt bytes
    Result = DAG.getNode(ISD::ADD, VAList.getValueType(), VAList,
                         DAG.getConstant(Amt, VAList.getValueType()));

  return std::make_pair(Result, Chain);
/// LowerFrameReturnAddress - Lower llvm.frameaddress / llvm.returnaddress.
/// Not implemented for IA64 yet: unconditionally asserts.
/// NOTE(review): the end of the parameter list (presumably
/// 'SelectionDAG &DAG') is elided in this listing.
std::pair<SDOperand, SDOperand> IA64TargetLowering::
LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,

  assert(0 && "LowerFrameReturnAddress not done yet\n");
408 //===--------------------------------------------------------------------===//
409 /// ISel - IA64 specific code to select IA64 machine instructions for
410 /// SelectionDAG operations.
// NOTE(review): 'public:' labels, the tail of the constructor
// initializer list, and the closing brace are elided in this listing.
class ISel : public SelectionDAGISel {
  /// IA64Lowering - This object fully describes how to lower LLVM code to an
  /// IA64-specific SelectionDAG.
  IA64TargetLowering IA64Lowering;
  SelectionDAG *ISelDAG; // Hack to support us having a dag->dag transform
                         // for sdiv and udiv until it is put into the future

  /// ExprMap - As shared expressions are codegen'd, we keep track of which
  /// vreg the value is produced in, so we only emit one copy of each compiled
  std::map<SDOperand, unsigned> ExprMap;
  // chain/side-effect nodes that have already been selected:
  std::set<SDOperand> LoweredTokens;

  ISel(TargetMachine &TM) : SelectionDAGISel(IA64Lowering), IA64Lowering(TM),

  /// InstructionSelectBasicBlock - This callback is invoked by
  /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
  virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);

  // SelectExpr returns the vreg holding N's value; Select emits
  // chain/side-effecting operations.
  unsigned SelectExpr(SDOperand N);
  void Select(SDOperand N);
  // a dag->dag to transform mul-by-constant-int to shifts+adds/subs
  SDOperand BuildConstmulSequence(SDOperand N);
/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {

  // Codegen the basic block: selecting from the root covers the whole DAG.
  Select(DAG.getRoot());

  // Clear state used for selection.
  LoweredTokens.clear();
// strip leading '0' characters from a string
// (helper for the Booth-encoding / Lefevre constant-multiply machinery;
// safe for empty and all-zero strings, which become empty)
void munchLeadingZeros(std::string& inString) {
  // Erase everything up to the first non-'0' character in one operation;
  // the original erased one character at a time (O(n^2)).
  std::string::size_type firstKeep = inString.find_first_not_of('0');
  if (firstKeep == std::string::npos)
    inString.clear(); // empty or nothing but zeros
  else
    inString.erase(0, firstKeep);
}
// strip trailing '0' characters from a string
// (safe for empty and all-zero strings, which become empty; the original
// indexed c_str()[length-1] and walked backwards with no lower bound,
// reading out of bounds on such inputs)
void munchTrailingZeros(std::string& inString) {
  // Truncate after the last non-'0' character in one operation instead of
  // erasing one character at a time (O(n^2)).
  std::string::size_type lastKeep = inString.find_last_not_of('0');
  if (lastKeep == std::string::npos)
    inString.clear(); // empty or nothing but zeros
  else
    inString.erase(lastKeep + 1);
}
// return how many consecutive '0' characters are at the end of a string
// (an empty string has zero trailing zeros; an all-zero string counts
// every character - the original walked c_str()[curPos--] past the front
// of the buffer for those inputs)
unsigned int countTrailingZeros(std::string& inString) {
  std::string::size_type lastNonZero = inString.find_last_not_of('0');
  if (lastNonZero == std::string::npos)
    return inString.length(); // the whole (possibly empty) string is zeros
  return inString.length() - 1 - lastNonZero;
}
// booth encode a string of '1' and '0' characters (returns string of 'P' (+1)
// '0' and 'N' (-1) characters)
// Classic Booth recoding: a run of consecutive '1's is replaced by an 'N'
// at its low end and (after the elided lines) a marker above its high end,
// since 0111...1 == +2^(k) - 2^(j).  The input is LSB-first; the cleaned-up
// result is reversed into boothEncodedString.
// NOTE(review): several lines are elided in this listing ('curpos' and
// 'runlength' declarations, loop headers and closing braces).
void boothEncode(std::string inString, std::string& boothEncodedString) {

  int lim=inString.size();

  if(inString[curpos]=='1') { // if we see a '1', look for a run of them
    std::string replaceString="N";

    // find the run length
    for(;inString[curpos+runlength]=='1';runlength++) ;

    for(int i=0; i<runlength-1; i++)

    inString.replace(curpos, runlength+1, replaceString);

  } else { // a zero, we just keep chugging along

  // clean up (trim the string, reverse it and turn '1's into 'P's)
  munchTrailingZeros(inString);
  boothEncodedString="";

  for(int i=inString.size()-1;i>=0;i--)
      boothEncodedString+="P";
    boothEncodedString+=inString[i];
struct shiftaddblob { // this encodes stuff like (x=) "A << B [+-] C << D"
  unsigned firstVal;    // A
  unsigned firstShift;  // B
  unsigned secondVal;   // C
  unsigned secondShift; // D
  bool isSub;           // true: subtract the second term, false: add it
};                      // (isSub is assigned by lefevre() below)
/* this implements Lefevre's "pattern-based" constant multiplication,
 * see "Multiplication by an Integer Constant", INRIA report 1999-06
 * TODO: implement a method to try rewriting P0N<->0PP / N0P<->0NN
 * to get better booth encodings - this does help in practice
 * TODO: weight shifts appropriately (most architectures can't
 * fuse a shift and an add for arbitrary shift amounts) */
// lefevre - recursively decompose a Booth-encoded digit string (of
// 'P'/'N'/'0' characters) into shift-and-add/sub steps appended to 'ops'.
// Each level picks the most frequently repeating sub-pattern, splits the
// string into pattern + remainder, and recurses on both.
// NOTE(review): many lines of this listing are elided (declarations of
// 'd', 'W'/'int_W'/'int_d', 'z', 'c', 't', 'u', 'blob', several loop
// bodies and closing braces); comments below annotate only visible code.
unsigned lefevre(const std::string inString,
                 std::vector<struct shiftaddblob> &ops) {
  std::string retstring;
  std::string s = inString;
  munchTrailingZeros(s);

  int length=s.length()-1;

  // record the positions of 'P' (+1) and 'N' (-1) digits, counted from
  // the least-significant end:
  std::vector<int> p,n;

  for(int i=0; i<=length; i++) {
    if (s.c_str()[length-i]=='P') {
    } else if (s.c_str()[length-i]=='N') {

  // w[] histograms digit-pair distances; mixed-sign pairs get negated keys
  // so they are distinguishable from same-sign pairs:
  std::map<const int, int> w;

  for(unsigned i=0; i<p.size(); i++) {
    for(unsigned j=0; j<i; j++) {

  for(unsigned i=1; i<n.size(); i++) {
    for(unsigned j=0; j<i; j++) {

  for(unsigned i=0; i<p.size(); i++) {
    for(unsigned j=0; j<n.size(); j++) {
      w[-abs(p[i]-n[j])]++;

  std::map<const int, int>::const_iterator ii;

  // sort the candidate distances by occurrence count:
  std::multimap<int, int> sorted_by_value;

  for(ii = w.begin(); ii!=w.end(); ii++)
    sorted_by_value.insert(std::pair<int, int>((*ii).second,(*ii).first));

  for (std::multimap<int, int>::iterator it = sorted_by_value.begin();
       it != sorted_by_value.end(); ++it) {
    d.push_back((*it).second);

  // greedily fold digit pairs at the best distance; matched digits are
  // marked with lower-case 'p'/'n' so they can be separated out below:
  while(d.size()>0 && (w[int_d=d.back()] > int_W)) {

    for(unsigned base=0; base<retstring.size(); base++) {
      if( ((base+z+1) < retstring.size()) &&
         retstring.c_str()[base]=='P' &&
         retstring.c_str()[base+z+1]=='P')

        // 'P' pair at distance z+1:
        retstring.replace(base, 1, "0");
        retstring.replace(base+z+1, 1, "p");

    for(unsigned base=0; base<retstring.size(); base++) {
      if( ((base+z+1) < retstring.size()) &&
         retstring.c_str()[base]=='N' &&
         retstring.c_str()[base+z+1]=='N')

        // 'N' pair at distance z+1:
        retstring.replace(base, 1, "0");
        retstring.replace(base+z+1, 1, "n");

    for(unsigned base=0; base<retstring.size(); base++) {
      if( ((base+z+1) < retstring.size()) &&
          ((retstring.c_str()[base]=='P' &&
            retstring.c_str()[base+z+1]=='N') ||
           (retstring.c_str()[base]=='N' &&
            retstring.c_str()[base+z+1]=='P')) ) {

        // mixed-sign pair at distance z+1:
        if(retstring.c_str()[base]=='P') {
          retstring.replace(base, 1, "0");
          retstring.replace(base+z+1, 1, "p");
        } else { // retstring[base]=='N'
          retstring.replace(base, 1, "0");
          retstring.replace(base+z+1, 1, "n");

  } d.pop_back(); // hmm

  // t = the remainder: zero out the matched (lower-case) digits:
  for(unsigned i=0; i<t.length(); i++) {
    if(t.c_str()[i]=='p' || t.c_str()[i]=='n')
      t.replace(i, 1, "0");

  // u = the pattern: drop unmatched digits, restore markers to upper case:
  for(unsigned i=0; i<u.length(); i++) {
    if(u.c_str()[i]=='P' || u.c_str()[i]=='N')
      u.replace(i, 1, "0");
    if(u.c_str()[i]=='p')
      u.replace(i, 1, "P");
    if(u.c_str()[i]=='n')
      u.replace(i, 1, "N");

  for(unsigned i=0; i<u.length(); i++) {

  // normalize the pattern's sign by swapping P<->N where needed:
  for(unsigned p=0; p<u.length(); p++) {
    bool isP=(u.c_str()[p]=='P');
    bool isN=(u.c_str()[p]=='N');

      u.replace(p, 1, "N");
      u.replace(p, 1, "P");

  munchLeadingZeros(u);

  // recurse on the pattern, then combine it with a shifted copy of itself:
  int i = lefevre(u, ops);

  blob.firstVal=i; blob.firstShift=c;
  blob.secondVal=i; blob.secondShift=0;

  munchLeadingZeros(t);

  // same sign normalization for the remainder string:
  if(t.c_str()[0]!='P') {

    for(unsigned p=0; p<t.length(); p++) {
      bool isP=(t.c_str()[p]=='P');
      bool isN=(t.c_str()[p]=='N');

        t.replace(p, 1, "N");
        t.replace(p, 1, "P");

  // recurse on the remainder and combine both partial results, aligned
  // by their trailing-zero counts:
  int j = lefevre(t, ops);

  int trail=countTrailingZeros(u);
  blob.secondVal=i; blob.secondShift=trail;
  trail=countTrailingZeros(t);
  blob.firstVal=j; blob.firstShift=trail;

  // (branch structure elided) choose add vs. subtract, possibly swapping
  // the operands so the subtraction is always "first - second":
  blob.isSub=false; // first + second
  blob.isSub=true; // first - second
  blob.isSub=true; // second - first

  int tmpval, tmpshift;
  tmpval=blob.firstVal;
  tmpshift=blob.firstShift;
  blob.firstVal=blob.secondVal;
  blob.firstShift=blob.secondShift;
  blob.secondVal=tmpval;
  blob.secondShift=tmpshift;
/// BuildConstmulSequence - dag->dag transform replacing (mul x, imm) by an
/// equivalent sequence of shifts and adds/subs, derived from the Booth
/// encoding of imm via Lefevre's method (boothEncode/lefevre above).
/// NOTE(review): lines are elided in this listing (making the constant
/// positive / 'flippedSign' handling, the halving statement of the
/// make-it-odd loop, some braces, and the final return are not visible).
SDOperand ISel::BuildConstmulSequence(SDOperand N) {
  //FIXME: we should shortcut this stuff for multiplies by 2^n+1
  // in particular, *3 is nicer as *2+1, not *4-1
  int64_t constant=cast<ConstantSDNode>(N.getOperand(1))->getValue();

  unsigned preliminaryShift=0;

  assert(constant > 0 && "erk, don't multiply by zero or negative nums\n");

  // first, we make the constant to multiply by positive

  // next, we make it odd.
  for(; (constant%2==0); preliminaryShift++)

  //OK, we have a positive, odd number of 64 bits or less. Convert it
  //to a binary string, constantString[0] is the LSB
  char constantString[65];
  for(int i=0; i<64; i++)
    constantString[i]='0'+((constant>>i)&0x1);
  constantString[64]=0;

  // now, Booth encode it
  std::string boothEncodedString;
  boothEncode(constantString, boothEncodedString);

  std::vector<struct shiftaddblob> ops;
  // do the transformation, filling out 'ops'
  lefevre(boothEncodedString, ops);

  // NB: variable-length array - a GCC extension, not standard C++.
  SDOperand results[ops.size()]; // temporary results (of adds/subs of shifts)

  // now turn 'ops' into DAG bits
  // each op computes (firstVal << firstShift) [+-] (secondVal << secondShift);
  // val index 0 refers to the original multiplicand, index k>0 to the
  // result of op k-1:
  for(unsigned i=0; i<ops.size(); i++) {
    SDOperand amt = ISelDAG->getConstant(ops[i].firstShift, MVT::i64);
    SDOperand val = (ops[i].firstVal == 0) ? N.getOperand(0) :
      results[ops[i].firstVal-1];
    SDOperand left = ISelDAG->getNode(ISD::SHL, MVT::i64, val, amt);
    amt = ISelDAG->getConstant(ops[i].secondShift, MVT::i64);
    val = (ops[i].secondVal == 0) ? N.getOperand(0) :
      results[ops[i].secondVal-1];
    SDOperand right = ISelDAG->getNode(ISD::SHL, MVT::i64, val, amt);
      // (isSub branch elided) subtract or add the two shifted terms:
      results[i] = ISelDAG->getNode(ISD::SUB, MVT::i64, left, right);
      results[i] = ISelDAG->getNode(ISD::ADD, MVT::i64, left, right);

  // don't forget flippedSign and preliminaryShift!
  SDOperand finalresult;
  if(preliminaryShift) {
    // undo the make-it-odd step by shifting the result back up:
    SDOperand finalshift = ISelDAG->getConstant(preliminaryShift, MVT::i64);
    finalresult = ISelDAG->getNode(ISD::SHL, MVT::i64,
                                   results[ops.size()-1], finalshift);
  } else { // there was no preliminary divide-by-power-of-2 required
    finalresult = results[ops.size()-1];
/// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N. It
/// returns zero when the input is not exactly a power of two.
/// (So ExactLog2(1) == 1, ExactLog2(8) == 4; a return of 0 unambiguously
/// means "not a power of two", since Val == 1 already yields 1.)
static unsigned ExactLog2(uint64_t Val) {
  // zero, or more than one bit set => not a power of two
  if (Val == 0 || (Val & (Val-1))) return 0;
  unsigned N = 0;
  while (Val != 1) { // count how many times Val can be halved
    Val >>= 1;
    ++N;
  }
  return N + 1; // Val was 1 << N, report N + 1 per the contract above
}
/// ExactLog2sub1 - This function solves for (Val == (1 << (N-1))-1)
/// and returns N. It returns 666 if Val is not 2^n -1 for some n.
/// (Per the comparison below the value actually returned is the n with
/// Val == (1 << n) - 1, e.g. ExactLog2sub1(7) == 3 - which is what the
/// caller ponderIntegerAndWith() expects for its dep.z mask length.)
static unsigned ExactLog2sub1(uint64_t Val) {
  for (unsigned n = 0; n < 64; ++n)
    // Use an unsigned shift: the original's (1LL << n) overflows a signed
    // long long at n == 63 (undefined behavior); 1ULL << n is well-defined
    // for all n < 64 and yields the same masks otherwise.
    if (Val == (1ULL << n) - 1)
      return n;
  return 666; // sentinel: Val is not of the form 2^n - 1
}
/// ponderIntegerDivisionBy - When handling integer divides, if the divide
/// is by a constant such that we can efficiently codegen it, this
/// function says what to do. Currently, it returns 0 if the division must
/// become a genuine divide, and 1 if the division can be turned into a
/// NOTE(review): the tail of the signature (the out-parameter, presumably
/// 'unsigned& Imm') and the body of the power-of-two arm are elided in
/// this listing.
static unsigned ponderIntegerDivisionBy(SDOperand N, bool isSigned,

  if (N.getOpcode() != ISD::Constant) return 0; // if not a divide by
                                                // a constant, give up.

  int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();

  if ((Imm = ExactLog2(v))) { // if a division by a power of two, say so

  return 0; // fallthrough
/// ponderIntegerAndWith - Decide whether an AND with constant N can be
/// done as a zero-extending bit extract: sets Imm (via ExactLog2sub1)
/// when the constant is (2^n)-1 for some n, otherwise returns 0.
/// NOTE(review): the 'return 1' arm of the body and the closing braces
/// are elided in this listing.
static unsigned ponderIntegerAndWith(SDOperand N, unsigned& Imm) {
  if (N.getOpcode() != ISD::Constant) return 0; // if not ANDing with
                                                // a constant, give up.

  int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();

  if ((Imm = ExactLog2sub1(v))!=666) { // if ANDing with ((2^n)-1) for some n

  return 0; // fallthrough
/// ponderIntegerAdditionWith - Decide whether the added constant N fits
/// the 14-bit immediate form of the IA64 'adds' instruction; if so, Imm
/// receives the constant truncated to 14 bits.
/// NOTE(review): the 'return 1' arm of the body and the closing braces
/// are elided in this listing.
static unsigned ponderIntegerAdditionWith(SDOperand N, unsigned& Imm) {
  if (N.getOpcode() != ISD::Constant) return 0; // if not adding a
                                                // constant, give up.
  int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();

  if (v <= 8191 && v >= -8192) { // if this constants fits in 14 bits, say so
    Imm = v & 0x3FFF; // 14 bits

  return 0; // fallthrough
/// ponderIntegerSubtractionFrom - Decide whether the constant N (the
/// minuend) fits the 8-bit immediate form of IA64 'sub'; if so, Imm
/// receives the constant truncated to 8 bits.
/// NOTE(review): the 'return 1' arm of the body and the closing braces
/// are elided in this listing.
static unsigned ponderIntegerSubtractionFrom(SDOperand N, unsigned& Imm) {
  if (N.getOpcode() != ISD::Constant) return 0; // if not subtracting a
                                                // constant, give up.
  int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();

  if (v <= 127 && v >= -128) { // if this constants fits in 8 bits, say so
    Imm = v & 0xFF; // 8 bits

  return 0; // fallthrough
918 unsigned ISel::SelectExpr(SDOperand N) {
920 unsigned Tmp1, Tmp2, Tmp3;
922 MVT::ValueType DestType = N.getValueType();
924 unsigned opcode = N.getOpcode();
926 SDNode *Node = N.Val;
929 if (Node->getOpcode() == ISD::CopyFromReg)
930 // Just use the specified register as our input.
931 return dyn_cast<RegSDNode>(Node)->getReg();
933 unsigned &Reg = ExprMap[N];
936 if (N.getOpcode() != ISD::CALL)
937 Reg = Result = (N.getValueType() != MVT::Other) ?
938 MakeReg(N.getValueType()) : 1;
940 // If this is a call instruction, make sure to prepare ALL of the result
941 // values as well as the chain.
942 if (Node->getNumValues() == 1)
943 Reg = Result = 1; // Void call, just a chain.
945 Result = MakeReg(Node->getValueType(0));
946 ExprMap[N.getValue(0)] = Result;
947 for (unsigned i = 1, e = N.Val->getNumValues()-1; i != e; ++i)
948 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
949 ExprMap[SDOperand(Node, Node->getNumValues()-1)] = 1;
953 switch (N.getOpcode()) {
956 assert(0 && "Node not handled!\n");
958 case ISD::FrameIndex: {
959 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
960 BuildMI(BB, IA64::MOV, 1, Result).addFrameIndex(Tmp1);
964 case ISD::ConstantPool: {
965 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
966 IA64Lowering.restoreGP(BB); // FIXME: do i really need this?
967 BuildMI(BB, IA64::ADD, 2, Result).addConstantPoolIndex(Tmp1)
972 case ISD::ConstantFP: {
973 Tmp1 = Result; // Intermediate Register
974 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
975 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
976 Tmp1 = MakeReg(MVT::f64);
978 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
979 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
980 BuildMI(BB, IA64::FMOV, 1, Tmp1).addReg(IA64::F0); // load 0.0
981 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
982 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
983 BuildMI(BB, IA64::FMOV, 1, Tmp1).addReg(IA64::F1); // load 1.0
985 assert(0 && "Unexpected FP constant!");
987 // we multiply by +1.0, negate (this is FNMA), and then add 0.0
988 BuildMI(BB, IA64::FNMA, 3, Result).addReg(Tmp1).addReg(IA64::F1)
993 case ISD::DYNAMIC_STACKALLOC: {
994 // Generate both result values.
996 ExprMap[N.getValue(1)] = 1; // Generate the token
998 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1000 // FIXME: We are currently ignoring the requested alignment for handling
1001 // greater than the stack alignment. This will need to be revisited at some
1002 // point. Align = N.getOperand(2);
1004 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
1005 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
1006 std::cerr << "Cannot allocate stack object with greater alignment than"
1007 << " the stack alignment yet!";
1012 Select(N.getOperand(0));
1013 if (ConstantSDNode* CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
1015 if (CN->getValue() < 32000)
1017 BuildMI(BB, IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
1018 .addImm(-CN->getValue());
1020 Tmp1 = SelectExpr(N.getOperand(1));
1021 // Subtract size from stack pointer, thereby allocating some space.
1022 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
1025 Tmp1 = SelectExpr(N.getOperand(1));
1026 // Subtract size from stack pointer, thereby allocating some space.
1027 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
1030 Select(N.getOperand(0));
1031 Tmp1 = SelectExpr(N.getOperand(1));
1032 // Subtract size from stack pointer, thereby allocating some space.
1033 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
1034 // Put a pointer to the space into the result register, by copying the
1036 BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r12);
1041 Tmp1 = SelectExpr(N.getOperand(0)); //Cond
1042 Tmp2 = SelectExpr(N.getOperand(1)); //Use if TRUE
1043 Tmp3 = SelectExpr(N.getOperand(2)); //Use if FALSE
1045 unsigned bogoResult;
1047 switch (N.getOperand(1).getValueType()) {
1048 default: assert(0 &&
1049 "ISD::SELECT: 'select'ing something other than i64 or f64!\n");
1051 bogoResult=MakeReg(MVT::i64);
1054 bogoResult=MakeReg(MVT::f64);
1058 BuildMI(BB, IA64::MOV, 1, bogoResult).addReg(Tmp3);
1059 BuildMI(BB, IA64::CMOV, 2, Result).addReg(bogoResult).addReg(Tmp2)
1060 .addReg(Tmp1); // FIXME: should be FMOV/FCMOV sometimes,
1061 // though this will work for now (no JIT)
1065 case ISD::Constant: {
1066 unsigned depositPos=0;
1067 unsigned depositLen=0;
1068 switch (N.getValueType()) {
1069 default: assert(0 && "Cannot use constants of this type!");
1070 case MVT::i1: { // if a bool, we don't 'load' so much as generate
1072 if(cast<ConstantSDNode>(N)->getValue()) // true:
1073 BuildMI(BB, IA64::CMPEQ, 2, Result).addReg(IA64::r0).addReg(IA64::r0);
1075 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(IA64::r0).addReg(IA64::r0);
1076 return Result; // early exit
1078 case MVT::i64: break;
1081 int64_t immediate = cast<ConstantSDNode>(N)->getValue();
1083 if(immediate==0) { // if the constant is just zero,
1084 BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r0); // just copy r0
1085 return Result; // early exit
1088 if (immediate <= 8191 && immediate >= -8192) {
1089 // if this constants fits in 14 bits, we use a mov the assembler will
1090 // turn into: "adds rDest=imm,r0" (and _not_ "andl"...)
1091 BuildMI(BB, IA64::MOVSIMM14, 1, Result).addSImm(immediate);
1092 return Result; // early exit
1095 if (immediate <= 2097151 && immediate >= -2097152) {
1096 // if this constants fits in 22 bits, we use a mov the assembler will
1097 // turn into: "addl rDest=imm,r0"
1098 BuildMI(BB, IA64::MOVSIMM22, 1, Result).addSImm(immediate);
1099 return Result; // early exit
1102 /* otherwise, our immediate is big, so we use movl */
1103 uint64_t Imm = immediate;
1104 BuildMI(BB, IA64::MOVLIMM64, 1, Result).addImm64(Imm);
1109 BuildMI(BB, IA64::IDEF, 0, Result);
1113 case ISD::GlobalAddress: {
1114 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
1115 unsigned Tmp1 = MakeReg(MVT::i64);
1117 BuildMI(BB, IA64::ADD, 2, Tmp1).addGlobalAddress(GV).addReg(IA64::r1);
1118 BuildMI(BB, IA64::LD8, 1, Result).addReg(Tmp1);
1123 case ISD::ExternalSymbol: {
1124 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
1125 // assert(0 && "sorry, but what did you want an ExternalSymbol for again?");
1126 BuildMI(BB, IA64::MOV, 1, Result).addExternalSymbol(Sym); // XXX
1130 case ISD::FP_EXTEND: {
1131 Tmp1 = SelectExpr(N.getOperand(0));
1132 BuildMI(BB, IA64::FMOV, 1, Result).addReg(Tmp1);
1136 case ISD::ZERO_EXTEND: {
1137 Tmp1 = SelectExpr(N.getOperand(0)); // value
1139 switch (N.getOperand(0).getValueType()) {
1140 default: assert(0 && "Cannot zero-extend this type!");
1141 case MVT::i8: Opc = IA64::ZXT1; break;
1142 case MVT::i16: Opc = IA64::ZXT2; break;
1143 case MVT::i32: Opc = IA64::ZXT4; break;
1145 // we handle bools differently! :
1146 case MVT::i1: { // if the predicate reg has 1, we want a '1' in our GR.
1147 unsigned dummy = MakeReg(MVT::i64);
1149 BuildMI(BB, IA64::MOV, 1, dummy).addReg(IA64::r0);
1150 // ...then conditionally (PR:Tmp1) add 1:
1151 BuildMI(BB, IA64::TPCADDIMM22, 2, Result).addReg(dummy)
1152 .addImm(1).addReg(Tmp1);
1153 return Result; // XXX early exit!
1157 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1161 case ISD::SIGN_EXTEND: { // we should only have to handle i1 -> i64 here!!!
1163 assert(0 && "hmm, ISD::SIGN_EXTEND: shouldn't ever be reached. bad luck!\n");
1165 Tmp1 = SelectExpr(N.getOperand(0)); // value
1167 switch (N.getOperand(0).getValueType()) {
1168 default: assert(0 && "Cannot sign-extend this type!");
1169 case MVT::i1: assert(0 && "trying to sign extend a bool? ow.\n");
1170 Opc = IA64::SXT1; break;
1171 // FIXME: for now, we treat bools the same as i8s
1172 case MVT::i8: Opc = IA64::SXT1; break;
1173 case MVT::i16: Opc = IA64::SXT2; break;
1174 case MVT::i32: Opc = IA64::SXT4; break;
1177 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1181 case ISD::TRUNCATE: {
1182 // we use the funky dep.z (deposit (zero)) instruction to deposit bits
1183 // of R0 appropriately.
1184 switch (N.getOperand(0).getValueType()) {
1185 default: assert(0 && "Unknown truncate!");
1186 case MVT::i64: break;
1188 Tmp1 = SelectExpr(N.getOperand(0));
1189 unsigned depositPos, depositLen;
1191 switch (N.getValueType()) {
1192 default: assert(0 && "Unknown truncate!");
1194 // if input (normal reg) is 0, 0!=0 -> false (0), if 1, 1!=0 ->true (1):
1195 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(Tmp1)
1197 return Result; // XXX early exit!
1199 case MVT::i8: depositPos=0; depositLen=8; break;
1200 case MVT::i16: depositPos=0; depositLen=16; break;
1201 case MVT::i32: depositPos=0; depositLen=32; break;
1203 BuildMI(BB, IA64::DEPZ, 1, Result).addReg(Tmp1)
1204 .addImm(depositPos).addImm(depositLen);
1209 case ISD::FP_ROUND: {
1210 assert (DestType == MVT::f32 && N.getOperand(0).getValueType() == MVT::f64 &&
1211 "error: trying to FP_ROUND something other than f64 -> f32!\n");
1212 Tmp1 = SelectExpr(N.getOperand(0));
1213 BuildMI(BB, IA64::FADDS, 2, Result).addReg(Tmp1).addReg(IA64::F0);
1214 // we add 0.0 using a single precision add to do rounding
1219 // FIXME: the following 4 cases need cleaning
1220 case ISD::SINT_TO_FP: {
1221 Tmp1 = SelectExpr(N.getOperand(0));
1222 Tmp2 = MakeReg(MVT::f64);
1223 unsigned dummy = MakeReg(MVT::f64);
1224 BuildMI(BB, IA64::SETFSIG, 1, Tmp2).addReg(Tmp1);
1225 BuildMI(BB, IA64::FCVTXF, 1, dummy).addReg(Tmp2);
1226 BuildMI(BB, IA64::FNORMD, 1, Result).addReg(dummy);
1230 case ISD::UINT_TO_FP: {
1231 Tmp1 = SelectExpr(N.getOperand(0));
1232 Tmp2 = MakeReg(MVT::f64);
1233 unsigned dummy = MakeReg(MVT::f64);
1234 BuildMI(BB, IA64::SETFSIG, 1, Tmp2).addReg(Tmp1);
1235 BuildMI(BB, IA64::FCVTXUF, 1, dummy).addReg(Tmp2);
1236 BuildMI(BB, IA64::FNORMD, 1, Result).addReg(dummy);
1240 case ISD::FP_TO_SINT: {
1241 Tmp1 = SelectExpr(N.getOperand(0));
1242 Tmp2 = MakeReg(MVT::f64);
1243 BuildMI(BB, IA64::FCVTFXTRUNC, 1, Tmp2).addReg(Tmp1);
1244 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(Tmp2);
1248 case ISD::FP_TO_UINT: {
1249 Tmp1 = SelectExpr(N.getOperand(0));
1250 Tmp2 = MakeReg(MVT::f64);
1251 BuildMI(BB, IA64::FCVTFXUTRUNC, 1, Tmp2).addReg(Tmp1);
1252 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(Tmp2);
1257 if(DestType == MVT::f64 && N.getOperand(0).getOpcode() == ISD::MUL &&
1258 N.getOperand(0).Val->hasOneUse()) { // if we can fold this add
1259 // into an fma, do so:
1260 // ++FusedFP; // Statistic
1261 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
1262 Tmp2 = SelectExpr(N.getOperand(0).getOperand(1));
1263 Tmp3 = SelectExpr(N.getOperand(1));
1264 BuildMI(BB, IA64::FMA, 3, Result).addReg(Tmp1).addReg(Tmp2).addReg(Tmp3);
1265 return Result; // early exit
1268 if(DestType != MVT::f64 && N.getOperand(0).getOpcode() == ISD::SHL &&
1269 N.getOperand(0).Val->hasOneUse()) { // if we might be able to fold
1270 // this add into a shladd, try:
1271 ConstantSDNode *CSD = NULL;
1272 if((CSD = dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) &&
1273 (CSD->getValue() >= 1) && (CSD->getValue() <= 4) ) { // we can:
1275 // ++FusedSHLADD; // Statistic
1276 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
1277 int shl_amt = CSD->getValue();
1278 Tmp3 = SelectExpr(N.getOperand(1));
1280 BuildMI(BB, IA64::SHLADD, 3, Result)
1281 .addReg(Tmp1).addImm(shl_amt).addReg(Tmp3);
1282 return Result; // early exit
1286 //else, fallthrough:
1287 Tmp1 = SelectExpr(N.getOperand(0));
1288 if(DestType != MVT::f64) { // integer addition:
1289 switch (ponderIntegerAdditionWith(N.getOperand(1), Tmp3)) {
1290 case 1: // adding a constant that's 14 bits
1291 BuildMI(BB, IA64::ADDIMM14, 2, Result).addReg(Tmp1).addSImm(Tmp3);
1292 return Result; // early exit
1293 } // fallthrough and emit a reg+reg ADD:
1294 Tmp2 = SelectExpr(N.getOperand(1));
1295 BuildMI(BB, IA64::ADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
1296 } else { // this is a floating point addition
1297 Tmp2 = SelectExpr(N.getOperand(1));
1298 BuildMI(BB, IA64::FADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
1305 if(DestType != MVT::f64) { // TODO: speed!
1306 if(N.getOperand(1).getOpcode() != ISD::Constant) { // if not a const mul
1307 // boring old integer multiply with xma
1308 Tmp1 = SelectExpr(N.getOperand(0));
1309 Tmp2 = SelectExpr(N.getOperand(1));
1311 unsigned TempFR1=MakeReg(MVT::f64);
1312 unsigned TempFR2=MakeReg(MVT::f64);
1313 unsigned TempFR3=MakeReg(MVT::f64);
1314 BuildMI(BB, IA64::SETFSIG, 1, TempFR1).addReg(Tmp1);
1315 BuildMI(BB, IA64::SETFSIG, 1, TempFR2).addReg(Tmp2);
1316 BuildMI(BB, IA64::XMAL, 1, TempFR3).addReg(TempFR1).addReg(TempFR2)
1318 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(TempFR3);
1319 return Result; // early exit
1320 } else { // we are multiplying by an integer constant! yay
1321 return Reg = SelectExpr(BuildConstmulSequence(N)); // avert your eyes!
1324 else { // floating point multiply
1325 Tmp1 = SelectExpr(N.getOperand(0));
1326 Tmp2 = SelectExpr(N.getOperand(1));
1327 BuildMI(BB, IA64::FMPY, 2, Result).addReg(Tmp1).addReg(Tmp2);
1333 if(DestType == MVT::f64 && N.getOperand(0).getOpcode() == ISD::MUL &&
1334 N.getOperand(0).Val->hasOneUse()) { // if we can fold this sub
1335 // into an fms, do so:
1336 // ++FusedFP; // Statistic
1337 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
1338 Tmp2 = SelectExpr(N.getOperand(0).getOperand(1));
1339 Tmp3 = SelectExpr(N.getOperand(1));
1340 BuildMI(BB, IA64::FMS, 3, Result).addReg(Tmp1).addReg(Tmp2).addReg(Tmp3);
1341 return Result; // early exit
1343 Tmp2 = SelectExpr(N.getOperand(1));
1344 if(DestType != MVT::f64) { // integer subtraction:
1345 switch (ponderIntegerSubtractionFrom(N.getOperand(0), Tmp3)) {
1346 case 1: // subtracting *from* an 8 bit constant:
1347 BuildMI(BB, IA64::SUBIMM8, 2, Result).addSImm(Tmp3).addReg(Tmp2);
1348 return Result; // early exit
1349 } // fallthrough and emit a reg+reg SUB:
1350 Tmp1 = SelectExpr(N.getOperand(0));
1351 BuildMI(BB, IA64::SUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
1352 } else { // this is a floating point subtraction
1353 Tmp1 = SelectExpr(N.getOperand(0));
1354 BuildMI(BB, IA64::FSUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
1360 Tmp1 = SelectExpr(N.getOperand(0));
1361 assert(DestType == MVT::f64 && "trying to fabs something other than f64?");
1362 BuildMI(BB, IA64::FABS, 1, Result).addReg(Tmp1);
1367 assert(DestType == MVT::f64 && "trying to fneg something other than f64?");
1369 if (ISD::FABS == N.getOperand(0).getOpcode()) { // && hasOneUse()?
1370 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
1371 BuildMI(BB, IA64::FNEGABS, 1, Result).addReg(Tmp1); // fold in abs
1373 Tmp1 = SelectExpr(N.getOperand(0));
1374 BuildMI(BB, IA64::FNEG, 1, Result).addReg(Tmp1); // plain old fneg
1381 switch (N.getValueType()) {
1382 default: assert(0 && "Cannot AND this type!");
1383 case MVT::i1: { // if a bool, we emit a pseudocode AND
1384 unsigned pA = SelectExpr(N.getOperand(0));
1385 unsigned pB = SelectExpr(N.getOperand(1));
1387 /* our pseudocode for AND is:
1389 (pA) cmp.eq.unc pC,p0 = r0,r0 // pC = pA
1390 cmp.eq pTemp,p0 = r0,r0 // pTemp = NOT pB
1392 (pB) cmp.ne pTemp,p0 = r0,r0
1394 (pTemp)cmp.ne pC,p0 = r0,r0 // if (NOT pB) pC = 0
1397 unsigned pTemp = MakeReg(MVT::i1);
1399 unsigned bogusTemp1 = MakeReg(MVT::i1);
1400 unsigned bogusTemp2 = MakeReg(MVT::i1);
1401 unsigned bogusTemp3 = MakeReg(MVT::i1);
1402 unsigned bogusTemp4 = MakeReg(MVT::i1);
1404 BuildMI(BB, IA64::PCMPEQUNC, 3, bogusTemp1)
1405 .addReg(IA64::r0).addReg(IA64::r0).addReg(pA);
1406 BuildMI(BB, IA64::CMPEQ, 2, bogusTemp2)
1407 .addReg(IA64::r0).addReg(IA64::r0);
1408 BuildMI(BB, IA64::TPCMPNE, 3, pTemp)
1409 .addReg(bogusTemp2).addReg(IA64::r0).addReg(IA64::r0).addReg(pB);
1410 BuildMI(BB, IA64::TPCMPNE, 3, Result)
1411 .addReg(bogusTemp1).addReg(IA64::r0).addReg(IA64::r0).addReg(pTemp);
1415 // if not a bool, we just AND away:
1420 Tmp1 = SelectExpr(N.getOperand(0));
1421 switch (ponderIntegerAndWith(N.getOperand(1), Tmp3)) {
1422 case 1: // ANDing a constant that is 2^n-1 for some n
1424 case 8: // if AND 0x00000000000000FF, be quaint and use zxt1
1425 BuildMI(BB, IA64::ZXT1, 1, Result).addReg(Tmp1);
1427 case 16: // if AND 0x000000000000FFFF, be quaint and use zxt2
1428 BuildMI(BB, IA64::ZXT2, 1, Result).addReg(Tmp1);
1430 case 32: // if AND 0x00000000FFFFFFFF, be quaint and use zxt4
1431 BuildMI(BB, IA64::ZXT4, 1, Result).addReg(Tmp1);
1433 default: // otherwise, use dep.z to paste zeros
1434 BuildMI(BB, IA64::DEPZ, 3, Result).addReg(Tmp1)
1435 .addImm(0).addImm(Tmp3);
1438 return Result; // early exit
1439 } // fallthrough and emit a simple AND:
1440 Tmp2 = SelectExpr(N.getOperand(1));
1441 BuildMI(BB, IA64::AND, 2, Result).addReg(Tmp1).addReg(Tmp2);
1448 switch (N.getValueType()) {
1449 default: assert(0 && "Cannot OR this type!");
1450 case MVT::i1: { // if a bool, we emit a pseudocode OR
1451 unsigned pA = SelectExpr(N.getOperand(0));
1452 unsigned pB = SelectExpr(N.getOperand(1));
1454 unsigned pTemp1 = MakeReg(MVT::i1);
1456 /* our pseudocode for OR is:
1462 (pA) cmp.eq.unc pC,p0 = r0,r0 // pC = pA
1464 (pB) cmp.eq pC,p0 = r0,r0 // if (pB) pC = 1
1467 BuildMI(BB, IA64::PCMPEQUNC, 3, pTemp1)
1468 .addReg(IA64::r0).addReg(IA64::r0).addReg(pA);
1469 BuildMI(BB, IA64::TPCMPEQ, 3, Result)
1470 .addReg(pTemp1).addReg(IA64::r0).addReg(IA64::r0).addReg(pB);
1473 // if not a bool, we just OR away:
1478 Tmp1 = SelectExpr(N.getOperand(0));
1479 Tmp2 = SelectExpr(N.getOperand(1));
1480 BuildMI(BB, IA64::OR, 2, Result).addReg(Tmp1).addReg(Tmp2);
1488 switch (N.getValueType()) {
1489 default: assert(0 && "Cannot XOR this type!");
1490 case MVT::i1: { // if a bool, we emit a pseudocode XOR
1491 unsigned pY = SelectExpr(N.getOperand(0));
1492 unsigned pZ = SelectExpr(N.getOperand(1));
1494 /* one possible routine for XOR is:
1496 // Compute px = py ^ pz
1497 // using sum of products: px = (py & !pz) | (pz & !py)
1498 // Uses 5 instructions in 3 cycles.
1500 (pz) cmp.eq.unc px = r0, r0 // px = pz
1501 (py) cmp.eq.unc pt = r0, r0 // pt = py
1504 (pt) cmp.ne.and px = r0, r0 // px = px & !pt (px = pz & !pt)
1505 (pz) cmp.ne.and pt = r0, r0 // pt = pt & !pz
1509 (pt) cmp.eq.or px = r0, r0 // px = px | pt
1511 *** Another, which we use here, requires one scratch GR. it is:
1513 mov rt = 0 // initialize rt off critical path
1517 (pz) cmp.eq.unc px = r0, r0 // px = pz
1518 (pz) mov rt = 1 // rt = pz
1521 (py) cmp.ne px = 1, rt // if (py) px = !pz
1523 .. these routines kindly provided by Jim Hull
1525 unsigned rt = MakeReg(MVT::i64);
1527 // these two temporaries will never actually appear,
1528 // due to the two-address form of some of the instructions below
1529 unsigned bogoPR = MakeReg(MVT::i1); // becomes Result
1530 unsigned bogoGR = MakeReg(MVT::i64); // becomes rt
1532 BuildMI(BB, IA64::MOV, 1, bogoGR).addReg(IA64::r0);
1533 BuildMI(BB, IA64::PCMPEQUNC, 3, bogoPR)
1534 .addReg(IA64::r0).addReg(IA64::r0).addReg(pZ);
1535 BuildMI(BB, IA64::TPCADDIMM22, 2, rt)
1536 .addReg(bogoGR).addImm(1).addReg(pZ);
1537 BuildMI(BB, IA64::TPCMPIMM8NE, 3, Result)
1538 .addReg(bogoPR).addImm(1).addReg(rt).addReg(pY);
1541 // if not a bool, we just XOR away:
1546 Tmp1 = SelectExpr(N.getOperand(0));
1547 Tmp2 = SelectExpr(N.getOperand(1));
1548 BuildMI(BB, IA64::XOR, 2, Result).addReg(Tmp1).addReg(Tmp2);
1556 Tmp1 = SelectExpr(N.getOperand(0));
1557 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1558 Tmp2 = CN->getValue();
1559 BuildMI(BB, IA64::SHLI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1561 Tmp2 = SelectExpr(N.getOperand(1));
1562 BuildMI(BB, IA64::SHL, 2, Result).addReg(Tmp1).addReg(Tmp2);
1568 Tmp1 = SelectExpr(N.getOperand(0));
1569 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1570 Tmp2 = CN->getValue();
1571 BuildMI(BB, IA64::SHRUI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1573 Tmp2 = SelectExpr(N.getOperand(1));
1574 BuildMI(BB, IA64::SHRU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1580 Tmp1 = SelectExpr(N.getOperand(0));
1581 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1582 Tmp2 = CN->getValue();
1583 BuildMI(BB, IA64::SHRSI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1585 Tmp2 = SelectExpr(N.getOperand(1));
1586 BuildMI(BB, IA64::SHRS, 2, Result).addReg(Tmp1).addReg(Tmp2);
1596 Tmp1 = SelectExpr(N.getOperand(0));
1597 Tmp2 = SelectExpr(N.getOperand(1));
1601 if(DestType == MVT::f64) // XXX: we're not gonna be fed MVT::f32, are we?
1604 bool isModulus=false; // is it a division or a modulus?
1605 bool isSigned=false;
1607 switch(N.getOpcode()) {
1608 case ISD::SDIV: isModulus=false; isSigned=true; break;
1609 case ISD::UDIV: isModulus=false; isSigned=false; break;
1610 case ISD::SREM: isModulus=true; isSigned=true; break;
1611 case ISD::UREM: isModulus=true; isSigned=false; break;
1614 if(!isModulus && !isFP) { // if this is an integer divide,
1615 switch (ponderIntegerDivisionBy(N.getOperand(1), isSigned, Tmp3)) {
1616 case 1: // division by a constant that's a power of 2
1617 Tmp1 = SelectExpr(N.getOperand(0));
1618 if(isSigned) { // argument could be negative, so emit some code:
1619 unsigned divAmt=Tmp3;
1620 unsigned tempGR1=MakeReg(MVT::i64);
1621 unsigned tempGR2=MakeReg(MVT::i64);
1622 unsigned tempGR3=MakeReg(MVT::i64);
1623 BuildMI(BB, IA64::SHRS, 2, tempGR1)
1624 .addReg(Tmp1).addImm(divAmt-1);
1625 BuildMI(BB, IA64::EXTRU, 3, tempGR2)
1626 .addReg(tempGR1).addImm(64-divAmt).addImm(divAmt);
1627 BuildMI(BB, IA64::ADD, 2, tempGR3)
1628 .addReg(Tmp1).addReg(tempGR2);
1629 BuildMI(BB, IA64::SHRS, 2, Result)
1630 .addReg(tempGR3).addImm(divAmt);
1632 else // unsigned div-by-power-of-2 becomes a simple shift right:
1633 BuildMI(BB, IA64::SHRU, 2, Result).addReg(Tmp1).addImm(Tmp3);
1634 return Result; // early exit
1638 unsigned TmpPR=MakeReg(MVT::i1); // we need two scratch
1639 unsigned TmpPR2=MakeReg(MVT::i1); // predicate registers,
1640 unsigned TmpF1=MakeReg(MVT::f64); // and one metric truckload of FP regs.
1641 unsigned TmpF2=MakeReg(MVT::f64); // lucky we have IA64?
1642 unsigned TmpF3=MakeReg(MVT::f64); // well, the real FIXME is to have
1643 unsigned TmpF4=MakeReg(MVT::f64); // isTwoAddress forms of these
1644 unsigned TmpF5=MakeReg(MVT::f64); // FP instructions so we can end up with
1645 unsigned TmpF6=MakeReg(MVT::f64); // stuff like setf.sig f10=f10 etc.
1646 unsigned TmpF7=MakeReg(MVT::f64);
1647 unsigned TmpF8=MakeReg(MVT::f64);
1648 unsigned TmpF9=MakeReg(MVT::f64);
1649 unsigned TmpF10=MakeReg(MVT::f64);
1650 unsigned TmpF11=MakeReg(MVT::f64);
1651 unsigned TmpF12=MakeReg(MVT::f64);
1652 unsigned TmpF13=MakeReg(MVT::f64);
1653 unsigned TmpF14=MakeReg(MVT::f64);
1654 unsigned TmpF15=MakeReg(MVT::f64);
1656 // OK, emit some code:
1659 // first, load the inputs into FP regs.
1660 BuildMI(BB, IA64::SETFSIG, 1, TmpF1).addReg(Tmp1);
1661 BuildMI(BB, IA64::SETFSIG, 1, TmpF2).addReg(Tmp2);
1663 // next, convert the inputs to FP
1665 BuildMI(BB, IA64::FCVTXF, 1, TmpF3).addReg(TmpF1);
1666 BuildMI(BB, IA64::FCVTXF, 1, TmpF4).addReg(TmpF2);
1668 BuildMI(BB, IA64::FCVTXUFS1, 1, TmpF3).addReg(TmpF1);
1669 BuildMI(BB, IA64::FCVTXUFS1, 1, TmpF4).addReg(TmpF2);
1672 } else { // this is an FP divide/remainder, so we 'leak' some temp
1673 // regs and assign TmpF3=Tmp1, TmpF4=Tmp2
1678 // we start by computing an approximate reciprocal (good to 9 bits?)
1679 // note, this instruction writes _both_ TmpF5 (answer) and TmpPR (predicate)
1680 BuildMI(BB, IA64::FRCPAS1, 4)
1681 .addReg(TmpF5, MachineOperand::Def)
1682 .addReg(TmpPR, MachineOperand::Def)
1683 .addReg(TmpF3).addReg(TmpF4);
1685 if(!isModulus) { // if this is a divide, we worry about div-by-zero
1686 unsigned bogusPR=MakeReg(MVT::i1); // won't appear, due to twoAddress
1688 BuildMI(BB, IA64::CMPEQ, 2, bogusPR).addReg(IA64::r0).addReg(IA64::r0);
1689 BuildMI(BB, IA64::TPCMPNE, 3, TmpPR2).addReg(bogusPR)
1690 .addReg(IA64::r0).addReg(IA64::r0).addReg(TmpPR);
1693 // now we apply newton's method, thrice! (FIXME: this is ~72 bits of
1694 // precision, don't need this much for f32/i32)
1695 BuildMI(BB, IA64::CFNMAS1, 4, TmpF6)
1696 .addReg(TmpF4).addReg(TmpF5).addReg(IA64::F1).addReg(TmpPR);
1697 BuildMI(BB, IA64::CFMAS1, 4, TmpF7)
1698 .addReg(TmpF3).addReg(TmpF5).addReg(IA64::F0).addReg(TmpPR);
1699 BuildMI(BB, IA64::CFMAS1, 4, TmpF8)
1700 .addReg(TmpF6).addReg(TmpF6).addReg(IA64::F0).addReg(TmpPR);
1701 BuildMI(BB, IA64::CFMAS1, 4, TmpF9)
1702 .addReg(TmpF6).addReg(TmpF7).addReg(TmpF7).addReg(TmpPR);
1703 BuildMI(BB, IA64::CFMAS1, 4,TmpF10)
1704 .addReg(TmpF6).addReg(TmpF5).addReg(TmpF5).addReg(TmpPR);
1705 BuildMI(BB, IA64::CFMAS1, 4,TmpF11)
1706 .addReg(TmpF8).addReg(TmpF9).addReg(TmpF9).addReg(TmpPR);
1707 BuildMI(BB, IA64::CFMAS1, 4,TmpF12)
1708 .addReg(TmpF8).addReg(TmpF10).addReg(TmpF10).addReg(TmpPR);
1709 BuildMI(BB, IA64::CFNMAS1, 4,TmpF13)
1710 .addReg(TmpF4).addReg(TmpF11).addReg(TmpF3).addReg(TmpPR);
1712 // FIXME: this is unfortunate :(
1713 // the story is that the dest reg of the fnma above and the fma below
1714 // (and therefore possibly the src of the fcvt.fx[u] as well) cannot
1715 // be the same register, or this code breaks if the first argument is
1716 // zero. (e.g. without this hack, 0%8 yields -64, not 0.)
1717 BuildMI(BB, IA64::CFMAS1, 4,TmpF14)
1718 .addReg(TmpF13).addReg(TmpF12).addReg(TmpF11).addReg(TmpPR);
1720 if(isModulus) { // XXX: fragile! fixes _only_ mod, *breaks* div! !
1721 BuildMI(BB, IA64::IUSE, 1).addReg(TmpF13); // hack :(
1725 // round to an integer
1727 BuildMI(BB, IA64::FCVTFXTRUNCS1, 1, TmpF15).addReg(TmpF14);
1729 BuildMI(BB, IA64::FCVTFXUTRUNCS1, 1, TmpF15).addReg(TmpF14);
1731 BuildMI(BB, IA64::FMOV, 1, TmpF15).addReg(TmpF14);
1732 // EXERCISE: can you see why TmpF15=TmpF14 does not work here, and
1733 // we really do need the above FMOV? ;)
1737 if(isFP) { // extra worrying about div-by-zero
1738 unsigned bogoResult=MakeReg(MVT::f64);
1740 // we do a 'conditional fmov' (of the correct result, depending
1741 // on how the frcpa predicate turned out)
1742 BuildMI(BB, IA64::PFMOV, 2, bogoResult)
1743 .addReg(TmpF12).addReg(TmpPR2);
1744 BuildMI(BB, IA64::CFMOV, 2, Result)
1745 .addReg(bogoResult).addReg(TmpF15).addReg(TmpPR);
1748 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(TmpF15);
1750 } else { // this is a modulus
1752 // answer = q * (-b) + a
1753 unsigned ModulusResult = MakeReg(MVT::f64);
1754 unsigned TmpF = MakeReg(MVT::f64);
1755 unsigned TmpI = MakeReg(MVT::i64);
1757 BuildMI(BB, IA64::SUB, 2, TmpI).addReg(IA64::r0).addReg(Tmp2);
1758 BuildMI(BB, IA64::SETFSIG, 1, TmpF).addReg(TmpI);
1759 BuildMI(BB, IA64::XMAL, 3, ModulusResult)
1760 .addReg(TmpF15).addReg(TmpF).addReg(TmpF1);
1761 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(ModulusResult);
1762 } else { // FP modulus! The horror... the horror....
1763 assert(0 && "sorry, no FP modulus just yet!\n!\n");
1770 case ISD::SIGN_EXTEND_INREG: {
1771 Tmp1 = SelectExpr(N.getOperand(0));
1772 MVTSDNode* MVN = dyn_cast<MVTSDNode>(Node);
1773 switch(MVN->getExtraValueType())
1777 assert(0 && "don't know how to sign extend this type");
1779 case MVT::i8: Opc = IA64::SXT1; break;
1780 case MVT::i16: Opc = IA64::SXT2; break;
1781 case MVT::i32: Opc = IA64::SXT4; break;
1783 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1788 Tmp1 = SelectExpr(N.getOperand(0));
1790 if (SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Node)) {
1791 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
1793 if(ConstantSDNode *CSDN =
1794 dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1795 // if we are comparing against a constant zero
1796 if(CSDN->getValue()==0)
1797 Tmp2 = IA64::r0; // then we can just compare against r0
1799 Tmp2 = SelectExpr(N.getOperand(1));
1800 } else // not comparing against a constant
1801 Tmp2 = SelectExpr(N.getOperand(1));
1803 switch (SetCC->getCondition()) {
1804 default: assert(0 && "Unknown integer comparison!");
1806 BuildMI(BB, IA64::CMPEQ, 2, Result).addReg(Tmp1).addReg(Tmp2);
1809 BuildMI(BB, IA64::CMPGT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1812 BuildMI(BB, IA64::CMPGE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1815 BuildMI(BB, IA64::CMPLT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1818 BuildMI(BB, IA64::CMPLE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1821 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1824 BuildMI(BB, IA64::CMPLTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1827 BuildMI(BB, IA64::CMPGTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1830 BuildMI(BB, IA64::CMPLEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1833 BuildMI(BB, IA64::CMPGEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1837 else { // if not integer, should be FP. FIXME: what about bools? ;)
1838 assert(SetCC->getOperand(0).getValueType() != MVT::f32 &&
1839 "error: SETCC should have had incoming f32 promoted to f64!\n");
1841 if(ConstantFPSDNode *CFPSDN =
1842 dyn_cast<ConstantFPSDNode>(N.getOperand(1))) {
1844 // if we are comparing against a constant +0.0 or +1.0
1845 if(CFPSDN->isExactlyValue(+0.0))
1846 Tmp2 = IA64::F0; // then we can just compare against f0
1847 else if(CFPSDN->isExactlyValue(+1.0))
1848 Tmp2 = IA64::F1; // or f1
1850 Tmp2 = SelectExpr(N.getOperand(1));
1851 } else // not comparing against a constant
1852 Tmp2 = SelectExpr(N.getOperand(1));
1854 switch (SetCC->getCondition()) {
1855 default: assert(0 && "Unknown FP comparison!");
1857 BuildMI(BB, IA64::FCMPEQ, 2, Result).addReg(Tmp1).addReg(Tmp2);
1860 BuildMI(BB, IA64::FCMPGT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1863 BuildMI(BB, IA64::FCMPGE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1866 BuildMI(BB, IA64::FCMPLT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1869 BuildMI(BB, IA64::FCMPLE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1872 BuildMI(BB, IA64::FCMPNE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1875 BuildMI(BB, IA64::FCMPLTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1878 BuildMI(BB, IA64::FCMPGTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1881 BuildMI(BB, IA64::FCMPLEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1884 BuildMI(BB, IA64::FCMPGEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1890 assert(0 && "this setcc not implemented yet");
1898 // Make sure we generate both values.
1900 ExprMap[N.getValue(1)] = 1; // Generate the token
1902 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1906 if(opcode == ISD::LOAD) { // this is a LOAD
1907 switch (Node->getValueType(0)) {
1908 default: assert(0 && "Cannot load this type!");
1909 case MVT::i1: Opc = IA64::LD1; isBool=true; break;
1910 // FIXME: for now, we treat bool loads the same as i8 loads */
1911 case MVT::i8: Opc = IA64::LD1; break;
1912 case MVT::i16: Opc = IA64::LD2; break;
1913 case MVT::i32: Opc = IA64::LD4; break;
1914 case MVT::i64: Opc = IA64::LD8; break;
1916 case MVT::f32: Opc = IA64::LDF4; break;
1917 case MVT::f64: Opc = IA64::LDF8; break;
1919 } else { // this is an EXTLOAD or ZEXTLOAD
1920 MVT::ValueType TypeBeingLoaded = cast<MVTSDNode>(Node)->getExtraValueType();
1921 switch (TypeBeingLoaded) {
1922 default: assert(0 && "Cannot extload/zextload this type!");
1924 case MVT::i8: Opc = IA64::LD1; break;
1925 case MVT::i16: Opc = IA64::LD2; break;
1926 case MVT::i32: Opc = IA64::LD4; break;
1927 case MVT::f32: Opc = IA64::LDF4; break;
1931 SDOperand Chain = N.getOperand(0);
1932 SDOperand Address = N.getOperand(1);
1934 if(Address.getOpcode() == ISD::GlobalAddress) {
1936 unsigned dummy = MakeReg(MVT::i64);
1937 unsigned dummy2 = MakeReg(MVT::i64);
1938 BuildMI(BB, IA64::ADD, 2, dummy)
1939 .addGlobalAddress(cast<GlobalAddressSDNode>(Address)->getGlobal())
1941 BuildMI(BB, IA64::LD8, 1, dummy2).addReg(dummy);
1943 BuildMI(BB, Opc, 1, Result).addReg(dummy2);
1944 else { // emit a little pseudocode to load a bool (stored in one byte)
1945 // into a predicate register
1946 assert(Opc==IA64::LD1 && "problem loading a bool");
1947 unsigned dummy3 = MakeReg(MVT::i64);
1948 BuildMI(BB, Opc, 1, dummy3).addReg(dummy2);
1949 // we compare to 0. true? 0. false? 1.
1950 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
1952 } else if(ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Address)) {
1954 IA64Lowering.restoreGP(BB);
1955 unsigned dummy = MakeReg(MVT::i64);
1956 BuildMI(BB, IA64::ADD, 2, dummy).addConstantPoolIndex(CP->getIndex())
1957 .addReg(IA64::r1); // CPI+GP
1959 BuildMI(BB, Opc, 1, Result).addReg(dummy);
1960 else { // emit a little pseudocode to load a bool (stored in one byte)
1961 // into a predicate register
1962 assert(Opc==IA64::LD1 && "problem loading a bool");
1963 unsigned dummy3 = MakeReg(MVT::i64);
1964 BuildMI(BB, Opc, 1, dummy3).addReg(dummy);
1965 // we compare to 0. true? 0. false? 1.
1966 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
1968 } else if(Address.getOpcode() == ISD::FrameIndex) {
1969 Select(Chain); // FIXME ? what about bools?
1970 unsigned dummy = MakeReg(MVT::i64);
1971 BuildMI(BB, IA64::MOV, 1, dummy)
1972 .addFrameIndex(cast<FrameIndexSDNode>(Address)->getIndex());
1974 BuildMI(BB, Opc, 1, Result).addReg(dummy);
1975 else { // emit a little pseudocode to load a bool (stored in one byte)
1976 // into a predicate register
1977 assert(Opc==IA64::LD1 && "problem loading a bool");
1978 unsigned dummy3 = MakeReg(MVT::i64);
1979 BuildMI(BB, Opc, 1, dummy3).addReg(dummy);
1980 // we compare to 0. true? 0. false? 1.
1981 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
1983 } else { // none of the above...
1985 Tmp2 = SelectExpr(Address);
1987 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
1988 else { // emit a little pseudocode to load a bool (stored in one byte)
1989 // into a predicate register
1990 assert(Opc==IA64::LD1 && "problem loading a bool");
1991 unsigned dummy = MakeReg(MVT::i64);
1992 BuildMI(BB, Opc, 1, dummy).addReg(Tmp2);
1993 // we compare to 0. true? 0. false? 1.
1994 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy).addReg(IA64::r0);
2001 case ISD::CopyFromReg: {
2003 Result = ExprMap[N.getValue(0)] =
2004 MakeReg(N.getValue(0).getValueType());
2006 SDOperand Chain = N.getOperand(0);
2009 unsigned r = dyn_cast<RegSDNode>(Node)->getReg();
2011 if(N.getValueType() == MVT::i1) // if a bool, we use pseudocode
2012 BuildMI(BB, IA64::PCMPEQUNC, 3, Result)
2013 .addReg(IA64::r0).addReg(IA64::r0).addReg(r);
2014 // (r) Result =cmp.eq.unc(r0,r0)
2016 BuildMI(BB, IA64::MOV, 1, Result).addReg(r); // otherwise MOV
2021 Select(N.getOperand(0));
2023 // The chain for this call is now lowered.
2024 ExprMap.insert(std::make_pair(N.getValue(Node->getNumValues()-1), 1));
2026 //grab the arguments
2027 std::vector<unsigned> argvregs;
2029 for(int i = 2, e = Node->getNumOperands(); i < e; ++i)
2030 argvregs.push_back(SelectExpr(N.getOperand(i)));
2032 // see section 8.5.8 of "Itanium Software Conventions and
2033 // Runtime Architecture Guide to see some examples of what's going
2034 // on here. (in short: int args get mapped 1:1 'slot-wise' to out0->out7,
2035 // while FP args get mapped to F8->F15 as needed)
2037 unsigned used_FPArgs=0; // how many FP Args have been used so far?
2040 for(int i = 0, e = std::min(8, (int)argvregs.size()); i < e; ++i)
2042 unsigned intArgs[] = {IA64::out0, IA64::out1, IA64::out2, IA64::out3,
2043 IA64::out4, IA64::out5, IA64::out6, IA64::out7 };
2044 unsigned FPArgs[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
2045 IA64::F12, IA64::F13, IA64::F14, IA64::F15 };
2047 switch(N.getOperand(i+2).getValueType())
2049 default: // XXX do we need to support MVT::i1 here?
2051 N.getOperand(i).Val->dump();
2052 std::cerr << "Type for " << i << " is: " <<
2053 N.getOperand(i+2).getValueType() << std::endl;
2054 assert(0 && "Unknown value type for call");
2056 BuildMI(BB, IA64::MOV, 1, intArgs[i]).addReg(argvregs[i]);
2059 BuildMI(BB, IA64::FMOV, 1, FPArgs[used_FPArgs++])
2060 .addReg(argvregs[i]);
2061 // FIXME: we don't need to do this _all_ the time:
2062 BuildMI(BB, IA64::GETFD, 1, intArgs[i]).addReg(argvregs[i]);
2068 for (int i = 8, e = argvregs.size(); i < e; ++i)
2070 unsigned tempAddr = MakeReg(MVT::i64);
2072 switch(N.getOperand(i+2).getValueType()) {
2075 N.getOperand(i).Val->dump();
2076 std::cerr << "Type for " << i << " is: " <<
2077 N.getOperand(i+2).getValueType() << "\n";
2078 assert(0 && "Unknown value type for call");
2079 case MVT::i1: // FIXME?
2084 BuildMI(BB, IA64::ADDIMM22, 2, tempAddr)
2085 .addReg(IA64::r12).addImm(16 + (i - 8) * 8); // r12 is SP
2086 BuildMI(BB, IA64::ST8, 2).addReg(tempAddr).addReg(argvregs[i]);
2090 BuildMI(BB, IA64::ADDIMM22, 2, tempAddr)
2091 .addReg(IA64::r12).addImm(16 + (i - 8) * 8); // r12 is SP
2092 BuildMI(BB, IA64::STF8, 2).addReg(tempAddr).addReg(argvregs[i]);
2097 /* XXX we want to re-enable direct branches! crippling them now
2098 * to stress-test indirect branches.:
2099 //build the right kind of call
2100 if (GlobalAddressSDNode *GASD =
2101 dyn_cast<GlobalAddressSDNode>(N.getOperand(1)))
2103 BuildMI(BB, IA64::BRCALL, 1).addGlobalAddress(GASD->getGlobal(),true);
2104 IA64Lowering.restoreGP_SP_RP(BB);
2106 ^^^^^^^^^^^^^ we want this code one day XXX */
2107 if (ExternalSymbolSDNode *ESSDN =
2108 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1)))
2109 { // FIXME : currently need this case for correctness, to avoid
2110 // "non-pic code with imm relocation against dynamic symbol" errors
2111 BuildMI(BB, IA64::BRCALL, 1)
2112 .addExternalSymbol(ESSDN->getSymbol(), true);
2113 IA64Lowering.restoreGP_SP_RP(BB);
2116 Tmp1 = SelectExpr(N.getOperand(1));
2118 unsigned targetEntryPoint=MakeReg(MVT::i64);
2119 unsigned targetGPAddr=MakeReg(MVT::i64);
2120 unsigned currentGP=MakeReg(MVT::i64);
2122 // b6 is a scratch branch register, we load the target entry point
2123 // from the base of the function descriptor
2124 BuildMI(BB, IA64::LD8, 1, targetEntryPoint).addReg(Tmp1);
2125 BuildMI(BB, IA64::MOV, 1, IA64::B6).addReg(targetEntryPoint);
2127 // save the current GP:
2128 BuildMI(BB, IA64::MOV, 1, currentGP).addReg(IA64::r1);
2130 /* TODO: we need to make sure doing this never, ever loads a
2131 * bogus value into r1 (GP). */
2132 // load the target GP (which is at mem[functiondescriptor+8])
2133 BuildMI(BB, IA64::ADDIMM22, 2, targetGPAddr)
2134 .addReg(Tmp1).addImm(8); // FIXME: addimm22? why not postincrement ld
2135 BuildMI(BB, IA64::LD8, 1, IA64::r1).addReg(targetGPAddr);
2137 // and then jump: (well, call)
2138 BuildMI(BB, IA64::BRCALL, 1).addReg(IA64::B6);
2139 // and finally restore the old GP
2140 BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(currentGP);
2141 IA64Lowering.restoreSP_RP(BB);
2144 switch (Node->getValueType(0)) {
2145 default: assert(0 && "Unknown value type for call result!");
2146 case MVT::Other: return 1;
2148 BuildMI(BB, IA64::CMPNE, 2, Result)
2149 .addReg(IA64::r8).addReg(IA64::r0);
2155 BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r8);
2158 BuildMI(BB, IA64::FMOV, 1, Result).addReg(IA64::F8);
2161 return Result+N.ResNo;
// Select - Emit machine instructions for a side-effecting DAG node (one
// threaded on the token chain): returns, branches, stores, register copies,
// implicit defs, and call-stack adjustments. Value-producing operands are
// materialized into virtual registers via SelectExpr().
//
// NOTE(review): this excerpt elides some original source lines (several
// case labels, 'break's and closing braces are not shown); the comments
// below describe only the code that is visible here.
2168 void ISel::Select(SDOperand N) {
2169 unsigned Tmp1, Tmp2, Opc;
2170 unsigned opcode = N.getOpcode();
// Each chain node is selected at most once; LoweredTokens memoizes the
// set of already-emitted nodes so shared chains aren't duplicated.
2172 if (!LoweredTokens.insert(N).second)
2173 return; // Already selected.
2175 SDNode *Node = N.Val;
2177 switch (Node->getOpcode()) {
// (default case, label elided) Unhandled node: dump it for debugging,
// then abort.
2179 Node->dump(); std::cerr << "\n";
2180 assert(0 && "Node not handled yet!");
2182 case ISD::EntryToken: return; // Noop
// TokenFactor merges several chains: just select each incoming operand.
2184 case ISD::TokenFactor: {
2185 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
2186 Select(Node->getOperand(i));
// CopyToReg: select the chain (operand 0), materialize the value
// (operand 1), then copy it into the destination register. i1 values
// live in predicate registers, so they are copied with a predicated
// compare pseudo-op instead of a plain MOV.
2190 case ISD::CopyToReg: {
2191 Select(N.getOperand(0));
2192 Tmp1 = SelectExpr(N.getOperand(1));
2193 Tmp2 = cast<RegSDNode>(N)->getReg();
2196 if(N.getValueType() == MVT::i1) // if a bool, we use pseudocode
2197 BuildMI(BB, IA64::PCMPEQUNC, 3, Tmp2)
2198 .addReg(IA64::r0).addReg(IA64::r0).addReg(Tmp1);
2199 // (Tmp1) Tmp2 = cmp.eq.unc(r0,r0)
2201 BuildMI(BB, IA64::MOV, 1, Tmp2).addReg(Tmp1);
2202 // XXX is this the right way 'round? ;)
2209 /* what the heck is going on here:
2211 <_sabre_> ret with two operands is obvious: chain and value
2213 <_sabre_> ret with 3 values happens when 'expansion' occurs
2214 <_sabre_> e.g. i64 gets split into 2x i32
2216 <_sabre_> you don't have this case on ia64
2218 <_sabre_> so the two returned values go into EAX/EDX on ia32
2219 <camel_> ahhh *memories*
2221 <camel_> ok, thanks :)
2222 <_sabre_> so yeah, everything that has a side effect takes a 'token chain'
2223 <_sabre_> this is the first operand always
2224 <_sabre_> these operand often define chains, they are the last operand
2225 <_sabre_> they are printed as 'ch' if you do DAG.dump()
// ISD::RET (see the transcript above): one operand means chain only;
// two operands means chain + return value, which is copied into r8
// (integer) or F8 (floating point) before the epilogue.
2228 switch (N.getNumOperands()) {
2230 assert(0 && "Unknown return instruction!");
// (case 2, label elided) chain + value.
2232 Select(N.getOperand(0));
2233 Tmp1 = SelectExpr(N.getOperand(1));
2234 switch (N.getOperand(1).getValueType()) {
2235 default: assert(0 && "All other types should have been promoted!!");
2236 // FIXME: do I need to add support for bools here?
2237 // (return '0' or '1' r8, basically...)
2239 // FIXME: need to round floats - 80 bits is bad, the tester
// (MVT::i64 case, label elided) integer results return in r8.
2242 // we mark r8 as live on exit up above in LowerArguments()
2243 BuildMI(BB, IA64::MOV, 1, IA64::r8).addReg(Tmp1);
// (MVT::f64 case, label elided) FP results return in F8.
2246 // we mark F8 as live on exit up above in LowerArguments()
2247 BuildMI(BB, IA64::FMOV, 1, IA64::F8).addReg(Tmp1);
// (case 1, label elided) void return: just select the chain.
2251 Select(N.getOperand(0));
2254 // before returning, restore the ar.pfs register (set by the 'alloc' up top)
2255 BuildMI(BB, IA64::MOV, 1).addReg(IA64::AR_PFS).addReg(IA64Lowering.VirtGPR);
2256 BuildMI(BB, IA64::RET, 0); // and then just emit a 'ret' instruction
// (ISD::BR, label elided) unconditional branch: long-form branch to the
// target block, predicated on p0 (the always-true predicate).
2261 Select(N.getOperand(0));
2262 MachineBasicBlock *Dest =
2263 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
2264 BuildMI(BB, IA64::BRLCOND_NOTCALL, 1).addReg(IA64::p0).addMBB(Dest);
2265 // XXX HACK! we do _not_ need long branches all the time
// ImplicitDef: declare the register defined without computing a value.
2269 case ISD::ImplicitDef: {
2270 Select(N.getOperand(0));
2271 BuildMI(BB, IA64::IDEF, 0, cast<RegSDNode>(N)->getReg());
// (ISD::BRCOND, label elided) conditional branch: the i1 condition is
// selected into a register (Tmp1) that predicates the branch.
2276 MachineBasicBlock *Dest =
2277 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
2279 Select(N.getOperand(0));
2280 Tmp1 = SelectExpr(N.getOperand(1));
2281 BuildMI(BB, IA64::BRLCOND_NOTCALL, 1).addReg(Tmp1).addMBB(Dest);
2282 // XXX HACK! we do _not_ need long branches all the time
// CopyFromReg / DYNAMIC_STACKALLOC fall through (handling elided here).
2291 case ISD::CopyFromReg:
2292 case ISD::DYNAMIC_STACKALLOC:
// STORE/TRUNCSTORE: operand 1 is the value, operand 2 the address.
// Opc selects the store width; isBool (declared on an elided line)
// flags i1 stores, which need pseudocode to turn a predicate register
// into a 0/1 byte before storing.
2296 case ISD::TRUNCSTORE:
2298 Select(N.getOperand(0));
2299 Tmp1 = SelectExpr(N.getOperand(1)); // value
2303 if(opcode == ISD::STORE) {
2304 switch (N.getOperand(1).getValueType()) {
2305 default: assert(0 && "Cannot store this type!");
2306 case MVT::i1: Opc = IA64::ST1; isBool=true; break;
2307 // FIXME?: for now, we treat bool stores the same as i8 stores
2308 case MVT::i8: Opc = IA64::ST1; break;
2309 case MVT::i16: Opc = IA64::ST2; break;
2310 case MVT::i32: Opc = IA64::ST4; break;
2311 case MVT::i64: Opc = IA64::ST8; break;
2313 case MVT::f32: Opc = IA64::STF4; break;
2314 case MVT::f64: Opc = IA64::STF8; break;
2316 } else { // truncstore
// TRUNCSTORE stores a value narrowed to the extra value type.
2317 switch(cast<MVTSDNode>(Node)->getExtraValueType()) {
2318 default: assert(0 && "unknown type in truncstore");
2319 case MVT::i1: Opc = IA64::ST1; isBool=true; break;
2320 //FIXME: DAG does not promote this load?
2321 case MVT::i8: Opc = IA64::ST1; break;
2322 case MVT::i16: Opc = IA64::ST2; break;
2323 case MVT::i32: Opc = IA64::ST4; break;
2324 case MVT::f32: Opc = IA64::STF4; break;
// Address is a global: compute the GOT-entry address off r1 (the global
// pointer), then load the global's real address from it.
2328 if(N.getOperand(2).getOpcode() == ISD::GlobalAddress) {
2329 unsigned dummy = MakeReg(MVT::i64);
2330 unsigned dummy2 = MakeReg(MVT::i64);
2331 BuildMI(BB, IA64::ADD, 2, dummy)
2332 .addGlobalAddress(cast<GlobalAddressSDNode>
2333 (N.getOperand(2))->getGlobal()).addReg(IA64::r1);
2334 BuildMI(BB, IA64::LD8, 1, dummy2).addReg(dummy);
// (if(!isBool), line elided) plain store of Tmp1 through dummy2.
2337 BuildMI(BB, Opc, 2).addReg(dummy2).addReg(Tmp1);
2338 else { // we are storing a bool, so emit a little pseudocode
2339 // to store a predicate register as one byte
2340 assert(Opc==IA64::ST1);
2341 unsigned dummy3 = MakeReg(MVT::i64);
2342 unsigned dummy4 = MakeReg(MVT::i64);
2343 BuildMI(BB, IA64::MOV, 1, dummy3).addReg(IA64::r0);
2344 BuildMI(BB, IA64::TPCADDIMM22, 2, dummy4)
2345 .addReg(dummy3).addImm(1).addReg(Tmp1); // if(Tmp1) dummy=0+1;
2346 BuildMI(BB, Opc, 2).addReg(dummy2).addReg(dummy4);
// Address is a frame index: materialize the frame address into a GR.
2348 } else if(N.getOperand(2).getOpcode() == ISD::FrameIndex) {
2350 // FIXME? (what about bools?)
2352 unsigned dummy = MakeReg(MVT::i64);
2353 BuildMI(BB, IA64::MOV, 1, dummy)
2354 .addFrameIndex(cast<FrameIndexSDNode>(N.getOperand(2))->getIndex());
2355 BuildMI(BB, Opc, 2).addReg(dummy).addReg(Tmp1);
2356 } else { // otherwise
// General case: select the address expression into Tmp2.
2357 Tmp2 = SelectExpr(N.getOperand(2)); //address
// (if(!isBool), line elided) plain store of Tmp1 through Tmp2.
2359 BuildMI(BB, Opc, 2).addReg(Tmp2).addReg(Tmp1);
2360 else { // we are storing a bool, so emit a little pseudocode
2361 // to store a predicate register as one byte
2362 assert(Opc==IA64::ST1);
2363 unsigned dummy3 = MakeReg(MVT::i64);
2364 unsigned dummy4 = MakeReg(MVT::i64);
2365 BuildMI(BB, IA64::MOV, 1, dummy3).addReg(IA64::r0);
2366 BuildMI(BB, IA64::TPCADDIMM22, 2, dummy4)
2367 .addReg(dummy3).addImm(1).addReg(Tmp1); // if(Tmp1) dummy=0+1;
2368 BuildMI(BB, Opc, 2).addReg(Tmp2).addReg(dummy4);
// ADJCALLSTACK{DOWN,UP}: pseudo-instructions bracketing calls; the
// constant adjustment amount is operand 1.
2374 case ISD::ADJCALLSTACKDOWN:
2375 case ISD::ADJCALLSTACKUP: {
2376 Select(N.getOperand(0));
2377 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
2379 Opc = N.getOpcode() == ISD::ADJCALLSTACKDOWN ? IA64::ADJUSTCALLSTACKDOWN :
2380 IA64::ADJUSTCALLSTACKUP;
2381 BuildMI(BB, Opc, 1).addImm(Tmp1);
// If control reaches here, a node slipped through the switch above.
2387 assert(0 && "GAME OVER. INSERT COIN?");
2391 /// createIA64PatternInstructionSelector - This pass converts an LLVM function
2392 /// into a machine code representation using pattern matching and a machine
2393 /// description file.
///
/// Factory entry point: returns a new ISel pass (caller takes ownership,
/// per the usual FunctionPass factory convention — the pass manager
/// deletes it). TM is the target machine the selector emits code for.
2395 FunctionPass *llvm::createIA64PatternInstructionSelector(TargetMachine &TM) {
2396 return new ISel(TM);