1 //===-- IA64ISelPattern.cpp - A pattern matching inst selector for IA64 ---===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Duraid Madina and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a pattern matching instruction selector for IA64.
12 //===----------------------------------------------------------------------===//
15 #include "IA64InstrBuilder.h"
16 #include "IA64RegisterInfo.h"
17 #include "IA64MachineFunctionInfo.h"
18 #include "llvm/Constants.h" // FIXME: REMOVE
19 #include "llvm/Function.h"
20 #include "llvm/CodeGen/MachineConstantPool.h" // FIXME: REMOVE
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/SelectionDAG.h"
24 #include "llvm/CodeGen/SelectionDAGISel.h"
25 #include "llvm/CodeGen/SSARegMap.h"
26 #include "llvm/Target/TargetData.h"
27 #include "llvm/Target/TargetLowering.h"
28 #include "llvm/Support/MathExtras.h"
29 #include "llvm/ADT/Statistic.h"
//===----------------------------------------------------------------------===//
// IA64TargetLowering - IA64 Implementation of the TargetLowering interface
//
// Tells the target-independent SelectionDAG machinery which register classes
// IA64 provides, which ISD operations must be Expanded/Promoted by the
// legalizer, and how arguments, calls and varargs are lowered.
//
// NOTE(review): this excerpt elides some lines of the class (access
// specifiers, closing braces of the inline helpers, the class's closing
// "};") -- consult the full file before editing.
class IA64TargetLowering : public TargetLowering {
  int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
  //int ReturnAddrIndex;            // FrameIndex for return slot.
  unsigned GP, SP, RP;              // FIXME - clean this mess up
  // NOTE(review): GP/SP/RP hold virtual registers created in LowerArguments
  // that shadow the physical r1/r12/rp so the restore* helpers below can
  // re-materialize them after calls.
  unsigned VirtGPR;    // this is public so it can be accessed in the selector
                       // for ISD::RET down below. add an accessor instead? FIXME

  IA64TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
    // register class for general registers
    addRegisterClass(MVT::i64, IA64::GRRegisterClass);
    // register class for FP registers
    addRegisterClass(MVT::f64, IA64::FPRegisterClass);
    // register class for predicate registers
    addRegisterClass(MVT::i1, IA64::PRRegisterClass);

    // Operations with no native IA64 pattern: have the legalizer rewrite
    // them in terms of simpler nodes.
    setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
    setOperationAction(ISD::BRTWOWAY_CC , MVT::Other, Expand);
    setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);

    // setcc results live in predicate registers (i1); shifts take i64 amounts.
    setSetCCResultType(MVT::i1);
    setShiftAmountType(MVT::i64);

    setOperationAction(ISD::EXTLOAD , MVT::i1 , Promote);

    setOperationAction(ISD::ZEXTLOAD , MVT::i1 , Expand);

    // No native sign-extending loads for sub-64-bit integer types.
    setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
    setOperationAction(ISD::SEXTLOAD , MVT::i8 , Expand);
    setOperationAction(ISD::SEXTLOAD , MVT::i16 , Expand);
    setOperationAction(ISD::SEXTLOAD , MVT::i32 , Expand);

    setOperationAction(ISD::FREM , MVT::f32 , Expand);
    setOperationAction(ISD::FREM , MVT::f64 , Expand);

    // NOTE(review): UREM on FP value types looks suspicious -- UREM is an
    // integer node; confirm these two lines are intentional.
    setOperationAction(ISD::UREM , MVT::f32 , Expand);
    setOperationAction(ISD::UREM , MVT::f64 , Expand);

    // Lower the memory intrinsics to libcalls.
    setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
    setOperationAction(ISD::MEMSET , MVT::Other, Expand);
    setOperationAction(ISD::MEMCPY , MVT::Other, Expand);

    setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
    setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);

    // We don't support sin/cos/sqrt
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

    //IA64 has these, but they are not implemented
    setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
    setOperationAction(ISD::CTLZ , MVT::i64 , Expand);

    // FIXME: implement mulhs (xma.h) and mulhu (xma.hu)
    setOperationAction(ISD::MULHS , MVT::i64 , Expand);
    setOperationAction(ISD::MULHU , MVT::i64 , Expand);

    // We don't have line number support yet.
    setOperationAction(ISD::LOCATION, MVT::Other, Expand);
    setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);

    computeRegisterProperties();

    // FP immediates the hardware can materialize directly (f0/f1 and their
    // negations via fneg).
    addLegalFPImmediate(+0.0);
    addLegalFPImmediate(+1.0);
    addLegalFPImmediate(-0.0);
    addLegalFPImmediate(-1.0);

  /// LowerArguments - This hook must be implemented to indicate how we should
  /// lower the arguments for the specified function, into the specified DAG.
  virtual std::vector<SDOperand>
  LowerArguments(Function &F, SelectionDAG &DAG);

  /// LowerCallTo - This hook lowers an abstract call to a function into an
  virtual std::pair<SDOperand, SDOperand>
  LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg, unsigned CC,
              bool isTailCall, SDOperand Callee, ArgListTy &Args,

  virtual SDOperand LowerVAStart(SDOperand Chain, SDOperand VAListP,
                                 Value *VAListV, SelectionDAG &DAG);

  virtual std::pair<SDOperand,SDOperand>
  LowerVAArg(SDOperand Chain, SDOperand VAListP, Value *VAListV,
             const Type *ArgTy, SelectionDAG &DAG);

  // The helpers below re-materialize the global pointer (r1), stack pointer
  // (r12) and return pointer (rp) from the shadow vregs saved in
  // LowerArguments; used by the selector after operations that clobber them.
  void restoreGP_SP_RP(MachineBasicBlock* BB)
  {
    BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(GP);
    BuildMI(BB, IA64::MOV, 1, IA64::r12).addReg(SP);
    BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);

  void restoreSP_RP(MachineBasicBlock* BB)
  {
    BuildMI(BB, IA64::MOV, 1, IA64::r12).addReg(SP);
    BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);

  void restoreRP(MachineBasicBlock* BB)
  {
    BuildMI(BB, IA64::MOV, 1, IA64::rp).addReg(RP);

  void restoreGP(MachineBasicBlock* BB)
  {
    BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(GP);
/// LowerArguments - Lower the incoming (formal) arguments of F into the DAG.
/// The first 8 arguments arrive in registers (integers in r32-r39, FP values
/// in f8-f15, FP regs consumed as-needed); arguments beyond 8 are loaded
/// from fixed stack slots at 16 + 8*(argno-8).  Also emits the prologue
/// boilerplate: PSEUDO_ALLOC, shadow copies of r1/r12/rp into GP/SP/RP, and
/// spills of the argument registers to the stack for varargs functions.
///
/// NOTE(review): this excerpt elides several lines (declarations of
/// count/argVreg/argPreg/argOpc, some case labels, the final return) --
/// consult the full file before editing.
std::vector<SDOperand>
IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  // add beautiful description of IA64 stack frame format
  // here (from intel 24535803.pdf most likely)
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Shadow virtual registers for the global pointer, stack pointer and
  // return pointer; copied from the physregs below, re-materialized later
  // by the restore* helpers.
  GP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  SP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  RP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));

  MachineBasicBlock& BB = MF.front();

  // Physical registers carrying the first 8 integer and FP arguments.
  unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
                         IA64::r36, IA64::r37, IA64::r38, IA64::r39};

  unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
                        IA64::F12,IA64::F13,IA64::F14, IA64::F15};

  unsigned used_FPArgs = 0; // how many FP args have been used so far?

  unsigned ArgOffset = 0;

  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
    {
      SDOperand newroot, argt;
      if(count < 8) { // need to fix this logic? maybe.

        switch (getValueType(I->getType())) {
          std::cerr << "ERROR in LowerArgs: unknown type "
                    << getValueType(I->getType()) << "\n";
          // fixme? (well, will need to for weird FP structy stuff,
          // see intel ABI docs)
          //XXX BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
          MF.addLiveIn(args_FP[used_FPArgs]); // mark this reg as liveIn
          // floating point args go into f8..f15 as-needed, the increment
          argVreg[count] =                    // is below..:
            MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::f64));
          // FP args go into f8..f15 as needed: (hence the ++)
          argPreg[count] = args_FP[used_FPArgs++];
          argOpc[count] = IA64::FMOV;
          argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), argVreg[count],
          // f32 arguments arrive widened to f64; round back down here.
          if (I->getType() == Type::FloatTy)
            argt = DAG.getNode(ISD::FP_ROUND, MVT::f32, argt);
        case MVT::i1: // NOTE: as far as C abi stuff goes,
                      // bools are just boring old ints
          //XXX BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
          MF.addLiveIn(args_int[count]); // mark this register as liveIn
            MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
          argPreg[count] = args_int[count];
          argOpc[count] = IA64::MOV;
            DAG.getCopyFromReg(DAG.getRoot(), argVreg[count], MVT::i64);
          // Narrow sub-64-bit integer types back down after the copy.
          if ( getValueType(I->getType()) != MVT::i64)
            argt = DAG.getNode(ISD::TRUNCATE, getValueType(I->getType()),

      } else { // more than 8 args go into the frame
        // Create the frame index object for this incoming parameter...
        ArgOffset = 16 + 8 * (count - 8);
        int FI = MFI->CreateFixedObject(8, ArgOffset);

        // Create the SelectionDAG nodes corresponding to a load
        //from this parameter
        SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
        argt = newroot = DAG.getLoad(getValueType(I->getType()),
                                     DAG.getEntryNode(), FIN, DAG.getSrcValue(NULL));

      DAG.setRoot(newroot.getValue(1));
      ArgValues.push_back(argt);

  // Create a vreg to hold the output of (what will become)
  // the "alloc" instruction
  VirtGPR = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  BuildMI(&BB, IA64::PSEUDO_ALLOC, 0, VirtGPR);
  // we create a PSEUDO_ALLOC (pseudo)instruction for now

  // IDEFs tell the register allocator these physregs are live on entry.
  BuildMI(&BB, IA64::IDEF, 0, IA64::r1);

  BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
  BuildMI(&BB, IA64::IDEF, 0, IA64::rp);

  // Save copies of r1/r12/rp into the shadow vregs so they survive calls.
  BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);

  BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
  BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);

  unsigned tempOffset=0;

  // if this is a varargs function, we simply lower llvm.va_start by
  // pointing to the first entry
  VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);

  // here we actually do the moving of args, and store them to the stack
  // too if this is a varargs function:
  for (int i = 0; i < count && i < 8; ++i) {
    BuildMI(&BB, argOpc[i], 1, argVreg[i]).addReg(argPreg[i]);
    // if this is a varargs function, we copy the input registers to the stack
    int FI = MFI->CreateFixedObject(8, tempOffset);
    tempOffset+=8; //XXX: is it safe to use r22 like this?
    BuildMI(&BB, IA64::MOV, 1, IA64::r22).addFrameIndex(FI);
    // FIXME: we should use st8.spill here, one day
    BuildMI(&BB, IA64::ST8, 1, IA64::r22).addReg(argPreg[i]);

  // Finally, inform the code generator which regs we return values in.
  // (see the ISD::RET: case down below)
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "i have no idea where to return this type!");
  case MVT::isVoid: break;
    // Integer results come back in r8, FP results in f8.
    MF.addLiveOut(IA64::r8);
    MF.addLiveOut(IA64::F8);
/// LowerCallTo - Lower an outgoing call.  Reserves the 16 bytes of scratch
/// space plus one 8-byte stack slot per argument beyond the first eight,
/// promotes small integer arguments to i64 and f32 to f64, and wraps the
/// call node in CALLSEQ_START/CALLSEQ_END.
///
/// NOTE(review): this excerpt elides some lines (the SelectionDAG &DAG
/// parameter, the else-branch setting outRegsUsed for <=8 args, and several
/// case labels in the promotion switch).
std::pair<SDOperand, SDOperand>
IA64TargetLowering::LowerCallTo(SDOperand Chain,
                                const Type *RetTy, bool isVarArg,
                                unsigned CallingConv, bool isTailCall,
                                SDOperand Callee, ArgListTy &Args,

  MachineFunction &MF = DAG.getMachineFunction();

  unsigned NumBytes = 16;   // always-reserved 16-byte scratch area
  unsigned outRegsUsed = 0;

  if (Args.size() > 8) {
    NumBytes += (Args.size() - 8) * 8;  // one 8-byte slot per excess arg
    outRegsUsed = Args.size();

  // FIXME? this WILL fail if we ever try to pass around an arg that
  // consumes more than a single output slot (a 'real' double, int128
  // some sort of aggregate etc.), as we'll underestimate how many 'outX'
  // registers we use. Hopefully, the assembler will notice.
  MF.getInfo<IA64FunctionInfo>()->outRegsUsed=
    std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);

  Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));

  std::vector<SDOperand> args_to_use;
  for (unsigned i = 0, e = Args.size(); i != e; ++i)

    switch (getValueType(Args[i].second)) {
    default: assert(0 && "unexpected argument type!");

      //promote to 64-bits, sign/zero extending based on type
      if(Args[i].second->isSigned())
        Args[i].first = DAG.getNode(ISD::SIGN_EXTEND, MVT::i64,
        Args[i].first = DAG.getNode(ISD::ZERO_EXTEND, MVT::i64,

      // f32 is widened to f64 for the call.
      Args[i].first = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Args[i].first);

    args_to_use.push_back(Args[i].first);

  // Result value types: the return value (if any), then the chain token.
  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);
  if (RetTyVT != MVT::isVoid)
    RetVals.push_back(RetTyVT);
  RetVals.push_back(MVT::Other);

  SDOperand TheCall = SDOperand(DAG.getCall(RetVals, Chain,
                                            Callee, args_to_use), 0);
  // The chain is result #1 when there is a return value, #0 otherwise.
  Chain = TheCall.getValue(RetTyVT != MVT::isVoid);
  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));
  return std::make_pair(TheCall, Chain);
// LowerVAStart - va_start simply stores the address of the
// VarArgsFrameIndex slot (set up in LowerArguments) into the va_list.
// NOTE(review): the return-type line ("SDOperand") of this definition is
// elided from this excerpt.
IA64TargetLowering::LowerVAStart(SDOperand Chain, SDOperand VAListP,
                                 Value *VAListV, SelectionDAG &DAG) {
  // vastart just stores the address of the VarArgsFrameIndex slot.
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64);
  return DAG.getNode(ISD::STORE, MVT::Other, Chain, FR,
                     VAListP, DAG.getSrcValue(VAListV));
// LowerVAArg - Lower va_arg: load the current cursor pointer from the
// va_list, load the argument value from that address, then advance the
// stored cursor by the slot size and write it back.
// NOTE(review): the declaration/initialization of 'Amt' is elided from this
// excerpt -- presumably the 8-byte slot size; confirm in the full file.
std::pair<SDOperand,SDOperand> IA64TargetLowering::
LowerVAArg(SDOperand Chain, SDOperand VAListP, Value *VAListV,
           const Type *ArgTy, SelectionDAG &DAG) {

  MVT::ValueType ArgVT = getValueType(ArgTy);
  // Load the current va_list cursor...
  SDOperand Val = DAG.getLoad(MVT::i64, Chain,
                              VAListP, DAG.getSrcValue(VAListV));
  // ...and load the argument it points at.
  SDOperand Result = DAG.getLoad(ArgVT, DAG.getEntryNode(), Val,
                                 DAG.getSrcValue(NULL));

  if (ArgVT == MVT::i32 || ArgVT == MVT::f32)

  assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
         "Other types should have been promoted for varargs!");

  // Advance the cursor past this argument and store it back to the va_list.
  Val = DAG.getNode(ISD::ADD, Val.getValueType(), Val,
                    DAG.getConstant(Amt, Val.getValueType()));
  Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
                      Val, VAListP, DAG.getSrcValue(VAListV));
  return std::make_pair(Result, Chain);
//===--------------------------------------------------------------------===//
/// ISel - IA64 specific code to select IA64 machine instructions for
/// SelectionDAG operations.
///
/// NOTE(review): this excerpt elides some lines of the class (access
/// specifiers, the constructor's initializer tail and body, the closing
/// brace).
class ISel : public SelectionDAGISel {
  /// IA64Lowering - This object fully describes how to lower LLVM code to an
  /// IA64-specific SelectionDAG.
  IA64TargetLowering IA64Lowering;
  SelectionDAG *ISelDAG; // Hack to support us having a dag->dag transform
                         // for sdiv and udiv until it is put into the future

  /// ExprMap - As shared expressions are codegen'd, we keep track of which
  /// vreg the value is produced in, so we only emit one copy of each compiled
  std::map<SDOperand, unsigned> ExprMap;
  // Chain/token values that have already been selected.
  std::set<SDOperand> LoweredTokens;

  ISel(TargetMachine &TM) : SelectionDAGISel(IA64Lowering), IA64Lowering(TM),

  /// InstructionSelectBasicBlock - This callback is invoked by
  /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
  virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);

  // SelectExpr returns the vreg holding N's value; Select emits side-effect
  // (chain) operations.
  unsigned SelectExpr(SDOperand N);
  void Select(SDOperand N);
  // a dag->dag to transform mul-by-constant-int to shifts+adds/subs
  SDOperand BuildConstmulSequence(SDOperand N);

  const char *getPassName() const { return "IA64 Instruction Selector"; }
/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {

  // Codegen the basic block.
  Select(DAG.getRoot());

  // Clear state used for selection.
  // NOTE(review): an ExprMap.clear() call and the closing brace appear to be
  // elided from this excerpt.
  LoweredTokens.clear();
// Strip leading '0' characters from a string (in place).
// Replaces the original character-at-a-time erase loop -- each erase(0, 1)
// shifted the whole remainder, making the loop quadratic -- with a single
// erase of the entire run.  A string consisting only of '0's becomes empty,
// matching the old loop's behavior.
void munchLeadingZeros(std::string& inString) {
  std::string::size_type firstNonZero = inString.find_first_not_of('0');
  // npos means the string is all zeros (or empty); erase(0, npos) clears
  // everything, which is exactly what the old loop did.
  inString.erase(0, firstNonZero);
}
// Strip trailing '0' characters from a string (in place).
// Fixes the original loop, which walked a signed index down past 0 and then
// read c_str()[-1] (undefined behavior) when the string was empty or all
// zeros, and which erased one character per iteration (quadratic).
void munchTrailingZeros(std::string& inString) {
  std::string::size_type lastNonZero = inString.find_last_not_of('0');
  // npos means all zeros (or empty): npos + 1 wraps to 0, so the whole
  // string is erased -- same net effect as the old loop, minus the UB.
  inString.erase(lastNonZero + 1);
}
// Return how many consecutive '0' characters are at the end of a string.
// The original walked backwards with a signed index and could read before
// the start of the buffer (undefined behavior) on an empty or all-zero
// string; this version is bounds-safe.  The parameter stays a non-const
// reference only to preserve the original signature -- the string is not
// modified.
unsigned int countTrailingZeros(std::string& inString) {
  std::string::size_type lastNonZero = inString.find_last_not_of('0');
  if (lastNonZero == std::string::npos)
    return inString.length();   // entirely zeros (or empty)
  return inString.length() - 1 - lastNonZero;
}
// booth encode a string of '1' and '0' characters (returns string of 'P' (+1)
// '0' and 'N' (-1) characters)
//
// Each run of consecutive '1's is rewritten using the identity
// 0111...1 == 1000...0 - 1: an 'N' at the low end of the run plus a carry
// above it, which shortens the shift/add sequence built for constant
// multiplies.  The input is LSB-first; the output is reversed at the end.
//
// NOTE(review): several lines are elided from this excerpt (declarations of
// curpos/runlength, the scanning-loop header, construction of
// replaceString, and the else/cleanup bodies).
void boothEncode(std::string inString, std::string& boothEncodedString) {

  int lim=inString.size();

  if(inString[curpos]=='1') { // if we see a '1', look for a run of them
    std::string replaceString="N";

    // find the run length
    for(;inString[curpos+runlength]=='1';runlength++) ;

    for(int i=0; i<runlength-1; i++)

    inString.replace(curpos, runlength+1, replaceString);

  } else { // a zero, we just keep chugging along

  // clean up (trim the string, reverse it and turn '1's into 'P's)
  munchTrailingZeros(inString);
  boothEncodedString="";

  for(int i=inString.size()-1;i>=0;i--)
      boothEncodedString+="P";
      boothEncodedString+=inString[i];
// this encodes stuff like (x=) "A << B [+-] C << D"
//
// One step of the constant-multiply expansion produced by lefevre() and
// turned into SHL/ADD/SUB DAG nodes by ISel::BuildConstmulSequence.  A
// firstVal/secondVal of 0 denotes the original multiplicand; i > 0 refers
// to the result of step i-1.
// NOTE(review): the isSub flag (assigned in lefevre(), read in
// BuildConstmulSequence) and the closing "};" are elided from this excerpt.
struct shiftaddblob {
  unsigned firstVal;    // A
  unsigned firstShift;  // B
  unsigned secondVal;   // C
  unsigned secondShift; // D
/* this implements Lefevre's "pattern-based" constant multiplication,
 * see "Multiplication by an Integer Constant", INRIA report 1999-06
 * TODO: implement a method to try rewriting P0N<->0PP / N0P<->0NN
 *       to get better booth encodings - this does help in practice
 * TODO: weight shifts appropriately (most architectures can't
 *       fuse a shift and an add for arbitrary shift amounts) */
//
// Recursively decomposes a Booth-encoded digit string ('P'/'0'/'N') into a
// sequence of shiftaddblob steps appended to 'ops'; the return value is the
// step index whose result holds this string's value.
// NOTE(review): this excerpt elides many lines of the function (variable
// declarations such as d/z/int_d/int_W/t/u/c/blob, loop bodies, the
// recursion's base case and the final return) -- do not edit from this
// view alone.
unsigned lefevre(const std::string inString,
                 std::vector<struct shiftaddblob> &ops) {
  std::string retstring;
  std::string s = inString;
  munchTrailingZeros(s);

  int length=s.length()-1;

  // Collect the digit positions of the 'P' (+1) and 'N' (-1) digits.
  std::vector<int> p,n;

  for(int i=0; i<=length; i++) {
    if (s.c_str()[length-i]=='P') {
    } else if (s.c_str()[length-i]=='N') {

  // w[d] counts occurrences of each (signed) distance d between digit
  // positions; the most frequent distance selects the pattern to factor out.
  std::map<const int, int> w;

  for(unsigned i=0; i<p.size(); i++) {
    for(unsigned j=0; j<i; j++) {

  for(unsigned i=1; i<n.size(); i++) {
    for(unsigned j=0; j<i; j++) {

  for(unsigned i=0; i<p.size(); i++) {
    for(unsigned j=0; j<n.size(); j++) {
      w[-abs(p[i]-n[j])]++;

  std::map<const int, int>::const_iterator ii;

  // Order the candidate distances by their occurrence counts.
  std::multimap<int, int> sorted_by_value;

  for(ii = w.begin(); ii!=w.end(); ii++)
    sorted_by_value.insert(std::pair<int, int>((*ii).second,(*ii).first));

  for (std::multimap<int, int>::iterator it = sorted_by_value.begin();
       it != sorted_by_value.end(); ++it) {
    d.push_back((*it).second);

  while(d.size()>0 && (w[int_d=d.back()] > int_W)) {

  // Substitute the chosen pattern: digit pairs z+1 apart are replaced by a
  // '0' at the first position and a lowercase placeholder at the second.
  for(unsigned base=0; base<retstring.size(); base++) {
    if( ((base+z+1) < retstring.size()) &&
        retstring.c_str()[base]=='P' &&
        retstring.c_str()[base+z+1]=='P')

      retstring.replace(base, 1, "0");
      retstring.replace(base+z+1, 1, "p");

  for(unsigned base=0; base<retstring.size(); base++) {
    if( ((base+z+1) < retstring.size()) &&
        retstring.c_str()[base]=='N' &&
        retstring.c_str()[base+z+1]=='N')

      retstring.replace(base, 1, "0");
      retstring.replace(base+z+1, 1, "n");

  // Mixed-sign pairs (P...N or N...P) at the same distance.
  for(unsigned base=0; base<retstring.size(); base++) {
    if( ((base+z+1) < retstring.size()) &&
        ((retstring.c_str()[base]=='P' &&
          retstring.c_str()[base+z+1]=='N') ||
         (retstring.c_str()[base]=='N' &&
          retstring.c_str()[base+z+1]=='P')) ) {

      if(retstring.c_str()[base]=='P') {
        retstring.replace(base, 1, "0");
        retstring.replace(base+z+1, 1, "p");
      } else { // retstring[base]=='N'
        retstring.replace(base, 1, "0");
        retstring.replace(base+z+1, 1, "n");

  } d.pop_back(); // hmm

  // t becomes the residue with placeholder digits zeroed out...
  for(unsigned i=0; i<t.length(); i++) {
    if(t.c_str()[i]=='p' || t.c_str()[i]=='n')
      t.replace(i, 1, "0");

  // ...and u becomes the factored-out pattern (placeholders promoted back
  // to real digits, original digits zeroed).
  for(unsigned i=0; i<u.length(); i++) {
    if(u[i]=='P' || u[i]=='N')
      u.replace(i, 1, "0");
      u.replace(i, 1, "P");
      u.replace(i, 1, "N");

  bool hit=(u[pos]=='N');

  // If the leading digit is negative, flip every digit's sign so recursion
  // always sees a positive-leading pattern.
  for(unsigned p=0; p<u.length(); p++) {
    bool isP=(u[p]=='P');
    bool isN=(u[p]=='N');

      u.replace(p, 1, "N");

      u.replace(p, 1, "P");

  munchLeadingZeros(u);

  // Recurse on the factored pattern...
  int i = lefevre(u, ops);

  blob.firstVal=i; blob.firstShift=c;

  blob.secondVal=i; blob.secondShift=0;

  munchLeadingZeros(t);

  // ...then on the residue, sign-normalizing it the same way.
  if(t.c_str()[0]!='P') {

    for(unsigned p=0; p<t.length(); p++) {
      bool isP=(t.c_str()[p]=='P');
      bool isN=(t.c_str()[p]=='N');

        t.replace(p, 1, "N");

        t.replace(p, 1, "P");

  int j = lefevre(t, ops);

  int trail=countTrailingZeros(u);
  blob.secondVal=i; blob.secondShift=trail;

  trail=countTrailingZeros(t);
  blob.firstVal=j; blob.firstShift=trail;

  // Combine the two partial results; which operand is subtracted depends on
  // the sign flips applied above.
  blob.isSub=false; // first + second

  blob.isSub=true; // first - second

  blob.isSub=true; // second - first
  int tmpval, tmpshift;
  tmpval=blob.firstVal;
  tmpshift=blob.firstShift;
  blob.firstVal=blob.secondVal;
  blob.firstShift=blob.secondShift;
  blob.secondVal=tmpval;
  blob.secondShift=tmpshift;
// BuildConstmulSequence - dag->dag transform: rewrite (mul x, constant) as a
// sequence of shifts and adds/subs.  The constant is normalized to a
// positive odd value (recording the sign flip and the power-of-two shift),
// Booth-encoded, decomposed by lefevre(), and the resulting shiftaddblob
// steps are emitted as SHL/ADD/SUB nodes.
// NOTE(review): this excerpt elides several lines (the declaration of
// flippedSign, the negation making 'constant' positive, the isSub branch
// header in the emission loop, and the final 'return finalresult;').
SDOperand ISel::BuildConstmulSequence(SDOperand N) {
  //FIXME: we should shortcut this stuff for multiplies by 2^n+1
  //       in particular, *3 is nicer as *2+1, not *4-1
  int64_t constant=cast<ConstantSDNode>(N.getOperand(1))->getValue();

  unsigned preliminaryShift=0;

  assert(constant != 0 && "erk, you're trying to multiply by constant zero\n");

  // first, we make the constant to multiply by positive

  // next, we make it odd.
  for(; (constant%2==0); preliminaryShift++)

  //OK, we have a positive, odd number of 64 bits or less. Convert it
  //to a binary string, constantString[0] is the LSB
  char constantString[65];
  for(int i=0; i<64; i++)
    constantString[i]='0'+((constant>>i)&0x1);
  constantString[64]=0;

  // now, Booth encode it
  std::string boothEncodedString;
  boothEncode(constantString, boothEncodedString);

  std::vector<struct shiftaddblob> ops;
  // do the transformation, filling out 'ops'
  lefevre(boothEncodedString, ops);

  assert(ops.size() < 80 && "constmul code has gone haywire\n");
  SDOperand results[80]; // temporary results (of adds/subs of shifts)

  // now turn 'ops' into DAG bits
  // (a firstVal/secondVal of 0 means "the multiplicand itself"; i > 0
  //  refers to the result of step i-1)
  for(unsigned i=0; i<ops.size(); i++) {
    SDOperand amt = ISelDAG->getConstant(ops[i].firstShift, MVT::i64);
    SDOperand val = (ops[i].firstVal == 0) ? N.getOperand(0) :
      results[ops[i].firstVal-1];
    SDOperand left = ISelDAG->getNode(ISD::SHL, MVT::i64, val, amt);
    amt = ISelDAG->getConstant(ops[i].secondShift, MVT::i64);
    val = (ops[i].secondVal == 0) ? N.getOperand(0) :
      results[ops[i].secondVal-1];
    SDOperand right = ISelDAG->getNode(ISD::SHL, MVT::i64, val, amt);

      results[i] = ISelDAG->getNode(ISD::SUB, MVT::i64, left, right);

      results[i] = ISelDAG->getNode(ISD::ADD, MVT::i64, left, right);

  // don't forget flippedSign and preliminaryShift!
  SDOperand shiftedresult;
  if(preliminaryShift) {
    // Reapply the factored-out power of two.
    SDOperand finalshift = ISelDAG->getConstant(preliminaryShift, MVT::i64);
    shiftedresult = ISelDAG->getNode(ISD::SHL, MVT::i64,
                                     results[ops.size()-1], finalshift);
  } else { // there was no preliminary divide-by-power-of-2 required
    shiftedresult = results[ops.size()-1];

  SDOperand finalresult;
  if(flippedSign) { // if we were multiplying by a negative constant:
    SDOperand zero = ISelDAG->getConstant(0, MVT::i64);
    // subtract the result from 0 to flip its sign
    finalresult = ISelDAG->getNode(ISD::SUB, MVT::i64, zero, shiftedresult);
  } else { // there was no preliminary multiply by -1 required
    finalresult = shiftedresult;
/// ponderIntegerDivisionBy - When handling integer divides, if the divide
/// is by a constant such that we can efficiently codegen it, this
/// function says what to do. Currently, it returns 0 if the division must
/// become a genuine divide, and 1 if the division can be turned into a
/// shift.
/// NOTE(review): the trailing "unsigned& Imm" parameter line and the
/// success-path body (setting Imm to the log2 and returning 1) are elided
/// from this excerpt.
static unsigned ponderIntegerDivisionBy(SDOperand N, bool isSigned,

  if (N.getOpcode() != ISD::Constant) return 0; // if not a divide by
                                                // a constant, give up.

  int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();

  if (isPowerOf2_64(v)) { // if a division by a power of two, say so

  return 0; // fallthrough
/// ponderIntegerAndWith - If this AND's constant operand is a low-bit mask
/// ((2^n)-1), report success and pass the mask width n back through Imm so
/// the AND can be selected as a bit-extract instruction.
static unsigned ponderIntegerAndWith(SDOperand N, unsigned& Imm) {
  if (N.getOpcode() != ISD::Constant) return 0; // if not ANDing with
                                                // a constant, give up.

  int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();

  if (isMask_64(v)) { // if ANDing with ((2^n)-1) for some n
    Imm = Log2_64(v) + 1;   // n == number of mask bits
    // NOTE(review): the success-path 'return 1;' and closing brace are
    // elided from this excerpt.

  return 0; // fallthrough
/// ponderIntegerAdditionWith - If this ADD's constant operand fits the
/// 14-bit signed immediate form, report success and pass the encoded
/// (masked) value back through Imm.
static unsigned ponderIntegerAdditionWith(SDOperand N, unsigned& Imm) {
  if (N.getOpcode() != ISD::Constant) return 0; // if not adding a
                                                // constant, give up.
  int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();

  if (v <= 8191 && v >= -8192) { // if this constants fits in 14 bits, say so
    Imm = v & 0x3FFF; // 14 bits
    // NOTE(review): the success-path 'return 1;' and closing brace are
    // elided from this excerpt.

  return 0; // fallthrough
/// ponderIntegerSubtractionFrom - If this constant fits the 8-bit signed
/// immediate form of subtraction, report success and pass the encoded
/// (masked) value back through Imm.
static unsigned ponderIntegerSubtractionFrom(SDOperand N, unsigned& Imm) {
  if (N.getOpcode() != ISD::Constant) return 0; // if not subtracting a
                                                // constant, give up.
  int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();

  if (v <= 127 && v >= -128) { // if this constants fits in 8 bits, say so
    Imm = v & 0xFF; // 8 bits
    // NOTE(review): the success-path 'return 1;' and closing brace are
    // elided from this excerpt.

  return 0; // fallthrough
922 unsigned ISel::SelectExpr(SDOperand N) {
924 unsigned Tmp1, Tmp2, Tmp3;
926 MVT::ValueType DestType = N.getValueType();
928 unsigned opcode = N.getOpcode();
930 SDNode *Node = N.Val;
933 if (Node->getOpcode() == ISD::CopyFromReg)
934 // Just use the specified register as our input.
935 return cast<RegisterSDNode>(Node->getOperand(1))->getReg();
937 unsigned &Reg = ExprMap[N];
940 if (N.getOpcode() != ISD::CALL && N.getOpcode() != ISD::TAILCALL)
941 Reg = Result = (N.getValueType() != MVT::Other) ?
942 MakeReg(N.getValueType()) : 1;
944 // If this is a call instruction, make sure to prepare ALL of the result
945 // values as well as the chain.
946 if (Node->getNumValues() == 1)
947 Reg = Result = 1; // Void call, just a chain.
949 Result = MakeReg(Node->getValueType(0));
950 ExprMap[N.getValue(0)] = Result;
951 for (unsigned i = 1, e = N.Val->getNumValues()-1; i != e; ++i)
952 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
953 ExprMap[SDOperand(Node, Node->getNumValues()-1)] = 1;
957 switch (N.getOpcode()) {
960 assert(0 && "Node not handled!\n");
962 case ISD::FrameIndex: {
963 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
964 BuildMI(BB, IA64::MOV, 1, Result).addFrameIndex(Tmp1);
968 case ISD::ConstantPool: {
969 Tmp1 = BB->getParent()->getConstantPool()->
970 getConstantPoolIndex(cast<ConstantPoolSDNode>(N)->get());
971 IA64Lowering.restoreGP(BB); // FIXME: do i really need this?
972 BuildMI(BB, IA64::ADD, 2, Result).addConstantPoolIndex(Tmp1)
977 case ISD::ConstantFP: {
978 Tmp1 = Result; // Intermediate Register
979 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
980 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
981 Tmp1 = MakeReg(MVT::f64);
983 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
984 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
985 BuildMI(BB, IA64::FMOV, 1, Tmp1).addReg(IA64::F0); // load 0.0
986 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
987 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
988 BuildMI(BB, IA64::FMOV, 1, Tmp1).addReg(IA64::F1); // load 1.0
990 assert(0 && "Unexpected FP constant!");
992 // we multiply by +1.0, negate (this is FNMA), and then add 0.0
993 BuildMI(BB, IA64::FNMA, 3, Result).addReg(Tmp1).addReg(IA64::F1)
998 case ISD::DYNAMIC_STACKALLOC: {
999 // Generate both result values.
1001 ExprMap[N.getValue(1)] = 1; // Generate the token
1003 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1005 // FIXME: We are currently ignoring the requested alignment for handling
1006 // greater than the stack alignment. This will need to be revisited at some
1007 // point. Align = N.getOperand(2);
1009 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
1010 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
1011 std::cerr << "Cannot allocate stack object with greater alignment than"
1012 << " the stack alignment yet!";
1017 Select(N.getOperand(0));
1018 if (ConstantSDNode* CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
1020 if (CN->getValue() < 32000)
1022 BuildMI(BB, IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
1023 .addImm(-CN->getValue());
1025 Tmp1 = SelectExpr(N.getOperand(1));
1026 // Subtract size from stack pointer, thereby allocating some space.
1027 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
1030 Tmp1 = SelectExpr(N.getOperand(1));
1031 // Subtract size from stack pointer, thereby allocating some space.
1032 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
1035 Select(N.getOperand(0));
1036 Tmp1 = SelectExpr(N.getOperand(1));
1037 // Subtract size from stack pointer, thereby allocating some space.
1038 BuildMI(BB, IA64::SUB, 2, IA64::r12).addReg(IA64::r12).addReg(Tmp1);
1039 // Put a pointer to the space into the result register, by copying the
1041 BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r12);
1046 Tmp1 = SelectExpr(N.getOperand(0)); //Cond
1047 Tmp2 = SelectExpr(N.getOperand(1)); //Use if TRUE
1048 Tmp3 = SelectExpr(N.getOperand(2)); //Use if FALSE
1050 unsigned bogoResult;
1052 switch (N.getOperand(1).getValueType()) {
1053 default: assert(0 &&
1054 "ISD::SELECT: 'select'ing something other than i1, i64 or f64!\n");
1055 // for i1, we load the condition into an integer register, then
1056 // conditionally copy Tmp2 and Tmp3 to Tmp1 in parallel (only one
1057 // of them will go through, since the integer register will hold
1060 bogoResult=MakeReg(MVT::i1);
1062 // load the condition into an integer register
1063 unsigned condReg=MakeReg(MVT::i64);
1064 unsigned dummy=MakeReg(MVT::i64);
1065 BuildMI(BB, IA64::MOV, 1, dummy).addReg(IA64::r0);
1066 BuildMI(BB, IA64::TPCADDIMM22, 2, condReg).addReg(dummy)
1067 .addImm(1).addReg(Tmp1);
1069 // initialize Result (bool) to false (hence UNC) and if
1070 // the select condition (condReg) is false (0), copy Tmp3
1071 BuildMI(BB, IA64::PCMPEQUNC, 3, bogoResult)
1072 .addReg(condReg).addReg(IA64::r0).addReg(Tmp3);
1074 // now, if the selection condition is true, write 1 to the
1075 // result if Tmp2 is 1
1076 BuildMI(BB, IA64::TPCMPNE, 3, Result).addReg(bogoResult)
1077 .addReg(condReg).addReg(IA64::r0).addReg(Tmp2);
1080 // for i64/f64, we just copy Tmp3 and then conditionally overwrite it
1081 // with Tmp2 if Tmp1 is true
1083 bogoResult=MakeReg(MVT::i64);
1084 BuildMI(BB, IA64::MOV, 1, bogoResult).addReg(Tmp3);
1085 BuildMI(BB, IA64::CMOV, 2, Result).addReg(bogoResult).addReg(Tmp2)
1089 bogoResult=MakeReg(MVT::f64);
1090 BuildMI(BB, IA64::FMOV, 1, bogoResult).addReg(Tmp3);
1091 BuildMI(BB, IA64::CFMOV, 2, Result).addReg(bogoResult).addReg(Tmp2)
1099 case ISD::Constant: {
1100 unsigned depositPos=0;
1101 unsigned depositLen=0;
1102 switch (N.getValueType()) {
1103 default: assert(0 && "Cannot use constants of this type!");
1104 case MVT::i1: { // if a bool, we don't 'load' so much as generate
1106 if(cast<ConstantSDNode>(N)->getValue()) // true:
1107 BuildMI(BB, IA64::CMPEQ, 2, Result).addReg(IA64::r0).addReg(IA64::r0);
1109 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(IA64::r0).addReg(IA64::r0);
1110 return Result; // early exit
1112 case MVT::i64: break;
1115 int64_t immediate = cast<ConstantSDNode>(N)->getValue();
1117 if(immediate==0) { // if the constant is just zero,
1118 BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r0); // just copy r0
1119 return Result; // early exit
1122 if (immediate <= 8191 && immediate >= -8192) {
1123 // if this constants fits in 14 bits, we use a mov the assembler will
1124 // turn into: "adds rDest=imm,r0" (and _not_ "andl"...)
1125 BuildMI(BB, IA64::MOVSIMM14, 1, Result).addSImm(immediate);
1126 return Result; // early exit
1129 if (immediate <= 2097151 && immediate >= -2097152) {
1130 // if this constants fits in 22 bits, we use a mov the assembler will
1131 // turn into: "addl rDest=imm,r0"
1132 BuildMI(BB, IA64::MOVSIMM22, 1, Result).addSImm(immediate);
1133 return Result; // early exit
1136 /* otherwise, our immediate is big, so we use movl */
1137 uint64_t Imm = immediate;
1138 BuildMI(BB, IA64::MOVLIMM64, 1, Result).addImm64(Imm);
1143 BuildMI(BB, IA64::IDEF, 0, Result);
1147 case ISD::GlobalAddress: {
1148 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
1149 unsigned Tmp1 = MakeReg(MVT::i64);
1151 BuildMI(BB, IA64::ADD, 2, Tmp1).addGlobalAddress(GV).addReg(IA64::r1);
1152 BuildMI(BB, IA64::LD8, 1, Result).addReg(Tmp1);
1157 case ISD::ExternalSymbol: {
1158 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
1159 // assert(0 && "sorry, but what did you want an ExternalSymbol for again?");
1160 BuildMI(BB, IA64::MOV, 1, Result).addExternalSymbol(Sym); // XXX
1164 case ISD::FP_EXTEND: {
1165 Tmp1 = SelectExpr(N.getOperand(0));
1166 BuildMI(BB, IA64::FMOV, 1, Result).addReg(Tmp1);
1170 case ISD::ANY_EXTEND:
1171 case ISD::ZERO_EXTEND: {
1172 Tmp1 = SelectExpr(N.getOperand(0)); // value
1174 assert(N.getOperand(0).getValueType() == MVT::i1 &&
1175 "Cannot zero-extend this type!");
1177 // if the predicate reg has 1, we want a '1' in our GR.
1178 unsigned dummy = MakeReg(MVT::i64);
1180 BuildMI(BB, IA64::MOV, 1, dummy).addReg(IA64::r0);
1181 // ...then conditionally (PR:Tmp1) add 1:
1182 BuildMI(BB, IA64::TPCADDIMM22, 2, Result).addReg(dummy)
1183 .addImm(1).addReg(Tmp1);
1184 return Result; // XXX early exit!
1187 case ISD::SIGN_EXTEND:
1188 assert(N.getOperand(0).getValueType() == MVT::i1 &&
1189 "Cannot zero-extend this type!");
1191 Tmp1 = SelectExpr(N.getOperand(0)); // value
1192 assert(0 && "don't know how to sign_extend from bool yet!");
1196 // we use the funky dep.z (deposit (zero)) instruction to deposit bits
1197 // of R0 appropriately.
1198 assert(N.getOperand(0).getValueType() == MVT::i64 &&
1199 N.getValueType() == MVT::i1 && "Unknown truncate!");
1200 Tmp1 = SelectExpr(N.getOperand(0));
1202 // if input (normal reg) is 0, 0!=0 -> false (0), if 1, 1!=0 ->true (1):
1203 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(Tmp1).addReg(IA64::r0);
1204 return Result; // XXX early exit!
1207 case ISD::FP_ROUND: {
1208 assert (DestType == MVT::f32 && N.getOperand(0).getValueType() == MVT::f64 &&
1209 "error: trying to FP_ROUND something other than f64 -> f32!\n");
1210 Tmp1 = SelectExpr(N.getOperand(0));
1211 BuildMI(BB, IA64::FADDS, 2, Result).addReg(Tmp1).addReg(IA64::F0);
1212 // we add 0.0 using a single precision add to do rounding
1217 // FIXME: the following 4 cases need cleaning
1218 case ISD::SINT_TO_FP: {
1219 Tmp1 = SelectExpr(N.getOperand(0));
1220 Tmp2 = MakeReg(MVT::f64);
1221 unsigned dummy = MakeReg(MVT::f64);
1222 BuildMI(BB, IA64::SETFSIG, 1, Tmp2).addReg(Tmp1);
1223 BuildMI(BB, IA64::FCVTXF, 1, dummy).addReg(Tmp2);
1224 BuildMI(BB, IA64::FNORMD, 1, Result).addReg(dummy);
1228 case ISD::UINT_TO_FP: {
1229 Tmp1 = SelectExpr(N.getOperand(0));
1230 Tmp2 = MakeReg(MVT::f64);
1231 unsigned dummy = MakeReg(MVT::f64);
1232 BuildMI(BB, IA64::SETFSIG, 1, Tmp2).addReg(Tmp1);
1233 BuildMI(BB, IA64::FCVTXUF, 1, dummy).addReg(Tmp2);
1234 BuildMI(BB, IA64::FNORMD, 1, Result).addReg(dummy);
1238 case ISD::FP_TO_SINT: {
1239 Tmp1 = SelectExpr(N.getOperand(0));
1240 Tmp2 = MakeReg(MVT::f64);
1241 BuildMI(BB, IA64::FCVTFXTRUNC, 1, Tmp2).addReg(Tmp1);
1242 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(Tmp2);
1246 case ISD::FP_TO_UINT: {
1247 Tmp1 = SelectExpr(N.getOperand(0));
1248 Tmp2 = MakeReg(MVT::f64);
1249 BuildMI(BB, IA64::FCVTFXUTRUNC, 1, Tmp2).addReg(Tmp1);
1250 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(Tmp2);
1255 if (N.getOperand(0).getOpcode() == ISD::FMUL &&
1256 N.getOperand(0).Val->hasOneUse()) { // if we can fold this add
1257 // into an fma, do so:
1258 // ++FusedFP; // Statistic
1259 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
1260 Tmp2 = SelectExpr(N.getOperand(0).getOperand(1));
1261 Tmp3 = SelectExpr(N.getOperand(1));
1262 BuildMI(BB, IA64::FMA, 3, Result).addReg(Tmp1).addReg(Tmp2).addReg(Tmp3);
1263 return Result; // early exit
1266 //else, fallthrough:
1267 Tmp1 = SelectExpr(N.getOperand(0));
1268 Tmp2 = SelectExpr(N.getOperand(1));
1269 BuildMI(BB, IA64::FADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
1274 if (N.getOperand(0).getOpcode() == ISD::SHL &&
1275 N.getOperand(0).Val->hasOneUse()) { // if we might be able to fold
1276 // this add into a shladd, try:
1277 ConstantSDNode *CSD = NULL;
1278 if((CSD = dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) &&
1279 (CSD->getValue() >= 1) && (CSD->getValue() <= 4) ) { // we can:
1281 // ++FusedSHLADD; // Statistic
1282 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
1283 int shl_amt = CSD->getValue();
1284 Tmp3 = SelectExpr(N.getOperand(1));
1286 BuildMI(BB, IA64::SHLADD, 3, Result)
1287 .addReg(Tmp1).addImm(shl_amt).addReg(Tmp3);
1288 return Result; // early exit
1292 //else, fallthrough:
1293 Tmp1 = SelectExpr(N.getOperand(0));
1294 switch (ponderIntegerAdditionWith(N.getOperand(1), Tmp3)) {
1295 case 1: // adding a constant that's 14 bits
1296 BuildMI(BB, IA64::ADDIMM14, 2, Result).addReg(Tmp1).addSImm(Tmp3);
1297 return Result; // early exit
1298 } // fallthrough and emit a reg+reg ADD:
1299 Tmp2 = SelectExpr(N.getOperand(1));
1300 BuildMI(BB, IA64::ADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
1305 Tmp1 = SelectExpr(N.getOperand(0));
1306 Tmp2 = SelectExpr(N.getOperand(1));
1307 BuildMI(BB, IA64::FMPY, 2, Result).addReg(Tmp1).addReg(Tmp2);
1313 /* FIXME if(N.getOperand(1).getOpcode() != ISD::Constant) { // if not a const mul
1315 // boring old integer multiply with xma
1316 Tmp1 = SelectExpr(N.getOperand(0));
1317 Tmp2 = SelectExpr(N.getOperand(1));
1319 unsigned TempFR1=MakeReg(MVT::f64);
1320 unsigned TempFR2=MakeReg(MVT::f64);
1321 unsigned TempFR3=MakeReg(MVT::f64);
1322 BuildMI(BB, IA64::SETFSIG, 1, TempFR1).addReg(Tmp1);
1323 BuildMI(BB, IA64::SETFSIG, 1, TempFR2).addReg(Tmp2);
1324 BuildMI(BB, IA64::XMAL, 1, TempFR3).addReg(TempFR1).addReg(TempFR2)
1326 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(TempFR3);
1327 return Result; // early exit
1328 /* FIXME } else { // we are multiplying by an integer constant! yay
1329 return Reg = SelectExpr(BuildConstmulSequence(N)); // avert your eyes!
1334 if(N.getOperand(0).getOpcode() == ISD::FMUL &&
1335 N.getOperand(0).Val->hasOneUse()) { // if we can fold this sub
1336 // into an fms, do so:
1337 // ++FusedFP; // Statistic
1338 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
1339 Tmp2 = SelectExpr(N.getOperand(0).getOperand(1));
1340 Tmp3 = SelectExpr(N.getOperand(1));
1341 BuildMI(BB, IA64::FMS, 3, Result).addReg(Tmp1).addReg(Tmp2).addReg(Tmp3);
1342 return Result; // early exit
1345 Tmp2 = SelectExpr(N.getOperand(1));
1346 Tmp1 = SelectExpr(N.getOperand(0));
1347 BuildMI(BB, IA64::FSUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
1351 Tmp2 = SelectExpr(N.getOperand(1));
1352 switch (ponderIntegerSubtractionFrom(N.getOperand(0), Tmp3)) {
1353 case 1: // subtracting *from* an 8 bit constant:
1354 BuildMI(BB, IA64::SUBIMM8, 2, Result).addSImm(Tmp3).addReg(Tmp2);
1355 return Result; // early exit
1356 } // fallthrough and emit a reg+reg SUB:
1357 Tmp1 = SelectExpr(N.getOperand(0));
1358 BuildMI(BB, IA64::SUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
1363 Tmp1 = SelectExpr(N.getOperand(0));
1364 assert(DestType == MVT::f64 && "trying to fabs something other than f64?");
1365 BuildMI(BB, IA64::FABS, 1, Result).addReg(Tmp1);
1370 assert(DestType == MVT::f64 && "trying to fneg something other than f64?");
1372 if (ISD::FABS == N.getOperand(0).getOpcode()) { // && hasOneUse()?
1373 Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
1374 BuildMI(BB, IA64::FNEGABS, 1, Result).addReg(Tmp1); // fold in abs
1376 Tmp1 = SelectExpr(N.getOperand(0));
1377 BuildMI(BB, IA64::FNEG, 1, Result).addReg(Tmp1); // plain old fneg
1384 switch (N.getValueType()) {
1385 default: assert(0 && "Cannot AND this type!");
1386 case MVT::i1: { // if a bool, we emit a pseudocode AND
1387 unsigned pA = SelectExpr(N.getOperand(0));
1388 unsigned pB = SelectExpr(N.getOperand(1));
1390 /* our pseudocode for AND is:
1392 (pA) cmp.eq.unc pC,p0 = r0,r0 // pC = pA
1393 cmp.eq pTemp,p0 = r0,r0 // pTemp = NOT pB
1395 (pB) cmp.ne pTemp,p0 = r0,r0
1397 (pTemp)cmp.ne pC,p0 = r0,r0 // if (NOT pB) pC = 0
1400 unsigned pTemp = MakeReg(MVT::i1);
1402 unsigned bogusTemp1 = MakeReg(MVT::i1);
1403 unsigned bogusTemp2 = MakeReg(MVT::i1);
1404 unsigned bogusTemp3 = MakeReg(MVT::i1);
1405 unsigned bogusTemp4 = MakeReg(MVT::i1);
1407 BuildMI(BB, IA64::PCMPEQUNC, 3, bogusTemp1)
1408 .addReg(IA64::r0).addReg(IA64::r0).addReg(pA);
1409 BuildMI(BB, IA64::CMPEQ, 2, bogusTemp2)
1410 .addReg(IA64::r0).addReg(IA64::r0);
1411 BuildMI(BB, IA64::TPCMPNE, 3, pTemp)
1412 .addReg(bogusTemp2).addReg(IA64::r0).addReg(IA64::r0).addReg(pB);
1413 BuildMI(BB, IA64::TPCMPNE, 3, Result)
1414 .addReg(bogusTemp1).addReg(IA64::r0).addReg(IA64::r0).addReg(pTemp);
1418 // if not a bool, we just AND away:
1423 Tmp1 = SelectExpr(N.getOperand(0));
1424 switch (ponderIntegerAndWith(N.getOperand(1), Tmp3)) {
1425 case 1: // ANDing a constant that is 2^n-1 for some n
1427 case 8: // if AND 0x00000000000000FF, be quaint and use zxt1
1428 BuildMI(BB, IA64::ZXT1, 1, Result).addReg(Tmp1);
1430 case 16: // if AND 0x000000000000FFFF, be quaint and use zxt2
1431 BuildMI(BB, IA64::ZXT2, 1, Result).addReg(Tmp1);
1433 case 32: // if AND 0x00000000FFFFFFFF, be quaint and use zxt4
1434 BuildMI(BB, IA64::ZXT4, 1, Result).addReg(Tmp1);
1436 default: // otherwise, use dep.z to paste zeros
1437 // FIXME: assert the dep.z is in bounds
1438 BuildMI(BB, IA64::DEPZ, 3, Result).addReg(Tmp1)
1439 .addImm(0).addImm(Tmp3);
1442 return Result; // early exit
1443 } // fallthrough and emit a simple AND:
1444 Tmp2 = SelectExpr(N.getOperand(1));
1445 BuildMI(BB, IA64::AND, 2, Result).addReg(Tmp1).addReg(Tmp2);
1452 switch (N.getValueType()) {
1453 default: assert(0 && "Cannot OR this type!");
1454 case MVT::i1: { // if a bool, we emit a pseudocode OR
1455 unsigned pA = SelectExpr(N.getOperand(0));
1456 unsigned pB = SelectExpr(N.getOperand(1));
1458 unsigned pTemp1 = MakeReg(MVT::i1);
1460 /* our pseudocode for OR is:
1466 (pA) cmp.eq.unc pC,p0 = r0,r0 // pC = pA
1468 (pB) cmp.eq pC,p0 = r0,r0 // if (pB) pC = 1
1471 BuildMI(BB, IA64::PCMPEQUNC, 3, pTemp1)
1472 .addReg(IA64::r0).addReg(IA64::r0).addReg(pA);
1473 BuildMI(BB, IA64::TPCMPEQ, 4, Result)
1474 .addReg(pTemp1).addReg(IA64::r0).addReg(IA64::r0).addReg(pB);
1477 // if not a bool, we just OR away:
1482 Tmp1 = SelectExpr(N.getOperand(0));
1483 Tmp2 = SelectExpr(N.getOperand(1));
1484 BuildMI(BB, IA64::OR, 2, Result).addReg(Tmp1).addReg(Tmp2);
1492 switch (N.getValueType()) {
1493 default: assert(0 && "Cannot XOR this type!");
1494 case MVT::i1: { // if a bool, we emit a pseudocode XOR
1495 unsigned pY = SelectExpr(N.getOperand(0));
1496 unsigned pZ = SelectExpr(N.getOperand(1));
1498 /* one possible routine for XOR is:
1500 // Compute px = py ^ pz
1501 // using sum of products: px = (py & !pz) | (pz & !py)
1502 // Uses 5 instructions in 3 cycles.
1504 (pz) cmp.eq.unc px = r0, r0 // px = pz
1505 (py) cmp.eq.unc pt = r0, r0 // pt = py
1508 (pt) cmp.ne.and px = r0, r0 // px = px & !pt (px = pz & !pt)
1509 (pz) cmp.ne.and pt = r0, r0 // pt = pt & !pz
1513 (pt) cmp.eq.or px = r0, r0 // px = px | pt
1515 *** Another, which we use here, requires one scratch GR. it is:
1517 mov rt = 0 // initialize rt off critical path
1521 (pz) cmp.eq.unc px = r0, r0 // px = pz
1522 (pz) mov rt = 1 // rt = pz
1525 (py) cmp.ne px = 1, rt // if (py) px = !pz
1527 .. these routines kindly provided by Jim Hull
1529 unsigned rt = MakeReg(MVT::i64);
1531 // these two temporaries will never actually appear,
1532 // due to the two-address form of some of the instructions below
1533 unsigned bogoPR = MakeReg(MVT::i1); // becomes Result
1534 unsigned bogoGR = MakeReg(MVT::i64); // becomes rt
1536 BuildMI(BB, IA64::MOV, 1, bogoGR).addReg(IA64::r0);
1537 BuildMI(BB, IA64::PCMPEQUNC, 3, bogoPR)
1538 .addReg(IA64::r0).addReg(IA64::r0).addReg(pZ);
1539 BuildMI(BB, IA64::TPCADDIMM22, 2, rt)
1540 .addReg(bogoGR).addImm(1).addReg(pZ);
1541 BuildMI(BB, IA64::TPCMPIMM8NE, 3, Result)
1542 .addReg(bogoPR).addImm(1).addReg(rt).addReg(pY);
1545 // if not a bool, we just XOR away:
1550 Tmp1 = SelectExpr(N.getOperand(0));
1551 Tmp2 = SelectExpr(N.getOperand(1));
1552 BuildMI(BB, IA64::XOR, 2, Result).addReg(Tmp1).addReg(Tmp2);
1560 Tmp1 = SelectExpr(N.getOperand(0));
1561 BuildMI(BB, IA64::POPCNT, 1, Result).addReg(Tmp1);
1566 Tmp1 = SelectExpr(N.getOperand(0));
1567 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1568 Tmp2 = CN->getValue();
1569 BuildMI(BB, IA64::SHLI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1571 Tmp2 = SelectExpr(N.getOperand(1));
1572 BuildMI(BB, IA64::SHL, 2, Result).addReg(Tmp1).addReg(Tmp2);
1578 Tmp1 = SelectExpr(N.getOperand(0));
1579 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1580 Tmp2 = CN->getValue();
1581 BuildMI(BB, IA64::SHRUI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1583 Tmp2 = SelectExpr(N.getOperand(1));
1584 BuildMI(BB, IA64::SHRU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1590 Tmp1 = SelectExpr(N.getOperand(0));
1591 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1592 Tmp2 = CN->getValue();
1593 BuildMI(BB, IA64::SHRSI, 2, Result).addReg(Tmp1).addImm(Tmp2);
1595 Tmp2 = SelectExpr(N.getOperand(1));
1596 BuildMI(BB, IA64::SHRS, 2, Result).addReg(Tmp1).addReg(Tmp2);
1607 Tmp1 = SelectExpr(N.getOperand(0));
1608 Tmp2 = SelectExpr(N.getOperand(1));
1612 if(DestType == MVT::f64) // XXX: we're not gonna be fed MVT::f32, are we?
1615 bool isModulus=false; // is it a division or a modulus?
1616 bool isSigned=false;
1618 switch(N.getOpcode()) {
1620 case ISD::SDIV: isModulus=false; isSigned=true; break;
1621 case ISD::UDIV: isModulus=false; isSigned=false; break;
1623 case ISD::SREM: isModulus=true; isSigned=true; break;
1624 case ISD::UREM: isModulus=true; isSigned=false; break;
1627 if(!isModulus && !isFP) { // if this is an integer divide,
1628 switch (ponderIntegerDivisionBy(N.getOperand(1), isSigned, Tmp3)) {
1629 case 1: // division by a constant that's a power of 2
1630 Tmp1 = SelectExpr(N.getOperand(0));
1631 if(isSigned) { // argument could be negative, so emit some code:
1632 unsigned divAmt=Tmp3;
1633 unsigned tempGR1=MakeReg(MVT::i64);
1634 unsigned tempGR2=MakeReg(MVT::i64);
1635 unsigned tempGR3=MakeReg(MVT::i64);
1636 BuildMI(BB, IA64::SHRS, 2, tempGR1)
1637 .addReg(Tmp1).addImm(divAmt-1);
1638 BuildMI(BB, IA64::EXTRU, 3, tempGR2)
1639 .addReg(tempGR1).addImm(64-divAmt).addImm(divAmt);
1640 BuildMI(BB, IA64::ADD, 2, tempGR3)
1641 .addReg(Tmp1).addReg(tempGR2);
1642 BuildMI(BB, IA64::SHRS, 2, Result)
1643 .addReg(tempGR3).addImm(divAmt);
1645 else // unsigned div-by-power-of-2 becomes a simple shift right:
1646 BuildMI(BB, IA64::SHRU, 2, Result).addReg(Tmp1).addImm(Tmp3);
1647 return Result; // early exit
1651 unsigned TmpPR=MakeReg(MVT::i1); // we need two scratch
1652 unsigned TmpPR2=MakeReg(MVT::i1); // predicate registers,
1653 unsigned TmpF1=MakeReg(MVT::f64); // and one metric truckload of FP regs.
1654 unsigned TmpF2=MakeReg(MVT::f64); // lucky we have IA64?
1655 unsigned TmpF3=MakeReg(MVT::f64); // well, the real FIXME is to have
1656 unsigned TmpF4=MakeReg(MVT::f64); // isTwoAddress forms of these
1657 unsigned TmpF5=MakeReg(MVT::f64); // FP instructions so we can end up with
1658 unsigned TmpF6=MakeReg(MVT::f64); // stuff like setf.sig f10=f10 etc.
1659 unsigned TmpF7=MakeReg(MVT::f64);
1660 unsigned TmpF8=MakeReg(MVT::f64);
1661 unsigned TmpF9=MakeReg(MVT::f64);
1662 unsigned TmpF10=MakeReg(MVT::f64);
1663 unsigned TmpF11=MakeReg(MVT::f64);
1664 unsigned TmpF12=MakeReg(MVT::f64);
1665 unsigned TmpF13=MakeReg(MVT::f64);
1666 unsigned TmpF14=MakeReg(MVT::f64);
1667 unsigned TmpF15=MakeReg(MVT::f64);
1669 // OK, emit some code:
1672 // first, load the inputs into FP regs.
1673 BuildMI(BB, IA64::SETFSIG, 1, TmpF1).addReg(Tmp1);
1674 BuildMI(BB, IA64::SETFSIG, 1, TmpF2).addReg(Tmp2);
1676 // next, convert the inputs to FP
1678 BuildMI(BB, IA64::FCVTXF, 1, TmpF3).addReg(TmpF1);
1679 BuildMI(BB, IA64::FCVTXF, 1, TmpF4).addReg(TmpF2);
1681 BuildMI(BB, IA64::FCVTXUFS1, 1, TmpF3).addReg(TmpF1);
1682 BuildMI(BB, IA64::FCVTXUFS1, 1, TmpF4).addReg(TmpF2);
1685 } else { // this is an FP divide/remainder, so we 'leak' some temp
1686 // regs and assign TmpF3=Tmp1, TmpF4=Tmp2
1691 // we start by computing an approximate reciprocal (good to 9 bits?)
1692 // note, this instruction writes _both_ TmpF5 (answer) and TmpPR (predicate)
1693 BuildMI(BB, IA64::FRCPAS1, 4)
1694 .addReg(TmpF5, MachineOperand::Def)
1695 .addReg(TmpPR, MachineOperand::Def)
1696 .addReg(TmpF3).addReg(TmpF4);
1698 if(!isModulus) { // if this is a divide, we worry about div-by-zero
1699 unsigned bogusPR=MakeReg(MVT::i1); // won't appear, due to twoAddress
1701 BuildMI(BB, IA64::CMPEQ, 2, bogusPR).addReg(IA64::r0).addReg(IA64::r0);
1702 BuildMI(BB, IA64::TPCMPNE, 3, TmpPR2).addReg(bogusPR)
1703 .addReg(IA64::r0).addReg(IA64::r0).addReg(TmpPR);
1706 // now we apply newton's method, thrice! (FIXME: this is ~72 bits of
1707 // precision, don't need this much for f32/i32)
1708 BuildMI(BB, IA64::CFNMAS1, 4, TmpF6)
1709 .addReg(TmpF4).addReg(TmpF5).addReg(IA64::F1).addReg(TmpPR);
1710 BuildMI(BB, IA64::CFMAS1, 4, TmpF7)
1711 .addReg(TmpF3).addReg(TmpF5).addReg(IA64::F0).addReg(TmpPR);
1712 BuildMI(BB, IA64::CFMAS1, 4, TmpF8)
1713 .addReg(TmpF6).addReg(TmpF6).addReg(IA64::F0).addReg(TmpPR);
1714 BuildMI(BB, IA64::CFMAS1, 4, TmpF9)
1715 .addReg(TmpF6).addReg(TmpF7).addReg(TmpF7).addReg(TmpPR);
1716 BuildMI(BB, IA64::CFMAS1, 4,TmpF10)
1717 .addReg(TmpF6).addReg(TmpF5).addReg(TmpF5).addReg(TmpPR);
1718 BuildMI(BB, IA64::CFMAS1, 4,TmpF11)
1719 .addReg(TmpF8).addReg(TmpF9).addReg(TmpF9).addReg(TmpPR);
1720 BuildMI(BB, IA64::CFMAS1, 4,TmpF12)
1721 .addReg(TmpF8).addReg(TmpF10).addReg(TmpF10).addReg(TmpPR);
1722 BuildMI(BB, IA64::CFNMAS1, 4,TmpF13)
1723 .addReg(TmpF4).addReg(TmpF11).addReg(TmpF3).addReg(TmpPR);
1725 // FIXME: this is unfortunate :(
1726 // the story is that the dest reg of the fnma above and the fma below
1727 // (and therefore possibly the src of the fcvt.fx[u] as well) cannot
1728 // be the same register, or this code breaks if the first argument is
1729 // zero. (e.g. without this hack, 0%8 yields -64, not 0.)
1730 BuildMI(BB, IA64::CFMAS1, 4,TmpF14)
1731 .addReg(TmpF13).addReg(TmpF12).addReg(TmpF11).addReg(TmpPR);
1733 if(isModulus) { // XXX: fragile! fixes _only_ mod, *breaks* div! !
1734 BuildMI(BB, IA64::IUSE, 1).addReg(TmpF13); // hack :(
1738 // round to an integer
1740 BuildMI(BB, IA64::FCVTFXTRUNCS1, 1, TmpF15).addReg(TmpF14);
1742 BuildMI(BB, IA64::FCVTFXUTRUNCS1, 1, TmpF15).addReg(TmpF14);
1744 BuildMI(BB, IA64::FMOV, 1, TmpF15).addReg(TmpF14);
1745 // EXERCISE: can you see why TmpF15=TmpF14 does not work here, and
1746 // we really do need the above FMOV? ;)
1750 if(isFP) { // extra worrying about div-by-zero
1751 unsigned bogoResult=MakeReg(MVT::f64);
1753 // we do a 'conditional fmov' (of the correct result, depending
1754 // on how the frcpa predicate turned out)
1755 BuildMI(BB, IA64::PFMOV, 2, bogoResult)
1756 .addReg(TmpF12).addReg(TmpPR2);
1757 BuildMI(BB, IA64::CFMOV, 2, Result)
1758 .addReg(bogoResult).addReg(TmpF15).addReg(TmpPR);
1761 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(TmpF15);
1763 } else { // this is a modulus
1765 // answer = q * (-b) + a
1766 unsigned ModulusResult = MakeReg(MVT::f64);
1767 unsigned TmpF = MakeReg(MVT::f64);
1768 unsigned TmpI = MakeReg(MVT::i64);
1770 BuildMI(BB, IA64::SUB, 2, TmpI).addReg(IA64::r0).addReg(Tmp2);
1771 BuildMI(BB, IA64::SETFSIG, 1, TmpF).addReg(TmpI);
1772 BuildMI(BB, IA64::XMAL, 3, ModulusResult)
1773 .addReg(TmpF15).addReg(TmpF).addReg(TmpF1);
1774 BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(ModulusResult);
1775 } else { // FP modulus! The horror... the horror....
1776 assert(0 && "sorry, no FP modulus just yet!\n!\n");
1783 case ISD::SIGN_EXTEND_INREG: {
1784 Tmp1 = SelectExpr(N.getOperand(0));
1785 switch(cast<VTSDNode>(Node->getOperand(1))->getVT()) {
1788 assert(0 && "don't know how to sign extend this type");
1790 case MVT::i8: Opc = IA64::SXT1; break;
1791 case MVT::i16: Opc = IA64::SXT2; break;
1792 case MVT::i32: Opc = IA64::SXT4; break;
1794 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1799 Tmp1 = SelectExpr(N.getOperand(0));
1800 ISD::CondCode CC = cast<CondCodeSDNode>(Node->getOperand(2))->get();
1801 if (MVT::isInteger(N.getOperand(0).getValueType())) {
1803 if(ConstantSDNode *CSDN =
1804 dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1805 // if we are comparing against a constant zero
1806 if(CSDN->getValue()==0)
1807 Tmp2 = IA64::r0; // then we can just compare against r0
1809 Tmp2 = SelectExpr(N.getOperand(1));
1810 } else // not comparing against a constant
1811 Tmp2 = SelectExpr(N.getOperand(1));
1814 default: assert(0 && "Unknown integer comparison!");
1816 BuildMI(BB, IA64::CMPEQ, 2, Result).addReg(Tmp1).addReg(Tmp2);
1819 BuildMI(BB, IA64::CMPGT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1822 BuildMI(BB, IA64::CMPGE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1825 BuildMI(BB, IA64::CMPLT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1828 BuildMI(BB, IA64::CMPLE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1831 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1834 BuildMI(BB, IA64::CMPLTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1837 BuildMI(BB, IA64::CMPGTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1840 BuildMI(BB, IA64::CMPLEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1843 BuildMI(BB, IA64::CMPGEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1846 } else { // if not integer, should be FP.
1847 assert(N.getOperand(0).getValueType() != MVT::f32 &&
1848 "error: SETCC should have had incoming f32 promoted to f64!\n");
1850 if(ConstantFPSDNode *CFPSDN =
1851 dyn_cast<ConstantFPSDNode>(N.getOperand(1))) {
1853 // if we are comparing against a constant +0.0 or +1.0
1854 if(CFPSDN->isExactlyValue(+0.0))
1855 Tmp2 = IA64::F0; // then we can just compare against f0
1856 else if(CFPSDN->isExactlyValue(+1.0))
1857 Tmp2 = IA64::F1; // or f1
1859 Tmp2 = SelectExpr(N.getOperand(1));
1860 } else // not comparing against a constant
1861 Tmp2 = SelectExpr(N.getOperand(1));
1864 default: assert(0 && "Unknown FP comparison!");
1866 BuildMI(BB, IA64::FCMPEQ, 2, Result).addReg(Tmp1).addReg(Tmp2);
1869 BuildMI(BB, IA64::FCMPGT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1872 BuildMI(BB, IA64::FCMPGE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1875 BuildMI(BB, IA64::FCMPLT, 2, Result).addReg(Tmp1).addReg(Tmp2);
1878 BuildMI(BB, IA64::FCMPLE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1881 BuildMI(BB, IA64::FCMPNE, 2, Result).addReg(Tmp1).addReg(Tmp2);
1884 BuildMI(BB, IA64::FCMPLTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1887 BuildMI(BB, IA64::FCMPGTU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1890 BuildMI(BB, IA64::FCMPLEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1893 BuildMI(BB, IA64::FCMPGEU, 2, Result).addReg(Tmp1).addReg(Tmp2);
1903 // Make sure we generate both values.
1905 ExprMap[N.getValue(1)] = 1; // Generate the token
1907 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1911 if(opcode == ISD::LOAD) { // this is a LOAD
1912 switch (Node->getValueType(0)) {
1913 default: assert(0 && "Cannot load this type!");
1914 case MVT::i1: Opc = IA64::LD1; isBool=true; break;
1915 // FIXME: for now, we treat bool loads the same as i8 loads */
1916 case MVT::i8: Opc = IA64::LD1; break;
1917 case MVT::i16: Opc = IA64::LD2; break;
1918 case MVT::i32: Opc = IA64::LD4; break;
1919 case MVT::i64: Opc = IA64::LD8; break;
1921 case MVT::f32: Opc = IA64::LDF4; break;
1922 case MVT::f64: Opc = IA64::LDF8; break;
1924 } else { // this is an EXTLOAD or ZEXTLOAD
1925 MVT::ValueType TypeBeingLoaded =
1926 cast<VTSDNode>(Node->getOperand(3))->getVT();
1927 switch (TypeBeingLoaded) {
1928 default: assert(0 && "Cannot extload/zextload this type!");
1930 case MVT::i8: Opc = IA64::LD1; break;
1931 case MVT::i16: Opc = IA64::LD2; break;
1932 case MVT::i32: Opc = IA64::LD4; break;
1933 case MVT::f32: Opc = IA64::LDF4; break;
1937 SDOperand Chain = N.getOperand(0);
1938 SDOperand Address = N.getOperand(1);
1940 if(Address.getOpcode() == ISD::GlobalAddress) {
1942 unsigned dummy = MakeReg(MVT::i64);
1943 unsigned dummy2 = MakeReg(MVT::i64);
1944 BuildMI(BB, IA64::ADD, 2, dummy)
1945 .addGlobalAddress(cast<GlobalAddressSDNode>(Address)->getGlobal())
1947 BuildMI(BB, IA64::LD8, 1, dummy2).addReg(dummy);
1949 BuildMI(BB, Opc, 1, Result).addReg(dummy2);
1950 else { // emit a little pseudocode to load a bool (stored in one byte)
1951 // into a predicate register
1952 assert(Opc==IA64::LD1 && "problem loading a bool");
1953 unsigned dummy3 = MakeReg(MVT::i64);
1954 BuildMI(BB, Opc, 1, dummy3).addReg(dummy2);
1955 // we compare to 0. true? 0. false? 1.
1956 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
1958 } else if(ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Address)) {
1959 unsigned CPIdx = BB->getParent()->getConstantPool()->
1960 getConstantPoolIndex(CP->get());
1962 IA64Lowering.restoreGP(BB);
1963 unsigned dummy = MakeReg(MVT::i64);
1964 unsigned dummy2 = MakeReg(MVT::i64);
1965 BuildMI(BB, IA64::MOVLIMM64, 1, dummy2).addConstantPoolIndex(CPIdx);
1966 BuildMI(BB, IA64::ADD, 2, dummy).addReg(dummy2).addReg(IA64::r1); //CPI+GP
1969 // OLD BuildMI(BB, IA64::ADD, 2, dummy).addConstantPoolIndex(CPIdx)
1970 // (FIXME!) .addReg(IA64::r1); // CPI+GP
1972 BuildMI(BB, Opc, 1, Result).addReg(dummy);
1973 else { // emit a little pseudocode to load a bool (stored in one byte)
1974 // into a predicate register
1975 assert(Opc==IA64::LD1 && "problem loading a bool");
1976 unsigned dummy3 = MakeReg(MVT::i64);
1977 BuildMI(BB, Opc, 1, dummy3).addReg(dummy);
1978 // we compare to 0. true? 0. false? 1.
1979 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
1981 } else if(Address.getOpcode() == ISD::FrameIndex) {
1982 Select(Chain); // FIXME ? what about bools?
1983 unsigned dummy = MakeReg(MVT::i64);
1984 BuildMI(BB, IA64::MOV, 1, dummy)
1985 .addFrameIndex(cast<FrameIndexSDNode>(Address)->getIndex());
1987 BuildMI(BB, Opc, 1, Result).addReg(dummy);
1988 else { // emit a little pseudocode to load a bool (stored in one byte)
1989 // into a predicate register
1990 assert(Opc==IA64::LD1 && "problem loading a bool");
1991 unsigned dummy3 = MakeReg(MVT::i64);
1992 BuildMI(BB, Opc, 1, dummy3).addReg(dummy);
1993 // we compare to 0. true? 0. false? 1.
1994 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
1996 } else { // none of the above...
1998 Tmp2 = SelectExpr(Address);
2000 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
2001 else { // emit a little pseudocode to load a bool (stored in one byte)
2002 // into a predicate register
2003 assert(Opc==IA64::LD1 && "problem loading a bool");
2004 unsigned dummy = MakeReg(MVT::i64);
2005 BuildMI(BB, Opc, 1, dummy).addReg(Tmp2);
2006 // we compare to 0. true? 0. false? 1.
2007 BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy).addReg(IA64::r0);
2014 case ISD::CopyFromReg: {
2016 Result = ExprMap[N.getValue(0)] =
2017 MakeReg(N.getValue(0).getValueType());
2019 SDOperand Chain = N.getOperand(0);
2022 unsigned r = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
2024 if(N.getValueType() == MVT::i1) // if a bool, we use pseudocode
2025 BuildMI(BB, IA64::PCMPEQUNC, 3, Result)
2026 .addReg(IA64::r0).addReg(IA64::r0).addReg(r);
2027 // (r) Result =cmp.eq.unc(r0,r0)
2029 BuildMI(BB, IA64::MOV, 1, Result).addReg(r); // otherwise MOV
2035 Select(N.getOperand(0));
2037 // The chain for this call is now lowered.
2038 ExprMap.insert(std::make_pair(N.getValue(Node->getNumValues()-1), 1));
2040 //grab the arguments
2041 std::vector<unsigned> argvregs;
2043 for(int i = 2, e = Node->getNumOperands(); i < e; ++i)
2044 argvregs.push_back(SelectExpr(N.getOperand(i)));
2046 // see section 8.5.8 of "Itanium Software Conventions and
2047 // Runtime Architecture Guide to see some examples of what's going
2048 // on here. (in short: int args get mapped 1:1 'slot-wise' to out0->out7,
2049 // while FP args get mapped to F8->F15 as needed)
2051 unsigned used_FPArgs=0; // how many FP Args have been used so far?
2054 for(int i = 0, e = std::min(8, (int)argvregs.size()); i < e; ++i)
2056 unsigned intArgs[] = {IA64::out0, IA64::out1, IA64::out2, IA64::out3,
2057 IA64::out4, IA64::out5, IA64::out6, IA64::out7 };
2058 unsigned FPArgs[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
2059 IA64::F12, IA64::F13, IA64::F14, IA64::F15 };
2061 switch(N.getOperand(i+2).getValueType())
2063 default: // XXX do we need to support MVT::i1 here?
2065 N.getOperand(i).Val->dump();
2066 std::cerr << "Type for " << i << " is: " <<
2067 N.getOperand(i+2).getValueType() << std::endl;
2068 assert(0 && "Unknown value type for call");
2070 BuildMI(BB, IA64::MOV, 1, intArgs[i]).addReg(argvregs[i]);
2073 BuildMI(BB, IA64::FMOV, 1, FPArgs[used_FPArgs++])
2074 .addReg(argvregs[i]);
2075 // FIXME: we don't need to do this _all_ the time:
2076 BuildMI(BB, IA64::GETFD, 1, intArgs[i]).addReg(argvregs[i]);
2082 for (int i = 8, e = argvregs.size(); i < e; ++i)
2084 unsigned tempAddr = MakeReg(MVT::i64);
2086 switch(N.getOperand(i+2).getValueType()) {
2089 N.getOperand(i).Val->dump();
2090 std::cerr << "Type for " << i << " is: " <<
2091 N.getOperand(i+2).getValueType() << "\n";
2092 assert(0 && "Unknown value type for call");
2093 case MVT::i1: // FIXME?
2098 BuildMI(BB, IA64::ADDIMM22, 2, tempAddr)
2099 .addReg(IA64::r12).addImm(16 + (i - 8) * 8); // r12 is SP
2100 BuildMI(BB, IA64::ST8, 2).addReg(tempAddr).addReg(argvregs[i]);
2104 BuildMI(BB, IA64::ADDIMM22, 2, tempAddr)
2105 .addReg(IA64::r12).addImm(16 + (i - 8) * 8); // r12 is SP
2106 BuildMI(BB, IA64::STF8, 2).addReg(tempAddr).addReg(argvregs[i]);
2111 // build the right kind of call. if we can branch directly, do so:
2112 if (GlobalAddressSDNode *GASD =
2113 dyn_cast<GlobalAddressSDNode>(N.getOperand(1)))
2115 BuildMI(BB, IA64::BRCALL, 1).addGlobalAddress(GASD->getGlobal(),true);
2116 IA64Lowering.restoreGP_SP_RP(BB);
2118 if (ExternalSymbolSDNode *ESSDN =
2119 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1)))
2120 { // FIXME : currently need this case for correctness, to avoid
2121 // "non-pic code with imm relocation against dynamic symbol" errors
2122 BuildMI(BB, IA64::BRCALL, 1)
2123 .addExternalSymbol(ESSDN->getSymbol(), true);
2124 IA64Lowering.restoreGP_SP_RP(BB);
2126 else { // otherwise we need to get the function descriptor
2127 // load the branch target (function)'s entry point and
2129 Tmp1 = SelectExpr(N.getOperand(1));
2131 unsigned targetEntryPoint=MakeReg(MVT::i64);
2132 unsigned targetGPAddr=MakeReg(MVT::i64);
2133 unsigned currentGP=MakeReg(MVT::i64);
2135 // b6 is a scratch branch register, we load the target entry point
2136 // from the base of the function descriptor
2137 BuildMI(BB, IA64::LD8, 1, targetEntryPoint).addReg(Tmp1);
2138 BuildMI(BB, IA64::MOV, 1, IA64::B6).addReg(targetEntryPoint);
2140 // save the current GP:
2141 BuildMI(BB, IA64::MOV, 1, currentGP).addReg(IA64::r1);
2143 /* TODO: we need to make sure doing this never, ever loads a
2144 * bogus value into r1 (GP). */
2145 // load the target GP (which is at mem[functiondescriptor+8])
2146 BuildMI(BB, IA64::ADDIMM22, 2, targetGPAddr)
2147 .addReg(Tmp1).addImm(8); // FIXME: addimm22? why not postincrement ld
2148 BuildMI(BB, IA64::LD8, 1, IA64::r1).addReg(targetGPAddr);
2150 // and then jump: (well, call)
2151 BuildMI(BB, IA64::BRCALL, 1).addReg(IA64::B6);
2152 // and finally restore the old GP
2153 BuildMI(BB, IA64::MOV, 1, IA64::r1).addReg(currentGP);
2154 IA64Lowering.restoreSP_RP(BB);
2157 switch (Node->getValueType(0)) {
2158 default: assert(0 && "Unknown value type for call result!");
2159 case MVT::Other: return 1;
2161 BuildMI(BB, IA64::CMPNE, 2, Result)
2162 .addReg(IA64::r8).addReg(IA64::r0);
2168 BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r8);
2171 BuildMI(BB, IA64::FMOV, 1, Result).addReg(IA64::F8);
2174 return Result+N.ResNo;
// Select - Emit IA64 machine instructions (into the current basic block BB)
// for a DAG node that is executed for its side effects, i.e. a node on the
// token chain (returns, branches, stores, call-sequence markers, ...).
// Value-producing operands are lowered via SelectExpr, which returns the
// virtual register holding the result.
// NOTE(review): this listing is elided — several case labels, else-guards
// and closing braces fall between the numbered lines shown below.
2181 void ISel::Select(SDOperand N) {
2182 unsigned Tmp1, Tmp2, Opc;
2183 unsigned opcode = N.getOpcode();
// Each chain node must be selected exactly once; LoweredTokens remembers
// the nodes already emitted so shared chains are not duplicated.
2185 if (!LoweredTokens.insert(N).second)
2186 return; // Already selected.
2188 SDNode *Node = N.Val;
2190 switch (Node->getOpcode()) {
// default: an opcode this selector does not handle yet — dump the node
// for debugging, then abort.
2192 Node->dump(); std::cerr << "\n";
2193 assert(0 && "Node not handled yet!");
2195 case ISD::EntryToken: return; // Noop
// TokenFactor merges several independent chains; selecting each input
// chain is all that is required — no code is emitted for the node itself.
2197 case ISD::TokenFactor: {
2198 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
2199 Select(Node->getOperand(i));
// CopyToReg: operand 0 is the chain, operand 1 the destination register
// (a RegisterSDNode), operand 2 the value to copy.
2203 case ISD::CopyToReg: {
2204 Select(N.getOperand(0));
2205 Tmp1 = SelectExpr(N.getOperand(2));
2206 Tmp2 = cast<RegisterSDNode>(N.getOperand(1))->getReg();
2209 // if a bool, we use pseudocode
// i1 values live in predicate registers, which a plain MOV cannot target:
// instead, conditionally set the destination with a cmp.eq.unc of r0,r0
// qualified by the source predicate Tmp1.
2210 if (N.getOperand(2).getValueType() == MVT::i1)
2211 BuildMI(BB, IA64::PCMPEQUNC, 3, Tmp2)
2212 .addReg(IA64::r0).addReg(IA64::r0).addReg(Tmp1);
2213 // (Tmp1) Tmp2 = cmp.eq.unc(r0,r0)
// non-bool case (the 'else' line is elided here): a plain register move.
2215 BuildMI(BB, IA64::MOV, 1, Tmp2).addReg(Tmp1);
2216 // XXX is this the right way 'round? ;)
2217 // FIXME: WHAT ABOUT FLOATING POINT?
// The IRC transcript below documents the ISD::RET operand convention
// (chain first, returned value(s) after) and why the 3-operand
// 'expansion' form cannot occur on a 64-bit target like IA64.
2224 /* what the heck is going on here:
2226 <_sabre_> ret with two operands is obvious: chain and value
2228 <_sabre_> ret with 3 values happens when 'expansion' occurs
2229 <_sabre_> e.g. i64 gets split into 2x i32
2231 <_sabre_> you don't have this case on ia64
2233 <_sabre_> so the two returned values go into EAX/EDX on ia32
2234 <camel_> ahhh *memories*
2236 <camel_> ok, thanks :)
2237 <_sabre_> so yeah, everything that has a side effect takes a 'token chain'
2238 <_sabre_> this is the first operand always
2239 <_sabre_> these operand often define chains, they are the last operand
2240 <_sabre_> they are printed as 'ch' if you do DAG.dump()
// Dispatch on the number of RET operands: value-returning vs. void
// (the individual 'case' labels are elided from this listing).
2243 switch (N.getNumOperands()) {
2245 assert(0 && "Unknown return instruction!");
// ret with a value: select the chain, then the value, then move the
// value into the conventional return register by type.
2247 Select(N.getOperand(0));
2248 Tmp1 = SelectExpr(N.getOperand(1));
2249 switch (N.getOperand(1).getValueType()) {
2250 default: assert(0 && "All other types should have been promoted!!");
2251 // FIXME: do I need to add support for bools here?
2252 // (return '0' or '1' r8, basically...)
2254 // FIXME: need to round floats - 80 bits is bad, the tester
// Integer results go in r8 (elided case label: MVT::i64).
2257 // we mark r8 as live on exit up above in LowerArguments()
2258 BuildMI(BB, IA64::MOV, 1, IA64::r8).addReg(Tmp1);
// FP results go in F8 (elided case label: MVT::f64).
2261 // we mark F8 as live on exit up above in LowerArguments()
2262 BuildMI(BB, IA64::FMOV, 1, IA64::F8).addReg(Tmp1);
// void return: nothing to move, just select the chain.
2266 Select(N.getOperand(0));
2269 // before returning, restore the ar.pfs register (set by the 'alloc' up top)
2270 BuildMI(BB, IA64::MOV, 1).addReg(IA64::AR_PFS).addReg(IA64Lowering.VirtGPR);
2271 BuildMI(BB, IA64::RET, 0); // and then just emit a 'ret' instruction
// Unconditional branch (elided label: ISD::BR) — operand 1 is the
// destination MachineBasicBlock. p0 is used as the qualifying predicate,
// making the predicated long branch effectively unconditional.
2276 Select(N.getOperand(0));
2277 MachineBasicBlock *Dest =
2278 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
2279 BuildMI(BB, IA64::BRLCOND_NOTCALL, 1).addReg(IA64::p0).addMBB(Dest);
2280 // XXX HACK! we do _not_ need long branches all the time
// Conditional branch (elided label: ISD::BRCOND) — operand 1 is the i1
// condition (a predicate register after SelectExpr), operand 2 the target.
2285 MachineBasicBlock *Dest =
2286 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
2288 Select(N.getOperand(0));
2289 Tmp1 = SelectExpr(N.getOperand(1));
2290 BuildMI(BB, IA64::BRLCOND_NOTCALL, 1).addReg(Tmp1).addMBB(Dest);
2291 // XXX HACK! we do _not_ need long branches all the time
// These nodes produce values despite appearing on the chain; the elided
// code between these labels presumably forwards them to SelectExpr —
// TODO confirm against the full source.
2301 case ISD::CopyFromReg:
2302 case ISD::DYNAMIC_STACKALLOC:
// STORE/TRUNCSTORE (the ISD::STORE label is elided): operand 0 is the
// chain, operand 1 the value to store, operand 2 the address.
2306 case ISD::TRUNCSTORE:
2308 Select(N.getOperand(0));
2309 Tmp1 = SelectExpr(N.getOperand(1)); // value
// Choose the store opcode from the stored type. i1 sets isBool (declared
// in elided lines) so the predicate-register pseudocode below is used.
2313 if(opcode == ISD::STORE) {
2314 switch (N.getOperand(1).getValueType()) {
2315 default: assert(0 && "Cannot store this type!");
2316 case MVT::i1: Opc = IA64::ST1; isBool=true; break;
2317 // FIXME?: for now, we treat bool loads the same as i8 stores */
2318 case MVT::i8: Opc = IA64::ST1; break;
2319 case MVT::i16: Opc = IA64::ST2; break;
2320 case MVT::i32: Opc = IA64::ST4; break;
2321 case MVT::i64: Opc = IA64::ST8; break;
2323 case MVT::f32: Opc = IA64::STF4; break;
2324 case MVT::f64: Opc = IA64::STF8; break;
// For TRUNCSTORE the truncated width comes from the VT node in operand 4,
// not from the value's own type.
2326 } else { // truncstore
2327 switch(cast<VTSDNode>(Node->getOperand(4))->getVT()) {
2328 default: assert(0 && "unknown type in truncstore");
2329 case MVT::i1: Opc = IA64::ST1; isBool=true; break;
2330 //FIXME: DAG does not promote this load?
2331 case MVT::i8: Opc = IA64::ST1; break;
2332 case MVT::i16: Opc = IA64::ST2; break;
2333 case MVT::i32: Opc = IA64::ST4; break;
2334 case MVT::f32: Opc = IA64::STF4; break;
// Address is a GlobalAddress: form GP-relative address (symbol + r1/GP),
// then LD8 the actual object address through it before storing.
2338 if(N.getOperand(2).getOpcode() == ISD::GlobalAddress) {
2339 unsigned dummy = MakeReg(MVT::i64);
2340 unsigned dummy2 = MakeReg(MVT::i64);
2341 BuildMI(BB, IA64::ADD, 2, dummy)
2342 .addGlobalAddress(cast<GlobalAddressSDNode>
2343 (N.getOperand(2))->getGlobal()).addReg(IA64::r1);
2344 BuildMI(BB, IA64::LD8, 1, dummy2).addReg(dummy);
// Non-bool store (the guarding 'if(!isBool)' line is elided here).
2347 BuildMI(BB, Opc, 2).addReg(dummy2).addReg(Tmp1);
2348 else { // we are storing a bool, so emit a little pseudocode
2349 // to store a predicate register as one byte
2350 assert(Opc==IA64::ST1);
2351 unsigned dummy3 = MakeReg(MVT::i64);
2352 unsigned dummy4 = MakeReg(MVT::i64);
// Materialize 0, then add 1 under predicate Tmp1, yielding
// dummy4 = Tmp1 ? 1 : 0, which can be stored as a single byte.
2353 BuildMI(BB, IA64::MOV, 1, dummy3).addReg(IA64::r0);
2354 BuildMI(BB, IA64::TPCADDIMM22, 2, dummy4)
2355 .addReg(dummy3).addImm(1).addReg(Tmp1); // if(Tmp1) dummy=0+1;
2356 BuildMI(BB, Opc, 2).addReg(dummy2).addReg(dummy4);
// Address is a FrameIndex: move the frame-slot address into a register
// and store through it.
2358 } else if(N.getOperand(2).getOpcode() == ISD::FrameIndex) {
2360 // FIXME? (what about bools?)
2362 unsigned dummy = MakeReg(MVT::i64);
2363 BuildMI(BB, IA64::MOV, 1, dummy)
2364 .addFrameIndex(cast<FrameIndexSDNode>(N.getOperand(2))->getIndex());
2365 BuildMI(BB, Opc, 2).addReg(dummy).addReg(Tmp1);
// General case: compute the address with SelectExpr.
2366 } else { // otherwise
2367 Tmp2 = SelectExpr(N.getOperand(2)); //address
// Non-bool store (again, the 'if(!isBool)' guard line is elided).
2369 BuildMI(BB, Opc, 2).addReg(Tmp2).addReg(Tmp1);
2370 else { // we are storing a bool, so emit a little pseudocode
2371 // to store a predicate register as one byte
2372 assert(Opc==IA64::ST1);
2373 unsigned dummy3 = MakeReg(MVT::i64);
2374 unsigned dummy4 = MakeReg(MVT::i64);
// Same predicate-to-byte sequence as the GlobalAddress path above.
2375 BuildMI(BB, IA64::MOV, 1, dummy3).addReg(IA64::r0);
2376 BuildMI(BB, IA64::TPCADDIMM22, 2, dummy4)
2377 .addReg(dummy3).addImm(1).addReg(Tmp1); // if(Tmp1) dummy=0+1;
2378 BuildMI(BB, Opc, 2).addReg(Tmp2).addReg(dummy4);
// Call-sequence markers: emit a pseudo-instruction that adjusts the call
// stack by the constant byte amount in operand 1; the frame lowering
// pass rewrites these into real SP adjustments later.
2384 case ISD::CALLSEQ_START:
2385 case ISD::CALLSEQ_END: {
2386 Select(N.getOperand(0));
2387 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
2389 Opc = N.getOpcode() == ISD::CALLSEQ_START ? IA64::ADJUSTCALLSTACKDOWN :
2390 IA64::ADJUSTCALLSTACKUP;
2391 BuildMI(BB, Opc, 1).addImm(Tmp1);
// Should be unreachable: every opcode either returned from its case arm
// or hit an assert above.
2397 assert(0 && "GAME OVER. INSERT COIN?");
2401 /// createIA64PatternInstructionSelector - This pass converts an LLVM function
2402 /// into a machine code representation using pattern matching and a machine
2403 /// description file.
2405 FunctionPass *llvm::createIA64PatternInstructionSelector(TargetMachine &TM) {
2406 return new ISel(TM);