//===-- IA64ISelLowering.cpp - IA64 DAG Lowering Implementation -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Duraid Madina and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the IA64ISelLowering class.
//
//===----------------------------------------------------------------------===//
14 #include "IA64ISelLowering.h"
15 #include "IA64MachineFunctionInfo.h"
16 #include "IA64TargetMachine.h"
17 #include "llvm/CodeGen/MachineFrameInfo.h"
18 #include "llvm/CodeGen/MachineFunction.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/SelectionDAG.h"
21 #include "llvm/CodeGen/SSARegMap.h"
22 #include "llvm/Constants.h"
23 #include "llvm/Function.h"
26 IA64TargetLowering::IA64TargetLowering(TargetMachine &TM)
27 : TargetLowering(TM) {
29 // register class for general registers
30 addRegisterClass(MVT::i64, IA64::GRRegisterClass);
32 // register class for FP registers
33 addRegisterClass(MVT::f64, IA64::FPRegisterClass);
35 // register class for predicate registers
36 addRegisterClass(MVT::i1, IA64::PRRegisterClass);
38 setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
39 setOperationAction(ISD::BRTWOWAY_CC , MVT::Other, Expand);
40 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
42 // We need to handle ISD::RET for void functions ourselves,
43 // so we get a chance to restore ar.pfs before adding a
45 setOperationAction(ISD::RET, MVT::Other, Custom);
47 setSetCCResultType(MVT::i1);
48 setShiftAmountType(MVT::i64);
50 setOperationAction(ISD::EXTLOAD , MVT::i1 , Promote);
52 setOperationAction(ISD::ZEXTLOAD , MVT::i1 , Expand);
54 setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
55 setOperationAction(ISD::SEXTLOAD , MVT::i8 , Expand);
56 setOperationAction(ISD::SEXTLOAD , MVT::i16 , Expand);
57 setOperationAction(ISD::SEXTLOAD , MVT::i32 , Expand);
59 setOperationAction(ISD::FREM , MVT::f32 , Expand);
60 setOperationAction(ISD::FREM , MVT::f64 , Expand);
62 setOperationAction(ISD::UREM , MVT::f32 , Expand);
63 setOperationAction(ISD::UREM , MVT::f64 , Expand);
65 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
66 setOperationAction(ISD::MEMSET , MVT::Other, Expand);
67 setOperationAction(ISD::MEMCPY , MVT::Other, Expand);
69 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
70 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
72 // We don't support sin/cos/sqrt
73 setOperationAction(ISD::FSIN , MVT::f64, Expand);
74 setOperationAction(ISD::FCOS , MVT::f64, Expand);
75 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
76 setOperationAction(ISD::FSIN , MVT::f32, Expand);
77 setOperationAction(ISD::FCOS , MVT::f32, Expand);
78 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
80 // We don't have line number support yet.
81 setOperationAction(ISD::LOCATION, MVT::Other, Expand);
82 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
83 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
85 //IA64 has these, but they are not implemented
86 setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
87 setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
88 setOperationAction(ISD::ROTL , MVT::i64 , Expand);
89 setOperationAction(ISD::ROTR , MVT::i64 , Expand);
90 setOperationAction(ISD::BSWAP, MVT::i64 , Expand); // mux @rev
92 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
93 setOperationAction(ISD::VAARG , MVT::Other, Custom);
94 setOperationAction(ISD::VASTART , MVT::Other, Custom);
96 // Use the default implementation.
97 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
98 setOperationAction(ISD::VAEND , MVT::Other, Expand);
99 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
100 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
101 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
103 setStackPointerRegisterToSaveRestore(IA64::r12);
105 computeRegisterProperties();
107 addLegalFPImmediate(+0.0);
108 addLegalFPImmediate(+1.0);
111 const char *IA64TargetLowering::getTargetNodeName(unsigned Opcode) const {
114 case IA64ISD::GETFD: return "IA64ISD::GETFD";
115 case IA64ISD::BRCALL: return "IA64ISD::BRCALL";
116 case IA64ISD::RET_FLAG: return "IA64ISD::RET_FLAG";
121 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
122 static bool isFloatingPointZero(SDOperand Op) {
123 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
124 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
125 else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
126 // Maybe this has already been legalized into the constant pool?
127 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
128 if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
129 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
134 std::vector<SDOperand>
135 IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
136 std::vector<SDOperand> ArgValues;
138 // add beautiful description of IA64 stack frame format
139 // here (from intel 24535803.pdf most likely)
141 MachineFunction &MF = DAG.getMachineFunction();
142 MachineFrameInfo *MFI = MF.getFrameInfo();
144 GP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
145 SP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
146 RP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
148 MachineBasicBlock& BB = MF.front();
150 unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
151 IA64::r36, IA64::r37, IA64::r38, IA64::r39};
153 unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
154 IA64::F12,IA64::F13,IA64::F14, IA64::F15};
160 unsigned used_FPArgs = 0; // how many FP args have been used so far?
162 unsigned ArgOffset = 0;
165 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
167 SDOperand newroot, argt;
168 if(count < 8) { // need to fix this logic? maybe.
170 switch (getValueType(I->getType())) {
172 assert(0 && "ERROR in LowerArgs: can't lower this type of arg.\n");
174 // fixme? (well, will need to for weird FP structy stuff,
175 // see intel ABI docs)
177 //XXX BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
178 MF.addLiveIn(args_FP[used_FPArgs]); // mark this reg as liveIn
179 // floating point args go into f8..f15 as-needed, the increment
180 argVreg[count] = // is below..:
181 MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::f64));
182 // FP args go into f8..f15 as needed: (hence the ++)
183 argPreg[count] = args_FP[used_FPArgs++];
184 argOpc[count] = IA64::FMOV;
185 argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), argVreg[count],
187 if (I->getType() == Type::FloatTy)
188 argt = DAG.getNode(ISD::FP_ROUND, MVT::f32, argt);
190 case MVT::i1: // NOTE: as far as C abi stuff goes,
191 // bools are just boring old ints
196 //XXX BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
197 MF.addLiveIn(args_int[count]); // mark this register as liveIn
199 MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
200 argPreg[count] = args_int[count];
201 argOpc[count] = IA64::MOV;
203 DAG.getCopyFromReg(DAG.getRoot(), argVreg[count], MVT::i64);
204 if ( getValueType(I->getType()) != MVT::i64)
205 argt = DAG.getNode(ISD::TRUNCATE, getValueType(I->getType()),
209 } else { // more than 8 args go into the frame
210 // Create the frame index object for this incoming parameter...
211 ArgOffset = 16 + 8 * (count - 8);
212 int FI = MFI->CreateFixedObject(8, ArgOffset);
214 // Create the SelectionDAG nodes corresponding to a load
215 //from this parameter
216 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
217 argt = newroot = DAG.getLoad(getValueType(I->getType()),
218 DAG.getEntryNode(), FIN, DAG.getSrcValue(NULL));
221 DAG.setRoot(newroot.getValue(1));
222 ArgValues.push_back(argt);
226 // Create a vreg to hold the output of (what will become)
227 // the "alloc" instruction
228 VirtGPR = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
229 BuildMI(&BB, IA64::PSEUDO_ALLOC, 0, VirtGPR);
230 // we create a PSEUDO_ALLOC (pseudo)instruction for now
232 BuildMI(&BB, IA64::IDEF, 0, IA64::r1);
235 BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
236 BuildMI(&BB, IA64::IDEF, 0, IA64::rp);
239 BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);
242 BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
243 BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);
247 unsigned tempOffset=0;
249 // if this is a varargs function, we simply lower llvm.va_start by
250 // pointing to the first entry
253 VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);
256 // here we actually do the moving of args, and store them to the stack
257 // too if this is a varargs function:
258 for (int i = 0; i < count && i < 8; ++i) {
259 BuildMI(&BB, argOpc[i], 1, argVreg[i]).addReg(argPreg[i]);
261 // if this is a varargs function, we copy the input registers to the stack
262 int FI = MFI->CreateFixedObject(8, tempOffset);
263 tempOffset+=8; //XXX: is it safe to use r22 like this?
264 BuildMI(&BB, IA64::MOV, 1, IA64::r22).addFrameIndex(FI);
265 // FIXME: we should use st8.spill here, one day
266 BuildMI(&BB, IA64::ST8, 1, IA64::r22).addReg(argPreg[i]);
270 // Finally, inform the code generator which regs we return values in.
271 // (see the ISD::RET: case in the instruction selector)
272 switch (getValueType(F.getReturnType())) {
273 default: assert(0 && "i have no idea where to return this type!");
274 case MVT::isVoid: break;
280 MF.addLiveOut(IA64::r8);
284 MF.addLiveOut(IA64::F8);
291 std::pair<SDOperand, SDOperand>
292 IA64TargetLowering::LowerCallTo(SDOperand Chain,
293 const Type *RetTy, bool isVarArg,
294 unsigned CallingConv, bool isTailCall,
295 SDOperand Callee, ArgListTy &Args,
298 MachineFunction &MF = DAG.getMachineFunction();
300 unsigned NumBytes = 16;
301 unsigned outRegsUsed = 0;
303 if (Args.size() > 8) {
304 NumBytes += (Args.size() - 8) * 8;
307 outRegsUsed = Args.size();
310 // FIXME? this WILL fail if we ever try to pass around an arg that
311 // consumes more than a single output slot (a 'real' double, int128
312 // some sort of aggregate etc.), as we'll underestimate how many 'outX'
313 // registers we use. Hopefully, the assembler will notice.
314 MF.getInfo<IA64FunctionInfo>()->outRegsUsed=
315 std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);
317 // keep stack frame 16-byte aligned
318 //assert(NumBytes==((NumBytes+15) & ~15) && "stack frame not 16-byte aligned!");
319 NumBytes = (NumBytes+15) & ~15;
321 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
322 DAG.getConstant(NumBytes, getPointerTy()));
324 SDOperand StackPtr, NullSV;
325 std::vector<SDOperand> Stores;
326 std::vector<SDOperand> Converts;
327 std::vector<SDOperand> RegValuesToPass;
328 unsigned ArgOffset = 16;
330 for (unsigned i = 0, e = Args.size(); i != e; ++i)
332 SDOperand Val = Args[i].first;
333 MVT::ValueType ObjectVT = Val.getValueType();
334 SDOperand ValToStore(0, 0), ValToConvert(0, 0);
337 default: assert(0 && "unexpected argument type!");
342 //promote to 64-bits, sign/zero extending based on type
344 if(Args[i].second->isSigned())
345 Val = DAG.getNode(ISD::SIGN_EXTEND, MVT::i64, Val);
347 Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i64, Val);
351 if(RegValuesToPass.size() >= 8) {
354 RegValuesToPass.push_back(Val);
359 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
362 if(RegValuesToPass.size() >= 8) {
365 RegValuesToPass.push_back(Val);
366 if(1 /* TODO: if(calling external or varadic function)*/ ) {
367 ValToConvert = Val; // additionally pass this FP value as an int
375 StackPtr = DAG.getRegister(IA64::r12, MVT::i64);
376 NullSV = DAG.getSrcValue(NULL);
378 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
379 PtrOff = DAG.getNode(ISD::ADD, MVT::i64, StackPtr, PtrOff);
380 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
381 ValToStore, PtrOff, NullSV));
382 ArgOffset += ObjSize;
385 if(ValToConvert.Val) {
386 Converts.push_back(DAG.getNode(IA64ISD::GETFD, MVT::i64, ValToConvert));
390 // Emit all stores, make sure they occur before any copies into physregs.
392 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
394 static const unsigned IntArgRegs[] = {
395 IA64::out0, IA64::out1, IA64::out2, IA64::out3,
396 IA64::out4, IA64::out5, IA64::out6, IA64::out7
399 static const unsigned FPArgRegs[] = {
400 IA64::F8, IA64::F9, IA64::F10, IA64::F11,
401 IA64::F12, IA64::F13, IA64::F14, IA64::F15
406 // save the current GP, SP and RP : FIXME: do we need to do all 3 always?
407 SDOperand GPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r1, MVT::i64, InFlag);
408 Chain = GPBeforeCall.getValue(1);
409 InFlag = Chain.getValue(2);
410 SDOperand SPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r12, MVT::i64, InFlag);
411 Chain = SPBeforeCall.getValue(1);
412 InFlag = Chain.getValue(2);
413 SDOperand RPBeforeCall = DAG.getCopyFromReg(Chain, IA64::rp, MVT::i64, InFlag);
414 Chain = RPBeforeCall.getValue(1);
415 InFlag = Chain.getValue(2);
417 // Build a sequence of copy-to-reg nodes chained together with token chain
418 // and flag operands which copy the outgoing integer args into regs out[0-7]
419 // mapped 1:1 and the FP args into regs F8-F15 "lazily"
420 // TODO: for performance, we should only copy FP args into int regs when we
421 // know this is required (i.e. for varardic or external (unknown) functions)
423 // first to the FP->(integer representation) conversions, these are
424 // flagged for now, but shouldn't have to be (TODO)
425 unsigned seenConverts = 0;
426 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
427 if(MVT::isFloatingPoint(RegValuesToPass[i].getValueType())) {
428 Chain = DAG.getCopyToReg(Chain, IntArgRegs[i], Converts[seenConverts++], InFlag);
429 InFlag = Chain.getValue(1);
433 // next copy args into the usual places, these are flagged
434 unsigned usedFPArgs = 0;
435 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
436 Chain = DAG.getCopyToReg(Chain,
437 MVT::isInteger(RegValuesToPass[i].getValueType()) ?
438 IntArgRegs[i] : FPArgRegs[usedFPArgs++],
439 RegValuesToPass[i], InFlag);
440 InFlag = Chain.getValue(1);
443 // If the callee is a GlobalAddress node (quite common, every direct call is)
444 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
446 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
447 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i64);
451 std::vector<MVT::ValueType> NodeTys;
452 std::vector<SDOperand> CallOperands;
453 NodeTys.push_back(MVT::Other); // Returns a chain
454 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
455 CallOperands.push_back(Chain);
456 CallOperands.push_back(Callee);
458 // emit the call itself
460 CallOperands.push_back(InFlag);
462 assert(0 && "this should never happen!\n");
464 /* out with the old...
465 Chain = SDOperand(DAG.getCall(NodeTys, Chain, Callee, InFlag), 0);
467 Chain = SDOperand(DAG.getCall(NodeTys, Chain, Callee), 0);
469 // to make way for a hack:
470 Chain = DAG.getNode(IA64ISD::BRCALL, NodeTys, CallOperands);
471 InFlag = Chain.getValue(1);
473 // restore the GP, SP and RP after the call
474 Chain = DAG.getCopyToReg(Chain, IA64::r1, GPBeforeCall, InFlag);
475 InFlag = Chain.getValue(1);
476 Chain = DAG.getCopyToReg(Chain, IA64::r12, SPBeforeCall, InFlag);
477 InFlag = Chain.getValue(1);
478 Chain = DAG.getCopyToReg(Chain, IA64::rp, RPBeforeCall, InFlag);
479 InFlag = Chain.getValue(1);
481 std::vector<MVT::ValueType> RetVals;
482 RetVals.push_back(MVT::Other);
483 RetVals.push_back(MVT::Flag);
485 MVT::ValueType RetTyVT = getValueType(RetTy);
487 if (RetTyVT != MVT::isVoid) {
489 default: assert(0 && "Unknown value type to return!");
490 case MVT::i1: { // bools are just like other integers (returned in r8)
491 // we *could* fall through to the truncate below, but this saves a
492 // few redundant predicate ops
493 SDOperand boolInR8 = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
494 InFlag = boolInR8.getValue(2);
495 Chain = boolInR8.getValue(1);
496 SDOperand zeroReg = DAG.getCopyFromReg(Chain, IA64::r0, MVT::i64, InFlag);
497 InFlag = zeroReg.getValue(2);
498 Chain = zeroReg.getValue(1);
500 RetVal = DAG.getSetCC(MVT::i1, boolInR8, zeroReg, ISD::SETNE);
506 RetVal = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
507 Chain = RetVal.getValue(1);
509 // keep track of whether it is sign or zero extended (todo: bools?)
511 RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext :ISD::AssertZext,
512 MVT::i64, RetVal, DAG.getValueType(RetTyVT));
514 RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal);
517 RetVal = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
518 Chain = RetVal.getValue(1);
519 InFlag = RetVal.getValue(2); // XXX dead
522 RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
523 Chain = RetVal.getValue(1);
524 RetVal = DAG.getNode(ISD::TRUNCATE, MVT::f32, RetVal);
527 RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
528 Chain = RetVal.getValue(1);
529 InFlag = RetVal.getValue(2); // XXX dead
534 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
535 DAG.getConstant(NumBytes, getPointerTy()));
537 return std::make_pair(RetVal, Chain);
540 SDOperand IA64TargetLowering::LowerReturnTo(SDOperand Chain, SDOperand Op,
542 SDOperand Copy, InFlag;
543 SDOperand AR_PFSVal = DAG.getCopyFromReg(Chain, this->VirtGPR,
545 Chain = AR_PFSVal.getValue(1);
547 switch (Op.getValueType()) {
548 default: assert(0 && "Unknown type to return! (promote?)");
550 Copy = DAG.getCopyToReg(Chain, IA64::r8, Op, InFlag);
553 Copy = DAG.getCopyToReg(Chain, IA64::F8, Op, InFlag);
557 Chain = Copy.getValue(0);
558 InFlag = Copy.getValue(1);
559 // we need to copy VirtGPR (the vreg (to become a real reg)) that holds
560 // the output of this function's alloc instruction back into ar.pfs
561 // before we return. this copy must not float up above the last
562 // outgoing call in this function - we flag this to the ret instruction
563 Chain = DAG.getCopyToReg(Chain, IA64::AR_PFS, AR_PFSVal, InFlag);
564 InFlag = Chain.getValue(1);
566 // and then just emit a 'ret' instruction
567 std::vector<MVT::ValueType> NodeTys;
568 std::vector<SDOperand> RetOperands;
569 NodeTys.push_back(MVT::Other);
570 NodeTys.push_back(MVT::Flag);
571 RetOperands.push_back(Chain);
572 RetOperands.push_back(InFlag);
574 return DAG.getNode(IA64ISD::RET_FLAG, NodeTys, RetOperands);
575 // return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other, MVT::Other, Copy, Chain, InFlag);
578 std::pair<SDOperand, SDOperand> IA64TargetLowering::
579 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
581 assert(0 && "LowerFrameReturnAddress unimplemented");
585 SDOperand IA64TargetLowering::
586 LowerOperation(SDOperand Op, SelectionDAG &DAG) {
587 switch (Op.getOpcode()) {
588 default: assert(0 && "Should not custom lower this!");
589 case ISD::RET: { // the DAGgy stuff takes care of
590 // restoring ar.pfs before adding a br.ret for functions
591 // that return something, but we need to take care of stuff
592 // that returns void manually, so here it is:
593 assert(Op.getNumOperands()==1 &&
594 "trying to custom lower a return other than void! (numops!=1)");
596 SDOperand Chain = Op.getOperand(0);
597 SDOperand AR_PFSVal = DAG.getCopyFromReg(Chain, this->VirtGPR,
599 Chain = AR_PFSVal.getValue(1);
600 Chain = DAG.getCopyToReg(Chain, IA64::AR_PFS, AR_PFSVal);
602 // and then just emit a 'ret' instruction
603 return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other, Chain);
606 MVT::ValueType VT = getPointerTy();
607 SDOperand VAList = DAG.getLoad(VT, Op.getOperand(0), Op.getOperand(1),
609 // Increment the pointer, VAList, to the next vaarg
610 SDOperand VAIncr = DAG.getNode(ISD::ADD, VT, VAList,
611 DAG.getConstant(MVT::getSizeInBits(VT)/8,
613 // Store the incremented VAList to the legalized pointer
614 VAIncr = DAG.getNode(ISD::STORE, MVT::Other, VAList.getValue(1), VAIncr,
615 Op.getOperand(1), Op.getOperand(2));
616 // Load the actual argument out of the pointer VAList
617 return DAG.getLoad(VT, VAIncr, VAList, DAG.getSrcValue(0));
620 // vastart just stores the address of the VarArgsFrameIndex slot into the
621 // memory location argument.
622 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64);
623 return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
624 Op.getOperand(1), Op.getOperand(2));