#include "llvm/Function.h"
#include "llvm/Constants.h"
#include "llvm/ConstantHandling.h"
+#include "llvm/Intrinsics.h"
#include "Support/MathExtras.h"
#include <math.h>
InstructionNode* ptrChild = gepNode;
while (ptrChild && (ptrChild->getOpLabel() == Instruction::GetElementPtr ||
ptrChild->getOpLabel() == GetElemPtrIdx))
- {
- // Child is a GetElemPtr instruction
- gepInst = cast<GetElementPtrInst>(ptrChild->getValue());
- User::op_iterator OI, firstIdx = gepInst->idx_begin();
- User::op_iterator lastIdx = gepInst->idx_end();
- bool allConstantOffsets = true;
-
- // The first index of every GEP must be an array index.
- assert((*firstIdx)->getType() == Type::LongTy &&
- "INTERNAL ERROR: Structure index for a pointer type!");
-
- // If the last instruction had a leading non-zero index, check if the
- // current one references a sequential (i.e., indexable) type.
- // If not, the code is not type-safe and we would create an illegal GEP
- // by folding them, so don't fold any more instructions.
- //
- if (lastInstHasLeadingNonZero)
- if (! isa<SequentialType>(gepInst->getType()->getElementType()))
- break; // cannot fold in any preceding getElementPtr instrs.
-
- // Check that all offsets are constant for this instruction
- for (OI = firstIdx; allConstantOffsets && OI != lastIdx; ++OI)
- allConstantOffsets = isa<ConstantInt>(*OI);
-
- if (allConstantOffsets)
- { // Get pointer value out of ptrChild.
- ptrVal = gepInst->getPointerOperand();
-
- // Remember if it has leading zero index: it will be discarded later.
- lastInstHasLeadingNonZero = ! IsZero(*firstIdx);
-
- // Insert its index vector at the start, skipping any leading [0]
- chainIdxVec.insert(chainIdxVec.begin(),
- firstIdx + !lastInstHasLeadingNonZero, lastIdx);
-
- // Mark the folded node so no code is generated for it.
- ((InstructionNode*) ptrChild)->markFoldedIntoParent();
-
- // Get the previous GEP instruction and continue trying to fold
- ptrChild = dyn_cast<InstructionNode>(ptrChild->leftChild());
- }
- else // cannot fold this getElementPtr instr. or any preceding ones
- break;
- }
+ {
+ // Child is a GetElemPtr instruction
+ gepInst = cast<GetElementPtrInst>(ptrChild->getValue());
+ User::op_iterator OI, firstIdx = gepInst->idx_begin();
+ User::op_iterator lastIdx = gepInst->idx_end();
+ bool allConstantOffsets = true;
+
+ // The first index of every GEP must be an array index.
+ assert((*firstIdx)->getType() == Type::LongTy &&
+ "INTERNAL ERROR: Structure index for a pointer type!");
+
+ // If the last instruction had a leading non-zero index, check if the
+ // current one references a sequential (i.e., indexable) type.
+ // If not, the code is not type-safe and we would create an illegal GEP
+ // by folding them, so don't fold any more instructions.
+ //
+ if (lastInstHasLeadingNonZero)
+ if (! isa<SequentialType>(gepInst->getType()->getElementType()))
+ break; // cannot fold in any preceding getElementPtr instrs.
+
+ // Check that all offsets are constant for this instruction
+ for (OI = firstIdx; allConstantOffsets && OI != lastIdx; ++OI)
+ allConstantOffsets = isa<ConstantInt>(*OI);
+
+ if (allConstantOffsets) {
+ // Get pointer value out of ptrChild.
+ ptrVal = gepInst->getPointerOperand();
+
+ // Remember if it has leading zero index: it will be discarded later.
+ lastInstHasLeadingNonZero = ! IsZero(*firstIdx);
+
+ // Insert its index vector at the start, skipping any leading [0]
+ chainIdxVec.insert(chainIdxVec.begin(),
+ firstIdx + !lastInstHasLeadingNonZero, lastIdx);
+
+ // Mark the folded node so no code is generated for it.
+ ((InstructionNode*) ptrChild)->markFoldedIntoParent();
+
+ // Get the previous GEP instruction and continue trying to fold
+ ptrChild = dyn_cast<InstructionNode>(ptrChild->leftChild());
+ } else // cannot fold this getElementPtr instr. or any preceding ones
+ break;
+ }
// If the first getElementPtr instruction had a leading [0], add it back.
// Note that this instruction is the *last* one successfully folded above.
bool foldedGEPs = false;
bool leadingNonZeroIdx = gepI && ! IsZero(*gepI->idx_begin());
if (allConstantIndices)
- if (Value* newPtr = FoldGetElemChain(ptrChild, idxVec, leadingNonZeroIdx))
- {
- ptrVal = newPtr;
- foldedGEPs = true;
- }
+ if (Value* newPtr = FoldGetElemChain(ptrChild, idxVec, leadingNonZeroIdx)) {
+ ptrVal = newPtr;
+ foldedGEPs = true;
+ }
// Append the index vector of the current instruction.
// Skip the leading [0] index if preceding GEPs were folded into this.
InstructionNode* gepNode = NULL;
if (isa<GetElementPtrInst>(memInst))
gepNode = memInstrNode;
- else if (isa<InstructionNode>(ptrChild) && isa<GetElementPtrInst>(ptrVal))
- { // Child of load/store is a GEP and memInst is its only use.
- // Use its indices and mark it as folded.
- gepNode = cast<InstructionNode>(ptrChild);
- gepNode->markFoldedIntoParent();
- }
+ else if (isa<InstructionNode>(ptrChild) && isa<GetElementPtrInst>(ptrVal)) {
+ // Child of load/store is a GEP and memInst is its only use.
+ // Use its indices and mark it as folded.
+ gepNode = cast<InstructionNode>(ptrChild);
+ gepNode->markFoldedIntoParent();
+ }
// If there are no indices, return the current pointer.
// Else extract the pointer from the GEP and fold the indices.
((InstructionNode*) instrNode->leftChild())->getInstruction();
switch(setCCInstr->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::BRZ; break;
- case Instruction::SetNE: opCode = V9::BRNZ; break;
- case Instruction::SetLE: opCode = V9::BRLEZ; break;
- case Instruction::SetGE: opCode = V9::BRGEZ; break;
- case Instruction::SetLT: opCode = V9::BRLZ; break;
- case Instruction::SetGT: opCode = V9::BRGZ; break;
- default:
- assert(0 && "Unrecognized VM instruction!");
- opCode = V9::INVALID_OPCODE;
- break;
- }
+ {
+ case Instruction::SetEQ: opCode = V9::BRZ; break;
+ case Instruction::SetNE: opCode = V9::BRNZ; break;
+ case Instruction::SetLE: opCode = V9::BRLEZ; break;
+ case Instruction::SetGE: opCode = V9::BRGEZ; break;
+ case Instruction::SetLT: opCode = V9::BRLZ; break;
+ case Instruction::SetGT: opCode = V9::BRGZ; break;
+ default:
+ assert(0 && "Unrecognized VM instruction!");
+ opCode = V9::INVALID_OPCODE;
+ break;
+ }
return opCode;
}
bool isSigned = setCCInstr->getOperand(0)->getType()->isSigned();
- if (isSigned)
+ if (isSigned) {
+ switch(setCCInstr->getOpcode())
{
- switch(setCCInstr->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::BE; break;
- case Instruction::SetNE: opCode = V9::BNE; break;
- case Instruction::SetLE: opCode = V9::BLE; break;
- case Instruction::SetGE: opCode = V9::BGE; break;
- case Instruction::SetLT: opCode = V9::BL; break;
- case Instruction::SetGT: opCode = V9::BG; break;
- default:
- assert(0 && "Unrecognized VM instruction!");
- break;
- }
+ case Instruction::SetEQ: opCode = V9::BE; break;
+ case Instruction::SetNE: opCode = V9::BNE; break;
+ case Instruction::SetLE: opCode = V9::BLE; break;
+ case Instruction::SetGE: opCode = V9::BGE; break;
+ case Instruction::SetLT: opCode = V9::BL; break;
+ case Instruction::SetGT: opCode = V9::BG; break;
+ default:
+ assert(0 && "Unrecognized VM instruction!");
+ break;
}
- else
+ } else {
+ switch(setCCInstr->getOpcode())
{
- switch(setCCInstr->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::BE; break;
- case Instruction::SetNE: opCode = V9::BNE; break;
- case Instruction::SetLE: opCode = V9::BLEU; break;
- case Instruction::SetGE: opCode = V9::BCC; break;
- case Instruction::SetLT: opCode = V9::BCS; break;
- case Instruction::SetGT: opCode = V9::BGU; break;
- default:
- assert(0 && "Unrecognized VM instruction!");
- break;
- }
+ case Instruction::SetEQ: opCode = V9::BE; break;
+ case Instruction::SetNE: opCode = V9::BNE; break;
+ case Instruction::SetLE: opCode = V9::BLEU; break;
+ case Instruction::SetGE: opCode = V9::BCC; break;
+ case Instruction::SetLT: opCode = V9::BCS; break;
+ case Instruction::SetGT: opCode = V9::BGU; break;
+ default:
+ assert(0 && "Unrecognized VM instruction!");
+ break;
}
+ }
return opCode;
}
MachineOpCode opCode = V9::INVALID_OPCODE;
switch(setCCInstr->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::FBE; break;
- case Instruction::SetNE: opCode = V9::FBNE; break;
- case Instruction::SetLE: opCode = V9::FBLE; break;
- case Instruction::SetGE: opCode = V9::FBGE; break;
- case Instruction::SetLT: opCode = V9::FBL; break;
- case Instruction::SetGT: opCode = V9::FBG; break;
- default:
- assert(0 && "Unrecognized VM instruction!");
- break;
- }
+ {
+ case Instruction::SetEQ: opCode = V9::FBE; break;
+ case Instruction::SetNE: opCode = V9::FBNE; break;
+ case Instruction::SetLE: opCode = V9::FBLE; break;
+ case Instruction::SetGE: opCode = V9::FBGE; break;
+ case Instruction::SetLT: opCode = V9::FBL; break;
+ case Instruction::SetGT: opCode = V9::FBG; break;
+ default:
+ assert(0 && "Unrecognized VM instruction!");
+ break;
+ }
return opCode;
}
assert(boolVal->getType() == Type::BoolTy && "Weird but ok! Delete assert");
- if (lastFunction != F)
- {
- lastFunction = F;
- boolToTmpCache.clear();
- }
+ if (lastFunction != F) {
+ lastFunction = F;
+ boolToTmpCache.clear();
+ }
// Look for tmpI and create a new one otherwise. The new value is
// directly written to map using the ref returned by operator[].
MachineOpCode opCode = V9::INVALID_OPCODE;
switch(instrNode->getInstruction()->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::MOVFE; break;
- case Instruction::SetNE: opCode = V9::MOVFNE; break;
- case Instruction::SetLE: opCode = V9::MOVFLE; break;
- case Instruction::SetGE: opCode = V9::MOVFGE; break;
- case Instruction::SetLT: opCode = V9::MOVFL; break;
- case Instruction::SetGT: opCode = V9::MOVFG; break;
- default:
- assert(0 && "Unrecognized VM instruction!");
- break;
- }
+ {
+ case Instruction::SetEQ: opCode = V9::MOVFE; break;
+ case Instruction::SetNE: opCode = V9::MOVFNE; break;
+ case Instruction::SetLE: opCode = V9::MOVFLE; break;
+ case Instruction::SetGE: opCode = V9::MOVFGE; break;
+ case Instruction::SetLT: opCode = V9::MOVFL; break;
+ case Instruction::SetGT: opCode = V9::MOVFG; break;
+ default:
+ assert(0 && "Unrecognized VM instruction!");
+ break;
+ }
return opCode;
}
valueToMove = 1;
switch(instrNode->getInstruction()->getOpcode())
- {
- case Instruction::SetEQ: opCode = V9::MOVE; break;
- case Instruction::SetLE: opCode = V9::MOVLE; break;
- case Instruction::SetGE: opCode = V9::MOVGE; break;
- case Instruction::SetLT: opCode = V9::MOVL; break;
- case Instruction::SetGT: opCode = V9::MOVG; break;
- case Instruction::SetNE: assert(0 && "No move required!"); break;
- default: assert(0 && "Unrecognized VM instr!"); break;
- }
+ {
+ case Instruction::SetEQ: opCode = V9::MOVE; break;
+ case Instruction::SetLE: opCode = V9::MOVLE; break;
+ case Instruction::SetGE: opCode = V9::MOVGE; break;
+ case Instruction::SetLT: opCode = V9::MOVL; break;
+ case Instruction::SetGT: opCode = V9::MOVG; break;
+ case Instruction::SetNE: assert(0 && "No move required!"); break;
+ default: assert(0 && "Unrecognized VM instr!"); break;
+ }
return opCode;
}
MachineOpCode opCode = V9::INVALID_OPCODE;
switch(vopCode)
- {
- case ToFloatTy:
- if (opType == Type::SByteTy || opType == Type::ShortTy || opType == Type::IntTy)
- opCode = V9::FITOS;
- else if (opType == Type::LongTy)
- opCode = V9::FXTOS;
- else if (opType == Type::DoubleTy)
- opCode = V9::FDTOS;
- else if (opType == Type::FloatTy)
- ;
- else
- assert(0 && "Cannot convert this type to FLOAT on SPARC");
- break;
+ {
+ case ToFloatTy:
+ if (opType == Type::SByteTy || opType == Type::ShortTy ||
+ opType == Type::IntTy)
+ opCode = V9::FITOS;
+ else if (opType == Type::LongTy)
+ opCode = V9::FXTOS;
+ else if (opType == Type::DoubleTy)
+ opCode = V9::FDTOS;
+ else if (opType == Type::FloatTy)
+ ;
+ else
+ assert(0 && "Cannot convert this type to FLOAT on SPARC");
+ break;
- case ToDoubleTy:
- // This is usually used in conjunction with CreateCodeToCopyIntToFloat().
- // Both functions should treat the integer as a 32-bit value for types
- // of 4 bytes or less, and as a 64-bit value otherwise.
- if (opType == Type::SByteTy || opType == Type::UByteTy ||
- opType == Type::ShortTy || opType == Type::UShortTy ||
- opType == Type::IntTy || opType == Type::UIntTy)
- opCode = V9::FITOD;
- else if (opType == Type::LongTy || opType == Type::ULongTy)
- opCode = V9::FXTOD;
- else if (opType == Type::FloatTy)
- opCode = V9::FSTOD;
- else if (opType == Type::DoubleTy)
- ;
- else
- assert(0 && "Cannot convert this type to DOUBLE on SPARC");
- break;
+ case ToDoubleTy:
+ // This is usually used in conjunction with CreateCodeToCopyIntToFloat().
+ // Both functions should treat the integer as a 32-bit value for types
+ // of 4 bytes or less, and as a 64-bit value otherwise.
+ if (opType == Type::SByteTy || opType == Type::UByteTy ||
+ opType == Type::ShortTy || opType == Type::UShortTy ||
+ opType == Type::IntTy || opType == Type::UIntTy)
+ opCode = V9::FITOD;
+ else if (opType == Type::LongTy || opType == Type::ULongTy)
+ opCode = V9::FXTOD;
+ else if (opType == Type::FloatTy)
+ opCode = V9::FSTOD;
+ else if (opType == Type::DoubleTy)
+ ;
+ else
+ assert(0 && "Cannot convert this type to DOUBLE on SPARC");
+ break;
- default:
- break;
- }
+ default:
+ break;
+ }
return opCode;
}
assert((opType == Type::FloatTy || opType == Type::DoubleTy)
&& "This function should only be called for FLOAT or DOUBLE");
- if (tid==Type::UIntTyID)
- {
- assert(tid != Type::UIntTyID && "FP-to-uint conversions must be expanded"
- " into FP->long->uint for SPARC v9: SO RUN PRESELECTION PASS!");
- }
- else if (tid==Type::SByteTyID || tid==Type::ShortTyID || tid==Type::IntTyID ||
- tid==Type::UByteTyID || tid==Type::UShortTyID)
- {
- opCode = (opType == Type::FloatTy)? V9::FSTOI : V9::FDTOI;
- }
- else if (tid==Type::LongTyID || tid==Type::ULongTyID)
- {
+ if (tid == Type::UIntTyID) {
+ assert(tid != Type::UIntTyID && "FP-to-uint conversions must be expanded"
+ " into FP->long->uint for SPARC v9: SO RUN PRESELECTION PASS!");
+ } else if (tid == Type::SByteTyID || tid == Type::ShortTyID ||
+ tid == Type::IntTyID || tid == Type::UByteTyID ||
+ tid == Type::UShortTyID) {
+ opCode = (opType == Type::FloatTy)? V9::FSTOI : V9::FDTOI;
+ } else if (tid == Type::LongTyID || tid == Type::ULongTyID) {
opCode = (opType == Type::FloatTy)? V9::FSTOX : V9::FDTOX;
- }
- else
- assert(0 && "Should not get here, Mo!");
+ } else
+ assert(0 && "Should not get here, Mo!");
return opCode;
}
// instead of an FADD (1 vs 3 cycles). There is no integer MOV.
//
if (ConstantFP *FPC = dyn_cast<ConstantFP>(constOp)) {
- double dval = FPC->getValue();
- if (dval == 0.0)
- minstr = CreateMovFloatInstruction(instrNode,
- instrNode->getInstruction()->getType());
- }
+ double dval = FPC->getValue();
+ if (dval == 0.0)
+ minstr = CreateMovFloatInstruction(instrNode,
+ instrNode->getInstruction()->getType());
+ }
return minstr;
}
{
MachineOpCode opCode = V9::INVALID_OPCODE;
- if (resultType->isInteger() || isa<PointerType>(resultType))
- {
+ if (resultType->isInteger() || isa<PointerType>(resultType)) {
opCode = V9::SUB;
- }
- else
+ } else {
switch(resultType->getPrimitiveID())
- {
- case Type::FloatTyID: opCode = V9::FSUBS; break;
- case Type::DoubleTyID: opCode = V9::FSUBD; break;
- default: assert(0 && "Invalid type for SUB instruction"); break;
- }
-
+ {
+ case Type::FloatTyID: opCode = V9::FSUBS; break;
+ case Type::DoubleTyID: opCode = V9::FSUBD; break;
+ default: assert(0 && "Invalid type for SUB instruction"); break;
+ }
+ }
+
return opCode;
}
return true;
}
+// Generate code for any intrinsic that needs a special code sequence
+// instead of a regular call. If not that kind of intrinsic, do nothing.
+// Returns true if code was generated, otherwise false.
+//
+// iid       -- intrinsic ID already extracted from the called function
+// callInstr -- the CallInst invoking the intrinsic
+// target    -- target machine, used for frame/register layout queries
+// mvec      -- output vector; generated MachineInstrs are appended here
+//
+bool CodeGenIntrinsic(LLVMIntrinsic::ID iid, CallInst &callInstr,
+                      TargetMachine &target,
+                      std::vector<MachineInstr*>& mvec)
+{
+  switch (iid) {
+  case LLVMIntrinsic::va_start: {
+    // Get the address of the first vararg value on stack and copy it to
+    // the argument of va_start(va_list* ap).
+    // ignore receives the out-flag of getFirstIncomingArgOffset
+    // (presumably the stack-growth direction -- not needed here).
+    bool ignore;
+    Function* func = cast<Function>(callInstr.getParent()->getParent());
+    int numFixedArgs = func->getFunctionType()->getNumParams();
+    int fpReg = target.getFrameInfo().getIncomingArgBaseRegNum();
+    int argSize = target.getFrameInfo().getSizeOfEachArgOnStack();
+    // First vararg lives just past the fixed args in the incoming-arg area.
+    int firstVarArgOff = numFixedArgs * argSize + target.getFrameInfo().
+      getFirstIncomingArgOffset(MachineFunction::get(func), ignore);
+    // Materialize that address into the va_list operand: ap = fp + offset.
+    mvec.push_back(BuildMI(V9::ADD, 3).addMReg(fpReg).addSImm(firstVarArgOff).
+                   addReg(callInstr.getOperand(1)));
+    return true;
+  }
+
+  case LLVMIntrinsic::va_end:
+    return true;                        // no-op on Sparc
+
+  case LLVMIntrinsic::va_copy:
+    // Simple copy of current va_list (arg2) to new va_list (arg1)
+    // via OR with the zero register (the SPARC idiom for a register move).
+    mvec.push_back(BuildMI(V9::OR, 3).
+                   addMReg(target.getRegInfo().getZeroRegNum()).
+                   addReg(callInstr.getOperand(2)).
+                   addReg(callInstr.getOperand(1)));
+    return true;
+
+  default:
+    // Not a special-sequence intrinsic: caller emits a normal call.
+    return false;
+  }
+}
+
//******************* Externally Visible Functions *************************/
//------------------------------------------------------------------------
//
CallInst *callInstr = cast<CallInst>(subtreeRoot->getInstruction());
Value *callee = callInstr->getCalledValue();
+ Function* calledFunc = dyn_cast<Function>(callee);
- // Create hidden virtual register for return address with type void*
- TmpInstruction* retAddrReg =
- new TmpInstruction(PointerType::get(Type::VoidTy), callInstr);
- MachineCodeForInstruction::get(callInstr).addTemp(retAddrReg);
+ // Check if this is an intrinsic function that needs a special code
+ // sequence (e.g., va_start). Indirect calls cannot be special.
+ //
+ bool specialIntrinsic = false;
+ LLVMIntrinsic::ID iid;
+ if (calledFunc && (iid=(LLVMIntrinsic::ID)calledFunc->getIntrinsicID()))
+ specialIntrinsic = CodeGenIntrinsic(iid, *callInstr, target, mvec);
- // Generate the machine instruction and its operands.
- // Use CALL for direct function calls; this optimistically assumes
- // the PC-relative address fits in the CALL address field (22 bits).
- // Use JMPL for indirect calls.
+ // If not, generate the normal call sequence for the function.
+ // This can also handle any intrinsics that are just function calls.
//
- if (isa<Function>(callee)) // direct function call
- M = BuildMI(V9::CALL, 1).addPCDisp(callee);
- else // indirect function call
- M = BuildMI(V9::JMPLCALL, 3).addReg(callee).addSImm((int64_t)0)
- .addRegDef(retAddrReg);
- mvec.push_back(M);
+ if (! specialIntrinsic)
+ {
+ // Create hidden virtual register for return address with type void*
+ TmpInstruction* retAddrReg =
+ new TmpInstruction(PointerType::get(Type::VoidTy), callInstr);
+ MachineCodeForInstruction::get(callInstr).addTemp(retAddrReg);
+
+ // Generate the machine instruction and its operands.
+ // Use CALL for direct function calls; this optimistically assumes
+ // the PC-relative address fits in the CALL address field (22 bits).
+ // Use JMPL for indirect calls.
+ //
+ if (calledFunc) // direct function call
+ M = BuildMI(V9::CALL, 1).addPCDisp(callee);
+ else // indirect function call
+ M = BuildMI(V9::JMPLCALL, 3).addReg(callee).addSImm((int64_t)0)
+ .addRegDef(retAddrReg);
+ mvec.push_back(M);
- const FunctionType* funcType =
- cast<FunctionType>(cast<PointerType>(callee->getType())
- ->getElementType());
- bool isVarArgs = funcType->isVarArg();
- bool noPrototype = isVarArgs && funcType->getNumParams() == 0;
-
- // Use a descriptor to pass information about call arguments
- // to the register allocator. This descriptor will be "owned"
- // and freed automatically when the MachineCodeForInstruction
- // object for the callInstr goes away.
- CallArgsDescriptor* argDesc = new CallArgsDescriptor(callInstr,
- retAddrReg, isVarArgs, noPrototype);
+ const FunctionType* funcType =
+ cast<FunctionType>(cast<PointerType>(callee->getType())
+ ->getElementType());
+ bool isVarArgs = funcType->isVarArg();
+ bool noPrototype = isVarArgs && funcType->getNumParams() == 0;
- assert(callInstr->getOperand(0) == callee
- && "This is assumed in the loop below!");
-
- for (unsigned i=1, N=callInstr->getNumOperands(); i < N; ++i)
- {
- Value* argVal = callInstr->getOperand(i);
- Instruction* intArgReg = NULL;
+ // Use a descriptor to pass information about call arguments
+ // to the register allocator. This descriptor will be "owned"
+ // and freed automatically when the MachineCodeForInstruction
+ // object for the callInstr goes away.
+ CallArgsDescriptor* argDesc = new CallArgsDescriptor(callInstr,
+ retAddrReg, isVarArgs,noPrototype);
- // Check for FP arguments to varargs functions.
- // Any such argument in the first $K$ args must be passed in an
- // integer register, where K = #integer argument registers.
- if (isVarArgs && argVal->getType()->isFloatingPoint())
+ assert(callInstr->getOperand(0) == callee
+ && "This is assumed in the loop below!");
+
+ for (unsigned i=1, N=callInstr->getNumOperands(); i < N; ++i)
{
- // If it is a function with no prototype, pass value
- // as an FP value as well as a varargs value
- if (noPrototype)
- argDesc->getArgInfo(i-1).setUseFPArgReg();
-
- // If this arg. is in the first $K$ regs, add a copy
- // float-to-int instruction to pass the value as an integer.
- if (i <= target.getRegInfo().GetNumOfIntArgRegs())
+ Value* argVal = callInstr->getOperand(i);
+ Instruction* intArgReg = NULL;
+
+ // Check for FP arguments to varargs functions.
+ // Any such argument in the first $K$ args must be passed in an
+ // integer register, where K = #integer argument registers.
+ if (isVarArgs && argVal->getType()->isFloatingPoint())
{
- MachineCodeForInstruction &destMCFI =
- MachineCodeForInstruction::get(callInstr);
- intArgReg = new TmpInstruction(Type::IntTy, argVal);
- destMCFI.addTemp(intArgReg);
+ // If it is a function with no prototype, pass value
+ // as an FP value as well as a varargs value
+ if (noPrototype)
+ argDesc->getArgInfo(i-1).setUseFPArgReg();
+
+ // If this arg. is in the first $K$ regs, add a copy
+ // float-to-int instruction to pass the value as an integer.
+ if (i <= target.getRegInfo().getNumOfIntArgRegs())
+ {
+ MachineCodeForInstruction &destMCFI =
+ MachineCodeForInstruction::get(callInstr);
+ intArgReg = new TmpInstruction(Type::IntTy, argVal);
+ destMCFI.addTemp(intArgReg);
- std::vector<MachineInstr*> copyMvec;
- target.getInstrInfo().CreateCodeToCopyFloatToInt(target,
- callInstr->getParent()->getParent(),
- argVal, (TmpInstruction*) intArgReg,
- copyMvec, destMCFI);
- mvec.insert(mvec.begin(),copyMvec.begin(),copyMvec.end());
+ std::vector<MachineInstr*> copyMvec;
+ target.getInstrInfo().CreateCodeToCopyFloatToInt(target,
+ callInstr->getParent()->getParent(),
+ argVal, (TmpInstruction*) intArgReg,
+ copyMvec, destMCFI);
+ mvec.insert(mvec.begin(),copyMvec.begin(),copyMvec.end());
- argDesc->getArgInfo(i-1).setUseIntArgReg();
- argDesc->getArgInfo(i-1).setArgCopy(intArgReg);
+ argDesc->getArgInfo(i-1).setUseIntArgReg();
+ argDesc->getArgInfo(i-1).setArgCopy(intArgReg);
+ }
+ else
+ // Cannot fit in first $K$ regs so pass arg on stack
+ argDesc->getArgInfo(i-1).setUseStackSlot();
}
- else
- // Cannot fit in first $K$ regs so pass the arg on the stack
- argDesc->getArgInfo(i-1).setUseStackSlot();
- }
- if (intArgReg)
- mvec.back()->addImplicitRef(intArgReg);
+ if (intArgReg)
+ mvec.back()->addImplicitRef(intArgReg);
- mvec.back()->addImplicitRef(argVal);
- }
+ mvec.back()->addImplicitRef(argVal);
+ }
- // Add the return value as an implicit ref. The call operands
- // were added above.
- if (callInstr->getType() != Type::VoidTy)
- mvec.back()->addImplicitRef(callInstr, /*isDef*/ true);
+ // Add the return value as an implicit ref. The call operands
+ // were added above.
+ if (callInstr->getType() != Type::VoidTy)
+ mvec.back()->addImplicitRef(callInstr, /*isDef*/ true);
- // For the CALL instruction, the ret. addr. reg. is also implicit
- if (isa<Function>(callee))
- mvec.back()->addImplicitRef(retAddrReg, /*isDef*/ true);
+ // For the CALL instruction, the ret. addr. reg. is also implicit
+ if (isa<Function>(callee))
+ mvec.back()->addImplicitRef(retAddrReg, /*isDef*/ true);
- // delay slot
- mvec.push_back(BuildMI(V9::NOP, 0));
+ // delay slot
+ mvec.push_back(BuildMI(V9::NOP, 0));
+ }
+
break;
}
case 64: // reg: Phi(reg,reg)
break; // don't forward the value
+ case 65: // reg: VaArg(reg)
+ {
+ // Use value initialized by va_start as pointer to args on the stack.
+ // Load argument via current pointer value, then increment pointer.
+ int argSize = target.getFrameInfo().getSizeOfEachArgOnStack();
+ Instruction* vaArgI = subtreeRoot->getInstruction();
+ mvec.push_back(BuildMI(V9::LDX, 3).addReg(vaArgI->getOperand(0)).
+ addSImm(0).addRegDef(vaArgI));
+ mvec.push_back(BuildMI(V9::ADD, 3).addReg(vaArgI->getOperand(0)).
+ addSImm(argSize).addRegDef(vaArgI->getOperand(0)));
+ break;
+ }
+
case 71: // reg: VReg
case 72: // reg: Constant
break; // don't forward the value