//===-- Alpha.h - Top-level interface for Alpha representation --*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file contains the entry points for global functions defined in the LLVM
//===-- AlphaAsmPrinter.cpp - Alpha LLVM assembly writer ------------------===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file contains a printer that converts from our internal representation
/// Unique counter for label values used when referencing global values.
///
unsigned LabelNumber;
-
- AlphaAsmPrinter(std::ostream &o, TargetMachine &tm)
+
+ AlphaAsmPrinter(std::ostream &o, TargetMachine &tm)
: AsmPrinter(o, tm), LabelNumber(0)
{
AlignmentIsInBytes = false;
void printOperand(const MachineInstr *MI, int opNum, MVT::ValueType VT);
void printBaseOffsetPair (const MachineInstr *MI, int i, bool brackets=true);
void printMachineInstruction(const MachineInstr *MI);
- bool runOnMachineFunction(MachineFunction &F);
+ bool runOnMachineFunction(MachineFunction &F);
bool doInitialization(Module &M);
bool doFinalization(Module &M);
void SwitchSection(std::ostream &OS, const char *NewSection);
void AlphaAsmPrinter::printOp(const MachineOperand &MO, bool IsCallOp) {
const MRegisterInfo &RI = *TM.getRegisterInfo();
int new_symbol;
-
+
switch (MO.getType()) {
case MachineOperand::MO_VirtualRegister:
if (Value *V = MO.getVRegValueOrNull()) {
std::cerr << "Shouldn't use addPCDisp() when building Alpha MachineInstrs";
abort();
return;
-
+
case MachineOperand::MO_MachineBasicBlock: {
MachineBasicBlock *MBBOp = MO.getMachineBasicBlock();
O << "LBB" << Mang->getValueName(MBBOp->getParent()->getFunction())
else
O << Mang->getValueName(MO.getGlobal());
return;
-
+
default:
O << "<unknown operand type: " << MO.getType() << ">";
return;
++EmittedInsts;
if (printInstruction(MI))
return; // Printer was automatically generated
-
+
assert(0 && "Unhandled instruction in asm writer!");
abort();
return;
void AlphaAsmPrinter::printConstantPool(MachineConstantPool *MCP) {
const std::vector<Constant*> &CP = MCP->getConstants();
const TargetData &TD = TM.getTargetData();
-
+
if (CP.empty()) return;
SwitchSection(O, "section .rodata");
O << "\t.arch ev56\n";
return false;
}
-
+
// SwitchSection - Switch to the specified section of the executable if we are
// not already in it!
//
-void AlphaAsmPrinter::SwitchSection(std::ostream &OS, const char *NewSection)
+void AlphaAsmPrinter::SwitchSection(std::ostream &OS, const char *NewSection)
{
if (CurSection != NewSection) {
CurSection = NewSection;
bool AlphaAsmPrinter::doFinalization(Module &M) {
const TargetData &TD = TM.getTargetData();
-
+
for (Module::const_global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I)
if (I->hasInitializer()) { // External globals require no code
O << "\n\n";
unsigned Size = TD.getTypeSize(C->getType());
unsigned Align = TD.getTypeAlignmentShift(C->getType());
- if (C->isNullValue() &&
+ if (C->isNullValue() &&
(I->hasLinkOnceLinkage() || I->hasInternalLinkage() ||
I->hasWeakLinkage() /* FIXME: Verify correct */)) {
SwitchSection(O, "data");
if (I->hasInternalLinkage())
O << "\t.local " << name << "\n";
-
+
O << "\t.comm " << name << "," << TD.getTypeSize(C->getType())
<< "," << (1 << Align);
O << "\t\t# ";
//===- AlphaISelPattern.cpp - A pattern matching inst selector for Alpha --===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for Alpha.
using namespace llvm;
namespace llvm {
- cl::opt<bool> EnableAlphaIDIV("enable-alpha-intfpdiv",
- cl::desc("Use the FP div instruction for integer div when possible"),
+ cl::opt<bool> EnableAlphaIDIV("enable-alpha-intfpdiv",
+ cl::desc("Use the FP div instruction for integer div when possible"),
cl::Hidden);
- cl::opt<bool> EnableAlphaFTOI("enable-alpha-ftoi",
- cl::desc("Enable use of ftoi* and itof* instructions (ev6 and higher)"),
+ cl::opt<bool> EnableAlphaFTOI("enable-alpha-ftoi",
+ cl::desc("Enable use of ftoi* and itof* instructions (ev6 and higher)"),
cl::Hidden);
- cl::opt<bool> EnableAlphaCount("enable-alpha-count",
- cl::desc("Print estimates on live ins and outs"),
+ cl::opt<bool> EnableAlphaCount("enable-alpha-count",
+ cl::desc("Print estimates on live ins and outs"),
cl::Hidden);
}
setShiftAmountType(MVT::i64);
setSetCCResultType(MVT::i64);
setSetCCResultContents(ZeroOrOneSetCCResult);
-
+
addRegisterClass(MVT::i64, Alpha::GPRCRegisterClass);
addRegisterClass(MVT::f64, Alpha::FPRCRegisterClass);
addRegisterClass(MVT::f32, Alpha::FPRCRegisterClass);
-
+
setOperationAction(ISD::BRCONDTWOWAY, MVT::Other, Expand);
setOperationAction(ISD::EXTLOAD , MVT::i1 , Promote);
setOperationAction(ISD::EXTLOAD , MVT::f32 , Promote);
setOperationAction(ISD::SETCC , MVT::f32, Promote);
computeRegisterProperties();
-
+
addLegalFPImmediate(+0.0); //F31
addLegalFPImmediate(-0.0); //-F31
}
/// lower the arguments for the specified function, into the specified DAG.
virtual std::vector<SDOperand>
LowerArguments(Function &F, SelectionDAG &DAG);
-
+
/// LowerCallTo - This hook lowers an abstract call to a function into an
/// actual call.
virtual std::pair<SDOperand, SDOperand>
LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
-
+
virtual std::pair<SDOperand, SDOperand>
LowerVAStart(SDOperand Chain, SelectionDAG &DAG);
-
+
virtual std::pair<SDOperand,SDOperand>
LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
const Type *ArgTy, SelectionDAG &DAG);
// //#define PV $27
// //#define GP $29
// //#define SP $30
-
+
std::vector<SDOperand>
-AlphaTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG)
+AlphaTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG)
{
std::vector<SDOperand> ArgValues;
std::vector<SDOperand> LS;
//Handle the return address
//BuildMI(&BB, Alpha::IDEF, 0, Alpha::R26);
- unsigned args_int[] = {Alpha::R16, Alpha::R17, Alpha::R18,
+ unsigned args_int[] = {Alpha::R16, Alpha::R17, Alpha::R18,
Alpha::R19, Alpha::R20, Alpha::R21};
- unsigned args_float[] = {Alpha::F16, Alpha::F17, Alpha::F18,
+ unsigned args_float[] = {Alpha::F16, Alpha::F17, Alpha::F18,
Alpha::F19, Alpha::F20, Alpha::F21};
int count = 0;
unsigned Vreg;
MVT::ValueType VT = getValueType(I->getType());
switch (getValueType(I->getType())) {
- default:
- std::cerr << "Unknown Type " << VT << "\n";
+ default:
+ std::cerr << "Unknown Type " << VT << "\n";
abort();
case MVT::f64:
case MVT::f32:
Vreg = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(VT));
BuildMI(&BB, Alpha::CPYS, 2, Vreg).addReg(args_float[count]).addReg(args_float[count]);
- argt = newroot = DAG.getCopyFromReg(Vreg,
- getValueType(I->getType()),
+ argt = newroot = DAG.getCopyFromReg(Vreg,
+ getValueType(I->getType()),
Chain);
break;
case MVT::i1:
} else { //more args
// Create the frame index object for this incoming parameter...
int FI = MFI->CreateFixedObject(8, 8 * (count - 6));
-
- // Create the SelectionDAG nodes corresponding to a load
+
+ // Create the SelectionDAG nodes corresponding to a load
//from this parameter
SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
- argt = newroot = DAG.getLoad(getValueType(I->getType()),
+ argt = newroot = DAG.getLoad(getValueType(I->getType()),
DAG.getEntryNode(), FIN);
}
++count;
}
// If the function takes a variable number of arguments, copy all argument regs to the stack
- if (F.isVarArg())
+ if (F.isVarArg())
for (int i = 0; i < 6; ++i)
{
unsigned Vreg = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
int FI = MFI->CreateFixedObject(8, -8 * (6 - i));
SDOperand SDFI = DAG.getFrameIndex(FI, MVT::i64);
LS.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, argt, SDFI));
-
+
Vreg = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::f64));
BuildMI(&BB, Alpha::CPYS, 2, Vreg).addReg(args_float[i]).addReg(args_float[i]);
argt = DAG.getCopyFromReg(Vreg, MVT::f64, Chain);
}
args_to_use.push_back(Args[i].first);
}
-
+
std::vector<MVT::ValueType> RetVals;
MVT::ValueType RetTyVT = getValueType(RetTy);
if (RetTyVT != MVT::isVoid)
RetVals.push_back(RetTyVT);
RetVals.push_back(MVT::Other);
- SDOperand TheCall = SDOperand(DAG.getCall(RetVals,
+ SDOperand TheCall = SDOperand(DAG.getCall(RetVals,
Chain, Callee, args_to_use), 0);
Chain = TheCall.getValue(RetTyVT != MVT::isVoid);
Chain = DAG.getNode(ISD::ADJCALLSTACKUP, MVT::Other, Chain,
const Type *ArgTy, SelectionDAG &DAG) {
abort();
}
-
+
std::pair<SDOperand, SDOperand> AlphaTargetLowering::
LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
/// SelectionDAG operations.
//===--------------------------------------------------------------------===//
class ISel : public SelectionDAGISel {
-
+
/// AlphaLowering - This object fully describes how to lower LLVM code to an
/// Alpha-specific SelectionDAG.
AlphaTargetLowering AlphaLowering;
-
+
SelectionDAG *ISelDAG; // Hack to support us having a dag->dag transform
// for sdiv and udiv until it is put into the future
// dag combiner.
/// tree.
static const unsigned notIn = (unsigned)(-1);
std::map<SDOperand, unsigned> ExprMap;
-
+
//CCInvMap - sometimes (e.g. SetNE) we get the inverse CC code for free
std::map<SDOperand, unsigned> CCInvMap;
-
+
int count_ins;
int count_outs;
bool has_sym;
public:
- ISel(TargetMachine &TM) : SelectionDAGISel(AlphaLowering), AlphaLowering(TM)
+ ISel(TargetMachine &TM) : SelectionDAGISel(AlphaLowering), AlphaLowering(TM)
{}
-
+
/// InstructionSelectBasicBlock - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
virtual void InstructionSelectBasicBlock(SelectionDAG &DAG) {
if(has_sym)
++count_ins;
if(EnableAlphaCount)
- std::cerr << "COUNT: " << BB->getParent()->getFunction ()->getName() << " "
- << BB->getNumber() << " "
+ std::cerr << "COUNT: " << BB->getParent()->getFunction ()->getName() << " "
+ << BB->getNumber() << " "
<< count_ins << " "
<< count_outs << "\n";
-
+
// Clear state used for selection.
ExprMap.clear();
CCInvMap.clear();
}
-
+
unsigned SelectExpr(SDOperand N);
unsigned SelectExprFP(SDOperand N, unsigned Result);
void Select(SDOperand N);
-
+
void SelectAddr(SDOperand N, unsigned& Reg, long& offset);
void SelectBranchCC(SDOperand N);
void MoveFP2Int(unsigned src, unsigned dst, bool isDouble);
//Shamelessly adapted from PPC32
-// Structure used to return the necessary information to codegen an SDIV as
+// Structure used to return the necessary information to codegen an SDIV as
// a multiply.
struct ms {
int64_t m; // magic number
};
/// magic - calculate the magic numbers required to codegen an integer sdiv as
-/// a sequence of multiply and shifts. Requires that the divisor not be 0, 1,
+/// a sequence of multiply and shifts. Requires that the divisor not be 0, 1,
/// or -1.
static struct ms magic(int64_t d) {
int64_t p;
uint64_t ad, anc, delta, q1, r1, q2, r2, t;
const uint64_t two63 = 9223372036854775808ULL; // 2^63
struct ms mag;
-
+
ad = abs(d);
t = two63 + ((uint64_t)d >> 63);
anc = t - 1 - t%ad; // absolute value of nc
int64_t d = (int64_t)cast<ConstantSDNode>(N.getOperand(1))->getSignExtended();
ms magics = magic(d);
// Multiply the numerator (operand 0) by the magic value
- SDOperand Q = ISelDAG->getNode(ISD::MULHS, MVT::i64, N.getOperand(0),
+ SDOperand Q = ISelDAG->getNode(ISD::MULHS, MVT::i64, N.getOperand(0),
ISelDAG->getConstant(magics.m, MVT::i64));
// If d > 0 and m < 0, add the numerator
if (d > 0 && magics.m < 0)
Q = ISelDAG->getNode(ISD::SUB, MVT::i64, Q, N.getOperand(0));
// Shift right algebraic if shift value is nonzero
if (magics.s > 0)
- Q = ISelDAG->getNode(ISD::SRA, MVT::i64, Q,
+ Q = ISelDAG->getNode(ISD::SRA, MVT::i64, Q,
ISelDAG->getConstant(magics.s, MVT::i64));
// Extract the sign bit and add it to the quotient
- SDOperand T =
+ SDOperand T =
ISelDAG->getNode(ISD::SRL, MVT::i64, Q, ISelDAG->getConstant(63, MVT::i64));
return ISelDAG->getNode(ISD::ADD, MVT::i64, Q, T);
}
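// A minimal standalone sketch of what the SDIV sequence above builds, assuming
// 32-bit operands and the well-known magic constant for d == 7 for brevity;
// the selector emits the same pattern out of 64-bit MULHS/SRA/SRL/ADD nodes,
// with magic() computing M and s for the actual divisor.
#include <cstdint>

static int32_t sdiv_by_7(int32_t n) {
  const int32_t M = (int32_t)0x92492493;           // magic number for d == 7
  int32_t q = (int32_t)(((int64_t)M * n) >> 32);   // MULHS: high half of M*n
  q += n;                                          // d > 0 && M < 0: add the numerator
  q >>= 2;                                         // SRA by the magic shift amount s == 2
  q += (int32_t)((uint32_t)q >> 31);               // extract the sign bit and add it (SRL, ADD)
  return q;                                        // e.g. sdiv_by_7(100) == 14, sdiv_by_7(-100) == -14
}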
/// multiplying by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDOperand ISel::BuildUDIVSequence(SDOperand N) {
- unsigned d =
+ unsigned d =
(unsigned)cast<ConstantSDNode>(N.getOperand(1))->getSignExtended();
mu magics = magicu(d);
// Multiply the numerator (operand 0) by the magic value
- SDOperand Q = ISelDAG->getNode(ISD::MULHU, MVT::i64, N.getOperand(0),
+ SDOperand Q = ISelDAG->getNode(ISD::MULHU, MVT::i64, N.getOperand(0),
ISelDAG->getConstant(magics.m, MVT::i64));
if (magics.a == 0) {
- Q = ISelDAG->getNode(ISD::SRL, MVT::i64, Q,
+ Q = ISelDAG->getNode(ISD::SRL, MVT::i64, Q,
ISelDAG->getConstant(magics.s, MVT::i64));
} else {
SDOperand NPQ = ISelDAG->getNode(ISD::SUB, MVT::i64, N.getOperand(0), Q);
- NPQ = ISelDAG->getNode(ISD::SRL, MVT::i64, NPQ,
+ NPQ = ISelDAG->getNode(ISD::SRL, MVT::i64, NPQ,
ISelDAG->getConstant(1, MVT::i64));
NPQ = ISelDAG->getNode(ISD::ADD, MVT::i64, NPQ, Q);
- Q = ISelDAG->getNode(ISD::SRL, MVT::i64, NPQ,
+ Q = ISelDAG->getNode(ISD::SRL, MVT::i64, NPQ,
ISelDAG->getConstant(magics.s-1, MVT::i64));
}
return Q;
//assert(SetCC->getOperand(0).getValueType() != MVT::f32 && "SetCC f32 should have been promoted");
bool rev = false;
bool inv = false;
-
+
switch (SetCC->getCondition()) {
default: Node->dump(); assert(0 && "Unknown FP comparison!");
case ISD::SETEQ: Opc = Alpha::CMPTEQ; break;
case ISD::SETGE: Opc = Alpha::CMPTLE; rev = true; break;
case ISD::SETNE: Opc = Alpha::CMPTEQ; inv = true; break;
}
-
+
//FIXME: check for constant 0.0
ConstantFPSDNode *CN;
if ((CN = dyn_cast<ConstantFPSDNode>(SetCC->getOperand(0)))
Tmp1 = Alpha::F31;
else
Tmp1 = SelectExpr(N.getOperand(0));
-
+
if ((CN = dyn_cast<ConstantFPSDNode>(SetCC->getOperand(1)))
&& (CN->isExactlyValue(+0.0) || CN->isExactlyValue(-0.0)))
Tmp2 = Alpha::F31;
else
Tmp2 = SelectExpr(N.getOperand(1));
-
+
//Can only compare doubles, and dag won't promote for me
if (SetCC->getOperand(0).getValueType() == MVT::f32)
{
BuildMI(BB, Alpha::CVTST, 1, Tmp3).addReg(Tmp2);
Tmp2 = Tmp3;
}
-
+
if (rev) std::swap(Tmp1, Tmp2);
//do the comparison
BuildMI(BB, Opc, 2, dst).addReg(Tmp1).addReg(Tmp2);
{
unsigned opcode = N.getOpcode();
if (opcode == ISD::ADD) {
- if(N.getOperand(1).getOpcode() == ISD::Constant &&
+ if(N.getOperand(1).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(N.getOperand(1))->getValue() <= 32767)
{ //Normal imm add
Reg = SelectExpr(N.getOperand(0));
offset = cast<ConstantSDNode>(N.getOperand(1))->getValue();
return;
}
- else if(N.getOperand(0).getOpcode() == ISD::Constant &&
+ else if(N.getOperand(0).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(N.getOperand(0))->getValue() <= 32767)
{
Reg = SelectExpr(N.getOperand(1));
void ISel::SelectBranchCC(SDOperand N)
{
assert(N.getOpcode() == ISD::BRCOND && "Not a BranchCC???");
- MachineBasicBlock *Dest =
+ MachineBasicBlock *Dest =
cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
unsigned Opc = Alpha::WTF;
-
+
Select(N.getOperand(0)); //chain
SDOperand CC = N.getOperand(1);
-
+
if (CC.getOpcode() == ISD::SETCC)
{
SetCCSDNode* SetCC = dyn_cast<SetCCSDNode>(CC.Val);
bool RightZero = SetCC->getOperand(0).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(SetCC->getOperand(0))->getValue() == 0;
bool isNE = false;
-
+
//Fix up CC
ISD::CondCode cCode= SetCC->getCondition();
if (LeftZero && !RightZero) //Swap Operands
cCode = ISD::getSetCCSwappedOperands(cCode);
-
+
if(cCode == ISD::SETNE)
isNE = true;
SDOperand CC = N.getOperand(0);
SetCCSDNode* SetCC = dyn_cast<SetCCSDNode>(CC.Val);
- if (CC.getOpcode() == ISD::SETCC &&
+ if (CC.getOpcode() == ISD::SETCC &&
!MVT::isInteger(SetCC->getOperand(0).getValueType()))
{ //FP Setcc -> Select yay!
//a = b: c = 0
//a < b: c < 0
//a > b: c > 0
-
+
bool invTest = false;
unsigned Tmp3;
-
+
ConstantFPSDNode *CN;
if ((CN = dyn_cast<ConstantFPSDNode>(SetCC->getOperand(1)))
&& (CN->isExactlyValue(+0.0) || CN->isExactlyValue(-0.0)))
BuildMI(BB, isD ? Alpha::SUBT : Alpha::SUBS, 2, Tmp3)
.addReg(Tmp1).addReg(Tmp2);
}
-
+
switch (SetCC->getCondition()) {
default: CC.Val->dump(); assert(0 && "Unknown FP comparison!");
case ISD::SETEQ: Opc = invTest ? Alpha::FCMOVNE : Alpha::FCMOVEQ; break;
}
case ISD::FP_ROUND:
- assert (DestType == MVT::f32 &&
- N.getOperand(0).getValueType() == MVT::f64 &&
+ assert (DestType == MVT::f32 &&
+ N.getOperand(0).getValueType() == MVT::f64 &&
"only f64 to f32 conversion supported here");
Tmp1 = SelectExpr(N.getOperand(0));
BuildMI(BB, Alpha::CVTTS, 1, Result).addReg(Tmp1);
return Result;
case ISD::FP_EXTEND:
- assert (DestType == MVT::f64 &&
- N.getOperand(0).getValueType() == MVT::f32 &&
+ assert (DestType == MVT::f64 &&
+ N.getOperand(0).getValueType() == MVT::f32 &&
"only f32 to f64 conversion supported here");
Tmp1 = SelectExpr(N.getOperand(0));
BuildMI(BB, Alpha::CVTST, 1, Result).addReg(Tmp1);
ExprMap[N.getValue(1)] = notIn; // Generate the token
else
Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
-
+
SDOperand Chain = N.getOperand(0);
-
+
Select(Chain);
unsigned r = dyn_cast<RegSDNode>(Node)->getReg();
//std::cerr << "CopyFromReg " << Result << " = " << r << "\n";
BuildMI(BB, Alpha::CPYS, 2, Result).addReg(r).addReg(r);
return Result;
}
-
+
case ISD::LOAD:
{
// Make sure we generate both values.
}
}
return Result;
-
+
case ISD::SDIV:
case ISD::MUL:
case ISD::ADD:
};
ConstantFPSDNode *CN;
- if (opcode == ISD::SUB
+ if (opcode == ISD::SUB
&& (CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
&& (CN->isExactlyValue(+0.0) || CN->isExactlyValue(-0.0)))
{
ExprMap[N.getValue(1)] = notIn; // Generate the token
else
Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
-
+
Tmp1 = MakeReg(MVT::f32);
-
- assert(cast<MVTSDNode>(Node)->getExtraValueType() == MVT::f32 &&
+
+ assert(cast<MVTSDNode>(Node)->getExtraValueType() == MVT::f32 &&
"EXTLOAD not from f32");
assert(Node->getValueType(0) == MVT::f64 && "EXTLOAD not to f64");
-
+
SDOperand Chain = N.getOperand(0);
SDOperand Address = N.getOperand(1);
Select(Chain);
-
+
if (Address.getOpcode() == ISD::GlobalAddress) {
AlphaLowering.restoreGP(BB);
has_sym = true;
BuildMI(BB, Alpha::LDS_SYM, 1, Tmp1).addGlobalAddress(cast<GlobalAddressSDNode>(Address)->getGlobal());
}
- else if (ConstantPoolSDNode *CP =
- dyn_cast<ConstantPoolSDNode>(N.getOperand(1)))
+ else if (ConstantPoolSDNode *CP =
+ dyn_cast<ConstantPoolSDNode>(N.getOperand(1)))
{
AlphaLowering.restoreGP(BB);
has_sym = true;
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP:
{
- assert (N.getOperand(0).getValueType() == MVT::i64
+ assert (N.getOperand(0).getValueType() == MVT::i64
&& "only quads can be loaded from");
Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
Tmp2 = MakeReg(MVT::f64);
if (DestType == MVT::f64 || DestType == MVT::f32 ||
(
- (opcode == ISD::LOAD || opcode == ISD::CopyFromReg ||
+ (opcode == ISD::LOAD || opcode == ISD::CopyFromReg ||
opcode == ISD::EXTLOAD) &&
- (N.getValue(0).getValueType() == MVT::f32 ||
+ (N.getValue(0).getValueType() == MVT::f32 ||
N.getValue(0).getValueType() == MVT::f64)
)
)
default:
Node->dump();
assert(0 && "Node not handled!\n");
-
+
case ISD::MULHU:
Tmp1 = SelectExpr(N.getOperand(0));
Tmp2 = SelectExpr(N.getOperand(1));
BuildMI(BB, Alpha::IDEF, 0, Result);
return Result;
}
-
+
case ISD::DYNAMIC_STACKALLOC:
// Generate both result values.
if (Result != notIn)
<< " the stack alignment yet!";
abort();
}
-
+
Select(N.getOperand(0));
if (ConstantSDNode* CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
{
.addFrameIndex(cast<FrameIndexSDNode>(N)->getIndex())
.addReg(Alpha::F31);
return Result;
-
+
case ISD::EXTLOAD:
case ISD::ZEXTLOAD:
case ISD::SEXTLOAD:
- case ISD::LOAD:
+ case ISD::LOAD:
{
// Make sure we generate both values.
if (Result != notIn)
ExprMap[N.getValue(1)] = notIn; // Generate the token
else
Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
-
+
SDOperand Chain = N.getOperand(0);
SDOperand Address = N.getOperand(1);
Select(Chain);
- assert(Node->getValueType(0) == MVT::i64 &&
+ assert(Node->getValueType(0) == MVT::i64 &&
"Unknown type to sign extend to.");
if (opcode == ISD::LOAD)
Opc = Alpha::LDQ;
else
switch (cast<MVTSDNode>(Node)->getExtraValueType()) {
default: Node->dump(); assert(0 && "Bad sign extend!");
- case MVT::i32: Opc = Alpha::LDL;
+ case MVT::i32: Opc = Alpha::LDL;
assert(opcode != ISD::ZEXTLOAD && "Not sext"); break;
- case MVT::i16: Opc = Alpha::LDWU;
+ case MVT::i16: Opc = Alpha::LDWU;
assert(opcode != ISD::SEXTLOAD && "Not zext"); break;
case MVT::i1: //FIXME: Treat i1 as i8 since there are problems otherwise
- case MVT::i8: Opc = Alpha::LDBU;
+ case MVT::i8: Opc = Alpha::LDBU;
assert(opcode != ISD::SEXTLOAD && "Not zext"); break;
}
case ISD::CALL:
{
Select(N.getOperand(0));
-
+
// The chain for this call is now lowered.
ExprMap.insert(std::make_pair(N.getValue(Node->getNumValues()-1), notIn));
-
+
//grab the arguments
std::vector<unsigned> argvregs;
//assert(Node->getNumOperands() < 8 && "Only 6 args supported");
for(int i = 2, e = Node->getNumOperands(); i < e; ++i)
argvregs.push_back(SelectExpr(N.getOperand(i)));
-
+
//in reg args
for(int i = 0, e = std::min(6, (int)argvregs.size()); i < e; ++i)
{
- unsigned args_int[] = {Alpha::R16, Alpha::R17, Alpha::R18,
+ unsigned args_int[] = {Alpha::R16, Alpha::R17, Alpha::R18,
Alpha::R19, Alpha::R20, Alpha::R21};
- unsigned args_float[] = {Alpha::F16, Alpha::F17, Alpha::F18,
+ unsigned args_float[] = {Alpha::F16, Alpha::F17, Alpha::F18,
Alpha::F19, Alpha::F20, Alpha::F21};
switch(N.getOperand(i+2).getValueType()) {
- default:
- Node->dump();
+ default:
+ Node->dump();
N.getOperand(i).Val->dump();
- std::cerr << "Type for " << i << " is: " <<
+ std::cerr << "Type for " << i << " is: " <<
N.getOperand(i+2).getValueType() << "\n";
assert(0 && "Unknown value type for call");
case MVT::i1:
for (int i = 6, e = argvregs.size(); i < e; ++i)
{
switch(N.getOperand(i+2).getValueType()) {
- default:
- Node->dump();
+ default:
+ Node->dump();
N.getOperand(i).Val->dump();
- std::cerr << "Type for " << i << " is: " <<
+ std::cerr << "Type for " << i << " is: " <<
N.getOperand(i+2).getValueType() << "\n";
assert(0 && "Unknown value type for call");
case MVT::i1:
}
//build the right kind of call
if (GlobalAddressSDNode *GASD =
- dyn_cast<GlobalAddressSDNode>(N.getOperand(1)))
+ dyn_cast<GlobalAddressSDNode>(N.getOperand(1)))
{
if (GASD->getGlobal()->isExternal()) {
//use safe calling convention
AlphaLowering.restoreGP(BB);
BuildMI(BB, Alpha::BSR, 1, Alpha::R26).addGlobalAddress(GASD->getGlobal(),true);
}
- }
+ }
else if (ExternalSymbolSDNode *ESSDN =
- dyn_cast<ExternalSymbolSDNode>(N.getOperand(1)))
+ dyn_cast<ExternalSymbolSDNode>(N.getOperand(1)))
{
AlphaLowering.restoreGP(BB);
has_sym = true;
BuildMI(BB, Alpha::BIS, 2, Alpha::R27).addReg(Tmp1).addReg(Tmp1);
BuildMI(BB, Alpha::JSR, 2, Alpha::R26).addReg(Alpha::R27).addImm(0);
}
-
+
//push the result into a virtual register
-
+
switch (Node->getValueType(0)) {
default: Node->dump(); assert(0 && "Unknown value type for call result!");
case MVT::Other: return notIn;
break;
}
return Result+N.ResNo;
- }
-
+ }
+
case ISD::SIGN_EXTEND_INREG:
{
//do SDIV opt for all levels of ints if not dividing by a constant
MoveFP2Int(Tmp9, Result, true);
return Result;
}
-
+
//Alpha has instructions for a bunch of signed 32 bit stuff
if( dyn_cast<MVTSDNode>(Node)->getExtraValueType() == MVT::i32)
{
}
return Result;
}
-
+
case ISD::SETCC:
{
if (SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Node)) {
bool isConst1 = false;
bool isConst2 = false;
int dir;
-
+
//Tmp1 = SelectExpr(N.getOperand(0));
if(N.getOperand(0).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(N.getOperand(0))->getValue() <= 255)
switch (SetCC->getCondition()) {
default: Node->dump(); assert(0 && "Unknown integer comparison!");
case ISD::SETEQ: Opc = Alpha::CMPEQ; dir=0; break;
- case ISD::SETLT:
+ case ISD::SETLT:
Opc = isConst2 ? Alpha::CMPLTi : Alpha::CMPLT; dir = 1; break;
- case ISD::SETLE:
+ case ISD::SETLE:
Opc = isConst2 ? Alpha::CMPLEi : Alpha::CMPLE; dir = 1; break;
- case ISD::SETGT:
+ case ISD::SETGT:
Opc = isConst1 ? Alpha::CMPLTi : Alpha::CMPLT; dir = 2; break;
- case ISD::SETGE:
+ case ISD::SETGE:
Opc = isConst1 ? Alpha::CMPLEi : Alpha::CMPLE; dir = 2; break;
- case ISD::SETULT:
+ case ISD::SETULT:
Opc = isConst2 ? Alpha::CMPULTi : Alpha::CMPULT; dir = 1; break;
- case ISD::SETUGT:
+ case ISD::SETUGT:
Opc = isConst1 ? Alpha::CMPULTi : Alpha::CMPULT; dir = 2; break;
- case ISD::SETULE:
+ case ISD::SETULE:
Opc = isConst2 ? Alpha::CMPULEi : Alpha::CMPULE; dir = 1; break;
- case ISD::SETUGE:
+ case ISD::SETUGE:
Opc = isConst1 ? Alpha::CMPULEi : Alpha::CMPULE; dir = 2; break;
case ISD::SETNE: {//Handle this one special
//std::cerr << "Alpha does not have a setne.\n";
}
return Result;
}
-
+
case ISD::CopyFromReg:
{
++count_ins;
ExprMap[N.getValue(1)] = notIn; // Generate the token
else
Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
-
+
SDOperand Chain = N.getOperand(0);
Select(Chain);
return Result;
}
- //Most of the plain arithmetic and logic share the same form, and the same
+ //Most of the plain arithmetic and logic share the same form, and the same
//constant immediate test
case ISD::XOR:
//Match Not
}
case ISD::OR:
//Check operand(0) == Not
- if (N.getOperand(0).getOpcode() == ISD::XOR &&
+ if (N.getOperand(0).getOpcode() == ISD::XOR &&
N.getOperand(0).getOperand(1).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(N.getOperand(0).getOperand(1))->getSignExtended() == -1)
{
return Result;
}
//Check operand(1) == Not
- if (N.getOperand(1).getOpcode() == ISD::XOR &&
+ if (N.getOperand(1).getOpcode() == ISD::XOR &&
N.getOperand(1).getOperand(1).getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getSignExtended() == -1)
{
BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
}
return Result;
-
+
case ISD::ADD:
case ISD::SUB:
{
// If this is a divide by constant, we can emit code using some magic
// constants to implement it as a multiply instead.
ExprMap.erase(N);
- if (opcode == ISD::SDIV)
+ if (opcode == ISD::SDIV)
return SelectExpr(BuildSDIVSequence(N));
else
return SelectExpr(BuildUDIVSequence(N));
//else fall through
case ISD::UREM:
case ISD::SREM:
- //FIXME: alpha really doesn't support any of these operations,
+ //FIXME: alpha really doesn't support any of these operations,
// the ops are expanded into special library calls with
// special calling conventions
//Restore GP because it is a call after all...
Tmp2 = SelectExpr(N.getOperand(1));
//set up regs explicitly (helps Reg alloc)
BuildMI(BB, Alpha::BIS, 2, Alpha::R24).addReg(Tmp1).addReg(Tmp1);
- BuildMI(BB, Alpha::BIS, 2, Alpha::R25).addReg(Tmp2).addReg(Tmp2);
+ BuildMI(BB, Alpha::BIS, 2, Alpha::R25).addReg(Tmp2).addReg(Tmp2);
AlphaLowering.restoreGP(BB);
BuildMI(BB, Opc, 2).addReg(Alpha::R24).addReg(Alpha::R25);
- BuildMI(BB, Alpha::BIS, 2, Result).addReg(Alpha::R27).addReg(Alpha::R27);
+ BuildMI(BB, Alpha::BIS, 2, Result).addReg(Alpha::R27).addReg(Alpha::R27);
return Result;
case ISD::FP_TO_UINT:
Tmp2 = MakeReg(MVT::f64);
BuildMI(BB, Alpha::CVTTQ, 1, Tmp2).addReg(Tmp1);
MoveFP2Int(Tmp2, Result, true);
-
+
return Result;
}
SDOperand CC = N.getOperand(0);
SetCCSDNode* SetCC = dyn_cast<SetCCSDNode>(CC.Val);
- if (CC.getOpcode() == ISD::SETCC &&
+ if (CC.getOpcode() == ISD::SETCC &&
!MVT::isInteger(SetCC->getOperand(0).getValueType()))
{ //FP Setcc -> Int Select
Tmp1 = MakeReg(MVT::f64);
cCode = ISD::getSetCCInverse(cCode, true);
if (LeftZero && !RightZero) //Swap Operands
cCode = ISD::getSetCCSwappedOperands(cCode);
-
+
//Choose the CMOV
switch (cCode) {
default: CC.Val->dump(); assert(0 && "Unknown integer comparison!");
Tmp2 = SelectExpr(N.getOperand(1)); //Use if TRUE
Tmp3 = SelectExpr(N.getOperand(2)); //Use if FALSE
BuildMI(BB, Alpha::CMOVEQ, 2, Result).addReg(Tmp2).addReg(Tmp3).addReg(Tmp1);
-
+
return Result;
}
return; // Already selected.
SDNode *Node = N.Val;
-
+
switch (opcode) {
default:
Select(N.getOperand(0));
BuildMI(BB, Alpha::IDEF, 0, cast<RegSDNode>(N)->getReg());
return;
-
+
case ISD::EntryToken: return; // Noop
case ISD::TokenFactor:
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
Select(Node->getOperand(i));
-
+
//N.Val->dump(); std::cerr << "\n";
//assert(0 && "Node not handled yet!");
-
+
return;
case ISD::CopyToReg:
Select(N.getOperand(0));
Tmp1 = SelectExpr(N.getOperand(1));
Tmp2 = cast<RegSDNode>(N)->getReg();
-
+
if (Tmp1 != Tmp2) {
- if (N.getOperand(1).getValueType() == MVT::f64 ||
+ if (N.getOperand(1).getValueType() == MVT::f64 ||
N.getOperand(1).getValueType() == MVT::f32)
BuildMI(BB, Alpha::CPYS, 2, Tmp2).addReg(Tmp1).addReg(Tmp1);
else
Select(N.getOperand(0));
Tmp1 = SelectExpr(N.getOperand(1));
switch (N.getOperand(1).getValueType()) {
- default: Node->dump();
+ default: Node->dump();
assert(0 && "All other types should have been promoted!!");
case MVT::f64:
case MVT::f32:
BuildMI(BB, Alpha::RETURN, 0); // Just emit a 'ret' instruction
return;
- case ISD::TRUNCSTORE:
- case ISD::STORE:
+ case ISD::TRUNCSTORE:
+ case ISD::STORE:
{
SDOperand Chain = N.getOperand(0);
SDOperand Value = N.getOperand(1);
case ISD::ADJCALLSTACKUP:
Select(N.getOperand(0));
Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
-
+
Opc = N.getOpcode() == ISD::ADJCALLSTACKDOWN ? Alpha::ADJUSTSTACKDOWN :
Alpha::ADJUSTSTACKUP;
BuildMI(BB, Opc, 1).addImm(Tmp1);
/// description file.
///
FunctionPass *llvm::createAlphaPatternInstructionSelector(TargetMachine &TM) {
- return new ISel(TM);
+ return new ISel(TM);
}
//===- AlphaInstrInfo.cpp - Alpha Instruction Information -------*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file contains the Alpha implementation of the TargetInstrInfo class.
//===- AlphaInstrInfo.h - Alpha Instruction Information ---------*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file contains the Alpha implementation of the TargetInstrInfo class.
//===- AlphaRegisterInfo.cpp - Alpha Register Information -------*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file contains the Alpha implementation of the MRegisterInfo class.
return Alpha::GPRCRegisterClass;
}
-void
+void
AlphaRegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, int FrameIdx) const {
BuildMI(MBB, MI, Alpha::BIS, 2, DestReg).addReg(SrcReg).addReg(SrcReg);
} else if (RC == Alpha::FPRCRegisterClass) {
BuildMI(MBB, MI, Alpha::CPYS, 2, DestReg).addReg(SrcReg).addReg(SrcReg);
- } else {
+ } else {
std::cerr << "Attempt to copy register that is not GPR or FPR";
abort();
}
New=BuildMI(Alpha::LDA, 2, Alpha::R30)
.addImm(Amount).addReg(Alpha::R30);
}
-
+
// Replace the pseudo instruction with a new instruction...
MBB.insert(I, New);
}
}
//Alpha has a slightly funny stack:
-//Args
+//Args
//<- incoming SP
//fixed locals (and spills, callee saved, etc)
//<- FP
// Add the base register of R30 (SP) or R15 (FP).
MI.SetMachineOperandReg(i + 1, FP ? Alpha::R15 : Alpha::R30);
-
+
// Now add the frame object offset to the offset from the virtual frame index.
int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
DEBUG(std::cerr << "FI: " << FrameIndex << " Offset: " << Offset << "\n");
Offset += MF.getFrameInfo()->getStackSize();
-
- DEBUG(std::cerr << "Corrected Offset " << Offset <<
+
+ DEBUG(std::cerr << "Corrected Offset " << Offset <<
" for stack size: " << MF.getFrameInfo()->getStackSize() << "\n");
if (Offset > IMM_HIGH || Offset < IMM_LOW) {
//inst off the SP/FP
//fix up the old:
MI.SetMachineOperandReg(i + 1, Alpha::R28);
- MI.SetMachineOperandConst(i, MachineOperand::MO_SignExtendedImmed,
+ MI.SetMachineOperandConst(i, MachineOperand::MO_SignExtendedImmed,
getLower16(Offset));
//insert the new
MachineInstr* nMI=BuildMI(Alpha::LDAH, 2, Alpha::R28)
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineInstr *MI;
bool FP = hasFP(MF);
-
+
//handle GOP offset
MI = BuildMI(Alpha::LDGP, 0);
MBB.insert(MBBI, MI);
//evil const_cast until MO stuff is set up to handle const
MI = BuildMI(Alpha::ALTENT, 1).addGlobalAddress(const_cast<Function*>(MF.getFunction()), true);
MBB.insert(MBBI, MI);
-
+
// Get the number of bytes to allocate from the FrameInfo
long NumBytes = MFI->getStackSize();
if (MFI->hasCalls() && !FP) {
- // We reserve argument space for call sites in the function immediately on
- // entry to the current function. This eliminates the need for add/sub
+ // We reserve argument space for call sites in the function immediately on
+ // entry to the current function. This eliminates the need for add/sub
// brackets around call sites.
//If there is a frame pointer, then we don't do this
NumBytes += MFI->getMaxCallFrameSize();
- DEBUG(std::cerr << "Added " << MFI->getMaxCallFrameSize()
+ DEBUG(std::cerr << "Added " << MFI->getMaxCallFrameSize()
<< " to the stack due to calls\n");
}
MachineInstr *MI;
assert((MBBI->getOpcode() == Alpha::RET || MBBI->getOpcode() == Alpha::RETURN) &&
"Can only insert epilog into returning blocks");
-
+
bool FP = hasFP(MF);
-
+
// Get the number of bytes allocated from the FrameInfo...
long NumBytes = MFI->getStackSize();
MBB.insert(MBBI, MI);
}
- if (NumBytes != 0)
+ if (NumBytes != 0)
{
if (NumBytes <= IMM_HIGH) {
MI=BuildMI(Alpha::LDA, 2, Alpha::R30).addImm(NumBytes).addReg(Alpha::R30);
case Type::PointerTyID:
case Type::LongTyID:
case Type::ULongTyID: return &GPRCInstance;
-
+
case Type::FloatTyID:
case Type::DoubleTyID: return &FPRCInstance;
}
//===- AlphaRegisterInfo.h - Alpha Register Information Impl ----*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file contains the Alpha implementation of the MRegisterInfo class.
void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned DestReg, int FrameIndex) const;
-
+
void copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *RC) const;
//===-- AlphaTargetMachine.cpp - Define TargetMachine for Alpha -----------===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
-//
+//
//
//===----------------------------------------------------------------------===//
}
namespace llvm {
- cl::opt<bool> EnableAlphaLSR("enable-lsr-for-alpha",
- cl::desc("Enable LSR for Alpha (beta option!)"),
+ cl::opt<bool> EnableAlphaLSR("enable-lsr-for-alpha",
+ cl::desc("Enable LSR for Alpha (beta option!)"),
cl::Hidden);
}
}
AlphaTargetMachine::AlphaTargetMachine( const Module &M, IntrinsicLowering *IL)
- : TargetMachine("alpha", IL, true),
+ : TargetMachine("alpha", IL, true),
FrameInfo(TargetFrameInfo::StackGrowsDown, 8, 0) //TODO: check these
{}
///
bool AlphaTargetMachine::addPassesToEmitAssembly(PassManager &PM,
std::ostream &Out) {
-
+
if (EnableAlphaLSR) {
PM.add(createLoopStrengthReducePass());
PM.add(createCFGSimplificationPass());
PM.add(createMachineFunctionPrinterPass(&std::cerr));
PM.add(createPrologEpilogCodeInserter());
-
+
// Must run branch selection immediately preceding the asm printer
//PM.add(createAlphaBranchSelectionPass());
-
+
PM.add(createAlphaCodePrinterPass(Out, *this));
-
+
PM.add(createMachineCodeDeleter());
return false;
}
//===-- AlphaTargetMachine.h - Define TargetMachine for Alpha ---*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
-//
+//
// This file declares the Alpha-specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//
public:
AlphaTargetMachine(const Module &M, IntrinsicLowering *IL);
-
- virtual const AlphaInstrInfo *getInstrInfo() const { return &InstrInfo; }
+
+ virtual const AlphaInstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
virtual const MRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
-
+
virtual bool addPassesToEmitAssembly(PassManager &PM, std::ostream &Out);
static unsigned getModuleMatchQuality(const Module &M);
//
// This file was developed by Duraid Madina and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file contains the entry points for global functions defined in the IA64
//===-- IA64AsmPrinter.cpp - Print out IA64 LLVM as assembly --------------===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by Duraid Madina and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file contains a printer that converts from our internal representation
struct IA64SharedAsmPrinter : public AsmPrinter {
std::set<std::string> ExternalFunctionNames, ExternalObjectNames;
-
+
IA64SharedAsmPrinter(std::ostream &O, TargetMachine &TM)
: AsmPrinter(O, TM) { }
void IA64SharedAsmPrinter::printConstantPool(MachineConstantPool *MCP) {
const std::vector<Constant*> &CP = MCP->getConstants();
const TargetData &TD = TM.getTargetData();
-
+
if (CP.empty()) return;
O << "\n\t.section .data, \"aw\", \"progbits\"\n";
unsigned Size = TD.getTypeSize(C->getType());
unsigned Align = TD.getTypeAlignmentShift(C->getType());
- if (C->isNullValue() &&
+ if (C->isNullValue() &&
(I->hasLinkOnceLinkage() || I->hasInternalLinkage() ||
I->hasWeakLinkage() /* FIXME: Verify correct */)) {
SwitchSection(O, CurSection, ".data");
O << "\t.global " << *i << "\n\t.type " << *i << ", @function\n";
}
O << "\n\n";
-
+
// we print out ".global X \n .type X, @object" for each external object
O << "\n\n// (external) symbols referenced (and not defined) above: \n";
for (std::set<std::string>::iterator i = ExternalObjectNames.begin(),
printOp(MO);
}
}
-
+
void printS8ImmOperand(const MachineInstr *MI, unsigned OpNo,
MVT::ValueType VT) {
int val=(unsigned int)MI->getOperand(OpNo).getImmedValue();
MVT::ValueType VT) {
O << (int64_t)MI->getOperand(OpNo).getImmedValue();
}
-
+
void printCallOperand(const MachineInstr *MI, unsigned OpNo,
MVT::ValueType VT) {
- printOp(MI->getOperand(OpNo), true); // this is a br.call instruction
+ printOp(MI->getOperand(OpNo), true); // this is a br.call instruction
}
void printMachineInstruction(const MachineInstr *MI);
void printOp(const MachineOperand &MO, bool isBRCALLinsn= false);
- bool runOnMachineFunction(MachineFunction &F);
+ bool runOnMachineFunction(MachineFunction &F);
bool doInitialization(Module &M);
};
} // end of anonymous namespace
// @ltoff(@fptr(X)) ?
if(F && !isBRCALLinsn /*&& F->isExternal()*/)
Needfptr=true;
-
+
// if this is the target of a call instruction, we should define
// the function somewhere (GNU gas has no problem without this, but
// Intel ias rightly complains of an 'undefined symbol')
-
+
if(F /*&& isBRCALLinsn*/ && F->isExternal())
ExternalFunctionNames.insert(Mang->getValueName(MO.getGlobal()));
else
ExternalFunctionNames.insert(MO.getSymbolName());
return;
default:
- O << "<AsmPrinter: unknown operand type: " << MO.getType() << " >"; return;
+ O << "<AsmPrinter: unknown operand type: " << MO.getType() << " >"; return;
}
}
/// MI to the current output stream.
///
void IA64AsmPrinter::printMachineInstruction(const MachineInstr *MI) {
-
+
++EmittedInsts;
-
+
// Call the autogenerated instruction printer routines.
printInstruction(MI);
}
bool IA64AsmPrinter::doInitialization(Module &M) {
AsmPrinter::doInitialization(M);
-
+
O << "\n.ident \"LLVM-ia64\"\n\n"
<< "\t.psr lsb\n" // should be "msb" on HP-UX, for starters
<< "\t.radix C\n"
//===-- IA64ISelPattern.cpp - A pattern matching inst selector for IA64 ---===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by Duraid Madina and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for IA64.
namespace {
class IA64TargetLowering : public TargetLowering {
int VarArgsFrameIndex; // FrameIndex for start of varargs area.
-
+
//int ReturnAddrIndex; // FrameIndex for return slot.
unsigned GP, SP, RP; // FIXME - clean this mess up
public:
// for ISD::RET down below. add an accessor instead? FIXME
IA64TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
-
+
// register class for general registers
addRegisterClass(MVT::i64, IA64::GRRegisterClass);
// register class for FP registers
addRegisterClass(MVT::f64, IA64::FPRegisterClass);
-
- // register class for predicate registers
+
+ // register class for predicate registers
addRegisterClass(MVT::i1, IA64::PRRegisterClass);
-
+
setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
- setSetCCResultType(MVT::i1);
+ setSetCCResultType(MVT::i1);
setShiftAmountType(MVT::i64);
setOperationAction(ISD::EXTLOAD , MVT::i1 , Promote);
setOperationAction(ISD::UREM , MVT::f32 , Expand);
setOperationAction(ISD::UREM , MVT::f64 , Expand);
-
+
setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
setOperationAction(ISD::MEMSET , MVT::Other, Expand);
setOperationAction(ISD::MEMCPY , MVT::Other, Expand);
MachineBasicBlock& BB = MF.front();
- unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
+ unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
IA64::r36, IA64::r37, IA64::r38, IA64::r39};
-
- unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
+
+ unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
IA64::F12,IA64::F13,IA64::F14, IA64::F15};
-
+
unsigned argVreg[8];
unsigned argPreg[8];
unsigned argOpc[8];
unsigned used_FPArgs = 0; // how many FP args have been used so far?
-
+
unsigned ArgOffset = 0;
int count = 0;
-
+
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
{
SDOperand newroot, argt;
if(count < 8) { // need to fix this logic? maybe.
-
+
switch (getValueType(I->getType())) {
default:
std::cerr << "ERROR in LowerArgs: unknown type "
<< getValueType(I->getType()) << "\n";
abort();
case MVT::f32:
- // fixme? (well, will need to for weird FP structy stuff,
+ // fixme? (well, will need to for weird FP structy stuff,
// see intel ABI docs)
case MVT::f64:
//XXX BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
case MVT::i64:
//XXX BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
MF.addLiveIn(args_int[count]); // mark this register as liveIn
- argVreg[count] =
+ argVreg[count] =
MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
argPreg[count] = args_int[count];
- argOpc[count] = IA64::MOV;
+ argOpc[count] = IA64::MOV;
argt = newroot =
DAG.getCopyFromReg(argVreg[count], MVT::i64, DAG.getRoot());
if ( getValueType(I->getType()) != MVT::i64)
// Create the frame index object for this incoming parameter...
ArgOffset = 16 + 8 * (count - 8);
int FI = MFI->CreateFixedObject(8, ArgOffset);
-
- // Create the SelectionDAG nodes corresponding to a load
+
+ // Create the SelectionDAG nodes corresponding to a load
//from this parameter
SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
- argt = newroot = DAG.getLoad(getValueType(I->getType()),
+ argt = newroot = DAG.getLoad(getValueType(I->getType()),
DAG.getEntryNode(), FIN);
}
++count;
DAG.setRoot(newroot.getValue(1));
ArgValues.push_back(argt);
- }
+ }
-
+
// Create a vreg to hold the output of (what will become)
// the "alloc" instruction
VirtGPR = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
// ..hmm.
unsigned tempOffset=0;
-
+
// if this is a varargs function, we simply lower llvm.va_start by
// pointing to the first entry
if(F.isVarArg()) {
tempOffset=0;
VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);
}
-
+
// here we actually do the moving of args, and store them to the stack
// too if this is a varargs function:
for (int i = 0; i < count && i < 8; ++i) {
MF.addLiveOut(IA64::F8);
break;
}
-
+
return ArgValues;
}
-
+
std::pair<SDOperand, SDOperand>
IA64TargetLowering::LowerCallTo(SDOperand Chain,
const Type *RetTy, bool isVarArg,
} else {
outRegsUsed = Args.size();
}
-
+
// FIXME? this WILL fail if we ever try to pass around an arg that
// consumes more than a single output slot (a 'real' double, int128
// some sort of aggregate etc.), as we'll underestimate how many 'outX'
// registers we use. Hopefully, the assembler will notice.
MF.getInfo<IA64FunctionInfo>()->outRegsUsed=
std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);
-
+
Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
DAG.getConstant(NumBytes, getPointerTy()));
-
+
std::vector<SDOperand> args_to_use;
for (unsigned i = 0, e = Args.size(); i != e; ++i)
{
int64_t v = (int64_t)cast<ConstantSDNode>(N)->getSignExtended();
- if ((Imm = ExactLog2(v))) { // if a division by a power of two, say so
+ if ((Imm = ExactLog2(v))) { // if a division by a power of two, say so
return 1;
- }
-
+ }
+
return 0; // fallthrough
}
if ((Imm = ExactLog2sub1(v))!=666) { // if ANDing with ((2^n)-1) for some n
return 1; // say so
- }
-
+ }
+
return 0; // fallthrough
}
if (v <= 8191 && v >= -8192) { // if this constant fits in 14 bits, say so
Imm = v & 0x3FFF; // 14 bits
return 1;
- }
+ }
return 0; // fallthrough
}
if (v <= 127 && v >= -128) { // if this constant fits in 8 bits, say so
Imm = v & 0xFF; // 8 bits
return 1;
- }
+ }
return 0; // fallthrough
}
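// A standalone sketch of the immediate-fitting predicates the fragments above
// implement (the names are illustrative, not taken from this file); the
// selector uses such tests to decide when a constant can be folded into an
// instruction's immediate field instead of being materialized in a register.
#include <cstdint>

static bool isPowerOf2(int64_t v) {        // divide by 2^n -> use a shift
  return v > 0 && (v & (v - 1)) == 0;
}
static bool fitsSImm14(int64_t v) {        // signed 14-bit immediate: -8192 .. 8191
  return v >= -8192 && v <= 8191;
}
static bool fitsSImm8(int64_t v) {         // signed 8-bit immediate: -128 .. 127
  return v >= -128 && v <= 127;
}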
if (Node->getOpcode() == ISD::CopyFromReg)
// Just use the specified register as our input.
return dyn_cast<RegSDNode>(Node)->getReg();
-
+
unsigned &Reg = ExprMap[N];
if (Reg) return Reg;
-
+
if (N.getOpcode() != ISD::CALL)
Reg = Result = (N.getValueType() != MVT::Other) ?
MakeReg(N.getValueType()) : 1;
ExprMap[SDOperand(Node, Node->getNumValues()-1)] = 1;
}
}
-
+
switch (N.getOpcode()) {
default:
Node->dump();
<< " the stack alignment yet!";
abort();
}
-
-/*
+
+/*
Select(N.getOperand(0));
if (ConstantSDNode* CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
{
BuildMI(BB, IA64::MOV, 1, Result).addReg(IA64::r12);
return Result;
}
-
+
case ISD::SELECT: {
Tmp1 = SelectExpr(N.getOperand(0)); //Cond
Tmp2 = SelectExpr(N.getOperand(1)); //Use if TRUE
Tmp3 = SelectExpr(N.getOperand(2)); //Use if FALSE
unsigned bogoResult;
-
+
switch (N.getOperand(1).getValueType()) {
default: assert(0 &&
"ISD::SELECT: 'select'ing something other than i64 or f64!\n");
// though this will work for now (no JIT)
return Result;
}
-
+
case ISD::Constant: {
unsigned depositPos=0;
unsigned depositLen=0;
}
case MVT::i64: break;
}
-
+
int64_t immediate = cast<ConstantSDNode>(N)->getValue();
if(immediate==0) { // if the constant is just zero,
// turn into: "adds rDest=imm,r0" (and _not_ "andl"...)
BuildMI(BB, IA64::MOVSIMM14, 1, Result).addSImm(immediate);
return Result; // early exit
- }
+ }
if (immediate <= 2097151 && immediate >= -2097152) {
// if this constant fits in 22 bits, we use a mov that the assembler will
// turn into: "addl rDest=imm,r0"
BuildMI(BB, IA64::MOVSIMM22, 1, Result).addSImm(immediate);
return Result; // early exit
- }
+ }
/* otherwise, our immediate is big, so we use movl */
uint64_t Imm = immediate;
BuildMI(BB, IA64::IDEF, 0, Result);
return Result;
}
-
+
case ISD::GlobalAddress: {
GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
unsigned Tmp1 = MakeReg(MVT::i64);
return Result;
}
-
+
case ISD::ExternalSymbol: {
const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
// assert(0 && "sorry, but what did you want an ExternalSymbol for again?");
case ISD::ZERO_EXTEND: {
Tmp1 = SelectExpr(N.getOperand(0)); // value
-
+
switch (N.getOperand(0).getValueType()) {
default: assert(0 && "Cannot zero-extend this type!");
case MVT::i8: Opc = IA64::ZXT1; break;
case MVT::i16: Opc = IA64::ZXT2; break;
case MVT::i32: Opc = IA64::ZXT4; break;
- // we handle bools differently! :
+ // we handle bools differently! :
case MVT::i1: { // if the predicate reg has 1, we want a '1' in our GR.
unsigned dummy = MakeReg(MVT::i64);
// first load zero:
assert(0 && "hmm, ISD::SIGN_EXTEND: shouldn't ever be reached. bad luck!\n");
Tmp1 = SelectExpr(N.getOperand(0)); // value
-
+
switch (N.getOperand(0).getValueType()) {
default: assert(0 && "Cannot sign-extend this type!");
case MVT::i1: assert(0 && "trying to sign extend a bool? ow.\n");
BuildMI(BB, IA64::FMPY, 2, Result).addReg(Tmp1).addReg(Tmp2);
return Result;
}
-
+
case ISD::SUB: {
if(DestType == MVT::f64 && N.getOperand(0).getOpcode() == ISD::MUL &&
N.getOperand(0).Val->hasOneUse()) { // if we can fold this sub
BuildMI(BB, IA64::FABS, 1, Result).addReg(Tmp1);
return Result;
}
-
+
case ISD::FNEG: {
assert(DestType == MVT::f64 && "trying to fneg something other than f64?");
- if (ISD::FABS == N.getOperand(0).getOpcode()) { // && hasOneUse()?
+ if (ISD::FABS == N.getOperand(0).getOpcode()) { // && hasOneUse()?
Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
BuildMI(BB, IA64::FNEGABS, 1, Result).addReg(Tmp1); // fold in abs
} else {
return Result;
}
-
+
case ISD::AND: {
switch (N.getValueType()) {
default: assert(0 && "Cannot AND this type!");
case MVT::i1: { // if a bool, we emit a pseudocode AND
unsigned pA = SelectExpr(N.getOperand(0));
unsigned pB = SelectExpr(N.getOperand(1));
-
+
/* our pseudocode for AND is:
*
(pA) cmp.eq.unc pC,p0 = r0,r0 // pC = pA
*/
unsigned pTemp = MakeReg(MVT::i1);
-
+
unsigned bogusTemp1 = MakeReg(MVT::i1);
unsigned bogusTemp2 = MakeReg(MVT::i1);
unsigned bogusTemp3 = MakeReg(MVT::i1);
unsigned bogusTemp4 = MakeReg(MVT::i1);
-
+
BuildMI(BB, IA64::PCMPEQUNC, 3, bogusTemp1)
.addReg(IA64::r0).addReg(IA64::r0).addReg(pA);
BuildMI(BB, IA64::CMPEQ, 2, bogusTemp2)
.addReg(bogusTemp1).addReg(IA64::r0).addReg(IA64::r0).addReg(pTemp);
break;
}
-
+
// if not a bool, we just AND away:
case MVT::i8:
case MVT::i16:
}
return Result;
}
-
+
case ISD::OR: {
switch (N.getValueType()) {
default: assert(0 && "Cannot OR this type!");
unsigned pB = SelectExpr(N.getOperand(1));
unsigned pTemp1 = MakeReg(MVT::i1);
-
+
/* our pseudocode for OR is:
*
}
return Result;
}
-
+
case ISD::XOR: {
switch (N.getValueType()) {
default: assert(0 && "Cannot XOR this type!");
}
return Result;
}
-
+
case ISD::SRL: {
Tmp1 = SelectExpr(N.getOperand(0));
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
}
return Result;
}
-
+
case ISD::SRA: {
Tmp1 = SelectExpr(N.getOperand(0));
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
}
}
- unsigned TmpPR=MakeReg(MVT::i1); // we need two scratch
+ unsigned TmpPR=MakeReg(MVT::i1); // we need two scratch
unsigned TmpPR2=MakeReg(MVT::i1); // predicate registers,
unsigned TmpF1=MakeReg(MVT::f64); // and one metric truckload of FP regs.
unsigned TmpF2=MakeReg(MVT::f64); // lucky we have IA64?
unsigned TmpF13=MakeReg(MVT::f64);
unsigned TmpF14=MakeReg(MVT::f64);
unsigned TmpF15=MakeReg(MVT::f64);
-
+
// OK, emit some code:
if(!isFP) {
// first, load the inputs into FP regs.
BuildMI(BB, IA64::SETFSIG, 1, TmpF1).addReg(Tmp1);
BuildMI(BB, IA64::SETFSIG, 1, TmpF2).addReg(Tmp2);
-
+
// next, convert the inputs to FP
if(isSigned) {
BuildMI(BB, IA64::FCVTXF, 1, TmpF3).addReg(TmpF1);
BuildMI(BB, IA64::FCVTXUFS1, 1, TmpF3).addReg(TmpF1);
BuildMI(BB, IA64::FCVTXUFS1, 1, TmpF4).addReg(TmpF2);
}
-
+
} else { // this is an FP divide/remainder, so we 'leak' some temp
// regs and assign TmpF3=Tmp1, TmpF4=Tmp2
TmpF3=Tmp1;
// we do a 'conditional fmov' (of the correct result, depending
// on how the frcpa predicate turned out)
BuildMI(BB, IA64::PFMOV, 2, bogoResult)
- .addReg(TmpF12).addReg(TmpPR2);
+ .addReg(TmpF12).addReg(TmpPR2);
BuildMI(BB, IA64::CFMOV, 2, Result)
.addReg(bogoResult).addReg(TmpF15).addReg(TmpPR);
}
Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
bool isBool=false;
-
+
if(opcode == ISD::LOAD) { // this is a LOAD
switch (Node->getValueType(0)) {
default: assert(0 && "Cannot load this type!");
case MVT::i16: Opc = IA64::LD2; break;
case MVT::i32: Opc = IA64::LD4; break;
case MVT::i64: Opc = IA64::LD8; break;
-
+
case MVT::f32: Opc = IA64::LDF4; break;
case MVT::f64: Opc = IA64::LDF8; break;
}
case MVT::f32: Opc = IA64::LDF4; break;
}
}
-
+
SDOperand Chain = N.getOperand(0);
SDOperand Address = N.getOperand(1);
// we compare to 0. true? 0. false? 1.
BuildMI(BB, IA64::CMPNE, 2, Result).addReg(dummy3).addReg(IA64::r0);
}
- } else { // none of the above...
+ } else { // none of the above...
Select(Chain);
Tmp2 = SelectExpr(Address);
if(!isBool)
return Result;
}
-
+
case ISD::CopyFromReg: {
if (Result == 1)
- Result = ExprMap[N.getValue(0)] =
+ Result = ExprMap[N.getValue(0)] =
MakeReg(N.getValue(0).getValueType());
-
+
SDOperand Chain = N.getOperand(0);
Select(Chain);
// The chain for this call is now lowered.
ExprMap.insert(std::make_pair(N.getValue(Node->getNumValues()-1), 1));
-
+
//grab the arguments
std::vector<unsigned> argvregs;
for(int i = 2, e = Node->getNumOperands(); i < e; ++i)
argvregs.push_back(SelectExpr(N.getOperand(i)));
-
- // see section 8.5.8 of "Itanium Software Conventions and
+
+ // see section 8.5.8 of "Itanium Software Conventions and
// Runtime Architecture Guide to see some examples of what's going
// on here. (in short: int args get mapped 1:1 'slot-wise' to out0->out7,
// while FP args get mapped to F8->F15 as needed)
unsigned used_FPArgs=0; // how many FP Args have been used so far?
-
+
// in reg args
for(int i = 0, e = std::min(8, (int)argvregs.size()); i < e; ++i)
{
- unsigned intArgs[] = {IA64::out0, IA64::out1, IA64::out2, IA64::out3,
+ unsigned intArgs[] = {IA64::out0, IA64::out1, IA64::out2, IA64::out3,
IA64::out4, IA64::out5, IA64::out6, IA64::out7 };
unsigned FPArgs[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
IA64::F12, IA64::F13, IA64::F14, IA64::F15 };
default: // XXX do we need to support MVT::i1 here?
Node->dump();
N.getOperand(i).Val->dump();
- std::cerr << "Type for " << i << " is: " <<
+ std::cerr << "Type for " << i << " is: " <<
N.getOperand(i+2).getValueType() << std::endl;
assert(0 && "Unknown value type for call");
case MVT::i64:
unsigned tempAddr = MakeReg(MVT::i64);
switch(N.getOperand(i+2).getValueType()) {
- default:
- Node->dump();
+ default:
+ Node->dump();
N.getOperand(i).Val->dump();
- std::cerr << "Type for " << i << " is: " <<
+ std::cerr << "Type for " << i << " is: " <<
N.getOperand(i+2).getValueType() << "\n";
assert(0 && "Unknown value type for call");
case MVT::i1: // FIXME?
}
/* XXX we want to re-enable direct branches! crippling them now
- * to stress-test indirect branches.:
+ * to stress-test indirect branches.:
//build the right kind of call
if (GlobalAddressSDNode *GASD =
- dyn_cast<GlobalAddressSDNode>(N.getOperand(1)))
+ dyn_cast<GlobalAddressSDNode>(N.getOperand(1)))
{
BuildMI(BB, IA64::BRCALL, 1).addGlobalAddress(GASD->getGlobal(),true);
IA64Lowering.restoreGP_SP_RP(BB);
}
- ^^^^^^^^^^^^^ we want this code one day XXX */
+ ^^^^^^^^^^^^^ we want this code one day XXX */
if (ExternalSymbolSDNode *ESSDN =
- dyn_cast<ExternalSymbolSDNode>(N.getOperand(1)))
+ dyn_cast<ExternalSymbolSDNode>(N.getOperand(1)))
{ // FIXME : currently need this case for correctness, to avoid
// "non-pic code with imm relocation against dynamic symbol" errors
BuildMI(BB, IA64::BRCALL, 1)
unsigned targetEntryPoint=MakeReg(MVT::i64);
unsigned targetGPAddr=MakeReg(MVT::i64);
unsigned currentGP=MakeReg(MVT::i64);
-
+
// b6 is a scratch branch register, we load the target entry point
// from the base of the function descriptor
BuildMI(BB, IA64::LD8, 1, targetEntryPoint).addReg(Tmp1);
// save the current GP:
BuildMI(BB, IA64::MOV, 1, currentGP).addReg(IA64::r1);
-
+
/* TODO: we need to make sure doing this never, ever loads a
* bogus value into r1 (GP). */
// load the target GP (which is at mem[functiondescriptor+8])
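    // (Illustrative note: an IA64 function pointer designates a two-word
    //  function descriptor rather than code, conceptually
    //
    //    struct FnDescriptor { uint64_t EntryPoint; uint64_t GP; };
    //
    //  which is why the entry point is loaded from the descriptor base above
    //  and the callee's GP from 8 bytes past it below.)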
return Result+N.ResNo;
}
- } // <- uhhh XXX
+ } // <- uhhh XXX
return 0;
}
assert(0 && "Node not handled yet!");
case ISD::EntryToken: return; // Noop
-
+
case ISD::TokenFactor: {
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
Select(Node->getOperand(i));
case ISD::CopyToReg: {
Select(N.getOperand(0));
- Tmp1 = SelectExpr(N.getOperand(1));
+ Tmp1 = SelectExpr(N.getOperand(1));
Tmp2 = cast<RegSDNode>(N)->getReg();
-
+
if (Tmp1 != Tmp2) {
if(N.getValueType() == MVT::i1) // if a bool, we use pseudocode
BuildMI(BB, IA64::PCMPEQUNC, 3, Tmp2)
}
return;
}
-
+
case ISD::RET: {
/* what the heck is going on here:
<_sabre_> these operand often define chains, they are the last operand
<_sabre_> they are printed as 'ch' if you do DAG.dump()
*/
-
+
switch (N.getNumOperands()) {
default:
assert(0 && "Unknown return instruction!");
// FIXME: need to round floats - 80 bits is bad, the tester
// told me so
case MVT::i64:
- // we mark r8 as live on exit up above in LowerArguments()
+ // we mark r8 as live on exit up above in LowerArguments()
BuildMI(BB, IA64::MOV, 1, IA64::r8).addReg(Tmp1);
break;
case MVT::f64:
BuildMI(BB, IA64::RET, 0); // and then just emit a 'ret' instruction
return;
}
-
+
case ISD::BR: {
Select(N.getOperand(0));
MachineBasicBlock *Dest =
// XXX HACK! we do _not_ need long branches all the time
return;
}
-
+
case ISD::EXTLOAD:
case ISD::ZEXTLOAD:
case ISD::SEXTLOAD:
Tmp1 = SelectExpr(N.getOperand(1)); // value
bool isBool=false;
-
+
if(opcode == ISD::STORE) {
switch (N.getOperand(1).getValueType()) {
default: assert(0 && "Cannot store this type!");
case MVT::i16: Opc = IA64::ST2; break;
case MVT::i32: Opc = IA64::ST4; break;
case MVT::i64: Opc = IA64::ST8; break;
-
+
case MVT::f32: Opc = IA64::STF4; break;
case MVT::f64: Opc = IA64::STF8; break;
}
case MVT::i8: Opc = IA64::ST1; break;
case MVT::i16: Opc = IA64::ST2; break;
case MVT::i32: Opc = IA64::ST4; break;
- case MVT::f32: Opc = IA64::STF4; break;
+ case MVT::f32: Opc = IA64::STF4; break;
}
}
.addGlobalAddress(cast<GlobalAddressSDNode>
(N.getOperand(2))->getGlobal()).addReg(IA64::r1);
BuildMI(BB, IA64::LD8, 1, dummy2).addReg(dummy);
-
+
if(!isBool)
BuildMI(BB, Opc, 2).addReg(dummy2).addReg(Tmp1);
else { // we are storing a bool, so emit a little pseudocode
BuildMI(BB, Opc, 2).addReg(dummy).addReg(Tmp1);
} else { // otherwise
Tmp2 = SelectExpr(N.getOperand(2)); //address
- if(!isBool)
+ if(!isBool)
BuildMI(BB, Opc, 2).addReg(Tmp2).addReg(Tmp1);
else { // we are storing a bool, so emit a little pseudocode
// to store a predicate register as one byte
}
return;
}
-
+
case ISD::ADJCALLSTACKDOWN:
case ISD::ADJCALLSTACKUP: {
Select(N.getOperand(0));
Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
-
+
Opc = N.getOpcode() == ISD::ADJCALLSTACKDOWN ? IA64::ADJUSTCALLSTACKDOWN :
IA64::ADJUSTCALLSTACKUP;
BuildMI(BB, Opc, 1).addImm(Tmp1);
/// description file.
///
FunctionPass *llvm::createIA64PatternInstructionSelector(TargetMachine &TM) {
- return new ISel(TM);
+ return new ISel(TM);
}
//===-- IA64PCInstrBuilder.h - Aids for building IA64 insts -----*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by Duraid Madina and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file exposes functions that may be used with BuildMI from the
/// This allows a constant offset to be specified as well...
///
inline const MachineInstrBuilder&
-addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0,
+addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0,
bool mem = true) {
if (mem)
return MIB.addSImm(Offset).addFrameIndex(FI);
//===- IA64InstrInfo.cpp - IA64 Instruction Information -----------*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file contains the IA64 implementation of the TargetInstrInfo class.
//===- IA64InstrInfo.h - IA64 Instruction Information ----------*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by Duraid Madina and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file contains the IA64 implementation of the TargetInstrInfo class.
//===-- IA64MachineFunctionInfo.h - IA64 machine function info -*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
-//
+//
//===----------------------------------------------------------------------===//
-//
+//
// This file declares IA64-specific per-machine-function information.
//
//===----------------------------------------------------------------------===//
namespace llvm {
class IA64FunctionInfo : public MachineFunctionInfo {
-
+
public:
unsigned outRegsUsed; // how many 'out' registers are used
// by this machinefunction? (used to compute the appropriate
return IA64::FPRegisterClass;
if (IA64::PRRegisterClass->contains(SrcReg))
return IA64::PRRegisterClass;
-
+
assert(IA64::GRRegisterClass->contains(SrcReg) &&
"PROBLEM: Reg is not FP, predicate or GR!");
return IA64::GRRegisterClass;
// alignment boundary.
unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
Amount = (Amount+Align-1)/Align*Align;
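  // (For example, with Align == 16 an Amount of 40 rounds up to
  //  (40 + 15) / 16 * 16 == 48, while an already-aligned 32 stays 32.)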
-
+
MachineInstr *New;
if (Old->getOpcode() == IA64::ADJUSTCALLSTACKDOWN) {
New=BuildMI(IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
MachineFunction &MF = *MBB.getParent();
bool FP = hasFP(MF);
-
+
while (!MI.getOperand(i).isFrameIndex()) {
++i;
assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineInstr *MI;
bool FP = hasFP(MF);
-
+
// first, we handle the 'alloc' instruction, that should be right up the
// top of any function
static const unsigned RegsInOrder[96] = { // there are 96 GPRs the
// RSE worries about
- IA64::r32, IA64::r33, IA64::r34, IA64::r35,
- IA64::r36, IA64::r37, IA64::r38, IA64::r39, IA64::r40, IA64::r41,
- IA64::r42, IA64::r43, IA64::r44, IA64::r45, IA64::r46, IA64::r47,
- IA64::r48, IA64::r49, IA64::r50, IA64::r51, IA64::r52, IA64::r53,
- IA64::r54, IA64::r55, IA64::r56, IA64::r57, IA64::r58, IA64::r59,
- IA64::r60, IA64::r61, IA64::r62, IA64::r63, IA64::r64, IA64::r65,
- IA64::r66, IA64::r67, IA64::r68, IA64::r69, IA64::r70, IA64::r71,
- IA64::r72, IA64::r73, IA64::r74, IA64::r75, IA64::r76, IA64::r77,
- IA64::r78, IA64::r79, IA64::r80, IA64::r81, IA64::r82, IA64::r83,
- IA64::r84, IA64::r85, IA64::r86, IA64::r87, IA64::r88, IA64::r89,
- IA64::r90, IA64::r91, IA64::r92, IA64::r93, IA64::r94, IA64::r95,
- IA64::r96, IA64::r97, IA64::r98, IA64::r99, IA64::r100, IA64::r101,
- IA64::r102, IA64::r103, IA64::r104, IA64::r105, IA64::r106, IA64::r107,
- IA64::r108, IA64::r109, IA64::r110, IA64::r111, IA64::r112, IA64::r113,
- IA64::r114, IA64::r115, IA64::r116, IA64::r117, IA64::r118, IA64::r119,
+ IA64::r32, IA64::r33, IA64::r34, IA64::r35,
+ IA64::r36, IA64::r37, IA64::r38, IA64::r39, IA64::r40, IA64::r41,
+ IA64::r42, IA64::r43, IA64::r44, IA64::r45, IA64::r46, IA64::r47,
+ IA64::r48, IA64::r49, IA64::r50, IA64::r51, IA64::r52, IA64::r53,
+ IA64::r54, IA64::r55, IA64::r56, IA64::r57, IA64::r58, IA64::r59,
+ IA64::r60, IA64::r61, IA64::r62, IA64::r63, IA64::r64, IA64::r65,
+ IA64::r66, IA64::r67, IA64::r68, IA64::r69, IA64::r70, IA64::r71,
+ IA64::r72, IA64::r73, IA64::r74, IA64::r75, IA64::r76, IA64::r77,
+ IA64::r78, IA64::r79, IA64::r80, IA64::r81, IA64::r82, IA64::r83,
+ IA64::r84, IA64::r85, IA64::r86, IA64::r87, IA64::r88, IA64::r89,
+ IA64::r90, IA64::r91, IA64::r92, IA64::r93, IA64::r94, IA64::r95,
+ IA64::r96, IA64::r97, IA64::r98, IA64::r99, IA64::r100, IA64::r101,
+ IA64::r102, IA64::r103, IA64::r104, IA64::r105, IA64::r106, IA64::r107,
+ IA64::r108, IA64::r109, IA64::r110, IA64::r111, IA64::r112, IA64::r113,
+ IA64::r114, IA64::r115, IA64::r116, IA64::r117, IA64::r118, IA64::r119,
IA64::r120, IA64::r121, IA64::r122, IA64::r123, IA64::r124, IA64::r125,
IA64::r126, IA64::r127 };
break;
}
}
-
+
MI=BuildMI(IA64::ALLOC,5).addReg(dstRegOfPseudoAlloc).addImm(0).\
addImm(numStackedGPRsUsed).addImm(numOutRegsUsed).addImm(0);
MBB.insert(MBBI, MI);
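  // (For illustration, assuming the five operands above map to the usual
  //  'alloc <dst> = ar.pfs, ins, locals, outs, rotating' form: a function
  //  using 4 stacked GPRs and 2 out-registers would get roughly
  //      alloc <dst> = ar.pfs, 0, 4, 2, 0
  //  emitted at this point.)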
-
+
// Get the number of bytes to allocate from the FrameInfo
unsigned NumBytes = MFI->getStackSize();
if (MFI->hasCalls() && !FP) {
- // We reserve argument space for call sites in the function immediately on
- // entry to the current function. This eliminates the need for add/sub
+ // We reserve argument space for call sites in the function immediately on
+ // entry to the current function. This eliminates the need for add/sub
// brackets around call sites.
NumBytes += MFI->getMaxCallFrameSize();
}
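  // (Worked example: a function with 24 bytes of locals whose largest call
  //  site needs 32 bytes of outgoing argument space allocates 24 + 32 == 56
  //  bytes once in the prologue, instead of adjusting the stack pointer
  //  around every individual call.)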
MI=BuildMI(IA64::ADD, 2, IA64::r12).addReg(IA64::r12).addReg(IA64::r22);
MBB.insert(MBBI, MI); // then add (subtract) it to r12 (stack ptr)
}
-
+
// now if we need to, save the old FP and set the new
if (FP) {
MI = BuildMI(IA64::ST8, 2).addReg(IA64::r12).addReg(IA64::r15);
// this must be the last instr in the prolog ? (XXX: why??)
MI = BuildMI(IA64::MOV, 1, IA64::r15).addReg(IA64::r12);
MBB.insert(MBBI, MI);
- }
+ }
}
MBB.insert(MBBI, MI);
}
- if (NumBytes != 0)
+ if (NumBytes != 0)
{
if (NumBytes <= 8191) {
MI=BuildMI(IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12).addImm(NumBytes);
//===- IA64RegisterInfo.h - IA64 Register Information Impl ------*- C++ -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by Duraid Madina and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file contains the IA64 implementation of the MRegisterInfo class.
void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIndex) const;
-
+
void copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SrcReg,
//===-- IA64TargetMachine.cpp - Define TargetMachine for IA64 -------------===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by Duraid Madina and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
-//
+//
// This file defines the IA64 specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//
//===-- IA64TargetMachine.h - Define TargetMachine for IA64 ---*- C++ -*---===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file was developed by Duraid Madina and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
-//
+//
// This file declares the IA64 specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//