const AArch64Subtarget *Subtarget;
LLVMContext *Context;
- bool FastLowerArguments() override;
- bool FastLowerCall(CallLoweringInfo &CLI) override;
- bool FastLowerIntrinsicCall(const IntrinsicInst *II) override;
+ bool fastLowerArguments() override;
+ bool fastLowerCall(CallLoweringInfo &CLI) override;
+ bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
private:
// Selection routines.
public:
// Backend specific FastISel code.
- unsigned TargetMaterializeAlloca(const AllocaInst *AI) override;
- unsigned TargetMaterializeConstant(const Constant *C) override;
- unsigned TargetMaterializeFloatZero(const ConstantFP* CF) override;
+ unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
+ unsigned fastMaterializeConstant(const Constant *C) override;
+ unsigned fastMaterializeFloatZero(const ConstantFP* CF) override;
explicit AArch64FastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo)
Context = &FuncInfo.Fn->getContext();
}
- bool TargetSelectInstruction(const Instruction *I) override;
+ bool fastSelectInstruction(const Instruction *I) override;
#include "AArch64GenFastISel.inc"
};
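// For context (not part of this patch): the overrides above line up with the
// renamed hooks on the FastISel base class. A rough sketch of those
// declarations, assuming the post-rename include/llvm/CodeGen/FastISel.h
// (check the header for exact signatures):
//
//   virtual bool fastSelectInstruction(const Instruction *I) = 0;
//   virtual bool fastLowerArguments();
//   virtual bool fastLowerCall(CallLoweringInfo &CLI);
//   virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II);
//   virtual unsigned fastMaterializeAlloca(const AllocaInst *AI);
//   virtual unsigned fastMaterializeConstant(const Constant *C);
//   virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF);
//   void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs = 1);
//   void fastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);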
return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS;
}
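// The Darwin PCS chosen above deviates from the base AAPCS64 convention
// mainly in stack-argument packing and in how variadic arguments are passed.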
-unsigned AArch64FastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
+unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
assert(TLI.getValueType(AI->getType(), true) == MVT::i64 &&
"Alloca should always return a pointer.");
// Positive zero (+0.0) has to be materialized with an fmov from the zero
// register, because the immediate version of fmov cannot encode zero.
if (CFP->isNullValue())
- return TargetMaterializeFloatZero(CFP);
+ return fastMaterializeFloatZero(CFP);
if (VT != MVT::f32 && VT != MVT::f64)
return 0;
return ResultReg;
}
-unsigned AArch64FastISel::TargetMaterializeConstant(const Constant *C) {
+unsigned AArch64FastISel::fastMaterializeConstant(const Constant *C) {
EVT CEVT = TLI.getValueType(C->getType(), true);
// Only handle simple types.
return 0;
}
-unsigned AArch64FastISel::TargetMaterializeFloatZero(const ConstantFP* CFP) {
+unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP* CFP) {
assert(CFP->isNullValue() &&
"Floating-point constant is not a positive zero.");
MVT VT;
llvm_unreachable("Unexpected instruction.");
assert(ResultReg && "Couldn't select Add/Sub instruction.");
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
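// updateValueMap records the virtual register that now holds this
// instruction's result, so later getRegForValue() lookups reuse it instead
// of reselecting the instruction.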
if (!EmitLoad(VT, ResultReg, Addr, createMachineMemOperandFor(I)))
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
const BranchInst *BI = cast<BranchInst>(I);
if (BI->isUnconditional()) {
MachineBasicBlock *MSucc = FuncInfo.MBBMap[BI->getSuccessor(0)];
- FastEmitBranch(MSucc, BI->getDebugLoc());
+ fastEmitBranch(MSucc, BI->getDebugLoc());
return true;
}
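// fastEmitBranch emits an unconditional branch to MSucc unless it is the
// fall-through block, and records the successor edge on the current MBB.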
TBB->getBasicBlock());
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
- FastEmitBranch(FBB, DbgLoc);
+ fastEmitBranch(FBB, DbgLoc);
return true;
}
} else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
TBB->getBasicBlock());
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
- FastEmitBranch(FBB, DbgLoc);
+ fastEmitBranch(FBB, DbgLoc);
return true;
}
} else if (const ConstantInt *CI =
TBB->getBasicBlock());
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
- FastEmitBranch(FBB, DbgLoc);
+ fastEmitBranch(FBB, DbgLoc);
return true;
}
TBB->getBasicBlock());
FuncInfo.MBB->addSuccessor(TBB, BranchWeight);
- FastEmitBranch(FBB, DbgLoc);
+ fastEmitBranch(FBB, DbgLoc);
return true;
}
.addReg(AArch64::WZR)
.addImm(invertedCC);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
unsigned ResultReg = FastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
FalseReg, FalseIsKill, CC);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
ResultReg).addReg(Op);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
ResultReg).addReg(Op);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
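// FCVTDSr widens the f32 operand to f64 (FPExt); FCVTSDr narrows f64 to f32
// (FPTrunc). Both write into freshly created FPR result registers.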
DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(SrcReg);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
unsigned ResultReg = FastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
SrcIsKill);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::FastLowerArguments() {
+bool AArch64FastISel::fastLowerArguments() {
if (!FuncInfo.CanLowerReturn)
return false;
// Skip unused arguments.
if (Arg.use_empty()) {
- UpdateValueMap(&Arg, 0);
+ updateValueMap(&Arg, 0);
continue;
}
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(DstReg, getKillRegState(true));
- UpdateValueMap(&Arg, ResultReg);
+ updateValueMap(&Arg, ResultReg);
}
return true;
}
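// fastLowerArguments handles the simple case of formal arguments that fit
// entirely in registers by copying each incoming physreg into a vreg;
// returning false makes the caller fall back to SelectionDAG for argument
// lowering.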
return true;
}
-bool AArch64FastISel::FastLowerCall(CallLoweringInfo &CLI) {
+bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
CallingConv::ID CC = CLI.CallConv;
bool IsTailCall = CLI.IsTailCall;
bool IsVarArg = CLI.IsVarArg;
return true;
}
-bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
+bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// FIXME: Handle more intrinsics.
switch (II->getIntrinsicID()) {
default: return false;
SrcReg = DestReg;
}
- UpdateValueMap(II, SrcReg);
+ updateValueMap(II, SrcReg);
return true;
}
case Intrinsic::memcpy:
return false;
const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
- return LowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
+ return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
}
case Intrinsic::memset: {
const MemSetInst *MSI = cast<MemSetInst>(II);
// address spaces.
return false;
- return LowerCallTo(II, "memset", II->getNumArgOperands() - 2);
+ return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
}
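// Both cases lower the intrinsic as a plain libcall via lowerCallTo;
// "getNumArgOperands() - 2" drops the trailing alignment and isvolatile
// operands so that only the pointer/value/length arguments are forwarded.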
case Intrinsic::trap: {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK))
if (!ResultReg)
return false;
- UpdateValueMap(II, ResultReg);
+ updateValueMap(II, ResultReg);
return true;
}
case Intrinsic::sadd_with_overflow:
/*IsKill=*/true, getInvertedCondCode(CC));
assert((ResultReg1 + 1) == ResultReg2 &&
"Nonconsecutive result registers.");
- UpdateValueMap(II, ResultReg1, 2);
+ updateValueMap(II, ResultReg1, 2);
return true;
}
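// The *.with.overflow intrinsics yield a {result, overflow} pair, so the
// value map entry spans two consecutive result registers (NumRegs == 2).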
}
.addReg(SrcReg, getKillRegState(SrcIsKill));
}
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
if (!ResultReg)
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
unsigned ResultReg = FastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
Src1Reg, Src1IsKill, Src0Reg,
Src0IsKill);
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
if (!ResultReg)
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
if (!ResultReg)
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
if (!ResultReg)
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
if (!ResultReg)
return false;
- UpdateValueMap(I, ResultReg);
+ updateValueMap(I, ResultReg);
return true;
}
-bool AArch64FastISel::TargetSelectInstruction(const Instruction *I) {
+bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
switch (I->getOpcode()) {
default:
return false;
unsigned Reg = getRegForValue(I->getOperand(0));
if (!Reg)
return false;
- UpdateValueMap(I, Reg);
+ updateValueMap(I, Reg);
return true;
}
case Instruction::ExtractValue: