/// \brief This method is called by target-independent code to request that an
/// instruction with the given type and opcode be emitted.
- virtual unsigned FastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
+ virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register operand be emitted.
- virtual unsigned FastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+ virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register operands be emitted.
- virtual unsigned FastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+ virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register and immediate
/// operands be emitted.
- virtual unsigned FastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+ virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, uint64_t Imm);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register and floating-point
/// immediate operands be emitted.
- virtual unsigned FastEmit_rf(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+ virtual unsigned fastEmit_rf(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, const ConstantFP *FPImm);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register and immediate
/// operands be emitted.
- virtual unsigned FastEmit_rri(MVT VT, MVT RetVT, unsigned Opcode,
+ virtual unsigned fastEmit_rri(MVT VT, MVT RetVT, unsigned Opcode,
unsigned Op0, bool Op0IsKill, unsigned Op1,
bool Op1IsKill, uint64_t Imm);
- /// \brief This method is a wrapper of FastEmit_ri.
+ /// \brief This method is a wrapper of fastEmit_ri.
///
/// It first tries to emit an instruction with an immediate operand using
- /// FastEmit_ri. If that fails, it materializes the immediate into a register
- /// and try FastEmit_rr instead.
- unsigned FastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
+ /// fastEmit_ri. If that fails, it materializes the immediate into a register
+ /// and tries fastEmit_rr instead.
+ unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
uint64_t Imm, MVT ImmType);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and immediate operand be emitted.
- virtual unsigned FastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);
+ virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);
/// \brief This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and floating-point immediate
/// operand be emitted.
- virtual unsigned FastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
+ virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
const ConstantFP *FPImm);
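  // Illustrative sketch (not part of this change): a target's implementation
  // of one of the fastEmit_* hooks above (in practice usually generated by
  // TableGen) typically dispatches on the ISD opcode and value type, then
  // forwards to a fastEmitInst_* helper with a concrete machine opcode.
  // "MyTargetFastISel", "MyTarget::ADDWrr", and the register class below are
  // hypothetical placeholders, not real LLVM identifiers.
  //
  //   unsigned MyTargetFastISel::fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode,
  //                                          unsigned Op0, bool Op0IsKill,
  //                                          unsigned Op1, bool Op1IsKill) {
  //     if (Opcode == ISD::ADD && VT == MVT::i32 && RetVT == MVT::i32)
  //       return fastEmitInst_rr(MyTarget::ADDWrr, &MyTarget::GPR32RegClass,
  //                              Op0, Op0IsKill, Op1, Op1IsKill);
  //     return 0; // Unhandled; selection falls back to SelectionDAG.
  //   }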
/// \brief Emit a MachineInstr with no operands and a result register in the
/// given register class.
- unsigned FastEmitInst_(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass *RC);
/// \brief Emit a MachineInstr with one register operand and a result register
/// in the given register class.
- unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill);
/// \brief Emit a MachineInstr with two register operands and a result
/// register in the given register class.
- unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill);
/// \brief Emit a MachineInstr with three register operands and a result
/// register in the given register class.
- unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill,
unsigned Op2, bool Op2IsKill);
/// \brief Emit a MachineInstr with a register operand, an immediate, and a
/// result register in the given register class.
- unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm);
/// \brief Emit a MachineInstr with one register operand and two immediate
/// operands.
- unsigned FastEmitInst_rii(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
/// \brief Emit a MachineInstr with a register operand, a floating-point
/// immediate, and a result register in the given register class.
- unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rf(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, const ConstantFP *FPImm);
/// \brief Emit a MachineInstr with two register operands, an immediate, and a
/// result register in the given register class.
- unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill,
uint64_t Imm);
/// \brief Emit a MachineInstr with two register operands, two immediate
/// operands, and a result register in the given register class.
- unsigned FastEmitInst_rrii(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rrii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill,
uint64_t Imm1, uint64_t Imm2);
/// \brief Emit a MachineInstr with a single immediate operand and a result
/// register in the given register class.
- unsigned FastEmitInst_i(unsigned MachineInstrOpcode,
+ unsigned fastEmitInst_i(unsigned MachineInstrOpcode,
const TargetRegisterClass *RC, uint64_t Imm);
/// \brief Emit a MachineInstr with two immediate operands.
- unsigned FastEmitInst_ii(unsigned MachineInstrOpcode,
+ unsigned fastEmitInst_ii(unsigned MachineInstrOpcode,
const TargetRegisterClass *RC, uint64_t Imm1,
uint64_t Imm2);
/// \brief Emit a MachineInstr for an extract_subreg from a specified index of
/// a superregister to a specified type.
- unsigned FastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
+ unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
uint32_t Idx);
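  // Illustrative sketch (not part of this change): once target code has
  // chosen a concrete machine opcode, it calls one of the fastEmitInst_*
  // helpers above; the helper allocates a result register in the given
  // register class, builds the MachineInstr at the current insertion point,
  // and returns the result register. "MyTargetFastISel" and "MyTarget::ADDri"
  // are hypothetical placeholders.
  //
  //   unsigned MyTargetFastISel::emitAddImm(unsigned SrcReg, bool SrcIsKill,
  //                                         uint64_t Imm) {
  //     return fastEmitInst_ri(MyTarget::ADDri, &MyTarget::GPR32RegClass,
  //                            SrcReg, SrcIsKill, Imm);
  //   }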
/// \brief Emit MachineInstrs to compute the value of Op with all but the
unsigned Reg = 0;
if (const auto *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getValue().getActiveBits() <= 64)
- Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+ Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
} else if (isa<AllocaInst>(V))
Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
else if (isa<ConstantPointerNull>(V))
Reg = fastMaterializeFloatZero(CF);
else
// Try to emit the constant directly.
- Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
+ Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
if (!Reg) {
// Try to emit the constant by using an integer constant with a cast.
unsigned IntegerReg =
getRegForValue(ConstantInt::get(V->getContext(), IntVal));
if (IntegerReg != 0)
- Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
+ Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
/*Kill=*/false);
}
}
MVT PtrVT = TLI.getPointerTy();
EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
if (IdxVT.bitsLT(PtrVT)) {
- IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
+ IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
IdxNIsKill);
IdxNIsKill = true;
} else if (IdxVT.bitsGT(PtrVT)) {
IdxN =
- FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
+ fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
IdxNIsKill = true;
}
return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
bool Op1IsKill = hasTrivialKill(I->getOperand(1));
unsigned ResultReg =
- FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
+ fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
CI->getZExtValue(), VT.getSimpleVT());
if (!ResultReg)
return false;
ISDOpcode = ISD::AND;
}
- unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
+ unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
Op0IsKill, Imm, VT.getSimpleVT());
if (!ResultReg)
return false;
// Check if the second operand is a constant float.
if (const auto *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
- unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
+ unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
ISDOpcode, Op0, Op0IsKill, CF);
if (ResultReg) {
// We successfully emitted code for the given LLVM Instruction.
bool Op1IsKill = hasTrivialKill(I->getOperand(1));
// Now we have both operands in registers. Emit the instruction.
- unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
+ unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
if (!ResultReg)
// Target-specific code wasn't able to find a machine opcode for
// N = N + Offset
TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
if (TotalOffs >= MaxOffs) {
- N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+ N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
NIsKill = true;
TotalOffs +=
DL.getTypeAllocSize(Ty) * cast<ConstantInt>(CI)->getSExtValue();
if (TotalOffs >= MaxOffs) {
- N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+ N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
NIsKill = true;
continue;
}
if (TotalOffs) {
- N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+ N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
NIsKill = true;
return false;
if (ElementSize != 1) {
- IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
+ IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
return false;
IdxNIsKill = true;
}
- N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
+ N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
}
}
if (TotalOffs) {
- N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+ N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (!N) // Unhandled operand. Halt "fast" selection and bail.
return false;
}
bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
- unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
+ unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
Opcode, InputReg, InputRegIsKill);
if (!ResultReg)
return false;
// If the reg-reg copy failed, select a BITCAST opcode.
if (!ResultReg)
- ResultReg = FastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
+ ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
if (!ResultReg)
return false;
// If the target has ISD::FNEG, use it.
EVT VT = TLI.getValueType(I->getType());
- unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
+ unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
OpReg, OpRegIsKill);
if (ResultReg) {
updateValueMap(I, ResultReg);
if (!TLI.isTypeLegal(IntVT))
return false;
- unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
+ unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
ISD::BITCAST, OpReg, OpRegIsKill);
if (!IntReg)
return false;
- unsigned IntResultReg = FastEmit_ri_(
+ unsigned IntResultReg = fastEmit_ri_(
IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
if (!IntResultReg)
return false;
- ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
+ ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
IntResultReg, /*IsKill=*/true);
if (!ResultReg)
return false;
case Instruction::Unreachable:
if (TM.Options.TrapUnreachable)
- return FastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
+ return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
else
return true;
return false;
}
-unsigned FastISel::FastEmit_(MVT, MVT, unsigned) { return 0; }
+unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
-unsigned FastISel::FastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
+unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/) {
return 0;
}
-unsigned FastISel::FastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
+unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/, unsigned /*Op1*/,
bool /*Op1IsKill*/) {
return 0;
}
-unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
+unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
return 0;
}
-unsigned FastISel::FastEmit_f(MVT, MVT, unsigned,
+unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
const ConstantFP * /*FPImm*/) {
return 0;
}
-unsigned FastISel::FastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
+unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/, uint64_t /*Imm*/) {
return 0;
}
-unsigned FastISel::FastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
+unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/,
const ConstantFP * /*FPImm*/) {
return 0;
}
-unsigned FastISel::FastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
+unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
bool /*Op0IsKill*/, unsigned /*Op1*/,
bool /*Op1IsKill*/, uint64_t /*Imm*/) {
return 0;
}
-/// This method is a wrapper of FastEmit_ri. It first tries to emit an
-/// instruction with an immediate operand using FastEmit_ri.
+/// This method is a wrapper of fastEmit_ri. It first tries to emit an
+/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
-/// FastEmit_rr instead.
-unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
+/// fastEmit_rr instead.
+unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, uint64_t Imm, MVT ImmType) {
// If this is a multiply by a power of two, emit this as a shift left.
if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
return 0;
// First check if immediate type is legal. If not, we can't use the ri form.
- unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
+ unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
if (ResultReg)
return ResultReg;
- unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
+ unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
if (!MaterialReg) {
// This is a bit ugly/slow, but failing here means falling out of
// fast-isel, which would be very slow.
if (!MaterialReg)
return 0;
}
- return FastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg,
+ return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg,
/*IsKill=*/true);
}
return Op;
}
-unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass *RC) {
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
return ResultReg;
}
-unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
return ResultReg;
}
-unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1,
bool Op1IsKill) {
return ResultReg;
}
-unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1,
bool Op1IsKill, unsigned Op2,
return ResultReg;
}
-unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
return ResultReg;
}
-unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm1,
uint64_t Imm2) {
return ResultReg;
}
-unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rf(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, const ConstantFP *FPImm) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
return ResultReg;
}
-unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1,
bool Op1IsKill, uint64_t Imm) {
return ResultReg;
}
-unsigned FastISel::FastEmitInst_rrii(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_rrii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, unsigned Op1,
bool Op1IsKill, uint64_t Imm1,
return ResultReg;
}
-unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
return ResultReg;
}
-unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
+unsigned FastISel::fastEmitInst_ii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, uint64_t Imm1,
uint64_t Imm2) {
unsigned ResultReg = createResultReg(RC);
return ResultReg;
}
-unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
+unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
bool Op0IsKill, uint32_t Idx) {
unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
- return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
+ return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
return 0;
if (!CI->isZero())
- return FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+ return fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
// Create a copy from the zero register to materialize a "0" value.
const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass
Is64Bit ? AArch64_AM::getFP64Imm(Val) : AArch64_AM::getFP32Imm(Val);
assert((Imm != -1) && "Cannot encode floating-point constant.");
unsigned Opc = Is64Bit ? AArch64::FMOVDi : AArch64::FMOVSi;
- return FastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
+ return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}
// Materialize via constant pool. MachineConstantPool wants an explicit
bool Is64Bit = (VT == MVT::f64);
unsigned ZReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
unsigned Opc = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr;
- return FastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
+ return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
}
// Computes the address to get to an object.
if (ImmediateOffsetNeedsLowering) {
unsigned ResultReg = 0;
if (Addr.getReg())
- ResultReg = FastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(),
+ ResultReg = fastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(),
/*IsKill=*/false, Offset, MVT::i64);
else
- ResultReg = FastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
+ ResultReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
if (!ResultReg)
return false;
if (!AArch64_AM::isLogicalImmediate(Imm, RegSize))
return 0;
- return FastEmitInst_ri(Opc, RC, LHSReg, LHSIsKill,
+ return fastEmitInst_ri(Opc, RC, LHSReg, LHSIsKill,
AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
}
// Issue an extract_subreg to get the lower 32-bits.
if (SrcVT == MVT::i64) {
- CondReg = FastEmitInst_extractsubreg(MVT::i32, CondReg, CondIsKill,
+ CondReg = fastEmitInst_extractsubreg(MVT::i32, CondReg, CondIsKill,
AArch64::sub_32);
CondIsKill = true;
}
if (!TrueReg || !FalseReg)
return false;
- unsigned ResultReg = FastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
+ unsigned ResultReg = fastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
FalseReg, FalseIsKill, CC);
updateValueMap(I, ResultReg);
return true;
Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
}
- unsigned ResultReg = FastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
+ unsigned ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
SrcIsKill);
updateValueMap(I, ResultReg);
return true;
unsigned DestReg;
unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
while (Depth--) {
- DestReg = FastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
+ DestReg = fastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
SrcReg, /*IsKill=*/true, 0);
assert(DestReg && "Unexpected LDR instruction emission failure.");
SrcReg = DestReg;
return false;
bool Op0IsKill = hasTrivialKill(II->getOperand(0));
- unsigned ResultReg = FastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);
+ unsigned ResultReg = fastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);
if (!ResultReg)
return false;
MulReg = Emit_SMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
unsigned ShiftReg = emitLSR_ri(MVT::i64, MVT::i64, MulReg,
/*IsKill=*/false, 32);
- MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
+ MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
AArch64::sub_32);
- ShiftReg = FastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
+ ShiftReg = fastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
AArch64::sub_32);
emitSubs_rs(VT, ShiftReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
AArch64_AM::ASR, 31, /*WantResult=*/false);
} else {
assert(VT == MVT::i64 && "Unexpected value type.");
MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
- unsigned SMULHReg = FastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
+ unsigned SMULHReg = fastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
emitSubs_rs(VT, SMULHReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
AArch64_AM::ASR, 63, /*WantResult=*/false);
emitSubs_rs(MVT::i64, AArch64::XZR, /*IsKill=*/true, MulReg,
/*IsKill=*/false, AArch64_AM::LSR, 32,
/*WantResult=*/false);
- MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
+ MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
AArch64::sub_32);
} else {
assert(VT == MVT::i64 && "Unexpected value type.");
MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
- unsigned UMULHReg = FastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
+ unsigned UMULHReg = fastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
emitSubs_rr(VT, AArch64::XZR, /*IsKill=*/true, UMULHReg,
/*IsKill=*/false, /*WantResult=*/false);
TII.get(TargetOpcode::COPY), ResultReg1).addReg(MulReg);
}
- ResultReg2 = FastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
+ ResultReg2 = fastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
AArch64::WZR, /*IsKill=*/true, AArch64::WZR,
/*IsKill=*/true, getInvertedCondCode(CC));
assert((ResultReg1 + 1) == ResultReg2 &&
break;
}
// Issue an extract_subreg to get the lower 32-bits.
- unsigned Reg32 = FastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
+ unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
AArch64::sub_32);
// Create the AND instruction which performs the actual truncation.
ResultReg = emitAND_ri(MVT::i32, Reg32, /*IsKill=*/true, Mask);
// FIXME: We're SExt i1 to i64.
return 0;
}
- return FastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
+ return fastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
/*TODO:IsKill=*/false, 0, 0);
}
}
const TargetRegisterClass *RC =
(RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- return FastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
+ return fastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
                          ZReg, /*IsKill=*/true);
}
if (RetVT != MVT::i64)
return 0;
- return FastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
+ return fastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
Op0, Op0IsKill, Op1, Op1IsKill,
AArch64::XZR, /*IsKill=*/true);
}
if (RetVT != MVT::i64)
return 0;
- return FastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
+ return fastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
Op0, Op0IsKill, Op1, Op1IsKill,
AArch64::XZR, /*IsKill=*/true);
}
Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
Op1IsKill = true;
}
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
Op1IsKill);
if (NeedTrunc)
ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
Op0 = TmpReg;
Op0IsKill = true;
}
- return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+ return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
Op0IsKill = Op1IsKill = true;
}
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
Op1IsKill);
if (NeedTrunc)
ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
Op0 = TmpReg;
Op0IsKill = true;
}
- return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+ return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
Op0IsKill = Op1IsKill = true;
}
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
Op1IsKill);
if (NeedTrunc)
ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
Op0 = TmpReg;
Op0IsKill = true;
}
- return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+ return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- return FastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
+ return fastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
}
bool AArch64FastISel::SelectIntExt(const Instruction *I) {
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- unsigned QuotReg = FastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
+ unsigned QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
Src1Reg, /*IsKill=*/false);
assert(QuotReg && "Unexpected DIV instruction emission failure.");
// The remainder is computed as numerator - (quotient * denominator) using the
// MSUB instruction.
- unsigned ResultReg = FastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
+ unsigned ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
Src1Reg, Src1IsKill, Src0Reg,
Src0IsKill);
updateValueMap(I, ResultReg);
if (!Op0Reg)
return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
- unsigned ResultReg = FastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);
+ unsigned ResultReg = fastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);
if (!ResultReg)
return false;
return SelectIndirectBr(I);
case Instruction::Unreachable:
if (TM.Options.TrapUnreachable)
- return FastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
+ return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
else
return true;
case Instruction::Alloca:
// Code from FastISel.cpp.
private:
- unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill);
- unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill);
- unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
unsigned Op2, bool Op2IsKill);
- unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
uint64_t Imm);
- unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
uint64_t Imm);
- unsigned FastEmitInst_i(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm);
return MIB;
}
-unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill) {
unsigned ResultReg = createResultReg(RC);
return ResultReg;
}
-unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill) {
return ResultReg;
}
-unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
return ResultReg;
}
-unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
uint64_t Imm) {
return ResultReg;
}
-unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
return ResultReg;
}
-unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
unsigned ResultReg = 0;
if (Subtarget->useMovt(*FuncInfo.MF))
- ResultReg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+ ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
if (ResultReg)
return ResultReg;
// Since the offset is too large for the load/store instruction
// get the reg+offset into a register.
if (needsLowering) {
- Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
+ Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
/*Op0IsKill*/false, Addr.Offset, MVT::i32);
Addr.Offset = 0;
}
break;
}
case CCValAssign::BCvt: {
- unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
+ unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
/*TODO: Kill=*/false);
assert(BC != 0 && "Failed to emit a bitcast!");
Arg = BC;
// for some reason, this default is not generated by tablegen
// so we explicitly generate it here.
//
- unsigned FastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
+ unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, uint64_t imm1,
uint64_t imm2, unsigned Op3, bool Op3IsKill) {
return 0;
bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
const LoadInst *LI) override;
bool fastLowerArguments() override;
- unsigned FastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override;
- unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+ unsigned fastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override;
+ unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
uint64_t Imm);
- unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill);
- unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill);
// Handle materializing integer constants into a register. This is not
// automatically generated for PowerPC, so must be explicitly created here.
-unsigned PPCFastISel::FastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
+unsigned PPCFastISel::fastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
if (Opc != ISD::Constant)
return 0;
// assigning R0 or X0 to the output register for GPRC and G8RC
// register classes, as any such result could be used in ADDI, etc.,
// where those regs have another meaning.
-unsigned PPCFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
+unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
uint64_t Imm) {
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
- return FastISel::FastEmitInst_ri(MachineInstOpcode, UseRC,
+ return FastISel::fastEmitInst_ri(MachineInstOpcode, UseRC,
Op0, Op0IsKill, Imm);
}
// Override for instructions with one register operand to avoid use of
// R0/X0. The automatic infrastructure isn't aware of the context so
// we must be conservative.
-unsigned PPCFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
+unsigned PPCFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass* RC,
unsigned Op0, bool Op0IsKill) {
const TargetRegisterClass *UseRC =
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
- return FastISel::FastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill);
+ return FastISel::fastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill);
}
// Override for instructions with two register operands to avoid use
// of R0/X0. The automatic infrastructure isn't aware of the context
// so we must be conservative.
-unsigned PPCFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
+unsigned PPCFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass* RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill) {
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
- return FastISel::FastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill,
+ return FastISel::fastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill,
Op1, Op1IsKill);
}
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
unsigned Src, EVT SrcVT,
unsigned &ResultReg) {
- unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
+ unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
Src, /*TODO: Kill=*/false);
if (RR == 0)
return false;
}
unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
ISD::SIGN_EXTEND;
- SrcReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
+ SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
SrcReg, /*TODO: Kill=*/false);
}
ResultReg = createResultReg(&X86::GR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
ResultReg);
- ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
+ ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
X86::sub_8bit);
if (!ResultReg)
return false;
ResultReg)
.addImm(0).addReg(Result32).addImm(X86::sub_32bit);
} else if (DstVT != MVT::i8) {
- ResultReg = FastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
+ ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
ResultReg, /*Kill=*/true);
if (ResultReg == 0)
return false;
ResultSuperReg).addReg(SourceSuperReg).addImm(8);
// Now reference the 8-bit subreg of the result.
- ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
+ ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
/*Kill=*/true, X86::sub_8bit);
}
// Copy the result out of the physreg if we haven't already.
return false;
unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
LHSReg, LHSIsKill);
updateValueMap(I, ResultReg);
return true;
return false;
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
- unsigned CmpReg = FastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
+ unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
CmpRHSReg, CmpRHSIsKill, CC);
- unsigned AndReg = FastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
+ unsigned AndReg = fastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
LHSReg, LHSIsKill);
- unsigned AndNReg = FastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
+ unsigned AndNReg = fastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
RHSReg, RHSIsKill);
- unsigned ResultReg = FastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
+ unsigned ResultReg = fastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
AndReg, /*IsKill=*/true);
updateValueMap(I, ResultReg);
return true;
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
unsigned ResultReg =
- FastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
+ fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
updateValueMap(I, ResultReg);
return true;
}
}
// Issue an extract_subreg.
- unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
+ unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8,
InputReg, /*Kill=*/true,
X86::sub_8bit);
if (!ResultReg)
if (!isTypeLegal(RetTy, VT))
return false;
- // Unfortunately we can't use FastEmit_r, because the AVX version of FSQRT
+ // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
// is not generated by FastISel yet.
// FIXME: Update this code once tablegen can handle it.
static const unsigned SqrtOpc[2][2] = {
TII.get(Opc[Is64Bit][IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
.addReg(LHSReg, getKillRegState(LHSIsKill));
} else
- ResultReg = FastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
+ ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
CI->getZExtValue());
}
if (RHSReg == 0)
return false;
RHSIsKill = hasTrivialKill(RHS);
- ResultReg = FastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
+ ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
RHSIsKill);
}
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
.addReg(LHSReg, getKillRegState(LHSIsKill));
- ResultReg = FastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
+ ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
} else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
static const unsigned MULOpc[] =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), X86::AL)
.addReg(LHSReg, getKillRegState(LHSIsKill));
- ResultReg = FastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
+ ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
RHSIsKill);
} else
- ResultReg = FastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
+ ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
}
return false;
ResultReg =
- FastEmit_ri(ArgVT, ArgVT, ISD::AND, ResultReg, Val->hasOneUse(), 1);
+ fastEmit_ri(ArgVT, ArgVT, ISD::AND, ResultReg, Val->hasOneUse(), 1);
if (!ResultReg)
return false;
break;
}
case CCValAssign::BCvt: {
- ArgReg = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
+ ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
/*TODO: Kill=*/false);
assert(ArgReg && "Failed to emit a bitcast!");
ArgVT = VA.getLocVT();
uint64_t Imm = CI->getZExtValue();
if (Imm == 0) {
- unsigned SrcReg = FastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
+ unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type");
case MVT::i1:
case MVT::i8:
- return FastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
+ return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
X86::sub_8bit);
case MVT::i16:
- return FastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
+ return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
X86::sub_16bit);
case MVT::i32:
return SrcReg;
}
}
if (VT == MVT::i64 && Opc == X86::MOV32ri) {
- unsigned SrcReg = FastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
+ unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
unsigned ResultReg = createResultReg(&X86::GR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
.addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
return ResultReg;
}
- return FastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
+ return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}
unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
// Implicit physical register operand. e.g. Instruction::Mul expects to
// select to a binary op. On x86, mul may take a single operand with
// the other operand being implicit. We must emit something that looks
- // like a binary instruction except for the very inner FastEmitInst_*
+ // like a binary instruction except for the very inner fastEmitInst_*
// call.
continue;
Operands[i].printManglingSuffix(OS, ImmPredicates, StripImmCodes);
const PredMap &PM = RI->second;
bool HasPred = false;
- OS << "unsigned FastEmit_"
+ OS << "unsigned fastEmit_"
<< getLegalCName(Opcode)
<< "_" << getLegalCName(getName(VT))
<< "_" << getLegalCName(getName(RetVT)) << "_";
<< (*Memo.PhysRegs)[i] << ").addReg(Op" << i << ");\n";
}
- OS << " return FastEmitInst_";
+ OS << " return fastEmitInst_";
if (Memo.SubRegNo.empty()) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs,
ImmediatePredicates, true);
}
// Emit one function for the type that demultiplexes on return type.
- OS << "unsigned FastEmit_"
+ OS << "unsigned fastEmit_"
<< getLegalCName(Opcode) << "_"
<< getLegalCName(getName(VT)) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
for (RetPredMap::const_iterator RI = RM.begin(), RE = RM.end();
RI != RE; ++RI) {
MVT::SimpleValueType RetVT = RI->first;
- OS << " case " << getName(RetVT) << ": return FastEmit_"
+ OS << " case " << getName(RetVT) << ": return fastEmit_"
<< getLegalCName(Opcode) << "_" << getLegalCName(getName(VT))
<< "_" << getLegalCName(getName(RetVT)) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
} else {
// Non-variadic return type.
- OS << "unsigned FastEmit_"
+ OS << "unsigned fastEmit_"
<< getLegalCName(Opcode) << "_"
<< getLegalCName(getName(VT)) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
<< (*Memo.PhysRegs)[i] << ").addReg(Op" << i << ");\n";
}
- OS << " return FastEmitInst_";
+ OS << " return fastEmitInst_";
if (Memo.SubRegNo.empty()) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs,
}
// Emit one function for the opcode that demultiplexes based on the type.
- OS << "unsigned FastEmit_"
+ OS << "unsigned fastEmit_"
<< getLegalCName(Opcode) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(MVT VT, MVT RetVT";
TI != TE; ++TI) {
MVT::SimpleValueType VT = TI->first;
std::string TypeName = getName(VT);
- OS << " case " << TypeName << ": return FastEmit_"
+ OS << " case " << TypeName << ": return fastEmit_"
<< getLegalCName(Opcode) << "_" << getLegalCName(TypeName) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(RetVT";
// Emit one function for the operand signature that demultiplexes based
// on opcode and type.
- OS << "unsigned FastEmit_";
+ OS << "unsigned fastEmit_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(MVT VT, MVT RetVT, unsigned Opcode";
if (!Operands.empty())
for (unsigned i = 0, e = MI->second.size(); i != e; ++i) {
OS << " if (";
MI->second[i].emitImmediatePredicate(OS, ImmediatePredicates);
- OS << ")\n if (unsigned Reg = FastEmit_";
+ OS << ")\n if (unsigned Reg = fastEmit_";
MI->second[i].PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(VT, RetVT, Opcode";
if (!MI->second[i].empty())
I != E; ++I) {
const std::string &Opcode = I->first;
- OS << " case " << Opcode << ": return FastEmit_"
+ OS << " case " << Opcode << ": return fastEmit_"
<< getLegalCName(Opcode) << "_";
Operands.PrintManglingSuffix(OS, ImmediatePredicates);
OS << "(VT, RetVT";