/// to which the flag operand points. Otherwise return NULL.
SDNode *getFlaggedNode() const {
if (getNumOperands() != 0 &&
- getOperand(getNumOperands()-1).getValueType().getSimpleVT() == MVT::Flag)
+ getOperand(getNumOperands()-1).getValueType() == MVT::Flag)
return getOperand(getNumOperands()-1).getNode();
return 0;
}
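
Every hunk in this patch leans on the same enabling change: MVT and EVT grow direct comparison operators, and both stay constructible from MVT::SimpleValueType, so callers can drop the .getSimpleVT().SimpleTy boilerplate. The authoritative declarations live in include/llvm/CodeGen/ValueTypes.h; the following is only a minimal sketch of the shape of those overloads, with the enumerator list trimmed for illustration:

    // Sketch only: approximates the operators this patch presupposes;
    // see ValueTypes.h for the real declarations and the full enum.
    struct MVT {
      enum SimpleValueType { Other, i32, i64, f32, f64, Flag, LAST_VALUETYPE };
      SimpleValueType SimpleTy;

      MVT(SimpleValueType SVT) : SimpleTy(SVT) {} // implicit, so VT < MVT::LAST_VALUETYPE works

      bool operator==(const MVT &S) const { return SimpleTy == S.SimpleTy; }
      bool operator!=(const MVT &S) const { return SimpleTy != S.SimpleTy; }
      bool operator<(const MVT &S)  const { return SimpleTy <  S.SimpleTy; }
    };

    struct EVT {
      MVT V;                                    // extended (non-simple) types elided here
      EVT(MVT M) : V(M) {}                      // implicit
      EVT(MVT::SimpleValueType SVT) : V(SVT) {} // so an EVT compares against MVT::f64 etc.
      bool operator==(EVT VT) const { return V == VT.V; }
      bool operator!=(EVT VT) const { return V != VT.V; }
      MVT getSimpleVT() const { return V; }
    };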
/// for it.
LegalizeAction getLoadExtAction(unsigned ExtType, EVT VT) const {
assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
- (unsigned)VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+ VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
return (LegalizeAction)LoadExtActions[VT.getSimpleVT().SimpleTy][ExtType];
}
/// to be expanded to some other code sequence, or the target has a custom
/// expander for it.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
- assert((unsigned)ValVT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
- (unsigned)MemVT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(ValVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
+ MemVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
return (LegalizeAction)TruncStoreActions[ValVT.getSimpleVT().SimpleTy]
[MemVT.getSimpleVT().SimpleTy];
/// for it.
LegalizeAction
getIndexedLoadAction(unsigned IdxMode, EVT VT) const {
- assert( IdxMode < ISD::LAST_INDEXED_MODE &&
- ((unsigned)VT.getSimpleVT().SimpleTy) < MVT::LAST_VALUETYPE &&
+ assert(IdxMode < ISD::LAST_INDEXED_MODE &&
+ VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
/// for it.
LegalizeAction
getIndexedStoreAction(unsigned IdxMode, EVT VT) const {
- assert( IdxMode < ISD::LAST_INDEXED_MODE &&
- ((unsigned)VT.getSimpleVT().SimpleTy) < MVT::LAST_VALUETYPE &&
+ assert(IdxMode < ISD::LAST_INDEXED_MODE &&
+ VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
/// not work with the specified type and indicate what to do about it.
void setLoadExtAction(unsigned ExtType, MVT VT,
LegalizeAction Action) {
- assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
- (unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
}
/// not work with the specified type and indicate what to do about it.
void setTruncStoreAction(MVT ValVT, MVT MemVT,
LegalizeAction Action) {
- assert((unsigned)ValVT.SimpleTy < MVT::LAST_VALUETYPE &&
- (unsigned)MemVT.SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
}
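
With MVT directly comparable, the setters above keep their usual call pattern in a target's TargetLowering constructor. Typical registrations look like this (illustrative value types and actions, not part of this patch):

    // A target with no extending f32 loads and no truncating f64->f32 stores:
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);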
/// TargetLowering.cpp
void setIndexedLoadAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
- assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
- IdxMode < ISD::LAST_INDEXED_MODE &&
- (unsigned)Action < 0xf &&
- "Table isn't big enough!");
+ assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
+ (unsigned)Action < 0xf && "Table isn't big enough!");
// Load actions are kept in the upper half.
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) << 4;
/// TargetLowering.cpp
void setIndexedStoreAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
- assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
- IdxMode < ISD::LAST_INDEXED_MODE &&
- (unsigned)Action < 0xf &&
- "Table isn't big enough!");
+ assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
+ (unsigned)Action < 0xf && "Table isn't big enough!");
// Store actions are kept in the lower half.
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
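
The load and store actions for a given (VT, IdxMode) pair share one uint8_t: the load action occupies the upper nibble and the store action the lower one, which is why the getters earlier mask with 0xf0 and 0x0f. The round trip in isolation (hypothetical action values, for illustration only):

    // Pack a load action (upper nibble) and a store action (lower nibble)
    // into one byte, then unpack them with the same masks as above.
    uint8_t LoadAct = 2, StoreAct = 3;   // hypothetical LegalizeAction values < 0xf
    uint8_t Entry = (uint8_t)((LoadAct << 4) | StoreAct);
    assert(((Entry & 0xf0) >> 4) == LoadAct && (Entry & 0x0f) == StoreAct);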
/// supported on the target and indicate what to do about it.
void setCondCodeAction(ISD::CondCode CC, MVT VT,
LegalizeAction Action) {
- assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(VT < MVT::LAST_VALUETYPE &&
(unsigned)CC < array_lengthof(CondCodeActions) &&
"Table isn't big enough!");
CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.SimpleTy*2);
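
Each CondCodeActions entry packs one 2-bit action per value type into a uint64_t, so the &= above clears the two bits at position VT.SimpleTy*2 before the matching |= (not shown in this hunk) installs the new action. The clear-then-set pattern in isolation (illustrative values):

    uint64_t Word = 0;                  // stands in for CondCodeActions[CC]
    unsigned Idx = 5;                   // stands in for VT.SimpleTy
    unsigned Action = 2;                // a 2-bit LegalizeAction encoding
    Word &= ~(uint64_t(3UL) << Idx*2);  // clear the old 2-bit field
    Word |= uint64_t(Action) << Idx*2;  // install the new one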
/// hasType - return true if this TargetRegisterClass has the ValueType vt.
///
bool hasType(EVT vt) const {
- for(int i = 0; VTs[i].getSimpleVT().SimpleTy != MVT::Other; ++i)
+ for (int i = 0; VTs[i] != MVT::Other; ++i)
if (VTs[i] == vt)
return true;
return false;
vt_iterator vt_end() const {
vt_iterator I = VTs;
- while (I->getSimpleVT().SimpleTy != MVT::Other) ++I;
+ while (*I != MVT::Other) ++I;
return I;
}
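
For reference, a typical traversal of this null-terminated value-type list, using the class's own vt_begin/vt_end (sketch with a placeholder body):

    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I)
      if (*I == MVT::i32) {
        // ... use the fact that RC can hold i32 values ...
      }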
if (Str.empty()) {
if (VT.isInteger())
return DAG.getConstant(0, VT);
- else if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
- VT.getSimpleVT().SimpleTy == MVT::f64)
+ else if (VT == MVT::f32 || VT == MVT::f64)
return DAG.getConstantFP(0.0, VT);
else if (VT.isVector()) {
unsigned NumElts = VT.getVectorNumElements();
sys::SmartScopedLock<true> Lock(*VTMutex);
return &(*EVTs->insert(VT).first);
} else {
- assert(VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Value type out of range!");
return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
}
}
case OPC_SwitchType: {
- MVT::SimpleValueType CurNodeVT = N.getValueType().getSimpleVT().SimpleTy;
+ MVT CurNodeVT = N.getValueType().getSimpleVT();
unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
unsigned CaseSize;
while (1) {
CaseSize = MatcherTable[MatcherIndex++];
if (CaseSize & 128)
  CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
if (CaseSize == 0) break;
- MVT::SimpleValueType CaseVT =
- (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
+ MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (CaseVT == MVT::iPTR)
- CaseVT = TLI.getPointerTy().SimpleTy;
+ CaseVT = TLI.getPointerTy();
// If the VT matches, then we will execute this case.
if (CurNodeVT == CaseVT)
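
The case sizes consumed above are VBR-encoded in the matcher table: seven payload bits per byte, with the high bit set on every byte except the last. A hedged sketch of that decoding (the real helper is GetVBR in SelectionDAGISel.cpp; this rendition is illustrative):

    // Assumes Val already holds the first byte (continuation bit set)
    // and Idx points at the byte after it.
    static uint64_t DecodeVBR(uint64_t Val, const unsigned char *Table,
                              unsigned &Idx) {
      assert(Val >= 128 && "not a VBR-encoded value");
      Val &= 127;                       // strip the continuation bit
      unsigned Shift = 7;
      uint64_t Next;
      do {
        Next = Table[Idx++];
        Val |= (Next & 127) << Shift;   // accumulate 7 bits per byte
        Shift += 7;
      } while (Next & 128);             // high bit set => more bytes follow
      return Val;
    }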
// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
- if (VT.getSimpleVT().SimpleTy == MVT::f64) return 0;
+ if (VT == MVT::f64) return 0;
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
}
unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
- if (VT.getSimpleVT().SimpleTy == MVT::i64) return 0;
+ if (VT == MVT::i64) return 0;
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
const APFloat Val = CFP->getValueAPF();
- bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64;
+ bool is64bit = VT == MVT::f64;
// This checks to see if we can use VFP3 instructions to materialize
// a constant, otherwise we have to go through the constant pool.
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
// For now 32-bit only.
- if (VT.getSimpleVT().SimpleTy != MVT::i32) return false;
+ if (VT != MVT::i32) return 0;
// MachineConstantPool wants an explicit alignment.
unsigned Align = TD.getPrefTypeAlignment(C->getType());
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
// For now 32-bit only.
- if (VT.getSimpleVT().SimpleTy != MVT::i32) return 0;
+ if (VT != MVT::i32) return 0;
Reloc::Model RelocM = TM.getRelocationModel();
if (Op2 == 0) return false;
unsigned Opc;
- bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64 ||
- VT.getSimpleVT().SimpleTy == MVT::i64;
+ bool is64bit = VT == MVT::f64 || VT == MVT::i64;
switch (ISDOpcode) {
default: return false;
case ISD::FADD:
EVT VecVT = N->getValueType(0);
EVT EltVT = VecVT.getVectorElementType();
unsigned NumElts = VecVT.getVectorNumElements();
- if (EltVT.getSimpleVT() == MVT::f64) {
+ if (EltVT == MVT::f64) {
assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
return PairDRegs(VecVT, N->getOperand(0), N->getOperand(1));
}
- assert(EltVT.getSimpleVT() == MVT::f32 &&
- "unexpected type for BUILD_VECTOR");
+ assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
if (NumElts == 2)
return PairSRegs(VecVT, N->getOperand(0), N->getOperand(1));
assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
} else if ((N0->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N0)) &&
(N1->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N1))) {
NewOpc = ARMISD::VMULLu;
- } else if (VT.getSimpleVT().SimpleTy == MVT::v2i64) {
+ } else if (VT == MVT::v2i64) {
// Fall through to expand this. It is not legal.
return SDValue();
} else {
SDValue Op0 = Op.getOperand(0);
EVT Op0VT = Op0.getValueType();
- if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
+ if (Op0VT == MVT::i128 && simpleVT == MVT::i64) {
// Create shuffle mask, least significant doubleword of quadword
unsigned maskHigh = 0x08090a0b;
unsigned maskLow = 0x0c0d0e0f;
// movssrr and movsdrr do not clear top bits. Try to use movd, movq
// instead.
MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
- if ((ExtVT.SimpleTy != MVT::i64 || Subtarget->is64Bit()) &&
+ if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {