EnumValue(Enum),
CostPerUse(R->getValueAsInt("CostPerUse")),
CoveredBySubRegs(R->getValueAsBit("CoveredBySubRegs")),
+ HasDisjunctSubRegs(false),
SubRegsComplete(false),
SuperRegsComplete(false),
TopoSig(~0u)
return SubRegs;
SubRegsComplete = true;
+ HasDisjunctSubRegs = ExplicitSubRegs.size() > 1;
+
// First insert the explicit subregs and make sure they are fully indexed.
for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
CodeGenRegister *SR = ExplicitSubRegs[i];
for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
CodeGenRegister *SR = ExplicitSubRegs[i];
const SubRegMap &Map = SR->computeSubRegs(RegBank);
+ HasDisjunctSubRegs |= SR->HasDisjunctSubRegs;
for (SubRegMap::const_iterator SI = Map.begin(), SE = Map.end(); SI != SE;
++SI) {
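Note: HasDisjunctSubRegs is seeded above from the register's own explicit subregister count (more than one explicit part) and OR'ed here with each subregister's flag, so the property propagates upward through registers whose parts themselves have disjunct pieces, e.g. register tuples.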
std::vector<Record*> Indices = Def->getValueAsListOfDefs("SubRegIndices");
unsigned Dim = Indices.size();
ListInit *SubRegs = Def->getValueAsListInit("SubRegs");
- if (Dim != SubRegs->getSize())
+ if (Dim != SubRegs->size())
PrintFatalError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch");
if (Dim < 2)
PrintFatalError(Def->getLoc(),
CopyCost = R->getValueAsInt("CopyCost");
Allocatable = R->getValueAsBit("isAllocatable");
AltOrderSelect = R->getValueAsString("AltOrderSelect");
+ int AllocationPriority = R->getValueAsInt("AllocationPriority");
+ if (AllocationPriority < 0 || AllocationPriority > 63)
+ PrintFatalError(R->getLoc(), "AllocationPriority out of range [0,63]");
+ this->AllocationPriority = AllocationPriority;
}
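The new AllocationPriority is read straight from the record and range-checked up front: values outside [0,63] abort with a fatal error rather than being silently truncated. The specific upper bound presumably reflects the width of the field the value is later emitted into; the range check itself is the only constraint visible in this hunk.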
// Create an inferred register class that was missing from the .td files.
SpillSize(Props.SpillSize),
SpillAlignment(Props.SpillAlignment),
CopyCost(0),
- Allocatable(true) {
+ Allocatable(true),
+ AllocationPriority(0) {
for (const auto R : Members)
TopoSigs.set(R->getTopoSig());
}
CopyCost = Super.CopyCost;
Allocatable = Super.Allocatable;
AltOrderSelect = Super.AltOrderSelect;
+ AllocationPriority = Super.AllocationPriority;
// Copy all allocation orders, filter out foreign registers from the larger
// super-class.
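Synthesized sub-classes copy AllocationPriority from their super-class alongside CopyCost, Allocatable and AltOrderSelect, so an inferred class allocates with the same priority as the user-defined class it was carved out of.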
// Configure register Sets to understand register classes and tuples.
Sets.addFieldExpander("RegisterClass", "MemberList");
Sets.addFieldExpander("CalleeSavedRegs", "SaveList");
- Sets.addExpander("RegisterTuples", new TupleExpander());
+ Sets.addExpander("RegisterTuples", llvm::make_unique<TupleExpander>());
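The TupleExpander is now created with llvm::make_unique, which suggests addExpander takes ownership through a std::unique_ptr rather than a raw pointer; the raw-new form above is the version being replaced.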
// Read in the user-defined (named) sub-register indices.
// More indices will be synthesized later.
// Allocate user-defined register classes.
for (auto *RC : RCs) {
- RegClasses.push_back(CodeGenRegisterClass(*this, RC));
+ RegClasses.emplace_back(*this, RC);
addToMaps(&RegClasses.back());
}
return FoundI->second;
// Sub-class doesn't exist, create a new one.
- RegClasses.push_back(CodeGenRegisterClass(*this, Name, K));
+ RegClasses.emplace_back(*this, Name, K);
addToMaps(&RegClasses.back());
return &RegClasses.back();
}
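Both call sites switch from push_back of a temporary to emplace_back, constructing the CodeGenRegisterClass directly inside the container and avoiding a copy of a fairly heavy object (and any reliance on its copy constructor).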
CoveringLanes = ~0u;
for (auto &Idx : SubRegIndices) {
if (Idx.getComposites().empty()) {
+ if (Bit >= 32) {
+ PrintFatalError(
+ Twine("Ran out of lanemask bits to represent subregister ")
+ + Idx.getName());
+ }
Idx.LaneMask = 1u << Bit;
- // Share bit 31 in the unlikely case there are more than 32 leafs.
- //
- // Sharing bits is harmless; it allows graceful degradation in targets
- // with more than 32 vector lanes. They simply get a limited resolution
- // view of lanes beyond the 32nd.
- //
- // See also the comment for getSubRegIndexLaneMask().
- if (Bit < 31)
- ++Bit;
- else
- // Once bit 31 is shared among multiple leafs, the 'lane' it represents
- // is no longer covering its registers.
- CoveringLanes &= ~(1u << Bit);
+ ++Bit;
} else {
Idx.LaneMask = 0;
}
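With the bit-sharing fallback gone, every leaf subregister index (one with an empty composite list) gets a dedicated lane bit, and running past the 32 available bits is now a hard error instead of a degraded view that shared bit 31. A standalone sketch of the allocation rule, using a hypothetical helper name and plain uint32_t masks rather than the TableGen types:

#include <cstdint>
#include <stdexcept>
#include <vector>

// Hypothetical illustration: assign one single-bit lane mask per leaf index,
// in declaration order, and fail once the 32 available bits are exhausted.
std::vector<uint32_t> allocateLeafLaneMasks(unsigned NumLeaves) {
  std::vector<uint32_t> Masks;
  for (unsigned Bit = 0; Bit != NumLeaves; ++Bit) {
    if (Bit >= 32)
      throw std::runtime_error("ran out of lanemask bits");
    Masks.push_back(uint32_t(1) << Bit);
  }
  return Masks;
}

Indices that do have composites are left at zero in this loop and get their masks computed separately.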
for (auto &RegClass : RegClasses) {
unsigned LaneMask = 0;
for (const auto &SubRegIndex : SubRegIndices) {
- if (RegClass.getSubClassWithSubReg(&SubRegIndex) != &RegClass)
+ if (RegClass.getSubClassWithSubReg(&SubRegIndex) == nullptr)
continue;
LaneMask |= SubRegIndex.LaneMask;
}
+
+ // For classes without any subregisters, set LaneMask to ~0u instead of 0.
+ // This makes it easier for client code to handle classes uniformly.
+ if (LaneMask == 0)
+ LaneMask = ~0u;
+
RegClass.LaneMask = LaneMask;
}
}
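The per-class mask is therefore the union of the lane masks of every subregister index the class supports at all (any index for which getSubClassWithSubReg returns a class), with ~0u standing in for "the whole register" when a class has no subregisters. A minimal sketch of that rule, again with plain uint32_t masks and a hypothetical helper name:

#include <cstdint>
#include <vector>

// Hypothetical illustration: union of the supported subregister-index masks,
// falling back to ~0u for classes that support no subregister index.
uint32_t classLaneMask(const std::vector<uint32_t> &SupportedIndexMasks) {
  uint32_t Mask = 0;
  for (uint32_t IdxMask : SupportedIndexMasks)
    Mask |= IdxMask;
  return Mask == 0 ? ~0u : Mask;
}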
&& UnitWeight == RegUnits[SuperSet.Units.back()].Weight) {
DEBUG(dbgs() << "UnitSet " << SubIdx << " subsumed by " << SuperIdx
<< "\n");
+ // We can pick any of the set names for the merged set. Go for the
+ // shortest one to avoid picking the name of one of the classes that are
+ // artificially created by tablegen. So "FPR128_lo" instead of
+ // "QQQQ_with_qsub3_in_FPR128_lo".
+ if (RegUnitSets[SubIdx].Name.size() < RegUnitSets[SuperIdx].Name.size())
+ RegUnitSets[SuperIdx].Name = RegUnitSets[SubIdx].Name;
break;
}
}
const CodeGenRegister *SubRegister = S->second;
unsigned LaneMask = SubRegIndex->LaneMask;
// Distribute LaneMask to Register Units touched.
- for (const auto &SUI : SubRegister->getRegUnits()) {
+ for (unsigned SUI : SubRegister->getRegUnits()) {
bool Found = false;
unsigned u = 0;
for (unsigned RU : RegUnits) {
computeRegUnitLaneMasks();
+ // Compute register class HasDisjunctSubRegs flag.
+ for (CodeGenRegisterClass &RC : RegClasses) {
+ RC.HasDisjunctSubRegs = false;
+ for (const CodeGenRegister *Reg : RC.getMembers())
+ RC.HasDisjunctSubRegs |= Reg->HasDisjunctSubRegs;
+ }
+
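Finally, the same flag is summarized per register class: a class is marked as having disjunct subregisters as soon as any of its member registers has them, so downstream users (presumably the subregister liveness machinery) can query the property at class granularity.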
// Get the weight of each set.
for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx)
RegUnitSets[Idx].Weight = getRegUnitSetWeight(RegUnitSets[Idx].Units);