using namespace llvm;
+#define DEBUG_TYPE "systemz-isel"
+
namespace {
// Used to build addressing modes.
struct SystemZAddressingMode {
errs() << "SystemZAddressingMode " << this << '\n';
errs() << " Base ";
- if (Base.getNode() != 0)
+ if (Base.getNode())
Base.getNode()->dump();
else
errs() << "null\n";
if (hasIndexField()) {
errs() << " Index ";
- if (Index.getNode() != 0)
+ if (Index.getNode())
Index.getNode()->dump();
else
errs() << "null\n";
//
// (and (rotl Input, Rotate), Mask)
//
-// otherwise. The value has BitSize bits.
+// otherwise. The output value has BitSize bits, although Input may be
+// narrower (in which case the upper bits are don't care).
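+//
+// For example (illustrative values), an i64 (and (rotl X, 4), 0xf0) in the
+// non-RNSBG case would be described by Input = X, Rotate = 4, Mask = 0xf0.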
struct RxSBGOperands {
RxSBGOperands(unsigned Op, SDValue N)
: Opcode(Op), BitSize(N.getValueType().getSizeInBits()),
};
class SystemZDAGToDAGISel : public SelectionDAGISel {
- const SystemZTargetLowering &Lowering;
- const SystemZSubtarget &Subtarget;
+ const SystemZSubtarget *Subtarget;
// Used by SystemZOperands.td to create integer constants.
inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
}
const SystemZInstrInfo *getInstrInfo() const {
- return getTargetMachine().getInstrInfo();
+ return Subtarget->getInstrInfo();
}
// Try to fold more of the base or index of AM into AM, where IsBase
// Return true on success.
bool expandRxSBG(RxSBGOperands &RxSBG) const;
- // Return an undefined i64 value.
- SDValue getUNDEF64(SDLoc DL) const;
+ // Return an undefined value of type VT.
+ SDValue getUNDEF(SDLoc DL, EVT VT) const;
// Convert N to VT, if it isn't already.
SDValue convertTo(SDLoc DL, EVT VT, SDValue N) const;
SDNode *splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
uint64_t UpperVal, uint64_t LowerVal);
+ // Return true if Load and Store are loads and stores of the same size
+ // and are guaranteed not to overlap. Such operations can be implemented
+ // using block (SS-format) instructions.
+ //
+ // Partial overlap would lead to incorrect code, since the block operations
+ // are logically bytewise, even though they have a fast path for the
+ // non-overlapping case. We also need to avoid full overlap (i.e. two
+ // addresses that might be equal at run time) because although that case
+ // would be handled correctly, it might be implemented by millicode.
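+  //
+  // For example, MVC copies left to right one byte at a time, so an
+  // MVC from address A to A + 1 replicates the byte at A across the
+  // destination rather than performing a single wide copy.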
+ bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load) const;
+
// N is a (store (load Y), X) pattern. Return true if it can use an MVC
// from Y to X.
bool storeLoadCanUseMVC(SDNode *N) const;
public:
SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(TM, OptLevel),
- Lowering(*TM.getTargetLowering()),
- Subtarget(*TM.getSubtargetImpl()) { }
+ : SelectionDAGISel(TM, OptLevel) {}
+
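+  // Refetch the subtarget for each function, since it can change between
+  // functions (e.g. with per-function target attributes).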
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ Subtarget = &MF.getSubtarget<SystemZSubtarget>();
+ return SelectionDAGISel::runOnMachineFunction(MF);
+ }
// Override MachineFunctionPass.
- virtual const char *getPassName() const LLVM_OVERRIDE {
+ const char *getPassName() const override {
return "SystemZ DAG->DAG Pattern Instruction Selection";
}
// Override SelectionDAGISel.
- virtual SDNode *Select(SDNode *Node) LLVM_OVERRIDE;
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps)
- LLVM_OVERRIDE;
+ SDNode *Select(SDNode *Node) override;
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
+ std::vector<SDValue> &OutOps) override;
// Include the pieces autogenerated from the target description.
#include "SystemZGenDAGISel.inc"
return false;
// We need a constant mask.
- ConstantSDNode *MaskNode =
- dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
+ auto *MaskNode = dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
if (!MaskNode)
return false;
uint64_t Used = allOnes(Op.getValueType().getSizeInBits());
if (Used != (AndMask | InsertMask)) {
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Op.getOperand(0), KnownZero, KnownOne);
if (Used != (AndMask | InsertMask | KnownZero.getZExtValue()))
return false;
}
return false;
}
-// RxSBG.Input is a shift of Count bits in the direction given by IsLeft.
-// Return true if the result depends on the signs or zeros that are
-// shifted in.
-static bool shiftedInBitsMatter(RxSBGOperands &RxSBG, uint64_t Count,
- bool IsLeft) {
- // Work out which bits of the shift result are zeros or sign copies.
- uint64_t ShiftedIn = allOnes(Count);
- if (!IsLeft)
- ShiftedIn <<= RxSBG.BitSize - Count;
-
- // Rotate that mask in the same way as RxSBG.Input is rotated.
+// Return true if any bits of (RxSBG.Input & Mask) are significant.
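+//
+// For example, with Rotate == 8 and RxSBG.Mask == 0xff00, only bits 0-7 of
+// the unrotated input reach selected positions, so the result is true
+// exactly when Mask overlaps bits 0-7.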
+static bool maskMatters(RxSBGOperands &RxSBG, uint64_t Mask) {
+ // Rotate the mask in the same way as RxSBG.Input is rotated.
if (RxSBG.Rotate != 0)
- ShiftedIn = ((ShiftedIn << RxSBG.Rotate) |
- (ShiftedIn >> (64 - RxSBG.Rotate)));
-
- // Fail if any of the zero or sign bits are used.
- return (ShiftedIn & RxSBG.Mask) != 0;
+ Mask = ((Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)));
+ return (Mask & RxSBG.Mask) != 0;
}
bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
if (RxSBG.Opcode == SystemZ::RNSBG)
return false;
- ConstantSDNode *MaskNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!MaskNode)
return false;
// been removed from the mask. See if adding them back in makes the
// mask suitable.
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Input, KnownZero, KnownOne);
Mask |= KnownZero.getZExtValue();
if (!refineRxSBGMask(RxSBG, Mask))
return false;
if (RxSBG.Opcode != SystemZ::RNSBG)
return false;
- ConstantSDNode *MaskNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!MaskNode)
return false;
// been removed from the mask. See if adding them back in makes the
// mask suitable.
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Input, KnownZero, KnownOne);
Mask &= ~KnownOne.getZExtValue();
if (!refineRxSBGMask(RxSBG, Mask))
return false;
case ISD::ROTL: {
// Any 64-bit rotate left can be merged into the RxSBG.
- if (RxSBG.BitSize != 64)
+ if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
return false;
- ConstantSDNode *CountNode
- = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!CountNode)
return false;
return true;
}
+ case ISD::ANY_EXTEND:
+ // Bits above the extended operand are don't-care.
+ RxSBG.Input = N.getOperand(0);
+ return true;
+
+ case ISD::ZERO_EXTEND:
+ if (RxSBG.Opcode != SystemZ::RNSBG) {
+ // Restrict the mask to the extended operand.
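+      // For example, bits 32-63 of (zext i32 X to i64) are known zero,
+      // so clearing them from the mask does not change the result.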
+ unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits();
+ if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))
+ return false;
+
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+ // Fall through.
+
+ case ISD::SIGN_EXTEND: {
+ // Check that the extension bits are don't-care (i.e. are masked out
+ // by the final mask).
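+      // For example, bits 32-63 of (sext i32 X to i64) are copies of
+      // bit 31, so the extension can only be dropped if the final mask
+      // never selects them.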
+ unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits();
+ if (maskMatters(RxSBG, allOnes(RxSBG.BitSize) - allOnes(InnerBitSize)))
+ return false;
+
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+
case ISD::SHL: {
- ConstantSDNode *CountNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!CountNode)
return false;
uint64_t Count = CountNode->getZExtValue();
- if (Count < 1 || Count >= RxSBG.BitSize)
+ unsigned BitSize = N.getValueType().getSizeInBits();
+ if (Count < 1 || Count >= BitSize)
return false;
if (RxSBG.Opcode == SystemZ::RNSBG) {
      // Treat (shl X, count) as (rotl X, count) as long as the bottom
// count bits from RxSBG.Input are ignored.
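+      // ((shl X, count) and (rotl X, count) differ only in those bottom
+      // count bits, which hold the shifted-in zeros.)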
- if (shiftedInBitsMatter(RxSBG, Count, true))
+ if (maskMatters(RxSBG, allOnes(Count)))
return false;
} else {
// Treat (shl X, count) as (and (rotl X, count), ~0<<count).
- if (!refineRxSBGMask(RxSBG, allOnes(RxSBG.BitSize - Count) << Count))
+ if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count) << Count))
return false;
}
case ISD::SRL:
case ISD::SRA: {
- ConstantSDNode *CountNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!CountNode)
return false;
uint64_t Count = CountNode->getZExtValue();
- if (Count < 1 || Count >= RxSBG.BitSize)
+ unsigned BitSize = N.getValueType().getSizeInBits();
+ if (Count < 1 || Count >= BitSize)
return false;
if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) {
// Treat (srl|sra X, count) as (rotl X, size-count) as long as the top
// count bits from RxSBG.Input are ignored.
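+      // ((srl|sra X, count) and (rotl X, size-count) differ only in those
+      // top count bits, which hold the shifted-in zeros or sign copies.)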
- if (shiftedInBitsMatter(RxSBG, Count, false))
+ if (maskMatters(RxSBG, allOnes(Count) << (BitSize - Count)))
return false;
} else {
      // Treat (srl X, count) as (and (rotl X, size-count), ~0>>count),
      // which is similar to SHL above.
- if (!refineRxSBGMask(RxSBG, allOnes(RxSBG.BitSize - Count)))
+ if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count)))
return false;
}
}
}
-SDValue SystemZDAGToDAGISel::getUNDEF64(SDLoc DL) const {
- SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i64);
+SDValue SystemZDAGToDAGISel::getUNDEF(SDLoc DL, EVT VT) const {
+ SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
return SDValue(N, 0);
}
SDValue SystemZDAGToDAGISel::convertTo(SDLoc DL, EVT VT, SDValue N) const {
if (N.getValueType() == MVT::i32 && VT == MVT::i64)
- return CurDAG->getTargetInsertSubreg(SystemZ::subreg_32bit,
- DL, VT, getUNDEF64(DL), N);
+ return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32,
+ DL, VT, getUNDEF(DL, MVT::i64), N);
if (N.getValueType() == MVT::i64 && VT == MVT::i32)
- return CurDAG->getTargetExtractSubreg(SystemZ::subreg_32bit, DL, VT, N);
+ return CurDAG->getTargetExtractSubreg(SystemZ::subreg_l32, DL, VT, N);
assert(N.getValueType() == VT && "Unexpected value types");
return N;
}
RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
unsigned Count = 0;
while (expandRxSBG(RISBG))
- Count += 1;
+ if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND)
+ Count += 1;
if (Count == 0)
- return 0;
+ return nullptr;
if (Count == 1) {
// Prefer to use normal shift instructions over RISBG, since they can handle
// all cases and are sometimes shorter.
if (N->getOpcode() != ISD::AND)
- return 0;
+ return nullptr;
// Prefer register extensions like LLC over RISBG. Also prefer to start
// out with normal ANDs if one instruction would be enough. We can convert
SystemZ::isImmLF(~RISBG.Mask) ||
SystemZ::isImmHF(~RISBG.Mask)) {
// Force the new mask into the DAG, since it may include known-one bits.
- ConstantSDNode *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
+ auto *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
if (MaskN->getZExtValue() != RISBG.Mask) {
SDValue NewMask = CurDAG->getConstant(RISBG.Mask, VT);
N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), NewMask);
return SelectCode(N);
}
- return 0;
+ return nullptr;
}
}
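+  // On targets with the high-word facility, a 32-bit result can use the
+  // RISBMux pseudo (presumably resolved to a low/high-word RISB form
+  // later), with Start and End reduced to positions within a 32-bit word.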
+ unsigned Opcode = SystemZ::RISBG;
+ EVT OpcodeVT = MVT::i64;
+ if (VT == MVT::i32 && Subtarget->hasHighWord()) {
+ Opcode = SystemZ::RISBMux;
+ OpcodeVT = MVT::i32;
+ RISBG.Start &= 31;
+ RISBG.End &= 31;
+ }
SDValue Ops[5] = {
- getUNDEF64(SDLoc(N)),
- convertTo(SDLoc(N), MVT::i64, RISBG.Input),
+ getUNDEF(SDLoc(N), OpcodeVT),
+ convertTo(SDLoc(N), OpcodeVT, RISBG.Input),
CurDAG->getTargetConstant(RISBG.Start, MVT::i32),
CurDAG->getTargetConstant(RISBG.End | 128, MVT::i32),
CurDAG->getTargetConstant(RISBG.Rotate, MVT::i32)
};
- N = CurDAG->getMachineNode(SystemZ::RISBG, SDLoc(N), MVT::i64, Ops);
+ N = CurDAG->getMachineNode(Opcode, SDLoc(N), OpcodeVT, Ops);
return convertTo(SDLoc(N), VT, SDValue(N, 0)).getNode();
}
unsigned Count[] = { 0, 0 };
for (unsigned I = 0; I < 2; ++I)
while (expandRxSBG(RxSBG[I]))
- Count[I] += 1;
+ if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND)
+ Count[I] += 1;
// Do nothing if neither operand is suitable.
if (Count[0] == 0 && Count[1] == 0)
- return 0;
+ return nullptr;
// Pick the deepest second operand.
unsigned I = Count[0] > Count[1] ? 0 : 1;
// Prefer IC for character insertions from memory.
if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
- if (LoadSDNode *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
+ if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
if (Load->getMemoryVT() == MVT::i8)
- return 0;
+ return nullptr;
// See whether we can avoid an AND in the first operand by converting
// ROSBG to RISBG.
return Or.getNode();
}
-// Return true if Load and Store:
-// - are loads and stores of the same size;
-// - do not partially overlap; and
-// - can be decomposed into what are logically individual character accesses
-// without changing the semantics.
-static bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load,
- AliasAnalysis *AA) {
+bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
+ LoadSDNode *Load) const {
// Check that the two memory operands have the same size.
if (Load->getMemoryVT() != Store->getMemoryVT())
return false;
if (Load->isInvariant())
return true;
- // If both operands are aligned, they must be equal or not overlap.
- uint64_t Size = Load->getMemoryVT().getStoreSize();
- if (Load->getAlignment() >= Size && Store->getAlignment() >= Size)
- return true;
-
// Otherwise we need to check whether there's an alias.
- const Value *V1 = Load->getSrcValue();
- const Value *V2 = Store->getSrcValue();
+ const Value *V1 = Load->getMemOperand()->getValue();
+ const Value *V2 = Store->getMemOperand()->getValue();
if (!V1 || !V2)
return false;
+ // Reject equality.
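+  // (Equal base values with equal end offsets and sizes, checked above,
+  // mean the same byte range, i.e. the full-overlap case described in the
+  // comment on canUseBlockOperation's declaration.)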
+ uint64_t Size = Load->getMemoryVT().getStoreSize();
int64_t End1 = Load->getSrcValueOffset() + Size;
int64_t End2 = Store->getSrcValueOffset() + Size;
- return !AA->alias(AliasAnalysis::Location(V1, End1, Load->getTBAAInfo()),
- AliasAnalysis::Location(V2, End2, Store->getTBAAInfo()));
+ if (V1 == V2 && End1 == End2)
+ return false;
+
+ return !AA->alias(AliasAnalysis::Location(V1, End1, Load->getAAInfo()),
+ AliasAnalysis::Location(V2, End2, Store->getAAInfo()));
}
bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
- StoreSDNode *Store = cast<StoreSDNode>(N);
- LoadSDNode *Load = cast<LoadSDNode>(Store->getValue());
+ auto *Store = cast<StoreSDNode>(N);
+ auto *Load = cast<LoadSDNode>(Store->getValue());
// Prefer not to use MVC if either address can use ... RELATIVE LONG
// instructions.
return false;
}
- return canUseBlockOperation(Store, Load, AA);
+ return canUseBlockOperation(Store, Load);
}
bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
unsigned I) const {
- StoreSDNode *StoreA = cast<StoreSDNode>(N);
- LoadSDNode *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
- LoadSDNode *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
- if (LoadA->isVolatile() ||
- LoadA->getMemoryVT() != StoreA->getMemoryVT() ||
- LoadA->getBasePtr() != StoreA->getBasePtr())
- return false;
- return canUseBlockOperation(StoreA, LoadB, AA);
+ auto *StoreA = cast<StoreSDNode>(N);
+ auto *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
+ auto *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
+ return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
}
SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return 0;
+ return nullptr;
}
unsigned Opcode = Node->getOpcode();
- SDNode *ResNode = 0;
+ SDNode *ResNode = nullptr;
switch (Opcode) {
case ISD::OR:
if (Node->getOperand(1).getOpcode() != ISD::Constant)
// If this is a 64-bit operation in which both 32-bit halves are nonzero,
// split the operation into two.
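+    // For example, an OR with 0x100000001 has nonzero bits in both
+    // halves, so it becomes one operation on the high word and one on
+    // the low word.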
if (!ResNode && Node->getValueType(0) == MVT::i64)
- if (ConstantSDNode *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
+ if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
uint64_t Val = Op1->getZExtValue();
if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val))
Node = splitLargeImmediate(Opcode, Node, Node->getOperand(0),
case ISD::ROTL:
case ISD::SHL:
case ISD::SRL:
+ case ISD::ZERO_EXTEND:
if (!ResNode)
ResNode = tryRISBGZero(Node);
break;
}
break;
- case ISD::ATOMIC_LOAD_SUB:
- // Try to convert subtractions of constants to additions.
- if (ConstantSDNode *Op2 = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
- uint64_t Value = -Op2->getZExtValue();
- EVT VT = Node->getValueType(0);
- if (VT == MVT::i32 || isInt<32>(Value)) {
- SDValue Ops[] = { Node->getOperand(0), Node->getOperand(1),
- CurDAG->getConstant(int32_t(Value), VT) };
- Node = CurDAG->MorphNodeTo(Node, ISD::ATOMIC_LOAD_ADD,
- Node->getVTList(), Ops, array_lengthof(Ops));
- }
- }
- break;
-
case SystemZISD::SELECT_CCMASK: {
SDValue Op0 = Node->getOperand(0);
SDValue Op1 = Node->getOperand(1);
ResNode = SelectCode(Node);
DEBUG(errs() << "=> ";
- if (ResNode == NULL || ResNode == Node)
+ if (ResNode == nullptr || ResNode == Node)
Node->dump(CurDAG);
else
ResNode->dump(CurDAG);
bool SystemZDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
+ unsigned ConstraintID,
std::vector<SDValue> &OutOps) {
- assert(ConstraintCode == 'm' && "Unexpected constraint code");
+ assert(ConstraintID == InlineAsm::Constraint_m &&
+ "Unexpected constraint code");
// Accept addresses with short displacements, which are compatible
// with Q, R, S and T. But keep the index operand for future expansion.
SDValue Base, Disp, Index;