//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "systemz-lower"
-
#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
using namespace llvm;
+#define DEBUG_TYPE "systemz-lower"
+
namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
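+// (IPM places the 2-bit condition code in bits 28 and 29 of its 32-bit
+// result, with the program mask below it, so XORValue, AddValue and Bit
+// are all chosen relative to that position.)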
return Op;
}
-SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
- : TargetLowering(tm, new TargetLoweringObjectFileELF()),
- Subtarget(*tm.getSubtargetImpl()), TM(tm) {
+SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm)
+ : TargetLowering(tm, new TargetLoweringObjectFileELF()),
+ Subtarget(tm.getSubtarget<SystemZSubtarget>()) {
MVT PtrVT = getPointerTy();
// Set up the register classes.
setOperationAction(ISD::SMUL_LOHI, VT, Custom);
setOperationAction(ISD::UMUL_LOHI, VT, Custom);
- // We have instructions for signed but not unsigned FP conversion.
- setOperationAction(ISD::FP_TO_UINT, VT, Expand);
+ // Only z196 and above have native support for conversions to unsigned.
+ if (!Subtarget.hasFPExtension())
+ setOperationAction(ISD::FP_TO_UINT, VT, Expand);
}
}
setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
- // We have instructions for signed but not unsigned FP conversion.
+ // z10 has instructions for signed but not unsigned FP conversion.
// Handle unsigned 32-bit types as signed 64-bit types.
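+  // (A 32-bit unsigned value zero-extended to 64 bits is always
+  // non-negative, so the signed 64-bit conversion gives the exact result.)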
- setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
- setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
+ if (!Subtarget.hasFPExtension()) {
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
+ }
// We have native support for a 64-bit CTLZ, via FLOGR.
setOperationAction(ISD::CTLZ, MVT::i32, Promote);
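+  // (Promotion extends the i32 input to i64 and lets the legalizer adjust
+  // the resulting count for the 32-bit width difference.)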
// Give LowerOperation the chance to replace 64-bit ORs with subregs.
setOperationAction(ISD::OR, MVT::i64, Custom);
- // Give LowerOperation the chance to optimize SIGN_EXTEND sequences.
- setOperationAction(ISD::SIGN_EXTEND, MVT::i64, Custom);
-
// FIXME: Can we support these natively?
setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
setOperationAction(ISD::VACOPY, MVT::Other, Custom);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
+ // Codes for which we want to perform some z-specific combinations.
+ setTargetDAGCombine(ISD::SIGN_EXTEND);
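+  // (The combines themselves are implemented in PerformDAGCombine below.)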
+
// We want to use MVC in preference to even a single load/store pair.
MaxStoresPerMemcpy = 0;
MaxStoresPerMemcpyOptSize = 0;
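+  // (A limit of 0 keeps SelectionDAG from inlining memcpy as individual
+  // stores, so every memcpy reaches the MVC-based lowering.)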
return Imm.isZero() || Imm.isNegZero();
}
-bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
- unsigned,
- bool *Fast) const {
+bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
+ unsigned,
+ unsigned,
+ bool *Fast) const {
// Unaligned accesses should never be slower than the expanded version.
// We check specifically for aligned accesses in the few cases where
// they are required.
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
- if (CallOperandVal == NULL)
+ if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
// Look at the constraint type.
if (Index < 16 && Map[Index])
return std::make_pair(Map[Index], RC);
}
- return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
+ return std::make_pair(0U, nullptr);
}
std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
MachineRegisterInfo &MRI = MF.getRegInfo();
SystemZMachineFunctionInfo *FuncInfo =
MF.getInfo<SystemZMachineFunctionInfo>();
- auto *TFL = static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());
+ auto *TFL = static_cast<const SystemZFrameLowering *>(
+ DAG.getSubtarget().getFrameLowering());
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
+ CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
unsigned NumFixedGPRs = 0;
}
// Join the stores, which are independent of one another.
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &MemOps[NumFixedFPRs],
- SystemZ::NumArgFPRs - NumFixedFPRs);
+ makeArrayRef(&MemOps[NumFixedFPRs],
+                                   SystemZ::NumArgFPRs - NumFixedFPRs));
}
}
// Analyze the operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
+ CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
// We don't support GuaranteedTailCallOpt, only automatically-detected
// Join the stores, which are independent of one another.
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
// Accept direct calls by converting symbolic call addresses to the
// associated Target* opcodes. Force %r1 to be used for indirect
Ops.push_back(DAG.getRegister(RegsToPass[I].first,
RegsToPass[I].second.getValueType()));
+ // Add a register mask operand representing the call-preserved registers.
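+  // (Registers absent from the mask are treated as clobbered by the call,
+  // so the register allocator will not keep values live in them across it.)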
+ const TargetRegisterInfo *TRI =
+ getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+ const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
// Glue the call to the argument copies, if any.
if (Glue.getNode())
Ops.push_back(Glue);
// Emit the call.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
if (IsTailCall)
- return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
- Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
+ return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
+ Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
Glue = Chain.getValue(1);
// Mark the end of the call, which is glued to the call itself.
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RetLocs;
- CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
+ CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
// Copy all of the result registers out of their specified physreg.
// Assign locations to each returned value.
SmallVector<CCValAssign, 16> RetLocs;
- CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
+ CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
// Quick exit for void returns
if (Glue.getNode())
RetOps.push_back(Glue);
- return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
- RetOps.data(), RetOps.size());
+ return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}
SDValue SystemZTargetLowering::
Load->getChain(), Load->getBasePtr(),
Load->getPointerInfo(), Load->getMemoryVT(),
Load->isVolatile(), Load->isNonTemporal(),
- Load->getAlignment());
+ Load->isInvariant(), Load->getAlignment());
// Make sure that the second operand is an i32 with the right value.
if (C.Op1.getValueType() != MVT::i32 ||
// Check whether the nonconstant input is an AND with a constant mask.
Comparison NewC(C);
uint64_t MaskVal;
- ConstantSDNode *Mask = 0;
+ ConstantSDNode *Mask = nullptr;
if (C.Op0.getOpcode() == ISD::AND) {
NewC.Op0 = C.Op0.getOperand(0);
NewC.Op1 = C.Op0.getOperand(1);
Ops.push_back(Glue);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
- return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
+ return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
}
SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
const GlobalValue *GV = Node->getGlobal();
int64_t Offset = Node->getOffset();
EVT PtrVT = getPointerTy();
- Reloc::Model RM = TM.getRelocationModel();
- CodeModel::Model CM = TM.getCodeModel();
+ Reloc::Model RM = DAG.getTarget().getRelocationModel();
+ CodeModel::Model CM = DAG.getTarget().getCodeModel();
SDValue Result;
if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
SDLoc DL(Node);
const GlobalValue *GV = Node->getGlobal();
EVT PtrVT = getPointerTy();
- TLSModel::Model model = TM.getTLSModel(GV);
+ TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
if (model != TLSModel::LocalExec)
llvm_unreachable("only local-exec TLS mode supported");
false, false, 0);
Offset += 8;
}
- return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields);
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
SDValue Ops[2] = { Result, Chain };
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
}
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
// low half first, so the results are in reverse order.
lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
SDValue Ops[2];
lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
Op0, Op1, Ops[1], Ops[0]);
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
else
lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
// Get the known-zero masks for each operand.
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
APInt KnownZero[2], KnownOne[2];
- DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]);
- DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]);
+ DAG.computeKnownBits(Ops[0], KnownZero[0], KnownOne[0]);
+ DAG.computeKnownBits(Ops[1], KnownZero[1], KnownOne[1]);
// See if the upper 32 bits of one operand and the lower 32 bits of the
// other are known zero. They are the low and high operands respectively.
MVT::i64, HighOp, Low32);
}
-SDValue SystemZTargetLowering::lowerSIGN_EXTEND(SDValue Op,
- SelectionDAG &DAG) const {
- // Convert (sext (ashr (shl X, C1), C2)) to
- // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
- // cheap as narrower ones.
- SDValue N0 = Op.getOperand(0);
- EVT VT = Op.getValueType();
- if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
- auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
- SDValue Inner = N0.getOperand(0);
- if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
- auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1));
- if (ShlAmt) {
- unsigned Extra = (VT.getSizeInBits() -
- N0.getValueType().getSizeInBits());
- unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
- unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
- EVT ShiftVT = N0.getOperand(1).getValueType();
- SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
- Inner.getOperand(0));
- SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
- DAG.getConstant(NewShlAmt, ShiftVT));
- return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
- DAG.getConstant(NewSraAmt, ShiftVT));
- }
- }
- }
- return SDValue();
-}
-
// Op is an atomic load. Lower it into a normal volatile load.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
SelectionDAG &DAG) const {
SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
DAG.getConstant(BitSize, WideVT) };
SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
- array_lengthof(Ops),
NarrowVT, MMO);
// Rotate the result of the final CS so that the field is in the lower
SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
- return DAG.getMergeValues(RetOps, 2, DL);
+ return DAG.getMergeValues(RetOps, DL);
}
// Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
// Use an addition if the operand is constant and either LAA(G) is
// available or the negative value is in the range of A(G)FHI.
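+  // (LAA and LAAG belong to the z196 interlocked-access facility, which is
+  // what hasInterlockedAccess1() tests for.)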
int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
- if (isInt<32>(Value) || TM.getSubtargetImpl()->hasInterlockedAccess1())
+ if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
NegSrc2 = DAG.getConstant(Value, MemVT);
- } else if (TM.getSubtargetImpl()->hasInterlockedAccess1())
+ } else if (Subtarget.hasInterlockedAccess1())
// Use LAA(G) if available.
NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, MemVT),
Src2);
SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
NegBitShift, DAG.getConstant(BitSize, WideVT) };
SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
- VTList, Ops, array_lengthof(Ops),
- NarrowVT, MMO);
+ VTList, Ops, NarrowVT, MMO);
return AtomicOp;
}
Op.getOperand(1)
};
return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op),
- Node->getVTList(), Ops, array_lengthof(Ops),
+ Node->getVTList(), Ops,
Node->getMemoryVT(), Node->getMemOperand());
}
return lowerUDIVREM(Op, DAG);
case ISD::OR:
return lowerOR(Op, DAG);
- case ISD::SIGN_EXTEND:
- return lowerSIGN_EXTEND(Op, DAG);
case ISD::ATOMIC_SWAP:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
case ISD::ATOMIC_STORE:
OPCODE(ATOMIC_CMP_SWAPW);
OPCODE(PREFETCH);
}
- return NULL;
+ return nullptr;
#undef OPCODE
}
+SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ unsigned Opcode = N->getOpcode();
+ if (Opcode == ISD::SIGN_EXTEND) {
+ // Convert (sext (ashr (shl X, C1), C2)) to
+    // (ashr (shl (anyext X), C1'), C2'), since wider shifts are as
+ // cheap as narrower ones.
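+    // For example, when X is i32 and the result is i64, Extra below is 32:
+    // (sext (ashr (shl X, 24), 25)) becomes (ashr (shl (anyext X), 56), 57).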
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+ if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
+ auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ SDValue Inner = N0.getOperand(0);
+ if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
+ if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
+ unsigned Extra = (VT.getSizeInBits() -
+ N0.getValueType().getSizeInBits());
+ unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
+ unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
+ EVT ShiftVT = N0.getOperand(1).getValueType();
+ SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
+ Inner.getOperand(0));
+ SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
+ DAG.getConstant(NewShlAmt, ShiftVT));
+ return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
+ DAG.getConstant(NewSraAmt, ShiftVT));
+ }
+ }
+ }
+ }
+ return SDValue();
+}
+
//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr *MI,
MachineBasicBlock *MBB) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
+ const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>(
+ MBB->getParent()->getSubtarget().getInstrInfo());
unsigned DestReg = MI->getOperand(0).getReg();
unsigned TrueReg = MI->getOperand(1).getReg();
MachineBasicBlock *MBB,
unsigned StoreOpcode, unsigned STOCOpcode,
bool Invert) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
+ const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>(
+ MBB->getParent()->getSubtarget().getInstrInfo());
unsigned SrcReg = MI->getOperand(0).getReg();
MachineOperand Base = MI->getOperand(1);
// Use STOCOpcode if possible. We could use different store patterns in
// order to avoid matching the index register, but the performance trade-offs
// might be more complicated in that case.
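+  // (STOC comes with the z196 load/store-on-condition facility, which is
+  // what hasLoadStoreOnCond() tests for.)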
- if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
+ if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
if (Invert)
CCMask ^= CCValid;
BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
unsigned BinOpcode,
unsigned BitSize,
bool Invert) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
bool IsSubWord = (BitSize < 32);
unsigned CompareOpcode,
unsigned KeepOldMask,
unsigned BitSize) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
bool IsSubWord = (BitSize < 32);
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
MachineBasicBlock *MBB) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
// Extract the operands. Base can be a register or a frame index.
SystemZTargetLowering::emitExt128(MachineInstr *MI,
MachineBasicBlock *MBB,
bool ClearEven, unsigned SubReg) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();
SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
MachineBasicBlock *MBB,
unsigned Opcode) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();
// When generating more than one CLC, all but the last will need to
// branch to the end when a difference is found.
MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
- splitBlockAfter(MI, MBB) : 0);
+ splitBlockAfter(MI, MBB) : nullptr);
// Check for the loop form, in which operand 5 is the trip count.
if (MI->getNumExplicitOperands() > 5) {
SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
MachineBasicBlock *MBB,
unsigned Opcode) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();