From c848b1bbcf88ab5d8318d990612fb1fda206ea3d Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 25 Apr 2014 05:30:21 +0000 Subject: [PATCH] [C++] Use 'nullptr'. Target edition. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@207197 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/AArch64/AArch64BranchFixupPass.cpp | 2 +- lib/Target/AArch64/AArch64ISelDAGToDAG.cpp | 12 +- lib/Target/AArch64/AArch64ISelLowering.cpp | 16 +- lib/Target/AArch64/AArch64InstrInfo.cpp | 4 +- lib/Target/AArch64/AArch64MCInstLower.cpp | 2 +- .../AArch64/AArch64TargetTransformInfo.cpp | 2 +- .../AArch64/AsmParser/AArch64AsmParser.cpp | 4 +- .../MCTargetDesc/AArch64MCTargetDesc.cpp | 4 +- lib/Target/ARM/A15SDOptimizer.cpp | 6 +- lib/Target/ARM/ARMAsmPrinter.cpp | 4 +- lib/Target/ARM/ARMBaseInstrInfo.cpp | 61 ++--- lib/Target/ARM/ARMBaseRegisterInfo.cpp | 4 +- lib/Target/ARM/ARMCodeEmitter.cpp | 6 +- lib/Target/ARM/ARMConstantIslandPass.cpp | 18 +- lib/Target/ARM/ARMExpandPseudoInsts.cpp | 2 +- lib/Target/ARM/ARMFastISel.cpp | 9 +- lib/Target/ARM/ARMHazardRecognizer.cpp | 4 +- lib/Target/ARM/ARMISelDAGToDAG.cpp | 79 +++---- lib/Target/ARM/ARMISelLowering.cpp | 37 +-- lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 4 +- lib/Target/ARM/ARMTargetObjectFile.cpp | 2 +- lib/Target/ARM/ARMTargetTransformInfo.cpp | 2 +- lib/Target/ARM/AsmParser/ARMAsmParser.cpp | 15 +- .../ARM/MCTargetDesc/ARMELFStreamer.cpp | 18 +- lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp | 4 +- .../ARM/MCTargetDesc/ARMMCTargetDesc.cpp | 4 +- .../ARM/MCTargetDesc/ARMMachObjectWriter.cpp | 2 +- .../ARM/MCTargetDesc/ARMTargetStreamer.cpp | 2 +- lib/Target/ARM/MLxExpansionPass.cpp | 6 +- lib/Target/ARM/Thumb2SizeReduction.cpp | 6 +- .../ARM64/ARM64AddressTypePromotion.cpp | 8 +- lib/Target/ARM64/ARM64AsmPrinter.cpp | 2 +- lib/Target/ARM64/ARM64CollectLOH.cpp | 14 +- lib/Target/ARM64/ARM64ConditionalCompares.cpp | 24 +- lib/Target/ARM64/ARM64FastISel.cpp | 4 +- lib/Target/ARM64/ARM64ISelDAGToDAG.cpp | 28 +-- 
lib/Target/ARM64/ARM64ISelLowering.cpp | 14 +- lib/Target/ARM64/ARM64InstrInfo.cpp | 12 +- lib/Target/ARM64/ARM64PromoteConstant.cpp | 4 +- lib/Target/ARM64/ARM64RegisterInfo.cpp | 2 +- lib/Target/ARM64/ARM64SelectionDAGInfo.cpp | 2 +- lib/Target/ARM64/ARM64StorePairSuppress.cpp | 2 +- lib/Target/ARM64/ARM64TargetTransformInfo.cpp | 2 +- lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp | 14 +- .../Disassembler/ARM64ExternalSymbolizer.cpp | 12 +- .../ARM64/InstPrinter/ARM64InstPrinter.cpp | 10 +- .../ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp | 4 +- .../MCTargetDesc/ARM64MachObjectWriter.cpp | 14 +- lib/Target/CppBackend/CPPBackend.cpp | 2 +- lib/Target/Hexagon/HexagonAsmPrinter.cpp | 2 +- lib/Target/Hexagon/HexagonCFGOptimizer.cpp | 8 +- lib/Target/Hexagon/HexagonCopyToCombine.cpp | 6 +- .../Hexagon/HexagonExpandPredSpillCode.cpp | 2 +- lib/Target/Hexagon/HexagonFrameLowering.cpp | 4 +- lib/Target/Hexagon/HexagonHardwareLoops.cpp | 98 ++++---- lib/Target/Hexagon/HexagonISelDAGToDAG.cpp | 6 +- lib/Target/Hexagon/HexagonISelLowering.cpp | 4 +- lib/Target/Hexagon/HexagonInstrInfo.cpp | 12 +- .../Hexagon/HexagonMachineScheduler.cpp | 16 +- lib/Target/Hexagon/HexagonNewValueJump.cpp | 4 +- .../Hexagon/HexagonSplitTFRCondSets.cpp | 3 +- lib/Target/Hexagon/HexagonVLIWPacketizer.cpp | 4 +- .../Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp | 2 +- .../MCTargetDesc/HexagonMCTargetDesc.cpp | 2 +- .../MCTargetDesc/MSP430MCTargetDesc.cpp | 2 +- lib/Target/MSP430/MSP430AsmPrinter.cpp | 2 +- lib/Target/MSP430/MSP430FrameLowering.cpp | 2 +- lib/Target/MSP430/MSP430ISelDAGToDAG.cpp | 24 +- lib/Target/MSP430/MSP430ISelLowering.cpp | 4 +- lib/Target/MSP430/MSP430InstrInfo.cpp | 4 +- lib/Target/Mips/AsmParser/MipsAsmParser.cpp | 6 +- .../Mips/MCTargetDesc/MipsAsmBackend.cpp | 2 +- .../Mips/MCTargetDesc/MipsMCTargetDesc.cpp | 2 +- lib/Target/Mips/Mips16HardFloat.cpp | 6 +- lib/Target/Mips/Mips16HardFloatInfo.cpp | 4 +- lib/Target/Mips/Mips16ISelDAGToDAG.cpp | 6 +- 
lib/Target/Mips/Mips16ISelLowering.cpp | 4 +- lib/Target/Mips/MipsAsmPrinter.cpp | 2 +- lib/Target/Mips/MipsCodeEmitter.cpp | 6 +- lib/Target/Mips/MipsConstantIslandPass.cpp | 12 +- lib/Target/Mips/MipsDelaySlotFiller.cpp | 14 +- lib/Target/Mips/MipsISelDAGToDAG.cpp | 4 +- lib/Target/Mips/MipsISelLowering.cpp | 26 +-- lib/Target/Mips/MipsInstrInfo.cpp | 4 +- lib/Target/Mips/MipsLongBranch.cpp | 4 +- lib/Target/Mips/MipsOptimizePICCall.cpp | 8 +- lib/Target/Mips/MipsSEISelDAGToDAG.cpp | 12 +- lib/Target/Mips/MipsSEISelLowering.cpp | 4 +- lib/Target/Mips/MipsSEInstrInfo.cpp | 2 +- lib/Target/Mips/MipsSERegisterInfo.cpp | 2 +- .../NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp | 2 +- lib/Target/NVPTX/NVPTXAsmPrinter.cpp | 18 +- lib/Target/NVPTX/NVPTXGenericToNVVM.cpp | 5 +- lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 218 +++++++++--------- lib/Target/NVPTX/NVPTXISelLowering.cpp | 16 +- lib/Target/NVPTX/NVPTXInstrInfo.cpp | 2 +- lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp | 2 +- lib/Target/NVPTX/NVPTXRegisterInfo.cpp | 2 +- lib/Target/NVPTX/NVPTXTargetMachine.cpp | 2 +- lib/Target/NVPTX/NVPTXUtilities.cpp | 24 +- lib/Target/NVPTX/NVVMReflect.cpp | 6 +- lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp | 12 +- .../PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp | 4 +- .../PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp | 2 +- .../MCTargetDesc/PPCMachObjectWriter.cpp | 2 +- lib/Target/PowerPC/PPCAsmPrinter.cpp | 16 +- lib/Target/PowerPC/PPCBranchSelector.cpp | 2 +- lib/Target/PowerPC/PPCCTRLoops.cpp | 10 +- lib/Target/PowerPC/PPCCodeEmitter.cpp | 2 +- lib/Target/PowerPC/PPCFastISel.cpp | 10 +- lib/Target/PowerPC/PPCHazardRecognizers.cpp | 2 +- lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 14 +- lib/Target/PowerPC/PPCISelLowering.cpp | 53 ++--- lib/Target/PowerPC/PPCInstrInfo.cpp | 8 +- lib/Target/PowerPC/PPCMCInstLower.cpp | 2 +- lib/Target/PowerPC/PPCTargetTransformInfo.cpp | 2 +- lib/Target/R600/AMDGPUFrameLowering.cpp | 2 +- lib/Target/R600/AMDGPUISelLowering.cpp | 2 +- 
lib/Target/R600/AMDGPUInstrInfo.cpp | 6 +- lib/Target/R600/AMDGPUTargetMachine.cpp | 2 +- lib/Target/R600/AMDGPUTargetTransformInfo.cpp | 2 +- lib/Target/R600/AMDILCFGStructurizer.cpp | 32 +-- lib/Target/R600/AMDILIntrinsicInfo.cpp | 2 +- .../R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp | 4 +- lib/Target/R600/R600ControlFlowFinalizer.cpp | 8 +- lib/Target/R600/R600EmitClauseMarkers.cpp | 2 +- lib/Target/R600/R600ExpandSpecialInstrs.cpp | 2 +- lib/Target/R600/R600InstrInfo.cpp | 4 +- lib/Target/R600/R600MachineScheduler.cpp | 10 +- .../R600/R600OptimizeVectorRegisters.cpp | 2 +- lib/Target/R600/SIAnnotateControlFlow.cpp | 16 +- lib/Target/R600/SIISelLowering.cpp | 18 +- lib/Target/R600/SIInsertWaits.cpp | 4 +- lib/Target/R600/SIInstrInfo.cpp | 10 +- lib/Target/R600/SILowerControlFlow.cpp | 2 +- lib/Target/R600/SIRegisterInfo.cpp | 4 +- lib/Target/Sparc/AsmParser/SparcAsmParser.cpp | 18 +- .../Sparc/MCTargetDesc/SparcMCAsmInfo.cpp | 2 +- .../Sparc/MCTargetDesc/SparcMCTargetDesc.cpp | 4 +- lib/Target/Sparc/SparcAsmPrinter.cpp | 2 +- lib/Target/Sparc/SparcCodeEmitter.cpp | 4 +- lib/Target/Sparc/SparcISelDAGToDAG.cpp | 2 +- lib/Target/Sparc/SparcISelLowering.cpp | 30 +-- lib/Target/Sparc/SparcInstrInfo.cpp | 10 +- lib/Target/Sparc/SparcMCInstLower.cpp | 2 +- lib/Target/Sparc/SparcTargetObjectFile.cpp | 2 +- .../SystemZ/AsmParser/SystemZAsmParser.cpp | 8 +- .../MCTargetDesc/SystemZMCTargetDesc.cpp | 3 +- lib/Target/SystemZ/SystemZElimCompare.cpp | 2 +- lib/Target/SystemZ/SystemZISelDAGToDAG.cpp | 20 +- lib/Target/SystemZ/SystemZISelLowering.cpp | 10 +- lib/Target/SystemZ/SystemZInstrInfo.cpp | 18 +- lib/Target/SystemZ/SystemZLongBranch.cpp | 7 +- lib/Target/SystemZ/SystemZShortenInst.cpp | 2 +- lib/Target/TargetLoweringObjectFile.cpp | 8 +- lib/Target/TargetMachine.cpp | 2 +- lib/Target/TargetMachineC.cpp | 4 +- lib/Target/X86/AsmParser/X86AsmParser.cpp | 77 ++++--- .../X86/Disassembler/X86Disassembler.cpp | 2 +- .../X86/InstPrinter/X86InstComments.cpp | 4 +- 
.../X86/MCTargetDesc/X86ELFRelocationInfo.cpp | 2 +- lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp | 4 +- .../X86/MCTargetDesc/X86MCCodeEmitter.cpp | 4 +- .../X86/MCTargetDesc/X86MCTargetDesc.cpp | 6 +- .../MCTargetDesc/X86MachORelocationInfo.cpp | 2 +- .../X86/MCTargetDesc/X86MachObjectWriter.cpp | 8 +- lib/Target/X86/X86AsmPrinter.cpp | 16 +- lib/Target/X86/X86CodeEmitter.cpp | 4 +- lib/Target/X86/X86FastISel.cpp | 28 +-- lib/Target/X86/X86FixupLEAs.cpp | 8 +- lib/Target/X86/X86FloatingPoint.cpp | 2 +- lib/Target/X86/X86FrameLowering.cpp | 24 +- lib/Target/X86/X86ISelDAGToDAG.cpp | 69 +++--- lib/Target/X86/X86ISelLowering.cpp | 78 +++---- lib/Target/X86/X86InstrInfo.cpp | 138 +++++------ lib/Target/X86/X86JITInfo.cpp | 2 +- lib/Target/X86/X86MCInstLower.cpp | 8 +- lib/Target/X86/X86PadShortFunction.cpp | 2 +- lib/Target/X86/X86RegisterInfo.cpp | 2 +- lib/Target/X86/X86SelectionDAGInfo.cpp | 6 +- lib/Target/X86/X86Subtarget.cpp | 2 +- lib/Target/X86/X86TargetObjectFile.cpp | 12 +- lib/Target/X86/X86TargetTransformInfo.cpp | 2 +- .../XCore/MCTargetDesc/XCoreMCAsmInfo.cpp | 2 +- .../XCore/MCTargetDesc/XCoreMCTargetDesc.cpp | 2 +- lib/Target/XCore/XCoreISelDAGToDAG.cpp | 10 +- lib/Target/XCore/XCoreISelLowering.cpp | 4 +- lib/Target/XCore/XCoreInstrInfo.cpp | 2 +- lib/Target/XCore/XCoreLowerThreadLocal.cpp | 5 +- 189 files changed, 1074 insertions(+), 1051 deletions(-) diff --git a/lib/Target/AArch64/AArch64BranchFixupPass.cpp b/lib/Target/AArch64/AArch64BranchFixupPass.cpp index 1c65629908e..bff8ef50358 100644 --- a/lib/Target/AArch64/AArch64BranchFixupPass.cpp +++ b/lib/Target/AArch64/AArch64BranchFixupPass.cpp @@ -450,7 +450,7 @@ bool AArch64BranchFixup::isBBInRange(MachineInstr *MI, /// displacement field. 
bool AArch64BranchFixup::fixupImmediateBr(ImmBranch &Br) { MachineInstr *MI = Br.MI; - MachineBasicBlock *DestBB = 0; + MachineBasicBlock *DestBB = nullptr; for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { if (MI->getOperand(i).isMBB()) { DestBB = MI->getOperand(i).getMBB(); diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index b2f8330306b..aab3d59697a 100644 --- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -261,7 +261,7 @@ SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) { } else { // Can't handle it in one instruction. There's scope for permitting two (or // more) instructions, but that'll need more thought. - return NULL; + return nullptr; } ResNode = CurDAG->getMachineNode(MOVOpcode, dl, MOVType, @@ -737,7 +737,7 @@ SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, if (isUpdating) ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2)); - return NULL; + return nullptr; } SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, @@ -862,7 +862,7 @@ SDNode *AArch64DAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1)); if (isUpdating) ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2)); - return NULL; + return nullptr; } // We only have 128-bit vector type of load/store lane instructions. 
@@ -956,7 +956,7 @@ SDNode *AArch64DAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1)); if (isUpdating) ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2)); - return NULL; + return nullptr; } unsigned AArch64DAGToDAGISel::getTBLOpc(bool IsExt, bool Is64Bit, @@ -1031,7 +1031,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) { if (Node->isMachineOpcode()) { DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n"); Node->setNodeId(-1); - return NULL; + return nullptr; } switch (Node->getOpcode()) { @@ -1115,7 +1115,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) { TFI, CurDAG->getTargetConstant(0, PtrTy)); } case ISD::Constant: { - SDNode *ResNode = 0; + SDNode *ResNode = nullptr; if (cast(Node)->getZExtValue() == 0) { // XZR and WZR are probably even better than an actual move: most of the // time they can be folded into another instruction with *no* cost. diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index b1c31aa37f3..85df80613c4 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -589,7 +589,7 @@ static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord, // would fail to figure out the register pressure correctly. std::pair AArch64TargetLowering::findRepresentativeClass(MVT VT) const{ - const TargetRegisterClass *RRC = 0; + const TargetRegisterClass *RRC = nullptr; uint8_t Cost = 1; switch (VT.SimpleTy) { default: @@ -1185,7 +1185,7 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const { case AArch64ISD::NEON_VEXTRACT: return "AArch64ISD::NEON_VEXTRACT"; default: - return NULL; + return nullptr; } } @@ -2159,7 +2159,7 @@ AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { // If softenSetCCOperands returned a scalar, we need to compare the result // against zero to select between true and false values. 
- if (RHS.getNode() == 0) { + if (!RHS.getNode()) { RHS = DAG.getConstant(0, LHS.getValueType()); CC = ISD::SETNE; } @@ -3019,7 +3019,7 @@ AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); // If softenSetCCOperands returned a scalar, use it. - if (RHS.getNode() == 0) { + if (!RHS.getNode()) { assert(LHS.getValueType() == Op.getValueType() && "Unexpected setcc expansion!"); return LHS; @@ -3167,7 +3167,7 @@ AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { // If softenSetCCOperands returned a scalar, we need to compare the result // against zero to select between true and false values. - if (RHS.getNode() == 0) { + if (!RHS.getNode()) { RHS = DAG.getConstant(0, LHS.getValueType()); CC = ISD::SETNE; } @@ -4535,14 +4535,14 @@ bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG, VT.getVectorElementType()) return false; - if (V0.getNode() == 0) { + if (!V0.getNode()) { V0 = Elt.getOperand(0); V0NumElts = V0.getValueType().getVectorNumElements(); } if (Elt.getOperand(0) == V0) { Mask[i] = (cast(Elt->getOperand(1))->getZExtValue()); continue; - } else if (V1.getNode() == 0) { + } else if (!V1.getNode()) { V1 = Elt.getOperand(0); } if (Elt.getOperand(0) == V1) { @@ -5243,7 +5243,7 @@ AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector &Ops, SelectionDAG &DAG) const { - SDValue Result(0, 0); + SDValue Result; // Only length 1 constraints are C_Other. 
if (Constraint.size() != 1) return; diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp index 6bc36030980..a39c1c5f258 100644 --- a/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -391,10 +391,10 @@ AArch64InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, const SmallVectorImpl &Cond, DebugLoc DL) const { - if (FBB == 0 && Cond.empty()) { + if (!FBB && Cond.empty()) { BuildMI(&MBB, DL, get(AArch64::Bimm)).addMBB(TBB); return 1; - } else if (FBB == 0) { + } else if (!FBB) { MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm())); for (int i = 1, e = Cond.size(); i != e; ++i) MIB.addOperand(Cond[i]); diff --git a/lib/Target/AArch64/AArch64MCInstLower.cpp b/lib/Target/AArch64/AArch64MCInstLower.cpp index 3842bfdab0d..103aeb00d87 100644 --- a/lib/Target/AArch64/AArch64MCInstLower.cpp +++ b/lib/Target/AArch64/AArch64MCInstLower.cpp @@ -30,7 +30,7 @@ using namespace llvm; MCOperand AArch64AsmPrinter::lowerSymbolOperand(const MachineOperand &MO, const MCSymbol *Sym) const { - const MCExpr *Expr = 0; + const MCExpr *Expr = nullptr; Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, OutContext); diff --git a/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/lib/Target/AArch64/AArch64TargetTransformInfo.cpp index 5b45b1046c4..c37ffb59154 100644 --- a/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ b/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -38,7 +38,7 @@ class AArch64TTI final : public ImmutablePass, public TargetTransformInfo { const AArch64TargetLowering *TLI; public: - AArch64TTI() : ImmutablePass(ID), ST(0), TLI(0) { + AArch64TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) { llvm_unreachable("This pass cannot be directly constructed"); } diff --git a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp index 0b34d6a01a7..361f9848ce9 100644 --- 
a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp +++ b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp @@ -1313,7 +1313,7 @@ AArch64AsmParser::ParseOperand(SmallVectorImpl &Operands, case AsmToken::Colon: { SMLoc StartLoc = Parser.getTok().getLoc(); SMLoc EndLoc; - const MCExpr *ImmVal = 0; + const MCExpr *ImmVal = nullptr; if (ParseImmediate(ImmVal) != MatchOperand_Success) return MatchOperand_ParseFail; @@ -1325,7 +1325,7 @@ AArch64AsmParser::ParseOperand(SmallVectorImpl &Operands, case AsmToken::Hash: { // Immediates SMLoc StartLoc = Parser.getTok().getLoc(); SMLoc EndLoc; - const MCExpr *ImmVal = 0; + const MCExpr *ImmVal = nullptr; Parser.Lex(); if (ParseImmediate(ImmVal) != MatchOperand_Success) diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp index c1e443e8681..9a104b70e03 100644 --- a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp +++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp @@ -63,7 +63,7 @@ static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI, MCAsmInfo *MAI = new AArch64ELFMCAsmInfo(TT); unsigned Reg = MRI.getDwarfRegNum(AArch64::XSP, true); - MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 0); + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0); MAI->addInitialFrameState(Inst); return MAI; @@ -114,7 +114,7 @@ static MCInstPrinter *createAArch64MCInstPrinter(const Target &T, const MCSubtargetInfo &STI) { if (SyntaxVariant == 0) return new AArch64InstPrinter(MAI, MII, MRI, STI); - return 0; + return nullptr; } namespace { diff --git a/lib/Target/ARM/A15SDOptimizer.cpp b/lib/Target/ARM/A15SDOptimizer.cpp index 9f14ca02227..94faf6f60db 100644 --- a/lib/Target/ARM/A15SDOptimizer.cpp +++ b/lib/Target/ARM/A15SDOptimizer.cpp @@ -260,7 +260,7 @@ unsigned A15SDOptimizer::optimizeSDPattern(MachineInstr *MI) { if (DPRMI && SPRMI) { // See if the first operand of this insert_subreg is IMPLICIT_DEF 
MachineInstr *ECDef = elideCopies(DPRMI); - if (ECDef != 0 && ECDef->isImplicitDef()) { + if (ECDef && ECDef->isImplicitDef()) { // Another corner case - if we're inserting something that is purely // a subreg copy of a DPR, just use that DPR. @@ -349,10 +349,10 @@ MachineInstr *A15SDOptimizer::elideCopies(MachineInstr *MI) { if (!MI->isFullCopy()) return MI; if (!TRI->isVirtualRegister(MI->getOperand(1).getReg())) - return NULL; + return nullptr; MachineInstr *Def = MRI->getVRegDef(MI->getOperand(1).getReg()); if (!Def) - return NULL; + return nullptr; return elideCopies(Def); } diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp index aa285b27dee..05e004ca07f 100644 --- a/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/ARMAsmPrinter.cpp @@ -384,7 +384,7 @@ void ARMAsmPrinter::emitInlineAsmEnd(const MCSubtargetInfo &StartInfo, // If either end mode is unknown (EndInfo == NULL) or different than // the start mode, then restore the start mode. const bool WasThumb = isThumb(StartInfo); - if (EndInfo == NULL || WasThumb != isThumb(*EndInfo)) { + if (!EndInfo || WasThumb != isThumb(*EndInfo)) { OutStreamer.EmitAssemblerFlag(WasThumb ? MCAF_Code16 : MCAF_Code32); } } @@ -724,7 +724,7 @@ MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV, MachineModuleInfoImpl::StubValueTy &StubSym = GV->hasHiddenVisibility() ? MMIMachO.getHiddenGVStubEntry(MCSym) : MMIMachO.getGVStubEntry(MCSym); - if (StubSym.getPointer() == 0) + if (!StubSym.getPointer()) StubSym = MachineModuleInfoImpl:: StubValueTy(getSymbol(GV), !GV->hasInternalLinkage()); return MCSym; diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index bf7a1aaaf25..1a91bb6fda6 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -127,14 +127,14 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, // FIXME: Thumb2 support. 
if (!EnableARM3Addr) - return NULL; + return nullptr; MachineInstr *MI = MBBI; MachineFunction &MF = *MI->getParent()->getParent(); uint64_t TSFlags = MI->getDesc().TSFlags; bool isPre = false; switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) { - default: return NULL; + default: return nullptr; case ARMII::IndexModePre: isPre = true; break; @@ -146,10 +146,10 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, // operation. unsigned MemOpc = getUnindexedOpcode(MI->getOpcode()); if (MemOpc == 0) - return NULL; + return nullptr; - MachineInstr *UpdateMI = NULL; - MachineInstr *MemMI = NULL; + MachineInstr *UpdateMI = nullptr; + MachineInstr *MemMI = nullptr; unsigned AddrMode = (TSFlags & ARMII::AddrModeMask); const MCInstrDesc &MCID = MI->getDesc(); unsigned NumOps = MCID.getNumOperands(); @@ -171,7 +171,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, if (ARM_AM::getSOImmVal(Amt) == -1) // Can't encode it in a so_imm operand. This transformation will // add more than 1 instruction. Abandon! - return NULL; + return nullptr; UpdateMI = BuildMI(MF, MI->getDebugLoc(), get(isSub ? ARM::SUBri : ARM::ADDri), WBReg) .addReg(BaseReg).addImm(Amt) @@ -275,8 +275,8 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl &Cond, bool AllowModify) const { - TBB = 0; - FBB = 0; + TBB = nullptr; + FBB = nullptr; MachineBasicBlock::iterator I = MBB.end(); if (I == MBB.begin()) @@ -333,7 +333,7 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB, I->isReturn())) { // Forget any previous condition branch information - it no longer applies. Cond.clear(); - FBB = 0; + FBB = nullptr; // If we can modify the function, delete everything below this // unconditional branch. 
@@ -407,7 +407,7 @@ ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, assert((Cond.size() == 2 || Cond.size() == 0) && "ARM branch conditions have two components!"); - if (FBB == 0) { + if (!FBB) { if (Cond.empty()) { // Unconditional branch? if (isThumb) BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0); @@ -1252,7 +1252,7 @@ static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) { static_cast(MCPE.Val.MachineCPVal); unsigned PCLabelId = AFI->createPICLabelUId(); - ARMConstantPoolValue *NewCPV = 0; + ARMConstantPoolValue *NewCPV = nullptr; // FIXME: The below assumes PIC relocation model and that the function // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and @@ -1663,10 +1663,10 @@ ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const { ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg); // MOVCC AL can't be inverted. Shouldn't happen. if (CC == ARMCC::AL || PredReg != ARM::CPSR) - return NULL; + return nullptr; MI = TargetInstrInfo::commuteInstruction(MI, NewMI); if (!MI) - return NULL; + return nullptr; // After swapping the MOVCC operands, also invert the condition. MI->getOperand(MI->findFirstPredOperandIdx()) .setImm(ARMCC::getOppositeCondition(CC)); @@ -1682,35 +1682,36 @@ static MachineInstr *canFoldIntoMOVCC(unsigned Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII) { if (!TargetRegisterInfo::isVirtualRegister(Reg)) - return 0; + return nullptr; if (!MRI.hasOneNonDBGUse(Reg)) - return 0; + return nullptr; MachineInstr *MI = MRI.getVRegDef(Reg); if (!MI) - return 0; + return nullptr; // MI is folded into the MOVCC by predicating it. if (!MI->isPredicable()) - return 0; + return nullptr; // Check if MI has any non-dead defs or physreg uses. This also detects // predicated instructions which will be reading CPSR. 
for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &MO = MI->getOperand(i); // Reject frame index operands, PEI can't handle the predicated pseudos. if (MO.isFI() || MO.isCPI() || MO.isJTI()) - return 0; + return nullptr; if (!MO.isReg()) continue; // MI can't have any tied operands, that would conflict with predication. if (MO.isTied()) - return 0; + return nullptr; if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) - return 0; + return nullptr; if (MO.isDef() && !MO.isDead()) - return 0; + return nullptr; } bool DontMoveAcrossStores = true; - if (!MI->isSafeToMove(TII, /* AliasAnalysis = */ 0, DontMoveAcrossStores)) - return 0; + if (!MI->isSafeToMove(TII, /* AliasAnalysis = */ nullptr, + DontMoveAcrossStores)) + return nullptr; return MI; } @@ -1745,14 +1746,14 @@ MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI, if (!DefMI) DefMI = canFoldIntoMOVCC(MI->getOperand(1).getReg(), MRI, this); if (!DefMI) - return 0; + return nullptr; // Find new register class to use. MachineOperand FalseReg = MI->getOperand(Invert ? 2 : 1); unsigned DestReg = MI->getOperand(0).getReg(); const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg()); if (!MRI.constrainRegClass(DestReg, PreviousClass)) - return 0; + return nullptr; // Create a new predicated version of DefMI. // Rfalse is the first use. @@ -2258,7 +2259,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, // Masked compares sometimes use the same register as the corresponding 'and'. if (CmpMask != ~0) { if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(MI)) { - MI = 0; + MI = nullptr; for (MachineRegisterInfo::use_instr_iterator UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end(); UI != UE; ++UI) { @@ -2285,17 +2286,17 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, // One is MI, the other is a SUB instruction. 
// For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1). // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue). - MachineInstr *Sub = NULL; + MachineInstr *Sub = nullptr; if (SrcReg2 != 0) // MI is not a candidate for CMPrr. - MI = NULL; + MI = nullptr; else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) { // Conservatively refuse to convert an instruction which isn't in the same // BB as the comparison. // For CMPri, we need to check Sub, thus we can't return here. if (CmpInstr->getOpcode() == ARM::CMPri || CmpInstr->getOpcode() == ARM::t2CMPri) - MI = NULL; + MI = nullptr; else return false; } @@ -3299,7 +3300,7 @@ static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI, if (Idx == -1) { Dist = 0; - return 0; + return nullptr; } UseIdx = Idx; diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 1522893d458..9548dd50b23 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -107,7 +107,7 @@ ARMBaseRegisterInfo::getThisReturnPreservedMask(CallingConv::ID CC) const { // should return NULL if (CC == CallingConv::GHC) // This is academic becase all GHC calls are (supposed to be) tail calls - return NULL; + return nullptr; return (STI.isTargetIOS() && !STI.isAAPCS_ABI()) ? CSR_iOS_ThisReturn_RegMask : CSR_AAPCS_ThisReturn_RegMask; } @@ -173,7 +173,7 @@ ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind const TargetRegisterClass * ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const { if (RC == &ARM::CCRRegClass) - return 0; // Can't copy CCR registers. + return nullptr; // Can't copy CCR registers. 
return RC; } diff --git a/lib/Target/ARM/ARMCodeEmitter.cpp b/lib/Target/ARM/ARMCodeEmitter.cpp index 57e52367d4a..2fd7eddd874 100644 --- a/lib/Target/ARM/ARMCodeEmitter.cpp +++ b/lib/Target/ARM/ARMCodeEmitter.cpp @@ -66,10 +66,10 @@ namespace { static char ID; public: ARMCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) - : MachineFunctionPass(ID), JTI(0), + : MachineFunctionPass(ID), JTI(nullptr), II((const ARMBaseInstrInfo *)tm.getInstrInfo()), TD(tm.getDataLayout()), TM(tm), - MCE(mce), MCPEs(0), MJTEs(0), + MCE(mce), MCPEs(nullptr), MJTEs(nullptr), IsPIC(TM.getRelocationModel() == Reloc::PIC_), IsThumb(false) {} /// getBinaryCodeForInstr - This function, generated by the @@ -374,7 +374,7 @@ bool ARMCodeEmitter::runOnMachineFunction(MachineFunction &MF) { Subtarget = &TM.getSubtarget(); MCPEs = &MF.getConstantPool()->getConstants(); - MJTEs = 0; + MJTEs = nullptr; if (MF.getJumpTableInfo()) MJTEs = &MF.getJumpTableInfo()->getJumpTables(); IsPIC = TM.getRelocationModel() == Reloc::PIC_; IsThumb = MF.getInfo()->isThumbFunction(); diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp index 75adcc9feab..ce264eeef90 100644 --- a/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -594,7 +594,7 @@ ARMConstantIslands::CPEntry if (CPEs[i].CPEMI == CPEMI) return &CPEs[i]; } - return NULL; + return nullptr; } /// getCPELogAlign - Returns the required alignment of the constant pool entry @@ -1103,7 +1103,7 @@ bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI, assert(CPE && "Unexpected!"); if (--CPE->RefCount == 0) { removeDeadCPEMI(CPEMI); - CPE->CPEMI = NULL; + CPE->CPEMI = nullptr; --NumCPEs; return true; } @@ -1136,7 +1136,7 @@ int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) if (CPEs[i].CPEMI == CPEMI) continue; // Removing CPEs can leave empty entries, skip - if (CPEs[i].CPEMI == NULL) + if (CPEs[i].CPEMI == nullptr) continue; if 
(isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(), U.NegOk)) { @@ -1318,7 +1318,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex, ++MI; unsigned CPUIndex = CPUserIndex+1; unsigned NumCPUsers = CPUsers.size(); - MachineInstr *LastIT = 0; + MachineInstr *LastIT = nullptr; for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI); Offset < BaseInsertOffset; Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) { @@ -1492,7 +1492,7 @@ bool ARMConstantIslands::removeUnusedCPEntries() { for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) { if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) { removeDeadCPEMI(CPEs[j].CPEMI); - CPEs[j].CPEMI = NULL; + CPEs[j].CPEMI = nullptr; MadeChange = true; } } @@ -1845,7 +1845,7 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() { // FIXME: After the tables are shrunk, can we get rid some of the // constantpool tables? MachineJumpTableInfo *MJTI = MF->getJumpTableInfo(); - if (MJTI == 0) return false; + if (!MJTI) return false; const std::vector &JT = MJTI->getJumpTables(); for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) { @@ -1971,7 +1971,7 @@ bool ARMConstantIslands::reorderThumb2JumpTables() { bool MadeChange = false; MachineJumpTableInfo *MJTI = MF->getJumpTableInfo(); - if (MJTI == 0) return false; + if (!MJTI) return false; const std::vector &JT = MJTI->getJumpTables(); for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) { @@ -2013,7 +2013,7 @@ adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) { // try to move it; otherwise, create a new block following the jump // table that branches back to the actual target. This is a very simple // heuristic. FIXME: We can definitely improve it. 
- MachineBasicBlock *TBB = 0, *FBB = 0; + MachineBasicBlock *TBB = nullptr, *FBB = nullptr; SmallVector Cond; SmallVector CondPrior; MachineFunction::iterator BBi = BB; @@ -2033,7 +2033,7 @@ adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) { // Update numbering to account for the block being moved. MF->RenumberBlocks(); ++NumJTMoved; - return NULL; + return nullptr; } // Create a new MBB for the code after the jump BB. diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp index 3692083e1c2..d3219aa6ef0 100644 --- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp +++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp @@ -346,7 +346,7 @@ static const NEONLdStTableEntry *LookupNEONLdSt(unsigned Opcode) { std::lower_bound(NEONLdStTable, NEONLdStTable + NumEntries, Opcode); if (I != NEONLdStTable + NumEntries && I->PseudoOpc == Opcode) return I; - return NULL; + return nullptr; } /// GetDSubRegs - Get 4 D subregisters of a Q, QQ, or QQQQ register, diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index 3884daa2c5b..c17631b5119 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -750,7 +750,7 @@ bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) { // Computes the address to get to an object. bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { // Some boilerplate from the X86 FastISel. 
- const User *U = NULL; + const User *U = nullptr; unsigned Opcode = Instruction::UserOp1; if (const Instruction *I = dyn_cast(Obj)) { // Don't walk into other basic blocks unless the object is an alloca from @@ -2163,7 +2163,8 @@ unsigned ARMFastISel::getLibcallReg(const Twine &Name) { if (!LCREVT.isSimple()) return 0; GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false, - GlobalValue::ExternalLinkage, 0, Name); + GlobalValue::ExternalLinkage, nullptr, + Name); assert(GV->getType() == GVTy && "We miscomputed the type for the global!"); return ARMMaterializeGV(GV, LCREVT.getSimpleVT()); } @@ -2267,7 +2268,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { } bool ARMFastISel::SelectCall(const Instruction *I, - const char *IntrMemName = 0) { + const char *IntrMemName = nullptr) { const CallInst *CI = cast(I); const Value *Callee = CI->getCalledValue(); @@ -3073,6 +3074,6 @@ namespace llvm { TM.Options.NoFramePointerElim = true; return new ARMFastISel(funcInfo, libInfo); } - return 0; + return nullptr; } } diff --git a/lib/Target/ARM/ARMHazardRecognizer.cpp b/lib/Target/ARM/ARMHazardRecognizer.cpp index 61d4e12cb09..0885c4e5721 100644 --- a/lib/Target/ARM/ARMHazardRecognizer.cpp +++ b/lib/Target/ARM/ARMHazardRecognizer.cpp @@ -77,7 +77,7 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) { } void ARMHazardRecognizer::Reset() { - LastMI = 0; + LastMI = nullptr; FpMLxStalls = 0; ScoreboardHazardRecognizer::Reset(); } @@ -95,7 +95,7 @@ void ARMHazardRecognizer::EmitInstruction(SUnit *SU) { void ARMHazardRecognizer::AdvanceCycle() { if (FpMLxStalls && --FpMLxStalls == 0) // Stalled for 4 cycles but still can't schedule any other instructions. 
- LastMI = 0; + LastMI = nullptr; ScoreboardHazardRecognizer::AdvanceCycle(); } diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index 654e3cecefe..dc19955e490 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -1441,7 +1441,7 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) { LoadSDNode *LD = cast(N); ISD::MemIndexedMode AM = LD->getAddressingMode(); if (AM == ISD::UNINDEXED) - return NULL; + return nullptr; EVT LoadedVT = LD->getMemoryVT(); SDValue Offset, AMOpc; @@ -1507,14 +1507,14 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) { } } - return NULL; + return nullptr; } SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) { LoadSDNode *LD = cast(N); ISD::MemIndexedMode AM = LD->getAddressingMode(); if (AM == ISD::UNINDEXED) - return NULL; + return nullptr; EVT LoadedVT = LD->getMemoryVT(); bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD; @@ -1541,7 +1541,7 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) { Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST; break; default: - return NULL; + return nullptr; } Match = true; } @@ -1555,7 +1555,7 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) { MVT::Other, Ops); } - return NULL; + return nullptr; } /// \brief Form a GPRPair pseudo register from a pair of GPR regs. @@ -1777,7 +1777,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs, SDValue MemAddr, Align; unsigned AddrOpIdx = isUpdating ? 
1 : 2; if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align)) - return NULL; + return nullptr; SDValue Chain = N->getOperand(0); EVT VT = N->getValueType(0); @@ -1896,7 +1896,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs, ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1)); if (isUpdating) ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2)); - return NULL; + return nullptr; } SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs, @@ -1910,7 +1910,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs, unsigned AddrOpIdx = isUpdating ? 1 : 2; unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1) if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align)) - return NULL; + return nullptr; MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = cast(N)->getMemOperand(); @@ -2056,7 +2056,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, unsigned AddrOpIdx = isUpdating ? 1 : 2; unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 
2 : 1) if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align)) - return NULL; + return nullptr; MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = cast(N)->getMemOperand(); @@ -2161,7 +2161,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1)); if (isUpdating) ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2)); - return NULL; + return nullptr; } SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, @@ -2172,7 +2172,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, SDValue MemAddr, Align; if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align)) - return NULL; + return nullptr; MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = cast(N)->getMemOperand(); @@ -2244,7 +2244,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1)); if (isUpdating) ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2)); - return NULL; + return nullptr; } SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, @@ -2283,7 +2283,7 @@ SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned) { if (!Subtarget->hasV6T2Ops()) - return NULL; + return nullptr; unsigned Opc = isSigned ? (Subtarget->isThumb() ? 
ARM::t2SBFX : ARM::SBFX) @@ -2296,7 +2296,7 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N, // The immediate is a mask of the low bits iff imm & (imm+1) == 0 if (And_imm & (And_imm + 1)) - return NULL; + return nullptr; unsigned Srl_imm = 0; if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL, @@ -2336,7 +2336,7 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N, return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5); } } - return NULL; + return nullptr; } // Otherwise, we're looking for a shift of a shift @@ -2350,7 +2350,7 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N, unsigned Width = 32 - Srl_imm - 1; int LSB = Srl_imm - Shl_imm; if (LSB < 0) - return NULL; + return nullptr; SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); SDValue Ops[] = { N->getOperand(0).getOperand(0), CurDAG->getTargetConstant(LSB, MVT::i32), @@ -2359,7 +2359,7 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N, return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5); } } - return NULL; + return nullptr; } /// Target-specific DAG combining for ISD::XOR. @@ -2378,10 +2378,10 @@ SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){ EVT VT = N->getValueType(0); if (Subtarget->isThumb1Only()) - return NULL; + return nullptr; if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA) - return NULL; + return nullptr; SDValue ADDSrc0 = XORSrc0.getOperand(0); SDValue ADDSrc1 = XORSrc0.getOperand(1); @@ -2392,13 +2392,13 @@ SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){ unsigned Size = XType.getSizeInBits() - 1; if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 && - XType.isInteger() && SRAConstant != NULL && + XType.isInteger() && SRAConstant != nullptr && Size == SRAConstant->getZExtValue()) { unsigned Opcode = Subtarget->isThumb2() ? 
ARM::t2ABS : ARM::ABS; return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0); } - return NULL; + return nullptr; } SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) { @@ -2415,7 +2415,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { if (N->isMachineOpcode()) { N->setNodeId(-1); - return NULL; // Already selected. + return nullptr; // Already selected. } switch (N->getOpcode()) { @@ -2479,7 +2479,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { Ops); } ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0)); - return NULL; + return nullptr; } // Other cases are autogenerated. @@ -2661,7 +2661,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { } } case ISD::LOAD: { - SDNode *ResNode = 0; + SDNode *ResNode = nullptr; if (Subtarget->isThumb() && Subtarget->hasThumb2()) ResNode = SelectT2IndexedLoad(N); else @@ -2708,13 +2708,13 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { } ReplaceUses(SDValue(N, 0), SDValue(Chain.getNode(), Chain.getResNo())); - return NULL; + return nullptr; } case ARMISD::VZIP: { unsigned Opc = 0; EVT VT = N->getValueType(0); switch (VT.getSimpleVT().SimpleTy) { - default: return NULL; + default: return nullptr; case MVT::v8i8: Opc = ARM::VZIPd8; break; case MVT::v4i16: Opc = ARM::VZIPd16; break; case MVT::v2f32: @@ -2734,7 +2734,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { unsigned Opc = 0; EVT VT = N->getValueType(0); switch (VT.getSimpleVT().SimpleTy) { - default: return NULL; + default: return nullptr; case MVT::v8i8: Opc = ARM::VUZPd8; break; case MVT::v4i16: Opc = ARM::VUZPd16; break; case MVT::v2f32: @@ -2754,7 +2754,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { unsigned Opc = 0; EVT VT = N->getValueType(0); switch (VT.getSimpleVT().SimpleTy) { - default: return NULL; + default: return nullptr; case MVT::v8i8: Opc = ARM::VTRNd8; break; case MVT::v4i16: Opc = ARM::VTRNd16; break; case MVT::v2f32: @@ -2835,7 +2835,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { ARM::VLD1q16wb_fixed, ARM::VLD1q32wb_fixed, ARM::VLD1q64wb_fixed }; - 
return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0); + return SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr); } case ARMISD::VLD2_UPD: { @@ -2846,7 +2846,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed, ARM::VLD2q16PseudoWB_fixed, ARM::VLD2q32PseudoWB_fixed }; - return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0); + return SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr); } case ARMISD::VLD3_UPD: { @@ -2913,7 +2913,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { ARM::VST1q16wb_fixed, ARM::VST1q32wb_fixed, ARM::VST1q64wb_fixed }; - return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0); + return SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr); } case ARMISD::VST2_UPD: { @@ -2924,7 +2924,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed, ARM::VST2q16PseudoWB_fixed, ARM::VST2q32PseudoWB_fixed }; - return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0); + return SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr); } case ARMISD::VST3_UPD: { @@ -3048,7 +3048,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { ReplaceUses(SDValue(N, 1), Result); } ReplaceUses(SDValue(N, 2), OutChain); - return NULL; + return nullptr; } case Intrinsic::arm_stlexd: case Intrinsic::arm_strexd: { @@ -3094,7 +3094,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { ARM::VLD1d32, ARM::VLD1d64 }; static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16, ARM::VLD1q32, ARM::VLD1q64}; - return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0); + return SelectVLD(N, false, 1, DOpcodes, QOpcodes, nullptr); } case Intrinsic::arm_neon_vld2: { @@ -3102,7 +3102,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { ARM::VLD2d32, ARM::VLD1q64 }; static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo, ARM::VLD2q32Pseudo }; - return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0); + return SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr); } case 
Intrinsic::arm_neon_vld3: { @@ -3165,7 +3165,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { ARM::VST1d32, ARM::VST1d64 }; static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16, ARM::VST1q32, ARM::VST1q64 }; - return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0); + return SelectVST(N, false, 1, DOpcodes, QOpcodes, nullptr); } case Intrinsic::arm_neon_vst2: { @@ -3173,7 +3173,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { ARM::VST2d32, ARM::VST1q64 }; static uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo, ARM::VST2q32Pseudo }; - return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0); + return SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr); } case Intrinsic::arm_neon_vst3: { @@ -3307,7 +3307,8 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){ // them into a GPRPair. SDLoc dl(N); - SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1) : SDValue(0,0); + SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1) + : SDValue(nullptr,0); SmallVector OpChanged; // Glue node will be appended late. @@ -3436,7 +3437,7 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){ if (Glue.getNode()) AsmNodeOperands.push_back(Glue); if (!Changed) - return NULL; + return nullptr; SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N), CurDAG->getVTList(MVT::Other, MVT::Glue), &AsmNodeOperands[0], diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 77d8f2109de..7c302ca2e4d 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -248,9 +248,9 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) } // These libcalls are not available in 32-bit. 
- setLibcallName(RTLIB::SHL_I128, 0); - setLibcallName(RTLIB::SRL_I128, 0); - setLibcallName(RTLIB::SRA_I128, 0); + setLibcallName(RTLIB::SHL_I128, nullptr); + setLibcallName(RTLIB::SRL_I128, nullptr); + setLibcallName(RTLIB::SRA_I128, nullptr); if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetMachO() && !Subtarget->isTargetWindows()) { @@ -915,7 +915,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) // and extractions. std::pair ARMTargetLowering::findRepresentativeClass(MVT VT) const{ - const TargetRegisterClass *RRC = 0; + const TargetRegisterClass *RRC = nullptr; uint8_t Cost = 1; switch (VT.SimpleTy) { default: @@ -952,7 +952,7 @@ ARMTargetLowering::findRepresentativeClass(MVT VT) const{ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { - default: return 0; + default: return nullptr; case ARMISD::Wrapper: return "ARMISD::Wrapper"; case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC"; case ARMISD::WrapperJT: return "ARMISD::WrapperJT"; @@ -1359,7 +1359,7 @@ void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG, RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1))); else { assert(NextVA.isMemLoc()); - if (StackPtr.getNode() == 0) + if (!StackPtr.getNode()) StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1), @@ -2839,8 +2839,9 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, // If there is no regs to be stored, just point address after last // argument passed via stack. 
int FrameIndex = - StoreByValRegs(CCInfo, DAG, dl, Chain, 0, CCInfo.getInRegsParamsCount(), - 0, ArgOffset, 0, ForceMutable, 0, TotalArgRegsSaveSize); + StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr, + CCInfo.getInRegsParamsCount(), 0, ArgOffset, 0, ForceMutable, + 0, TotalArgRegsSaveSize); AFI->setVarArgsFrameIndex(FrameIndex); } @@ -6760,8 +6761,8 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI, MachineFunction *MF = BB->getParent(); MachineRegisterInfo &MRI = MF->getRegInfo(); unsigned UnitSize = 0; - const TargetRegisterClass *TRC = 0; - const TargetRegisterClass *VecTRC = 0; + const TargetRegisterClass *TRC = nullptr; + const TargetRegisterClass *VecTRC = nullptr; bool IsThumb1 = Subtarget->isThumb1Only(); bool IsThumb2 = Subtarget->isThumb2(); @@ -6795,7 +6796,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI, ? (const TargetRegisterClass *)&ARM::DPairRegClass : UnitSize == 8 ? (const TargetRegisterClass *)&ARM::DPRRegClass - : 0; + : nullptr; unsigned BytesLeft = SizeVal % UnitSize; unsigned LoopSize = SizeVal - BytesLeft; @@ -7586,7 +7587,7 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode, // Look for the glued ADDE. SDNode* AddeNode = AddcNode->getGluedUser(); - if (AddeNode == NULL) + if (!AddeNode) return SDValue(); // Make sure it is really an ADDE. @@ -7621,9 +7622,9 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode, // Figure out the high and low input values to the MLAL node. 
SDValue* HiMul = &MULOp; - SDValue* HiAdd = NULL; - SDValue* LoMul = NULL; - SDValue* LowAdd = NULL; + SDValue* HiAdd = nullptr; + SDValue* LoMul = nullptr; + SDValue* LowAdd = nullptr; if (IsLeftOperandMUL) HiAdd = &AddeOp1; @@ -7640,7 +7641,7 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode, LowAdd = &AddcOp0; } - if (LoMul == NULL) + if (!LoMul) return SDValue(); if (LoMul->getNode() != HiMul->getNode()) @@ -10058,7 +10059,7 @@ ARMTargetLowering::getSingleConstraintMatchWeight( Value *CallOperandVal = info.CallOperandVal; // If we don't have a value, we can't do a match, // but allow it at the lowest weight. - if (CallOperandVal == NULL) + if (!CallOperandVal) return CW_Default; Type *type = CallOperandVal->getType(); // Look at the constraint type. @@ -10137,7 +10138,7 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector&Ops, SelectionDAG &DAG) const { - SDValue Result(0, 0); + SDValue Result; // Currently only support length 1 constraints. 
if (Constraint.length() != 1) return; diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index 3ba54a0170e..db1e24c84a3 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -1747,8 +1747,8 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB, while (Ops.size() > 1) { unsigned FirstLoc = ~0U; unsigned LastLoc = 0; - MachineInstr *FirstOp = 0; - MachineInstr *LastOp = 0; + MachineInstr *FirstOp = nullptr; + MachineInstr *LastOp = nullptr; int LastOffset = 0; unsigned LastOpcode = 0; unsigned LastBytes = 0; diff --git a/lib/Target/ARM/ARMTargetObjectFile.cpp b/lib/Target/ARM/ARMTargetObjectFile.cpp index 3379f85b4ec..6cefee5c00d 100644 --- a/lib/Target/ARM/ARMTargetObjectFile.cpp +++ b/lib/Target/ARM/ARMTargetObjectFile.cpp @@ -31,7 +31,7 @@ void ARMElfTargetObjectFile::Initialize(MCContext &Ctx, InitializeELF(isAAPCS_ABI); if (isAAPCS_ABI) { - LSDASection = NULL; + LSDASection = nullptr; } AttributesSection = diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp index 402f31ece39..ebb25f49fa4 100644 --- a/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -43,7 +43,7 @@ class ARMTTI final : public ImmutablePass, public TargetTransformInfo { unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const; public: - ARMTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) { + ARMTTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) { llvm_unreachable("This pass cannot be directly constructed"); } diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index 71b558d46bb..d1cfde11ec8 100644 --- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -1099,7 +1099,7 @@ public: if (!isMem()) return false; // No offset of any kind. 
- return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && + return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr && (alignOK || Memory.Alignment == Alignment); } bool isMemPCRelImm12() const { @@ -1708,7 +1708,7 @@ public: void addExpr(MCInst &Inst, const MCExpr *Expr) const { // Add as immediates when possible. Null MCExpr = 0. - if (Expr == 0) + if (!Expr) Inst.addOperand(MCOperand::CreateImm(0)); else if (const MCConstantExpr *CE = dyn_cast(Expr)) Inst.addOperand(MCOperand::CreateImm(CE->getValue())); @@ -2993,7 +2993,7 @@ int ARMAsmParser::tryParseShiftRegister( Parser.getTok().is(AsmToken::Dollar)) { Parser.Lex(); // Eat hash. SMLoc ImmLoc = Parser.getTok().getLoc(); - const MCExpr *ShiftExpr = 0; + const MCExpr *ShiftExpr = nullptr; if (getParser().parseExpression(ShiftExpr, EndLoc)) { Error(ImmLoc, "invalid immediate shift value"); return -1; @@ -4491,8 +4491,9 @@ parseMemory(SmallVectorImpl &Operands) { E = Tok.getEndLoc(); Parser.Lex(); // Eat right bracket token. - Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, - 0, 0, false, S, E)); + Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0, + ARM_AM::no_shift, 0, 0, false, + S, E)); // If there's a pre-indexing writeback marker, '!', just add it as a token // operand. It's rather odd, but syntactically valid. @@ -4547,7 +4548,7 @@ parseMemory(SmallVectorImpl &Operands) { // Don't worry about range checking the value here. That's handled by // the is*() predicates. - Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, + Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0, ARM_AM::no_shift, 0, Align, false, S, E, AlignmentLoc)); @@ -4640,7 +4641,7 @@ parseMemory(SmallVectorImpl &Operands) { E = Parser.getTok().getEndLoc(); Parser.Lex(); // Eat right bracket token. 
- Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, + Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum, ShiftType, ShiftImm, 0, isNegative, S, E)); diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp index 5f706dfe894..3e87c6a7a80 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp @@ -62,7 +62,7 @@ static const char *GetFPUName(unsigned ID) { #define ARM_FPU_NAME(NAME, ID) case ARM::ID: return NAME; #include "ARMFPUName.def" } - return NULL; + return nullptr; } static const char *GetArchName(unsigned ID) { @@ -75,7 +75,7 @@ static const char *GetArchName(unsigned ID) { #define ARM_ARCH_ALIAS(NAME, ID) /* empty */ #include "ARMArchName.def" } - return NULL; + return nullptr; } static const char *GetArchDefaultCPUName(unsigned ID) { @@ -88,7 +88,7 @@ static const char *GetArchDefaultCPUName(unsigned ID) { #define ARM_ARCH_ALIAS(NAME, ID) /* empty */ #include "ARMArchName.def" } - return NULL; + return nullptr; } static unsigned GetArchDefaultCPUArch(unsigned ID) { @@ -310,7 +310,7 @@ private: for (size_t i = 0; i < Contents.size(); ++i) if (Contents[i].Tag == Attribute) return &Contents[i]; - return 0; + return nullptr; } void setAttributeItem(unsigned Attribute, unsigned Value, @@ -415,7 +415,7 @@ public: ARMTargetELFStreamer(MCStreamer &S) : ARMTargetStreamer(S), CurrentVendor("aeabi"), FPU(ARM::INVALID_FPU), Arch(ARM::INVALID_ARCH), EmittedArch(ARM::INVALID_ARCH), - AttributeSection(0) {} + AttributeSection(nullptr) {} }; /// Extend the generic ELFStreamer class so that it can emit mapping symbols at @@ -1013,7 +1013,7 @@ inline void ARMELFStreamer::SwitchToEHSection(const char *Prefix, } // Get .ARM.extab or .ARM.exidx section - const MCSectionELF *EHSection = NULL; + const MCSectionELF *EHSection = nullptr; if (const MCSymbol *Group = FnSection.getGroup()) { EHSection = getContext().getELFSection( 
EHSecName, Type, Flags | ELF::SHF_GROUP, Kind, @@ -1050,9 +1050,9 @@ void ARMELFStreamer::EmitFixup(const MCExpr *Expr, MCFixupKind Kind) { } void ARMELFStreamer::Reset() { - ExTab = NULL; - FnStart = NULL; - Personality = NULL; + ExTab = nullptr; + FnStart = nullptr; + Personality = nullptr; PersonalityIndex = ARM::EHABI::NUM_PERSONALITY_INDEX; FPReg = ARM::SP; FPOffset = 0; diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp index b7f96e08808..e93b16c6b53 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp @@ -25,7 +25,7 @@ ARMMCAsmInfoDarwin::ARMMCAsmInfoDarwin(StringRef TT) { (TheTriple.getArch() == Triple::thumbeb)) IsLittleEndian = false; - Data64bitsDirective = 0; + Data64bitsDirective = nullptr; CommentString = "@"; Code16Directive = ".code\t16"; Code32Directive = ".code\t32"; @@ -50,7 +50,7 @@ ARMELFMCAsmInfo::ARMELFMCAsmInfo(StringRef TT) { // ".comm align is in bytes but .align is pow-2." 
AlignmentIsInBytes = false; - Data64bitsDirective = 0; + Data64bitsDirective = nullptr; CommentString = "@"; Code16Directive = ".code\t16"; Code32Directive = ".code\t32"; diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp index cc1c3f17378..0d2aaa2eed8 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp @@ -247,7 +247,7 @@ static MCAsmInfo *createARMMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) { } unsigned Reg = MRI.getDwarfRegNum(ARM::SP, true); - MAI->addInitialFrameState(MCCFIInstruction::createDefCfa(0, Reg, 0)); + MAI->addInitialFrameState(MCCFIInstruction::createDefCfa(nullptr, Reg, 0)); return MAI; } @@ -297,7 +297,7 @@ static MCInstPrinter *createARMMCInstPrinter(const Target &T, const MCSubtargetInfo &STI) { if (SyntaxVariant == 0) return new ARMInstPrinter(MAI, MII, MRI, STI); - return 0; + return nullptr; } static MCRelocationInfo *createARMMCRelocationInfo(StringRef TT, diff --git a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp index f2e4532611c..38ed5632e46 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp @@ -378,7 +378,7 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer, } // Get the symbol data, if any. 
- const MCSymbolData *SD = 0; + const MCSymbolData *SD = nullptr; if (Target.getSymA()) SD = &Asm.getSymbolData(Target.getSymA()->getSymbol()); diff --git a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp index fdc0ed7d67f..6e8a62cd2e8 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp @@ -109,7 +109,7 @@ ConstantPool * AssemblerConstantPools::getConstantPool(const MCSection *Section) { ConstantPoolMapTy::iterator CP = ConstantPools.find(Section); if (CP == ConstantPools.end()) - return 0; + return nullptr; return &CP->second; } diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp index 285171aa089..f6d24e9e4f9 100644 --- a/lib/Target/ARM/MLxExpansionPass.cpp +++ b/lib/Target/ARM/MLxExpansionPass.cpp @@ -74,7 +74,7 @@ namespace { } void MLxExpansion::clearStack() { - std::fill(LastMIs, LastMIs + 4, (MachineInstr*)0); + std::fill(LastMIs, LastMIs + 4, nullptr); MIIdx = 0; } @@ -89,7 +89,7 @@ MachineInstr *MLxExpansion::getAccDefMI(MachineInstr *MI) const { // real definition MI. This is important for _sfp instructions. unsigned Reg = MI->getOperand(1).getReg(); if (TargetRegisterInfo::isPhysicalRegister(Reg)) - return 0; + return nullptr; MachineBasicBlock *MBB = MI->getParent(); MachineInstr *DefMI = MRI->getVRegDef(Reg); @@ -353,7 +353,7 @@ bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) { if (Domain == ARMII::DomainGeneral) { if (++Skip == 2) // Assume dual issues of non-VFP / NEON instructions. - pushStack(0); + pushStack(nullptr); } else { Skip = 0; diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp index 0efadd9aea3..6267ecf53c7 100644 --- a/lib/Target/ARM/Thumb2SizeReduction.cpp +++ b/lib/Target/ARM/Thumb2SizeReduction.cpp @@ -916,9 +916,9 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) { // Yes, CPSR could be livein. 
bool LiveCPSR = MBB.isLiveIn(ARM::CPSR); - MachineInstr *BundleMI = 0; + MachineInstr *BundleMI = nullptr; - CPSRDef = 0; + CPSRDef = nullptr; HighLatencyCPSR = false; // Check predecessors for the latest CPSRDef. @@ -984,7 +984,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) { LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR); if (MI->isCall()) { // Calls don't really set CPSR. - CPSRDef = 0; + CPSRDef = nullptr; HighLatencyCPSR = false; IsSelfLoop = false; } else if (DefCPSR) { diff --git a/lib/Target/ARM64/ARM64AddressTypePromotion.cpp b/lib/Target/ARM64/ARM64AddressTypePromotion.cpp index b34a81081fc..c6e45def9a4 100644 --- a/lib/Target/ARM64/ARM64AddressTypePromotion.cpp +++ b/lib/Target/ARM64/ARM64AddressTypePromotion.cpp @@ -71,7 +71,7 @@ class ARM64AddressTypePromotion : public FunctionPass { public: static char ID; ARM64AddressTypePromotion() - : FunctionPass(ID), Func(NULL), ConsideredSExtType(NULL) { + : FunctionPass(ID), Func(nullptr), ConsideredSExtType(nullptr) { initializeARM64AddressTypePromotionPass(*PassRegistry::getPassRegistry()); } @@ -344,7 +344,7 @@ ARM64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) { SExtForOpnd->moveBefore(Inst); Inst->setOperand(OpIdx, SExtForOpnd); // If more sext are required, new instructions will have to be created. 
- SExtForOpnd = NULL; + SExtForOpnd = nullptr; } if (SExtForOpnd == SExt) { DEBUG(dbgs() << "Sign extension is useless now\n"); @@ -466,10 +466,10 @@ void ARM64AddressTypePromotion::analyzeSExtension(Instructions &SExtInsts) { if (insert || AlreadySeen != SeenChains.end()) { DEBUG(dbgs() << "Insert\n"); SExtInsts.push_back(SExt); - if (AlreadySeen != SeenChains.end() && AlreadySeen->second != NULL) { + if (AlreadySeen != SeenChains.end() && AlreadySeen->second != nullptr) { DEBUG(dbgs() << "Insert chain member\n"); SExtInsts.push_back(AlreadySeen->second); - SeenChains[Last] = NULL; + SeenChains[Last] = nullptr; } } else { DEBUG(dbgs() << "Record its chain membership\n"); diff --git a/lib/Target/ARM64/ARM64AsmPrinter.cpp b/lib/Target/ARM64/ARM64AsmPrinter.cpp index 6692e662e1c..615cb2884d2 100644 --- a/lib/Target/ARM64/ARM64AsmPrinter.cpp +++ b/lib/Target/ARM64/ARM64AsmPrinter.cpp @@ -53,7 +53,7 @@ class ARM64AsmPrinter : public AsmPrinter { public: ARM64AsmPrinter(TargetMachine &TM, MCStreamer &Streamer) : AsmPrinter(TM, Streamer), Subtarget(&TM.getSubtarget()), - MCInstLowering(OutContext, *Mang, *this), SM(*this), ARM64FI(NULL), + MCInstLowering(OutContext, *Mang, *this), SM(*this), ARM64FI(nullptr), LOHLabelCounter(0) {} virtual const char *getPassName() const { return "ARM64 Assembly Printer"; } diff --git a/lib/Target/ARM64/ARM64CollectLOH.cpp b/lib/Target/ARM64/ARM64CollectLOH.cpp index abe1b4615d5..7b8a270b897 100644 --- a/lib/Target/ARM64/ARM64CollectLOH.cpp +++ b/lib/Target/ARM64/ARM64CollectLOH.cpp @@ -262,7 +262,7 @@ static const SetOfMachineInstr *getUses(const InstrToInstrs *sets, unsigned reg, InstrToInstrs::const_iterator Res = sets[reg].find(&MI); if (Res != sets[reg].end()) return &(Res->second); - return NULL; + return nullptr; } /// Initialize the reaching definition algorithm: @@ -335,7 +335,7 @@ static void initReachingDef(MachineFunction &MF, // Do not register clobbered definition for no ADRP. 
// This definition is not used anyway (otherwise register // allocation is wrong). - BBGen[Reg] = ADRPMode ? &MI : NULL; + BBGen[Reg] = ADRPMode ? &MI : nullptr; BBKillSet.set(Reg); } } @@ -451,7 +451,7 @@ static void finitReachingDef(BlockToSetOfInstrsPerColor &In, static void reachingDef(MachineFunction &MF, InstrToInstrs *ColorOpToReachedUses, const MapRegToId &RegToId, bool ADRPMode = false, - const MachineInstr *DummyOp = NULL) { + const MachineInstr *DummyOp = nullptr) { // structures: // For each basic block. // Out: a set per color of definitions that reach the @@ -784,7 +784,7 @@ static void computeOthers(const InstrToInstrs &UseToDefs, const InstrToInstrs *DefsPerColorToUses, ARM64FunctionInfo &ARM64FI, const MapRegToId &RegToId, const MachineDominatorTree *MDT) { - SetOfMachineInstr *InvolvedInLOHs = NULL; + SetOfMachineInstr *InvolvedInLOHs = nullptr; #ifdef DEBUG SetOfMachineInstr InvolvedInLOHsStorage; InvolvedInLOHs = &InvolvedInLOHsStorage; @@ -837,7 +837,7 @@ static void computeOthers(const InstrToInstrs &UseToDefs, const MachineInstr *Def = *UseToDefs.find(Candidate)->second.begin(); // Record the elements of the chain. const MachineInstr *L1 = Def; - const MachineInstr *L2 = NULL; + const MachineInstr *L2 = nullptr; unsigned ImmediateDefOpc = Def->getOpcode(); if (Def->getOpcode() != ARM64::ADRP) { // Check the number of users of this node. @@ -907,7 +907,7 @@ static void computeOthers(const InstrToInstrs &UseToDefs, SmallVector Args; MCLOHType Kind; if (isCandidateLoad(Candidate)) { - if (L2 == NULL) { + if (!L2) { // At this point, the candidate LOH indicates that the ldr instruction // may use a direct access to the symbol. There is not such encoding // for loads of byte and half. 
@@ -1057,7 +1057,7 @@ bool ARM64CollectLOH::runOnMachineFunction(MachineFunction &MF) { if (RegToId.empty()) return false; - MachineInstr *DummyOp = NULL; + MachineInstr *DummyOp = nullptr; if (BasicBlockScopeOnly) { const ARM64InstrInfo *TII = static_cast(TM.getInstrInfo()); diff --git a/lib/Target/ARM64/ARM64ConditionalCompares.cpp b/lib/Target/ARM64/ARM64ConditionalCompares.cpp index 72848330bdc..16324ffa77b 100644 --- a/lib/Target/ARM64/ARM64ConditionalCompares.cpp +++ b/lib/Target/ARM64/ARM64ConditionalCompares.cpp @@ -298,7 +298,7 @@ static bool parseCond(ArrayRef Cond, ARM64CC::CondCode &CC) { MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) { MachineBasicBlock::iterator I = MBB->getFirstTerminator(); if (I == MBB->end()) - return 0; + return nullptr; // The terminator must be controlled by the flags. if (!I->readsRegister(ARM64::CPSR)) { switch (I->getOpcode()) { @@ -311,7 +311,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) { } ++NumCmpTermRejs; DEBUG(dbgs() << "Flags not used by terminator: " << *I); - return 0; + return nullptr; } // Now find the instruction controlling the terminator. @@ -330,7 +330,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) { if (I->getOperand(3).getImm() || !isUInt<5>(I->getOperand(2).getImm())) { DEBUG(dbgs() << "Immediate out of range for ccmp: " << *I); ++NumImmRangeRejs; - return 0; + return nullptr; } // Fall through. case ARM64::SUBSWrr: @@ -341,7 +341,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) { return I; DEBUG(dbgs() << "Can't convert compare with live destination: " << *I); ++NumLiveDstRejs; - return 0; + return nullptr; case ARM64::FCMPSrr: case ARM64::FCMPDrr: case ARM64::FCMPESrr: @@ -359,17 +359,17 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) { // besides the terminators. 
DEBUG(dbgs() << "Can't create ccmp with multiple uses: " << *I); ++NumMultCPSRUses; - return 0; + return nullptr; } if (PRI.Clobbers) { DEBUG(dbgs() << "Not convertible compare: " << *I); ++NumUnknCPSRDefs; - return 0; + return nullptr; } } DEBUG(dbgs() << "Flags not defined in BB#" << MBB->getNumber() << '\n'); - return 0; + return nullptr; } /// Determine if all the instructions in MBB can safely @@ -416,7 +416,7 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB, // We never speculate stores, so an AA pointer isn't necessary. bool DontMoveAcrossStore = true; - if (!I.isSafeToMove(TII, 0, DontMoveAcrossStore)) { + if (!I.isSafeToMove(TII, nullptr, DontMoveAcrossStore)) { DEBUG(dbgs() << "Can't speculate: " << I); return false; } @@ -435,7 +435,7 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB, /// bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { Head = MBB; - Tail = CmpBB = 0; + Tail = CmpBB = nullptr; if (Head->succ_size() != 2) return false; @@ -495,7 +495,7 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { // The branch we're looking to eliminate must be analyzable. 
HeadCond.clear(); - MachineBasicBlock *TBB = 0, *FBB = 0; + MachineBasicBlock *TBB = nullptr, *FBB = nullptr; if (TII->AnalyzeBranch(*Head, TBB, FBB, HeadCond)) { DEBUG(dbgs() << "Head branch not analyzable.\n"); ++NumHeadBranchRejs; @@ -523,7 +523,7 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { } CmpBBCond.clear(); - TBB = FBB = 0; + TBB = FBB = nullptr; if (TII->AnalyzeBranch(*CmpBB, TBB, FBB, CmpBBCond)) { DEBUG(dbgs() << "CmpBB branch not analyzable.\n"); ++NumCmpBranchRejs; @@ -897,7 +897,7 @@ bool ARM64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) { DomTree = &getAnalysis(); Loops = getAnalysisIfAvailable(); Traces = &getAnalysis(); - MinInstr = 0; + MinInstr = nullptr; MinSize = MF.getFunction()->getAttributes().hasAttribute( AttributeSet::FunctionIndex, Attribute::MinSize); diff --git a/lib/Target/ARM64/ARM64FastISel.cpp b/lib/Target/ARM64/ARM64FastISel.cpp index ffd56adbf20..459c48030f4 100644 --- a/lib/Target/ARM64/ARM64FastISel.cpp +++ b/lib/Target/ARM64/ARM64FastISel.cpp @@ -303,7 +303,7 @@ unsigned ARM64FastISel::TargetMaterializeConstant(const Constant *C) { // Computes the address to get to an object. 
bool ARM64FastISel::ComputeAddress(const Value *Obj, Address &Addr) { - const User *U = NULL; + const User *U = nullptr; unsigned Opcode = Instruction::UserOp1; if (const Instruction *I = dyn_cast(Obj)) { // Don't walk into other basic blocks unless the object is an alloca from @@ -1281,7 +1281,7 @@ bool ARM64FastISel::FinishCall(MVT RetVT, SmallVectorImpl &UsedRegs, } bool ARM64FastISel::SelectCall(const Instruction *I, - const char *IntrMemName = 0) { + const char *IntrMemName = nullptr) { const CallInst *CI = cast(I); const Value *Callee = CI->getCalledValue(); diff --git a/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp b/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp index 43620ef8c38..986c2ad049f 100644 --- a/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp +++ b/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp @@ -454,7 +454,7 @@ SDNode *ARM64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) { if (Op1.getOpcode() != ISD::MUL || !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2, LaneIdx)) - return 0; + return nullptr; } SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64); @@ -490,7 +490,7 @@ SDNode *ARM64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) { if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1, LaneIdx)) - return 0; + return nullptr; SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64); @@ -852,7 +852,7 @@ SDNode *ARM64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) { LoadSDNode *LD = cast(N); if (LD->isUnindexed()) - return NULL; + return nullptr; EVT VT = LD->getMemoryVT(); EVT DstVT = N->getValueType(0); ISD::MemIndexedMode AM = LD->getAddressingMode(); @@ -910,7 +910,7 @@ SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) { } else if (VT == MVT::f64) { Opcode = IsPre ? 
ARM64::LDRDpre_isel : ARM64::LDRDpost_isel; } else - return NULL; + return nullptr; SDValue Chain = LD->getChain(); SDValue Base = LD->getBasePtr(); ConstantSDNode *OffsetOp = cast(LD->getOffset()); @@ -929,7 +929,7 @@ SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) { ReplaceUses(SDValue(N, 0), SDValue(Sub, 0)); ReplaceUses(SDValue(N, 1), SDValue(Res, 1)); ReplaceUses(SDValue(N, 2), SDValue(Res, 2)); - return 0; + return nullptr; } return Res; } @@ -977,7 +977,7 @@ SDNode *ARM64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc, ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1)); - return 0; + return nullptr; } SDNode *ARM64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs, @@ -1371,7 +1371,7 @@ SDNode *ARM64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) { unsigned Opc, LSB, MSB; SDValue Opd0; if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB)) - return NULL; + return nullptr; EVT VT = N->getValueType(0); @@ -1767,14 +1767,14 @@ static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst, SDNode *ARM64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) { if (N->getOpcode() != ISD::OR) - return NULL; + return nullptr; unsigned Opc; unsigned LSB, MSB; SDValue Opd0, Opd1; if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG)) - return NULL; + return nullptr; EVT VT = N->getValueType(0); SDValue Ops[] = { Opd0, @@ -1795,14 +1795,14 @@ SDNode *ARM64DAGToDAGISel::SelectLIBM(SDNode *N) { } else if (VT == MVT::f64) { Variant = 1; } else - return 0; // Unrecognized argument type. Fall back on default codegen. + return nullptr; // Unrecognized argument type. Fall back on default codegen. // Pick the FRINTX variant needed to set the flags. unsigned FRINTXOpc = FRINTXOpcs[Variant]; switch (N->getOpcode()) { default: - return 0; // Unrecognized libm ISD node. Fall back on default codegen. + return nullptr; // Unrecognized libm ISD node. Fall back on default codegen. 
case ISD::FCEIL: { unsigned FRINTPOpcs[] = { ARM64::FRINTPSr, ARM64::FRINTPDr }; Opc = FRINTPOpcs[Variant]; @@ -1892,11 +1892,11 @@ SDNode *ARM64DAGToDAGISel::Select(SDNode *Node) { if (Node->isMachineOpcode()) { DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n"); Node->setNodeId(-1); - return NULL; + return nullptr; } // Few custom selection stuff. - SDNode *ResNode = 0; + SDNode *ResNode = nullptr; EVT VT = Node->getValueType(0); switch (Node->getOpcode()) { @@ -2455,7 +2455,7 @@ SDNode *ARM64DAGToDAGISel::Select(SDNode *Node) { ResNode = SelectCode(Node); DEBUG(errs() << "=> "); - if (ResNode == NULL || ResNode == Node) + if (ResNode == nullptr || ResNode == Node) DEBUG(Node->dump(CurDAG)); else DEBUG(ResNode->dump(CurDAG)); diff --git a/lib/Target/ARM64/ARM64ISelLowering.cpp b/lib/Target/ARM64/ARM64ISelLowering.cpp index 58e425938e1..37eccb1499c 100644 --- a/lib/Target/ARM64/ARM64ISelLowering.cpp +++ b/lib/Target/ARM64/ARM64ISelLowering.cpp @@ -619,7 +619,7 @@ ARM64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, const char *ARM64TargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { default: - return 0; + return nullptr; case ARM64ISD::CALL: return "ARM64ISD::CALL"; case ARM64ISD::ADRP: return "ARM64ISD::ADRP"; case ARM64ISD::ADDlow: return "ARM64ISD::ADDlow"; @@ -2565,7 +2565,7 @@ SDValue ARM64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { // If softenSetCCOperands returned a scalar, we need to compare the result // against zero to select between true and false values. - if (RHS.getNode() == 0) { + if (!RHS.getNode()) { RHS = DAG.getConstant(0, LHS.getValueType()); CC = ISD::SETNE; } @@ -2815,7 +2815,7 @@ SDValue ARM64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); // If softenSetCCOperands returned a scalar, use it. 
- if (RHS.getNode() == 0) { + if (!RHS.getNode()) { assert(LHS.getValueType() == Op.getValueType() && "Unexpected setcc expansion!"); return LHS; @@ -2939,7 +2939,7 @@ SDValue ARM64TargetLowering::LowerSELECT_CC(SDValue Op, // If softenSetCCOperands returned a scalar, we need to compare the result // against zero to select between true and false values. - if (RHS.getNode() == 0) { + if (!RHS.getNode()) { RHS = DAG.getConstant(0, LHS.getValueType()); CC = ISD::SETNE; } @@ -3563,7 +3563,7 @@ ARM64TargetLowering::getSingleConstraintMatchWeight( Value *CallOperandVal = info.CallOperandVal; // If we don't have a value, we can't do a match, // but allow it at the lowest weight. - if (CallOperandVal == NULL) + if (!CallOperandVal) return CW_Default; Type *type = CallOperandVal->getType(); // Look at the constraint type. @@ -3617,7 +3617,7 @@ ARM64TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); // Not found as a standard register? - if (Res.second == 0) { + if (!Res.second) { unsigned Size = Constraint.size(); if ((Size == 4 || Size == 5) && Constraint[0] == '{' && tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') { @@ -3642,7 +3642,7 @@ ARM64TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, void ARM64TargetLowering::LowerAsmOperandForConstraint( SDValue Op, std::string &Constraint, std::vector &Ops, SelectionDAG &DAG) const { - SDValue Result(0, 0); + SDValue Result; // Currently only support length 1 constraints. if (Constraint.length() != 1) diff --git a/lib/Target/ARM64/ARM64InstrInfo.cpp b/lib/Target/ARM64/ARM64InstrInfo.cpp index 6a86723e04d..95b247a0a2f 100644 --- a/lib/Target/ARM64/ARM64InstrInfo.cpp +++ b/lib/Target/ARM64/ARM64InstrInfo.cpp @@ -260,7 +260,7 @@ unsigned ARM64InstrInfo::InsertBranch( // Shouldn't be a fall through. 
assert(TBB && "InsertBranch must not be told to insert a fallthrough"); - if (FBB == 0) { + if (!FBB) { if (Cond.empty()) // Unconditional branch? BuildMI(&MBB, DL, get(ARM64::B)).addMBB(TBB); else @@ -289,7 +289,7 @@ static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) { // csel instruction. If so, return the folded opcode, and the replacement // register. static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg, - unsigned *NewVReg = 0) { + unsigned *NewVReg = nullptr) { VReg = removeCopies(MRI, VReg); if (!TargetRegisterInfo::isVirtualRegister(VReg)) return 0; @@ -469,7 +469,7 @@ void ARM64InstrInfo::insertSelect(MachineBasicBlock &MBB, } unsigned Opc = 0; - const TargetRegisterClass *RC = 0; + const TargetRegisterClass *RC = nullptr; bool TryFold = false; if (MRI.constrainRegClass(DstReg, &ARM64::GPR64RegClass)) { RC = &ARM64::GPR64RegClass; @@ -1667,16 +1667,16 @@ ARM64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, unsigned SrcReg = MI->getOperand(1).getReg(); if (SrcReg == ARM64::SP && TargetRegisterInfo::isVirtualRegister(DstReg)) { MF.getRegInfo().constrainRegClass(DstReg, &ARM64::GPR64RegClass); - return 0; + return nullptr; } if (DstReg == ARM64::SP && TargetRegisterInfo::isVirtualRegister(SrcReg)) { MF.getRegInfo().constrainRegClass(SrcReg, &ARM64::GPR64RegClass); - return 0; + return nullptr; } } // Cannot fold. 
- return 0; + return nullptr; } int llvm::isARM64FrameOffsetLegal(const MachineInstr &MI, int &Offset, diff --git a/lib/Target/ARM64/ARM64PromoteConstant.cpp b/lib/Target/ARM64/ARM64PromoteConstant.cpp index 6fc57505942..2eef90d6cfc 100644 --- a/lib/Target/ARM64/ARM64PromoteConstant.cpp +++ b/lib/Target/ARM64/ARM64PromoteConstant.cpp @@ -489,8 +489,8 @@ ARM64PromoteConstant::insertDefinitions(Constant *Cst, ModuleToMergedGV.find(M); if (MapIt == ModuleToMergedGV.end()) { PromotedGV = new GlobalVariable( - *M, Cst->getType(), true, GlobalValue::InternalLinkage, 0, - "_PromotedConst", 0, GlobalVariable::NotThreadLocal); + *M, Cst->getType(), true, GlobalValue::InternalLinkage, nullptr, + "_PromotedConst", nullptr, GlobalVariable::NotThreadLocal); PromotedGV->setInitializer(Cst); ModuleToMergedGV[M] = PromotedGV; DEBUG(dbgs() << "Global replacement: "); diff --git a/lib/Target/ARM64/ARM64RegisterInfo.cpp b/lib/Target/ARM64/ARM64RegisterInfo.cpp index 21d3d955700..aa7d9b70b50 100644 --- a/lib/Target/ARM64/ARM64RegisterInfo.cpp +++ b/lib/Target/ARM64/ARM64RegisterInfo.cpp @@ -136,7 +136,7 @@ ARM64RegisterInfo::getPointerRegClass(const MachineFunction &MF, const TargetRegisterClass * ARM64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const { if (RC == &ARM64::CCRRegClass) - return NULL; // Can't copy CPSR. + return nullptr; // Can't copy CPSR. return RC; } diff --git a/lib/Target/ARM64/ARM64SelectionDAGInfo.cpp b/lib/Target/ARM64/ARM64SelectionDAGInfo.cpp index 49c3c0c1bf3..a087b407d97 100644 --- a/lib/Target/ARM64/ARM64SelectionDAGInfo.cpp +++ b/lib/Target/ARM64/ARM64SelectionDAGInfo.cpp @@ -30,7 +30,7 @@ SDValue ARM64SelectionDAGInfo::EmitTargetCodeForMemset( ConstantSDNode *V = dyn_cast(Src); ConstantSDNode *SizeValue = dyn_cast(Size); const char *bzeroEntry = - (V && V->isNullValue()) ? Subtarget->getBZeroEntry() : 0; + (V && V->isNullValue()) ? 
Subtarget->getBZeroEntry() : nullptr; // For small size (< 256), it is not beneficial to use bzero // instead of memset. if (bzeroEntry && (!SizeValue || SizeValue->getZExtValue() > 256)) { diff --git a/lib/Target/ARM64/ARM64StorePairSuppress.cpp b/lib/Target/ARM64/ARM64StorePairSuppress.cpp index 15b465da5ad..5416f11510c 100644 --- a/lib/Target/ARM64/ARM64StorePairSuppress.cpp +++ b/lib/Target/ARM64/ARM64StorePairSuppress.cpp @@ -126,7 +126,7 @@ bool ARM64StorePairSuppress::runOnMachineFunction(MachineFunction &mf) { SchedModel.init(*ST.getSchedModel(), &ST, TII); Traces = &getAnalysis(); - MinInstr = 0; + MinInstr = nullptr; DEBUG(dbgs() << "*** " << getPassName() << ": " << MF->getName() << '\n'); diff --git a/lib/Target/ARM64/ARM64TargetTransformInfo.cpp b/lib/Target/ARM64/ARM64TargetTransformInfo.cpp index 40228021e42..ac7142f3feb 100644 --- a/lib/Target/ARM64/ARM64TargetTransformInfo.cpp +++ b/lib/Target/ARM64/ARM64TargetTransformInfo.cpp @@ -45,7 +45,7 @@ class ARM64TTI final : public ImmutablePass, public TargetTransformInfo { unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const; public: - ARM64TTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) { + ARM64TTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) { llvm_unreachable("This pass cannot be directly constructed"); } diff --git a/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp b/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp index 0e57565baa0..5fe0acc59da 100644 --- a/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp +++ b/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp @@ -964,7 +964,7 @@ public: return false; if (Mem.Mode != ImmediateOffset) return false; - return Mem.OffsetImm == 0; + return Mem.OffsetImm == nullptr; } bool isMemoryIndexedSImm9() const { if (!isMem() || Mem.Mode != ImmediateOffset) @@ -1041,7 +1041,7 @@ public: void addExpr(MCInst &Inst, const MCExpr *Expr) const { // Add as immediates when possible. Null MCExpr = 0. 
- if (Expr == 0) + if (!Expr) Inst.addOperand(MCOperand::CreateImm(0)); else if (const MCConstantExpr *CE = dyn_cast(Expr)) Inst.addOperand(MCOperand::CreateImm(CE->getValue())); @@ -1688,7 +1688,7 @@ public: ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx); Op->Mem.BaseRegNum = BaseReg; Op->Mem.OffsetRegNum = OffsetReg; - Op->Mem.OffsetImm = 0; + Op->Mem.OffsetImm = nullptr; Op->Mem.ExtType = ExtType; Op->Mem.ShiftVal = ShiftVal; Op->Mem.ExplicitShift = ExplicitShift; @@ -2379,7 +2379,7 @@ bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc, StringRef Op = Tok.getString(); SMLoc S = Tok.getLoc(); - const MCExpr *Expr = 0; + const MCExpr *Expr = nullptr; #define SYS_ALIAS(op1, Cn, Cm, op2) \ do { \ @@ -2799,7 +2799,7 @@ ARM64AsmParser::tryParseNoIndexMemory(OperandVector &Operands) { Parser.Lex(); // Eat right bracket token. - Operands.push_back(ARM64Operand::CreateMem(Reg, 0, S, E, E, getContext())); + Operands.push_back(ARM64Operand::CreateMem(Reg, nullptr, S, E, E, getContext())); return MatchOperand_Success; } @@ -2818,7 +2818,7 @@ bool ARM64AsmParser::parseMemory(OperandVector &Operands) { return Error(BaseRegTok.getLoc(), "register expected"); // If there is an offset expression, parse it. - const MCExpr *OffsetExpr = 0; + const MCExpr *OffsetExpr = nullptr; SMLoc OffsetLoc; if (Parser.getTok().is(AsmToken::Comma)) { Parser.Lex(); // Eat the comma. 
@@ -3848,7 +3848,7 @@ bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, const char *Repl = StringSwitch(Tok) .Case("cmp", "subs") .Case("cmn", "adds") - .Default(0); + .Default(nullptr); assert(Repl && "Unknown compare instruction"); delete Operands[0]; Operands[0] = ARM64Operand::CreateToken(Repl, false, IDLoc, getContext()); diff --git a/lib/Target/ARM64/Disassembler/ARM64ExternalSymbolizer.cpp b/lib/Target/ARM64/Disassembler/ARM64ExternalSymbolizer.cpp index 4ce432372a0..2f8e516d185 100644 --- a/lib/Target/ARM64/Disassembler/ARM64ExternalSymbolizer.cpp +++ b/lib/Target/ARM64/Disassembler/ARM64ExternalSymbolizer.cpp @@ -167,7 +167,7 @@ bool ARM64ExternalSymbolizer::tryAddingSymbolicOperand( } } - const MCExpr *Add = NULL; + const MCExpr *Add = nullptr; if (SymbolicOp.AddSymbol.Present) { if (SymbolicOp.AddSymbol.Name) { StringRef Name(SymbolicOp.AddSymbol.Name); @@ -182,7 +182,7 @@ bool ARM64ExternalSymbolizer::tryAddingSymbolicOperand( } } - const MCExpr *Sub = NULL; + const MCExpr *Sub = nullptr; if (SymbolicOp.SubtractSymbol.Present) { if (SymbolicOp.SubtractSymbol.Name) { StringRef Name(SymbolicOp.SubtractSymbol.Name); @@ -193,7 +193,7 @@ bool ARM64ExternalSymbolizer::tryAddingSymbolicOperand( } } - const MCExpr *Off = NULL; + const MCExpr *Off = nullptr; if (SymbolicOp.Value != 0) Off = MCConstantExpr::Create(SymbolicOp.Value, Ctx); @@ -204,17 +204,17 @@ bool ARM64ExternalSymbolizer::tryAddingSymbolicOperand( LHS = MCBinaryExpr::CreateSub(Add, Sub, Ctx); else LHS = MCUnaryExpr::CreateMinus(Sub, Ctx); - if (Off != 0) + if (Off) Expr = MCBinaryExpr::CreateAdd(LHS, Off, Ctx); else Expr = LHS; } else if (Add) { - if (Off != 0) + if (Off) Expr = MCBinaryExpr::CreateAdd(Add, Off, Ctx); else Expr = Add; } else { - if (Off != 0) + if (Off) Expr = Off; else Expr = MCConstantExpr::Create(0, Ctx); diff --git a/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp b/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp index 0dea241ed18..b683ae130c3 
100644 --- a/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp +++ b/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp @@ -85,7 +85,7 @@ void ARM64InstPrinter::printInst(const MCInst *MI, raw_ostream &O, if (Op2.isImm() && Op2.getImm() == 0 && Op3.isImm()) { bool IsSigned = (Opcode == ARM64::SBFMXri || Opcode == ARM64::SBFMWri); - const char *AsmMnemonic = 0; + const char *AsmMnemonic = nullptr; switch (Op3.getImm()) { default: @@ -115,7 +115,7 @@ void ARM64InstPrinter::printInst(const MCInst *MI, raw_ostream &O, // instruction. In all cases the immediate shift amount shift must be in // the range 0 to (reg.size -1). if (Op2.isImm() && Op3.isImm()) { - const char *AsmMnemonic = 0; + const char *AsmMnemonic = nullptr; int shift = 0; int64_t immr = Op2.getImm(); int64_t imms = Op3.getImm(); @@ -693,7 +693,7 @@ static LdStNInstrDesc *getLdStNInstrDesc(unsigned Opcode) { if (LdStNInstInfo[Idx].Opcode == Opcode) return &LdStNInstInfo[Idx]; - return 0; + return nullptr; } void ARM64AppleInstPrinter::printInst(const MCInst *MI, raw_ostream &O, @@ -754,7 +754,7 @@ bool ARM64InstPrinter::printSysAlias(const MCInst *MI, raw_ostream &O) { assert(Opcode == ARM64::SYSxt && "Invalid opcode for SYS alias!"); #endif - const char *Asm = 0; + const char *Asm = nullptr; const MCOperand &Op1 = MI->getOperand(0); const MCOperand &Cn = MI->getOperand(1); const MCOperand &Cm = MI->getOperand(2); @@ -968,7 +968,7 @@ bool ARM64InstPrinter::printSysAlias(const MCInst *MI, raw_ostream &O) { O << ", " << getRegisterName(Reg); } - return Asm != 0; + return Asm != nullptr; } void ARM64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo, diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp b/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp index 30dcda49ff6..9775a471f52 100644 --- a/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp +++ b/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp @@ -71,7 +71,7 @@ static MCAsmInfo *createARM64MCAsmInfo(const MCRegisterInfo &MRI, 
// Initial state of the frame pointer is SP. unsigned Reg = MRI.getDwarfRegNum(ARM64::SP, true); - MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 0); + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0); MAI->addInitialFrameState(Inst); return MAI; @@ -119,7 +119,7 @@ static MCInstPrinter *createARM64MCInstPrinter(const Target &T, if (SyntaxVariant == 1) return new ARM64AppleInstPrinter(MAI, MII, MRI, STI); - return 0; + return nullptr; } static MCStreamer *createMCStreamer(const Target &T, StringRef TT, diff --git a/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp b/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp index 9d2dcb6c5de..ba725069a37 100644 --- a/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp +++ b/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp @@ -241,14 +241,14 @@ void ARM64MachObjectWriter::RecordRelocation( Asm.getContext().FatalError(Fixup.getLoc(), "unsupported relocation with identical base"); - Value += (A_SD.getFragment() == NULL ? 0 : Writer->getSymbolAddress( - &A_SD, Layout)) - - (A_Base == NULL || A_Base->getFragment() == NULL + Value += (!A_SD.getFragment() ? 0 + : Writer->getSymbolAddress(&A_SD, Layout)) - + (!A_Base || !A_Base->getFragment() ? 0 : Writer->getSymbolAddress(A_Base, Layout)); - Value -= (B_SD.getFragment() == NULL ? 0 : Writer->getSymbolAddress( - &B_SD, Layout)) - - (B_Base == NULL || B_Base->getFragment() == NULL + Value -= (!B_SD.getFragment() ? 0 + : Writer->getSymbolAddress(&B_SD, Layout)) - + (!B_Base || !B_Base->getFragment() ? 0 : Writer->getSymbolAddress(B_Base, Layout)); @@ -302,7 +302,7 @@ void ARM64MachObjectWriter::RecordRelocation( // have already been fixed up. if (Symbol->isInSection()) { if (Section.hasAttribute(MachO::S_ATTR_DEBUG)) - Base = 0; + Base = nullptr; } // ARM64 uses external relocations as much as possible. 
For debug sections, diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp index afd1f518cc2..f16547a44e5 100644 --- a/lib/Target/CppBackend/CPPBackend.cpp +++ b/lib/Target/CppBackend/CPPBackend.cpp @@ -396,7 +396,7 @@ std::string CppWriter::getCppName(Type* Ty) { return I->second; // Okay, let's build a new name for this type. Start with a prefix - const char* prefix = 0; + const char* prefix = nullptr; switch (Ty->getTypeID()) { case Type::FunctionTyID: prefix = "FuncTy_"; break; case Type::StructTyID: prefix = "StructTy_"; break; diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/lib/Target/Hexagon/HexagonAsmPrinter.cpp index f15765a6726..2e011bd1274 100644 --- a/lib/Target/Hexagon/HexagonAsmPrinter.cpp +++ b/lib/Target/Hexagon/HexagonAsmPrinter.cpp @@ -225,7 +225,7 @@ static MCInstPrinter *createHexagonMCInstPrinter(const Target &T, if (SyntaxVariant == 0) return(new HexagonInstPrinter(MAI, MII, MRI)); else - return NULL; + return nullptr; } extern "C" void LLVMInitializeHexagonAsmPrinter() { diff --git a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp index 4d72c6b0d39..a6f807fda89 100644 --- a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp +++ b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp @@ -147,8 +147,8 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) { MachineBasicBlock::succ_iterator SI = MBB->succ_begin(); MachineBasicBlock* FirstSucc = *SI; MachineBasicBlock* SecondSucc = *(++SI); - MachineBasicBlock* LayoutSucc = NULL; - MachineBasicBlock* JumpAroundTarget = NULL; + MachineBasicBlock* LayoutSucc = nullptr; + MachineBasicBlock* JumpAroundTarget = nullptr; if (MBB->isLayoutSuccessor(FirstSucc)) { LayoutSucc = FirstSucc; @@ -162,7 +162,7 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) { // The target of the unconditional branch must be JumpAroundTarget. // TODO: If not, we should not invert the unconditional branch. 
- MachineBasicBlock* CondBranchTarget = NULL; + MachineBasicBlock* CondBranchTarget = nullptr; if ((MI->getOpcode() == Hexagon::JMP_t) || (MI->getOpcode() == Hexagon::JMP_f)) { CondBranchTarget = MI->getOperand(1).getMBB(); @@ -240,7 +240,7 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) { static void initializePassOnce(PassRegistry &Registry) { PassInfo *PI = new PassInfo("Hexagon CFG Optimizer", "hexagon-cfg", - &HexagonCFGOptimizer::ID, 0, false, false); + &HexagonCFGOptimizer::ID, nullptr, false, false); Registry.registerPass(*PI, true); } diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp index 14aceb8ee0e..063a932786c 100644 --- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp +++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp @@ -262,7 +262,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1, unsigned KilledOperand = 0; if (I2->killsRegister(I2UseReg)) KilledOperand = I2UseReg; - MachineInstr *KillingInstr = 0; + MachineInstr *KillingInstr = nullptr; for (; I != End; ++I) { // If the intervening instruction I: @@ -306,7 +306,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1, // Track killed operands. If we move across an instruction that kills our // operand, we need to update the kill information on the moved I1. It kills // the operand now. - MachineInstr *KillingInstr = 0; + MachineInstr *KillingInstr = nullptr; unsigned KilledOperand = 0; while(++I != End) { @@ -506,7 +506,7 @@ MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr *I1, // Not safe. Stop searching. 
break; } - return 0; + return nullptr; } void HexagonCopyToCombine::combine(MachineInstr *I1, MachineInstr *I2, diff --git a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp b/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp index 8a5991fbc4f..e4590bbd4ed 100644 --- a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp +++ b/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp @@ -187,7 +187,7 @@ static void initializePassOnce(PassRegistry &Registry) { const char *Name = "Hexagon Expand Predicate Spill Code"; PassInfo *PI = new PassInfo(Name, "hexagon-spill-pred", &HexagonExpandPredSpillCode::ID, - 0, false, false); + nullptr, false, false); Registry.registerPass(*PI, true); } diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp index 0ea13d4c80e..d551ca9dc7f 100644 --- a/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -246,7 +246,7 @@ HexagonFrameLowering::spillCalleeSavedRegisters( // unsigned SuperReg = uniqueSuperReg(Reg, TRI); bool CanUseDblStore = false; - const TargetRegisterClass* SuperRegClass = 0; + const TargetRegisterClass* SuperRegClass = nullptr; if (ContiguousRegs && (i < CSI.size()-1)) { unsigned SuperRegNext = uniqueSuperReg(CSI[i+1].getReg(), TRI); @@ -300,7 +300,7 @@ bool HexagonFrameLowering::restoreCalleeSavedRegisters( // Check if we can use a double-word load. 
// unsigned SuperReg = uniqueSuperReg(Reg, TRI); - const TargetRegisterClass* SuperRegClass = 0; + const TargetRegisterClass* SuperRegClass = nullptr; bool CanUseDblLoad = false; if (ContiguousRegs && (i < CSI.size()-1)) { unsigned SuperRegNext = uniqueSuperReg(CSI[i+1].getReg(), TRI); diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp index cb7e3ae0381..db83b1ab397 100644 --- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -265,8 +265,8 @@ namespace { return Contents.ImmVal; } - void print(raw_ostream &OS, const TargetMachine *TM = 0) const { - const TargetRegisterInfo *TRI = TM ? TM->getRegisterInfo() : 0; + void print(raw_ostream &OS, const TargetMachine *TM = nullptr) const { + const TargetRegisterInfo *TRI = TM ? TM->getRegisterInfo() : nullptr; if (isReg()) { OS << PrintReg(Contents.R.Reg, TRI, Contents.R.Sub); } if (isImm()) { OS << Contents.ImmVal; } } @@ -370,7 +370,7 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L, } // for (instr) SmallVector Cond; - MachineBasicBlock *TB = 0, *FB = 0; + MachineBasicBlock *TB = nullptr, *FB = nullptr; bool NotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Cond, false); if (NotAnalyzed) return false; @@ -435,37 +435,37 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, "Loop must have more than one incoming edge!"); MachineBasicBlock *Backedge = *PI++; if (PI == TopMBB->pred_end()) // dead loop? - return 0; + return nullptr; MachineBasicBlock *Incoming = *PI++; if (PI != TopMBB->pred_end()) // multiple backedges? - return 0; + return nullptr; // Make sure there is one incoming and one backedge and determine which // is which. 
if (L->contains(Incoming)) { if (L->contains(Backedge)) - return 0; + return nullptr; std::swap(Incoming, Backedge); } else if (!L->contains(Backedge)) - return 0; + return nullptr; // Look for the cmp instruction to determine if we can get a useful trip // count. The trip count can be either a register or an immediate. The // location of the value depends upon the type (reg or imm). MachineBasicBlock *Latch = L->getLoopLatch(); if (!Latch) - return 0; + return nullptr; unsigned IVReg = 0; int64_t IVBump = 0; MachineInstr *IVOp; bool FoundIV = findInductionRegister(L, IVReg, IVBump, IVOp); if (!FoundIV) - return 0; + return nullptr; MachineBasicBlock *Preheader = L->getLoopPreheader(); - MachineOperand *InitialValue = 0; + MachineOperand *InitialValue = nullptr; MachineInstr *IV_Phi = MRI->getVRegDef(IVReg); for (unsigned i = 1, n = IV_Phi->getNumOperands(); i < n; i += 2) { MachineBasicBlock *MBB = IV_Phi->getOperand(i+1).getMBB(); @@ -475,13 +475,13 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, IVReg = IV_Phi->getOperand(i).getReg(); // Want IV reg after bump. } if (!InitialValue) - return 0; + return nullptr; SmallVector Cond; - MachineBasicBlock *TB = 0, *FB = 0; + MachineBasicBlock *TB = nullptr, *FB = nullptr; bool NotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Cond, false); if (NotAnalyzed) - return 0; + return nullptr; MachineBasicBlock *Header = L->getHeader(); // TB must be non-null. If FB is also non-null, one of them must be @@ -490,7 +490,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, assert (TB && "Latch block without a branch?"); assert ((!FB || TB == Header || FB == Header) && "Branches not to header?"); if (!TB || (FB && TB != Header && FB != Header)) - return 0; + return nullptr; // Branches of form "if (!P) ..." cause HexagonInstrInfo::AnalyzeBranch // to put imm(0), followed by P in the vector Cond. 
@@ -506,7 +506,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, bool AnalyzedCmp = TII->analyzeCompare(CondI, CmpReg1, CmpReg2, Mask, ImmValue); if (!AnalyzedCmp) - return 0; + return nullptr; // The comparison operator type determines how we compute the loop // trip count. @@ -522,7 +522,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, bool isSwapped = false; const MachineOperand &Op1 = CondI->getOperand(1); const MachineOperand &Op2 = CondI->getOperand(2); - const MachineOperand *EndValue = 0; + const MachineOperand *EndValue = nullptr; if (Op1.isReg()) { if (Op2.isImm() || Op1.getReg() == IVReg) @@ -534,7 +534,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, } if (!EndValue) - return 0; + return nullptr; switch (CondOpc) { case Hexagon::CMPEQri: @@ -553,7 +553,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, case Hexagon::CMPbEQri_V4: case Hexagon::CMPhEQri_V4: { if (IVBump != 1) - return 0; + return nullptr; int64_t InitV, EndV; // Since the comparisons are "ri", the EndValue should be an @@ -563,26 +563,26 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, // Allow InitialValue to be a register defined with an immediate. if (InitialValue->isReg()) { if (!defWithImmediate(InitialValue->getReg())) - return 0; + return nullptr; InitV = getImmediate(*InitialValue); } else { assert(InitialValue->isImm()); InitV = InitialValue->getImm(); } if (InitV >= EndV) - return 0; + return nullptr; if (CondOpc == Hexagon::CMPbEQri_V4) { if (!isInt<8>(InitV) || !isInt<8>(EndV)) - return 0; + return nullptr; } else { // Hexagon::CMPhEQri_V4 if (!isInt<16>(InitV) || !isInt<16>(EndV)) - return 0; + return nullptr; } Cmp = !Negated ? 
Comparison::EQ : Comparison::NE; break; } default: - return 0; + return nullptr; } if (isSwapped) @@ -592,14 +592,14 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, unsigned R = InitialValue->getReg(); MachineBasicBlock *DefBB = MRI->getVRegDef(R)->getParent(); if (!MDT->properlyDominates(DefBB, Header)) - return 0; + return nullptr; OldInsts.push_back(MRI->getVRegDef(R)); } if (EndValue->isReg()) { unsigned R = EndValue->getReg(); MachineBasicBlock *DefBB = MRI->getVRegDef(R)->getParent(); if (!MDT->properlyDominates(DefBB, Header)) - return 0; + return nullptr; } return computeCount(L, InitialValue, EndValue, IVReg, IVBump, Cmp); @@ -617,7 +617,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, Comparison::Kind Cmp) const { // Cannot handle comparison EQ, i.e. while (A == B). if (Cmp == Comparison::EQ) - return 0; + return nullptr; // Check if either the start or end values are an assignment of an immediate. // If so, use the immediate value rather than the register. @@ -643,11 +643,11 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, // If loop executes while iv is "less" with the iv value going down, then // the iv must wrap. if (CmpLess && IVBump < 0) - return 0; + return nullptr; // If loop executes while iv is "greater" with the iv value going up, then // the iv must wrap. if (CmpGreater && IVBump > 0) - return 0; + return nullptr; if (Start->isImm() && End->isImm()) { // Both, start and end are immediates. @@ -655,15 +655,15 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, int64_t EndV = End->getImm(); int64_t Dist = EndV - StartV; if (Dist == 0) - return 0; + return nullptr; bool Exact = (Dist % IVBump) == 0; if (Cmp == Comparison::NE) { if (!Exact) - return 0; + return nullptr; if ((Dist < 0) ^ (IVBump < 0)) - return 0; + return nullptr; } // For comparisons that include the final value (i.e. 
include equality @@ -684,7 +684,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, uint64_t Count = Dist1; if (Count > 0xFFFFFFFFULL) - return 0; + return nullptr; return new CountValue(CountValue::CV_Immediate, Count); } @@ -696,7 +696,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, // If the induction variable bump is not a power of 2, quit. // Othwerise we'd need a general integer division. if (!isPowerOf2_64(abs64(IVBump))) - return 0; + return nullptr; MachineBasicBlock *PH = Loop->getLoopPreheader(); assert (PH && "Should have a preheader by now"); @@ -767,7 +767,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, // Hardware loops cannot handle 64-bit registers. If it's a double // register, it has to have a subregister. if (!SR && RC == &Hexagon::DoubleRegsRegClass) - return 0; + return nullptr; const TargetRegisterClass *IntRC = &Hexagon::IntRegsRegClass; // Compute DistR (register with the distance between Start and End). @@ -1014,7 +1014,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) { MachineBasicBlock *LastMBB = L->getExitingBlock(); // Don't generate hw loop if the loop has more than one exit. - if (LastMBB == 0) + if (!LastMBB) return false; MachineBasicBlock::iterator LastI = LastMBB->getFirstTerminator(); @@ -1036,7 +1036,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) { SmallVector OldInsts; // Are we able to determine the trip count for the loop? CountValue *TripCount = getLoopTripCount(L, OldInsts); - if (TripCount == 0) + if (!TripCount) return false; // Is the trip count available in the preheader? 
@@ -1128,7 +1128,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) { if (LastI != LastMBB->end()) LastI = LastMBB->erase(LastI); SmallVector Cond; - TII->InsertBranch(*LastMBB, BranchTarget, 0, Cond, LastIDL); + TII->InsertBranch(*LastMBB, BranchTarget, nullptr, Cond, LastIDL); } } else { // Conditional branch to loop start; just delete it. @@ -1197,7 +1197,7 @@ MachineInstr *HexagonHardwareLoops::defWithImmediate(unsigned R) { case Hexagon::CONST64_Int_Real: return DI; } - return 0; + return nullptr; } @@ -1292,7 +1292,7 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) { if (IndRegs.empty()) return false; - MachineBasicBlock *TB = 0, *FB = 0; + MachineBasicBlock *TB = nullptr, *FB = nullptr; SmallVector Cond; // AnalyzeBranch returns true if it fails to analyze branch. bool NotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Cond, false); @@ -1323,7 +1323,7 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) { return false; SmallSet CmpRegs; - MachineOperand *CmpImmOp = 0; + MachineOperand *CmpImmOp = nullptr; // Go over all operands to the compare and look for immediate and register // operands. 
Assume that if the compare has a single register use and a @@ -1421,7 +1421,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop( DebugLoc DL; if (!Latch || Header->hasAddressTaken()) - return 0; + return nullptr; typedef MachineBasicBlock::instr_iterator instr_iterator; @@ -1430,17 +1430,17 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop( typedef std::vector MBBVector; MBBVector Preds(Header->pred_begin(), Header->pred_end()); SmallVector Tmp1; - MachineBasicBlock *TB = 0, *FB = 0; + MachineBasicBlock *TB = nullptr, *FB = nullptr; if (TII->AnalyzeBranch(*Latch, TB, FB, Tmp1, false)) - return 0; + return nullptr; for (MBBVector::iterator I = Preds.begin(), E = Preds.end(); I != E; ++I) { MachineBasicBlock *PB = *I; if (PB != Latch) { bool NotAnalyzed = TII->AnalyzeBranch(*PB, TB, FB, Tmp1, false); if (NotAnalyzed) - return 0; + return nullptr; } } @@ -1516,7 +1516,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop( SmallVector Tmp2; SmallVector EmptyCond; - TB = FB = 0; + TB = FB = nullptr; for (MBBVector::iterator I = Preds.begin(), E = Preds.end(); I != E; ++I) { MachineBasicBlock *PB = *I; @@ -1526,22 +1526,22 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop( (void)NotAnalyzed; // suppress compiler warning assert (!NotAnalyzed && "Should be analyzable!"); if (TB != Header && (Tmp2.empty() || FB != Header)) - TII->InsertBranch(*PB, NewPH, 0, EmptyCond, DL); + TII->InsertBranch(*PB, NewPH, nullptr, EmptyCond, DL); PB->ReplaceUsesOfBlockWith(Header, NewPH); } } // It can happen that the latch block will fall through into the header. // Insert an unconditional branch to the header. 
- TB = FB = 0; + TB = FB = nullptr; bool LatchNotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Tmp2, false); (void)LatchNotAnalyzed; // suppress compiler warning assert (!LatchNotAnalyzed && "Should be analyzable!"); if (!TB && !FB) - TII->InsertBranch(*Latch, Header, 0, EmptyCond, DL); + TII->InsertBranch(*Latch, Header, nullptr, EmptyCond, DL); // Finally, the branch from the preheader to the header. - TII->InsertBranch(*NewPH, Header, 0, EmptyCond, DL); + TII->InsertBranch(*NewPH, Header, nullptr, EmptyCond, DL); NewPH->addSuccessor(Header); return NewPH; diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index be4ac1c6c44..687f097351d 100644 --- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -187,7 +187,7 @@ FunctionPass *llvm::createHexagonISelDag(HexagonTargetMachine &TM, static void initializePassOnce(PassRegistry &Registry) { const char *Name = "Hexagon DAG->DAG Pattern Instruction Selection"; PassInfo *PI = new PassInfo(Name, "hexagon-isel", - &SelectionDAGISel::ID, 0, false, false); + &SelectionDAGISel::ID, nullptr, false, false); Registry.registerPass(*PI, true); } @@ -1239,7 +1239,7 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) { SDNode *PdRs = CurDAG->getMachineNode(Hexagon::TFR_PdRs, dl, MVT::i1, SDValue(Arg, 0)); Ops.push_back(SDValue(PdRs,0)); - } else if (RC == NULL && (dyn_cast(Arg) != NULL)) { + } else if (!RC && (dyn_cast(Arg) != nullptr)) { // This is immediate operand. Lower it here making sure that we DO have // const SDNode for immediate value. int32_t Val = cast(Arg)->getSExtValue(); @@ -1347,7 +1347,7 @@ SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) { SDNode *HexagonDAGToDAGISel::Select(SDNode *N) { if (N->isMachineOpcode()) { N->setNodeId(-1); - return NULL; // Already selected. + return nullptr; // Already selected. 
} diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index 2ffe12bd327..44f55503fcf 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -412,7 +412,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, int NumNamedVarArgParams = -1; if (GlobalAddressSDNode *GA = dyn_cast(Callee)) { - const Function* CalleeFn = NULL; + const Function* CalleeFn = nullptr; Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32); if ((CalleeFn = dyn_cast(GA->getGlobal()))) { @@ -1482,7 +1482,7 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { - default: return 0; + default: return nullptr; case HexagonISD::CONST32: return "HexagonISD::CONST32"; case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP"; case HexagonISD::CONST32_Int_Real: return "HexagonISD::CONST32_Int_Real"; diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index 918d85c6ba4..ea6367a89bc 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -138,7 +138,7 @@ HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB, regPos = 1; } - if (FBB == 0) { + if (!FBB) { if (Cond.empty()) { // Due to a bug in TailMerging/CFG Optimization, we need to add a // special case handling of a predicated jump followed by an @@ -154,7 +154,7 @@ HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB, if (NewTBB == NextBB) { ReverseBranchCondition(Cond); RemoveBranch(MBB); - return InsertBranch(MBB, TBB, 0, Cond, DL); + return InsertBranch(MBB, TBB, nullptr, Cond, DL); } } BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB); @@ -177,8 +177,8 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&FBB, SmallVectorImpl &Cond, bool 
AllowModify) const { - TBB = NULL; - FBB = NULL; + TBB = nullptr; + FBB = nullptr; // If the block has no terminators, it just falls into the block after it. MachineBasicBlock::instr_iterator I = MBB.instr_end(); @@ -227,7 +227,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, // Get the last instruction in the block. MachineInstr *LastInst = I; - MachineInstr *SecondLastInst = NULL; + MachineInstr *SecondLastInst = nullptr; // Find one more terminator if present. do { if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(I)) { @@ -560,7 +560,7 @@ MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, const SmallVectorImpl &Ops, int FI) const { // Hexagon_TODO: Implement. - return(0); + return nullptr; } unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const { diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp index 585d11c521a..7dd6e956ccb 100644 --- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp +++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp @@ -22,7 +22,7 @@ using namespace llvm; /// Platform specific modifications to DAG. void VLIWMachineScheduler::postprocessDAG() { - SUnit* LastSequentialCall = NULL; + SUnit* LastSequentialCall = nullptr; // Currently we only catch the situation when compare gets scheduled // before preceding call. 
for (unsigned su = 0, e = SUnits.size(); su != e; ++su) { @@ -398,13 +398,13 @@ SUnit *ConvergingVLIWScheduler::VLIWSchedBoundary::pickOnlyChoice() { for (unsigned i = 0; Available.empty(); ++i) { assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) && "permanent hazard"); (void)i; - ResourceModel->reserveResources(0); + ResourceModel->reserveResources(nullptr); bumpCycle(); releasePending(); } if (Available.size() == 1) return *Available.begin(); - return NULL; + return nullptr; } #ifndef NDEBUG @@ -424,7 +424,7 @@ void ConvergingVLIWScheduler::traceCandidate(const char *Label, /// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor /// of SU, return it, otherwise return null. static SUnit *getSingleUnscheduledPred(SUnit *SU) { - SUnit *OnlyAvailablePred = 0; + SUnit *OnlyAvailablePred = nullptr; for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) { SUnit &Pred = *I->getSUnit(); @@ -432,7 +432,7 @@ static SUnit *getSingleUnscheduledPred(SUnit *SU) { // We found an available, but not scheduled, predecessor. If it's the // only one we have found, keep track of it... otherwise give up. if (OnlyAvailablePred && OnlyAvailablePred != &Pred) - return 0; + return nullptr; OnlyAvailablePred = &Pred; } } @@ -442,7 +442,7 @@ static SUnit *getSingleUnscheduledPred(SUnit *SU) { /// getSingleUnscheduledSucc - If there is exactly one unscheduled successor /// of SU, return it, otherwise return null. static SUnit *getSingleUnscheduledSucc(SUnit *SU) { - SUnit *OnlyAvailableSucc = 0; + SUnit *OnlyAvailableSucc = nullptr; for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); I != E; ++I) { SUnit &Succ = *I->getSUnit(); @@ -450,7 +450,7 @@ static SUnit *getSingleUnscheduledSucc(SUnit *SU) { // We found an available, but not scheduled, successor. If it's the // only one we have found, keep track of it... otherwise give up. 
if (OnlyAvailableSucc && OnlyAvailableSucc != &Succ) - return 0; + return nullptr; OnlyAvailableSucc = &Succ; } } @@ -639,7 +639,7 @@ SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) { if (DAG->top() == DAG->bottom()) { assert(Top.Available.empty() && Top.Pending.empty() && Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage"); - return NULL; + return nullptr; } SUnit *SU; if (llvm::ForceTopDown) { diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp index d829dbc82a2..65bbad5225e 100644 --- a/lib/Target/Hexagon/HexagonNewValueJump.cpp +++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp @@ -394,8 +394,8 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { bool MO2IsKill = false; MachineBasicBlock::iterator jmpPos; MachineBasicBlock::iterator cmpPos; - MachineInstr *cmpInstr = NULL, *jmpInstr = NULL; - MachineBasicBlock *jmpTarget = NULL; + MachineInstr *cmpInstr = nullptr, *jmpInstr = nullptr; + MachineBasicBlock *jmpTarget = nullptr; bool afterRA = false; bool isSecondOpReg = false; bool isSecondOpNewified = false; diff --git a/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp b/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp index ca7f9f63104..2f1ee41bb83 100644 --- a/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp +++ b/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp @@ -222,7 +222,8 @@ bool HexagonSplitTFRCondSets::runOnMachineFunction(MachineFunction &Fn) { static void initializePassOnce(PassRegistry &Registry) { const char *Name = "Hexagon Split TFRCondSets"; PassInfo *PI = new PassInfo(Name, "hexagon-split-tfr", - &HexagonSplitTFRCondSets::ID, 0, false, false); + &HexagonSplitTFRCondSets::ID, nullptr, false, + false); Registry.registerPass(*PI, true); } diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp index b0d88609b7f..867b705a058 100644 --- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ 
b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -604,7 +604,7 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI, // evaluate identically unsigned predRegNumSrc = 0; unsigned predRegNumDst = 0; - const TargetRegisterClass* predRegClass = NULL; + const TargetRegisterClass* predRegClass = nullptr; // Get predicate register used in the source instruction for(unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) { @@ -1173,7 +1173,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) { // of that (IsCallDependent) function. Bug 6216 is opened for this. // unsigned DepReg = 0; - const TargetRegisterClass* RC = NULL; + const TargetRegisterClass* RC = nullptr; if (DepType == SDep::Data) { DepReg = SUJ->Succs[i].getReg(); RC = QRI->getMinimalPhysRegClass(DepReg); diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp index f1a65c3f506..141e514ce41 100644 --- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp +++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp @@ -21,7 +21,7 @@ void HexagonMCAsmInfo::anchor() {} HexagonMCAsmInfo::HexagonMCAsmInfo(StringRef TT) { Data16bitsDirective = "\t.half\t"; Data32bitsDirective = "\t.word\t"; - Data64bitsDirective = 0; // .xword is only supported by V9. + Data64bitsDirective = nullptr; // .xword is only supported by V9. ZeroDirective = "\t.skip\t"; CommentString = "//"; HasLEB128 = true; diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp index 25a6532222c..581674dd6cb 100644 --- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp +++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp @@ -60,7 +60,7 @@ static MCAsmInfo *createHexagonMCAsmInfo(const MCRegisterInfo &MRI, // VirtualFP = (R30 + #0). 
MCCFIInstruction Inst = MCCFIInstruction::createDefCfa( - 0, Hexagon::R30, 0); + nullptr, Hexagon::R30, 0); MAI->addInitialFrameState(Inst); return MAI; diff --git a/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp b/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp index b40f37c12a9..72adb45f9be 100644 --- a/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp +++ b/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp @@ -66,7 +66,7 @@ static MCInstPrinter *createMSP430MCInstPrinter(const Target &T, const MCSubtargetInfo &STI) { if (SyntaxVariant == 0) return new MSP430InstPrinter(MAI, MII, MRI); - return 0; + return nullptr; } extern "C" void LLVMInitializeMSP430TargetMC() { diff --git a/lib/Target/MSP430/MSP430AsmPrinter.cpp b/lib/Target/MSP430/MSP430AsmPrinter.cpp index c65f1bda2d8..73ef2cdb233 100644 --- a/lib/Target/MSP430/MSP430AsmPrinter.cpp +++ b/lib/Target/MSP430/MSP430AsmPrinter.cpp @@ -47,7 +47,7 @@ namespace { } void printOperand(const MachineInstr *MI, int OpNum, - raw_ostream &O, const char* Modifier = 0); + raw_ostream &O, const char* Modifier = nullptr); void printSrcMemOperand(const MachineInstr *MI, int OpNum, raw_ostream &O); bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, diff --git a/lib/Target/MSP430/MSP430FrameLowering.cpp b/lib/Target/MSP430/MSP430FrameLowering.cpp index ce078a30db4..82c8b298c51 100644 --- a/lib/Target/MSP430/MSP430FrameLowering.cpp +++ b/lib/Target/MSP430/MSP430FrameLowering.cpp @@ -242,7 +242,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, // alignment boundary. 
Amount = (Amount+StackAlign-1)/StackAlign*StackAlign; - MachineInstr *New = 0; + MachineInstr *New = nullptr; if (Old->getOpcode() == TII.getCallFrameSetupOpcode()) { New = BuildMI(MF, Old->getDebugLoc(), TII.get(MSP430::SUB16ri), MSP430::SPW) diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp index 9b33275deeb..235a4b5eaf8 100644 --- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp +++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp @@ -54,17 +54,17 @@ namespace { unsigned Align; // CP alignment. MSP430ISelAddressMode() - : BaseType(RegBase), Disp(0), GV(0), CP(0), BlockAddr(0), - ES(0), JT(-1), Align(0) { + : BaseType(RegBase), Disp(0), GV(nullptr), CP(nullptr), + BlockAddr(nullptr), ES(nullptr), JT(-1), Align(0) { } bool hasSymbolicDisplacement() const { - return GV != 0 || CP != 0 || ES != 0 || JT != -1; + return GV != nullptr || CP != nullptr || ES != nullptr || JT != -1; } void dump() { errs() << "MSP430ISelAddressMode " << this << '\n'; - if (BaseType == RegBase && Base.Reg.getNode() != 0) { + if (BaseType == RegBase && Base.Reg.getNode() != nullptr) { errs() << "Base.Reg "; Base.Reg.getNode()->dump(); } else if (BaseType == FrameIndexBase) { @@ -201,7 +201,7 @@ bool MSP430DAGToDAGISel::MatchAddress(SDValue N, MSP430ISelAddressMode &AM) { case ISD::FrameIndex: if (AM.BaseType == MSP430ISelAddressMode::RegBase - && AM.Base.Reg.getNode() == 0) { + && AM.Base.Reg.getNode() == nullptr) { AM.BaseType = MSP430ISelAddressMode::FrameIndexBase; AM.Base.FrameIndex = cast(N)->getIndex(); return false; @@ -230,7 +230,7 @@ bool MSP430DAGToDAGISel::MatchAddress(SDValue N, MSP430ISelAddressMode &AM) { // Start with the LHS as an addr mode. if (!MatchAddress(N.getOperand(0), AM) && // Address could not have picked a GV address for the displacement. - AM.GV == NULL && + AM.GV == nullptr && // Check to see if the LHS & C is zero. 
CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) { AM.Disp += Offset; @@ -332,7 +332,7 @@ static bool isValidIndexedLoad(const LoadSDNode *LD) { SDNode *MSP430DAGToDAGISel::SelectIndexedLoad(SDNode *N) { LoadSDNode *LD = cast(N); if (!isValidIndexedLoad(LD)) - return NULL; + return nullptr; MVT VT = LD->getMemoryVT().getSimpleVT(); @@ -345,7 +345,7 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedLoad(SDNode *N) { Opcode = MSP430::MOV16rm_POST; break; default: - return NULL; + return nullptr; } return CurDAG->getMachineNode(Opcode, SDLoc(N), @@ -361,7 +361,7 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedBinOp(SDNode *Op, IsLegalToFold(N1, Op, Op, OptLevel)) { LoadSDNode *LD = cast(N1); if (!isValidIndexedLoad(LD)) - return NULL; + return nullptr; MVT VT = LD->getMemoryVT().getSimpleVT(); unsigned Opc = (VT == MVT::i16 ? Opc16 : Opc8); @@ -380,7 +380,7 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedBinOp(SDNode *Op, return ResNode; } - return NULL; + return nullptr; } @@ -398,7 +398,7 @@ SDNode *MSP430DAGToDAGISel::Select(SDNode *Node) { Node->dump(CurDAG); errs() << "\n"); Node->setNodeId(-1); - return NULL; + return nullptr; } // Few custom selection stuff. 
@@ -486,7 +486,7 @@ SDNode *MSP430DAGToDAGISel::Select(SDNode *Node) { SDNode *ResNode = SelectCode(Node); DEBUG(errs() << "=> "); - if (ResNode == NULL || ResNode == Node) + if (ResNode == nullptr || ResNode == Node) DEBUG(Node->dump(CurDAG)); else DEBUG(ResNode->dump(CurDAG)); diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index 434ab98b8ae..541e5a1aaac 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -629,7 +629,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee, } else { assert(VA.isMemLoc()); - if (StackPtr.getNode() == 0) + if (!StackPtr.getNode()) StackPtr = DAG.getCopyFromReg(Chain, dl, MSP430::SPW, getPointerTy()); SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), @@ -1148,7 +1148,7 @@ bool MSP430TargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, const char *MSP430TargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { - default: return NULL; + default: return nullptr; case MSP430ISD::RET_FLAG: return "MSP430ISD::RET_FLAG"; case MSP430ISD::RETI_FLAG: return "MSP430ISD::RETI_FLAG"; case MSP430ISD::RRA: return "MSP430ISD::RRA"; diff --git a/lib/Target/MSP430/MSP430InstrInfo.cpp b/lib/Target/MSP430/MSP430InstrInfo.cpp index 91c19b8ba28..0c04ddbb110 100644 --- a/lib/Target/MSP430/MSP430InstrInfo.cpp +++ b/lib/Target/MSP430/MSP430InstrInfo.cpp @@ -208,11 +208,11 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, while (std::next(I) != MBB.end()) std::next(I)->eraseFromParent(); Cond.clear(); - FBB = 0; + FBB = nullptr; // Delete the JMP if it's equivalent to a fall-through. 
if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { - TBB = 0; + TBB = nullptr; I->eraseFromParent(); I = MBB.end(); continue; diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp index ae64809d57b..ac899a51062 100644 --- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -469,7 +469,7 @@ private: public: void addExpr(MCInst &Inst, const MCExpr *Expr) const { // Add as immediate when possible. Null MCExpr = 0. - if (Expr == 0) + if (!Expr) Inst.addOperand(MCOperand::CreateImm(0)); else if (const MCConstantExpr *CE = dyn_cast(Expr)) Inst.addOperand(MCOperand::CreateImm(CE->getValue())); @@ -1616,7 +1616,7 @@ bool MipsAsmParser::parseMemOffset(const MCExpr *&Res, bool isParenExpr) { MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand( SmallVectorImpl &Operands) { DEBUG(dbgs() << "parseMemOperand\n"); - const MCExpr *IdVal = 0; + const MCExpr *IdVal = nullptr; SMLoc S; bool isParenExpr = false; MipsAsmParser::OperandMatchResultTy Res = MatchOperand_NoMatch; @@ -1672,7 +1672,7 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand( Parser.Lex(); // Eat the ')' token. - if (IdVal == 0) + if (!IdVal) IdVal = MCConstantExpr::Create(0, getContext()); // Replace the register operand with the memory operand. 
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp index 0f99ecc5484..153974e470f 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp @@ -31,7 +31,7 @@ using namespace llvm; // Prepare value for the target space for it static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value, - MCContext *Ctx = NULL) { + MCContext *Ctx = nullptr) { unsigned Kind = Fixup.getKind(); diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp index 8577e270982..8e5abaf8308 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp @@ -79,7 +79,7 @@ static MCAsmInfo *createMipsMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) { MCAsmInfo *MAI = new MipsMCAsmInfo(TT); unsigned SP = MRI.getDwarfRegNum(Mips::SP, true); - MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, SP, 0); + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, SP, 0); MAI->addInitialFrameState(Inst); return MAI; diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp index 4ad15126732..14055d608e1 100644 --- a/lib/Target/Mips/Mips16HardFloat.cpp +++ b/lib/Target/Mips/Mips16HardFloat.cpp @@ -407,11 +407,11 @@ static bool fixupFPReturnAndCall CallInst::Create(F, Params, "", &Inst ); } else if (const CallInst *CI = dyn_cast(I)) { const Value* V = CI->getCalledValue(); - const Type* T = 0; + const Type* T = nullptr; if (V) T = V->getType(); - const PointerType *PFT=0; + const PointerType *PFT=nullptr; if (T) PFT = dyn_cast(T); - const FunctionType *FT=0; + const FunctionType *FT=nullptr; if (PFT) FT = dyn_cast(PFT->getElementType()); Function *F_ = CI->getCalledFunction(); if (FT && needsFPReturnHelper(*FT) && diff --git a/lib/Target/Mips/Mips16HardFloatInfo.cpp b/lib/Target/Mips/Mips16HardFloatInfo.cpp index 
d8b685e0dca..2eb6e5ddd2d 100644 --- a/lib/Target/Mips/Mips16HardFloatInfo.cpp +++ b/lib/Target/Mips/Mips16HardFloatInfo.cpp @@ -30,7 +30,7 @@ const FuncNameSignature PredefinedFuncs[] = { { "__fixunssfsi", { FSig, NoFPRet } }, { "__fixunssfdi", { FSig, NoFPRet } }, { "__floatundisf", { NoSig, FRet } }, - { 0, { NoSig, NoFPRet } } + { nullptr, { NoSig, NoFPRet } } }; // just do a search for now. there are very few of these special cases. @@ -44,7 +44,7 @@ extern FuncSignature const *findFuncSignature(const char *name) { return &PredefinedFuncs[i].Signature; i++; } - return 0; + return nullptr; } } } diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp index cec6aed7bec..92b9e6d74f1 100644 --- a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp +++ b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp @@ -45,7 +45,7 @@ bool Mips16DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) { std::pair Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, SDLoc DL, EVT Ty, bool HasLo, bool HasHi) { - SDNode *Lo = 0, *Hi = 0; + SDNode *Lo = nullptr, *Hi = nullptr; SDNode *Mul = CurDAG->getMachineNode(Opc, DL, MVT::Glue, N->getOperand(0), N->getOperand(1)); SDValue InFlag = SDValue(Mul, 0); @@ -298,7 +298,7 @@ std::pair Mips16DAGToDAGISel::selectNode(SDNode *Node) { if (!SDValue(Node, 1).use_empty()) ReplaceUses(SDValue(Node, 1), SDValue(LoHi.second, 0)); - return std::make_pair(true, (SDNode*)NULL); + return std::make_pair(true, nullptr); } case ISD::MULHS: @@ -309,7 +309,7 @@ std::pair Mips16DAGToDAGISel::selectNode(SDNode *Node) { } } - return std::make_pair(false, (SDNode*)NULL); + return std::make_pair(false, nullptr); } FunctionPass *llvm::createMips16ISelDag(MipsTargetMachine &TM) { diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp index 4b4f648bfe7..0c7dc91ec44 100644 --- a/lib/Target/Mips/Mips16ISelLowering.cpp +++ b/lib/Target/Mips/Mips16ISelLowering.cpp @@ -354,7 +354,7 @@ unsigned int 
Mips16TargetLowering::getMips16HelperFunctionStubNumber #define T P "0" , T1 #define P P_ static char const * vMips16Helper[MAX_STUB_NUMBER+1] = - {0, T1 }; + {nullptr, T1 }; #undef P #define P P_ "sf_" static char const * sfMips16Helper[MAX_STUB_NUMBER+1] = @@ -431,7 +431,7 @@ getOpndList(SmallVectorImpl &Ops, SelectionDAG &DAG = CLI.DAG; MachineFunction &MF = DAG.getMachineFunction(); MipsFunctionInfo *FuncInfo = MF.getInfo(); - const char* Mips16HelperFunction = 0; + const char* Mips16HelperFunction = nullptr; bool NeedMips16Helper = false; if (Subtarget->inMips16HardFloat()) { diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp index ffa56d98f6c..6d3a4f4a244 100644 --- a/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/lib/Target/Mips/MipsAsmPrinter.cpp @@ -837,7 +837,7 @@ void MipsAsmPrinter::EmitFPCallStub( const MCSectionELF *M = OutContext.getELFSection( ".mips16.call.fp." + std::string(Symbol), ELF::SHT_PROGBITS, ELF::SHF_ALLOC | ELF::SHF_EXECINSTR, SectionKind::getText()); - OutStreamer.SwitchSection(M, 0); + OutStreamer.SwitchSection(M, nullptr); // // .align 2 // diff --git a/lib/Target/Mips/MipsCodeEmitter.cpp b/lib/Target/Mips/MipsCodeEmitter.cpp index 13a1dd38972..de9c5353a28 100644 --- a/lib/Target/Mips/MipsCodeEmitter.cpp +++ b/lib/Target/Mips/MipsCodeEmitter.cpp @@ -66,8 +66,8 @@ class MipsCodeEmitter : public MachineFunctionPass { public: MipsCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) - : MachineFunctionPass(ID), JTI(0), II(0), TD(0), - TM(tm), MCE(mce), MCPEs(0), MJTEs(0), + : MachineFunctionPass(ID), JTI(nullptr), II(nullptr), TD(nullptr), + TM(tm), MCE(mce), MCPEs(nullptr), MJTEs(nullptr), IsPIC(TM.getRelocationModel() == Reloc::PIC_) {} bool runOnMachineFunction(MachineFunction &MF); @@ -139,7 +139,7 @@ bool MipsCodeEmitter::runOnMachineFunction(MachineFunction &MF) { TD = Target.getDataLayout(); Subtarget = &TM.getSubtarget (); MCPEs = &MF.getConstantPool()->getConstants(); - MJTEs = 0; + MJTEs = nullptr; if 
(MF.getJumpTableInfo()) MJTEs = &MF.getJumpTableInfo()->getJumpTables(); JTI->Initialize(MF, IsPIC, Subtarget->isLittle()); MCE.setModuleInfo(&getAnalysis ()); diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp index 9d28727eed0..34f68f18599 100644 --- a/lib/Target/Mips/MipsConstantIslandPass.cpp +++ b/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -368,7 +368,7 @@ namespace { : MachineFunctionPass(ID), TM(tm), IsPIC(TM.getRelocationModel() == Reloc::PIC_), ABI(TM.getSubtarget().getTargetABI()), - STI(&TM.getSubtarget()), MF(0), MCP(0), + STI(&TM.getSubtarget()), MF(nullptr), MCP(nullptr), PrescannedForConstants(false){} virtual const char *getPassName() const { @@ -628,7 +628,7 @@ MipsConstantIslands::CPEntry if (CPEs[i].CPEMI == CPEMI) return &CPEs[i]; } - return NULL; + return nullptr; } /// getCPELogAlign - Returns the required alignment of the constant pool entry @@ -1065,7 +1065,7 @@ bool MipsConstantIslands::decrementCPEReferenceCount(unsigned CPI, assert(CPE && "Unexpected!"); if (--CPE->RefCount == 0) { removeDeadCPEMI(CPEMI); - CPE->CPEMI = NULL; + CPE->CPEMI = nullptr; --NumCPEs; return true; } @@ -1098,7 +1098,7 @@ int MipsConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) if (CPEs[i].CPEMI == CPEMI) continue; // Removing CPEs can leave empty entries, skip - if (CPEs[i].CPEMI == NULL) + if (CPEs[i].CPEMI == nullptr) continue; if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(), U.NegOk)) { @@ -1154,7 +1154,7 @@ int MipsConstantIslands::findLongFormInRangeCPEntry if (CPEs[i].CPEMI == CPEMI) continue; // Removing CPEs can leave empty entries, skip - if (CPEs[i].CPEMI == NULL) + if (CPEs[i].CPEMI == nullptr) continue; if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getLongFormMaxDisp(), U.NegOk)) { @@ -1486,7 +1486,7 @@ bool MipsConstantIslands::removeUnusedCPEntries() { for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) { if (CPEs[j].RefCount == 0 && 
CPEs[j].CPEMI) { removeDeadCPEMI(CPEs[j].CPEMI); - CPEs[j].CPEMI = NULL; + CPEs[j].CPEMI = nullptr; MadeChange = true; } } diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp index 84228b44805..4549873b235 100644 --- a/lib/Target/Mips/MipsDelaySlotFiller.cpp +++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp @@ -408,7 +408,7 @@ bool LoadFromStackOrConst::hasHazard_(const MachineInstr &MI) { (*MI.memoperands_begin())->getPseudoValue()) { if (isa(PSV)) return false; - return !PSV->isConstant(0) && PSV != PseudoSourceValue::getStack(); + return !PSV->isConstant(nullptr) && PSV != PseudoSourceValue::getStack(); } return true; @@ -644,7 +644,7 @@ bool Filler::searchSuccBBs(MachineBasicBlock &MBB, Iter Slot) const { MachineBasicBlock *Filler::selectSuccBB(MachineBasicBlock &B) const { if (B.succ_empty()) - return NULL; + return nullptr; // Select the successor with the larget edge weight. auto &Prob = getAnalysis(); @@ -653,14 +653,14 @@ MachineBasicBlock *Filler::selectSuccBB(MachineBasicBlock &B) const { const MachineBasicBlock *Dst1) { return Prob.getEdgeWeight(&B, Dst0) < Prob.getEdgeWeight(&B, Dst1); }); - return S->isLandingPad() ? NULL : S; + return S->isLandingPad() ? 
nullptr : S; } std::pair Filler::getBranch(MachineBasicBlock &MBB, const MachineBasicBlock &Dst) const { const MipsInstrInfo *TII = static_cast(TM.getInstrInfo()); - MachineBasicBlock *TrueBB = 0, *FalseBB = 0; + MachineBasicBlock *TrueBB = nullptr, *FalseBB = nullptr; SmallVector BranchInstrs; SmallVector Cond; @@ -668,11 +668,11 @@ Filler::getBranch(MachineBasicBlock &MBB, const MachineBasicBlock &Dst) const { TII->AnalyzeBranch(MBB, TrueBB, FalseBB, Cond, false, BranchInstrs); if ((R == MipsInstrInfo::BT_None) || (R == MipsInstrInfo::BT_NoBranch)) - return std::make_pair(R, (MachineInstr*)NULL); + return std::make_pair(R, nullptr); if (R != MipsInstrInfo::BT_CondUncond) { if (!hasUnoccupiedSlot(BranchInstrs[0])) - return std::make_pair(MipsInstrInfo::BT_None, (MachineInstr*)NULL); + return std::make_pair(MipsInstrInfo::BT_None, nullptr); assert(((R != MipsInstrInfo::BT_Uncond) || (TrueBB == &Dst))); @@ -689,7 +689,7 @@ Filler::getBranch(MachineBasicBlock &MBB, const MachineBasicBlock &Dst) const { if (hasUnoccupiedSlot(BranchInstrs[1]) && (FalseBB == &Dst)) return std::make_pair(MipsInstrInfo::BT_Uncond, BranchInstrs[1]); - return std::make_pair(MipsInstrInfo::BT_None, (MachineInstr*)NULL); + return std::make_pair(MipsInstrInfo::BT_None, nullptr); } bool Filler::examinePred(MachineBasicBlock &Pred, const MachineBasicBlock &Succ, diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp index f1cd55c2a64..4eb9d4356ef 100644 --- a/lib/Target/Mips/MipsISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp @@ -183,7 +183,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) { if (Node->isMachineOpcode()) { DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n"); Node->setNodeId(-1); - return NULL; + return nullptr; } // See if subclasses can handle this node. 
@@ -213,7 +213,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) { SDNode *ResNode = SelectCode(Node); DEBUG(errs() << "=> "); - if (ResNode == NULL || ResNode == Node) + if (ResNode == nullptr || ResNode == Node) DEBUG(Node->dump(CurDAG)); else DEBUG(ResNode->dump(CurDAG)); diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 2f19701c69c..7b0074e66df 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -204,7 +204,7 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const { case MipsISD::PCKEV: return "MipsISD::PCKEV"; case MipsISD::PCKOD: return "MipsISD::PCKOD"; case MipsISD::INSVE: return "MipsISD::INSVE"; - default: return NULL; + default: return nullptr; } } @@ -2819,7 +2819,7 @@ MipsTargetLowering::getSingleConstraintMatchWeight( Value *CallOperandVal = info.CallOperandVal; // If we don't have a value, we can't do a match, // but allow it at the lowest weight. - if (CallOperandVal == NULL) + if (!CallOperandVal) return CW_Default; Type *type = CallOperandVal->getType(); // Look at the constraint type. @@ -2897,12 +2897,12 @@ parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const { std::pair R = parsePhysicalReg(C, Prefix, Reg); if (!R.first) - return std::make_pair((unsigned)0, (const TargetRegisterClass*)0); + return std::make_pair(0U, nullptr); if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo. // No numeric characters follow "hi" or "lo". if (R.second) - return std::make_pair((unsigned)0, (const TargetRegisterClass*)0); + return std::make_pair(0U, nullptr); RC = TRI->getRegClass(Prefix == "hi" ? Mips::HI32RegClassID : Mips::LO32RegClassID); @@ -2912,7 +2912,7 @@ parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const { // No numeric characters follow the name. 
if (R.second) - return std::make_pair((unsigned)0, (const TargetRegisterClass *)0); + return std::make_pair(0U, nullptr); Reg = StringSwitch(Prefix) .Case("$msair", Mips::MSAIR) @@ -2926,14 +2926,14 @@ parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const { .Default(0); if (!Reg) - return std::make_pair((unsigned)0, (const TargetRegisterClass *)0); + return std::make_pair(0U, nullptr); RC = TRI->getRegClass(Mips::MSACtrlRegClassID); return std::make_pair(Reg, RC); } if (!R.second) - return std::make_pair((unsigned)0, (const TargetRegisterClass*)0); + return std::make_pair(0U, nullptr); if (Prefix == "$f") { // Parse $f0-$f31. // If the size of FP registers is 64-bit or Reg is an even number, select @@ -2981,7 +2981,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const if (VT == MVT::i64 && isGP64bit()) return std::make_pair(0U, &Mips::GPR64RegClass); // This will generate an error message - return std::make_pair(0u, static_cast(0)); + return std::make_pair(0U, nullptr); case 'f': // FPU or MSA register if (VT == MVT::v16i8) return std::make_pair(0U, &Mips::MSA128BRegClass); @@ -3011,7 +3011,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const case 'x': // register suitable for indirect jump // Fixme: Not triggering the use of both hi and low // This will generate an error message - return std::make_pair(0u, static_cast(0)); + return std::make_pair(0U, nullptr); } } @@ -3030,7 +3030,7 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector&Ops, SelectionDAG &DAG) const { - SDValue Result(0, 0); + SDValue Result; // Only support length 1 constraints for now. 
if (Constraint.length() > 1) return; @@ -3293,7 +3293,7 @@ analyzeFormalArguments(const SmallVectorImpl &Args, continue; } - MVT RegVT = getRegVT(ArgVT, FuncArg->getType(), 0, IsSoftFloat); + MVT RegVT = getRegVT(ArgVT, FuncArg->getType(), nullptr, IsSoftFloat); if (!FixedFn(I, ArgVT, RegVT, CCValAssign::Full, ArgFlags, CCInfo)) continue; @@ -3341,7 +3341,7 @@ analyzeCallResult(const SmallVectorImpl &Ins, bool IsSoftFloat, void MipsTargetLowering::MipsCC:: analyzeReturn(const SmallVectorImpl &Outs, bool IsSoftFloat, const Type *RetTy) const { - analyzeReturn(Outs, IsSoftFloat, 0, RetTy); + analyzeReturn(Outs, IsSoftFloat, nullptr, RetTy); } void MipsTargetLowering::MipsCC::handleByValArg(unsigned ValNo, MVT ValVT, @@ -3611,7 +3611,7 @@ void MipsTargetLowering::writeVarArgRegs(std::vector &OutChains, SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy()); SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff, MachinePointerInfo(), false, false, 0); - cast(Store.getNode())->getMemOperand()->setValue((Value*)0); + cast(Store.getNode())->getMemOperand()->setValue((Value*)nullptr); OutChains.push_back(Store); } } diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp index c411c039fa5..d6da6c6b172 100644 --- a/lib/Target/Mips/MipsInstrInfo.cpp +++ b/lib/Target/Mips/MipsInstrInfo.cpp @@ -195,7 +195,7 @@ AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, if (I == REnd || !isUnpredicatedTerminator(&*I)) { // This block ends with no branches (it just falls through to its succ). // Leave TBB/FBB null. - TBB = FBB = NULL; + TBB = FBB = nullptr; return BT_NoBranch; } @@ -209,7 +209,7 @@ AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, // Get the second to last instruction in the block. 
unsigned SecondLastOpc = 0; - MachineInstr *SecondLastInst = NULL; + MachineInstr *SecondLastInst = nullptr; if (++I != REnd) { SecondLastInst = &*I; diff --git a/lib/Target/Mips/MipsLongBranch.cpp b/lib/Target/Mips/MipsLongBranch.cpp index e028994876f..e9514c3b20d 100644 --- a/lib/Target/Mips/MipsLongBranch.cpp +++ b/lib/Target/Mips/MipsLongBranch.cpp @@ -56,7 +56,7 @@ namespace { bool HasLongBranch; MachineInstr *Br; - MBBInfo() : Size(0), HasLongBranch(false), Br(0) {} + MBBInfo() : Size(0), HasLongBranch(false), Br(nullptr) {} }; class MipsLongBranch : public MachineFunctionPass { @@ -111,7 +111,7 @@ static MachineBasicBlock *getTargetMBB(const MachineInstr &Br) { } assert(false && "This instruction does not have an MBB operand."); - return 0; + return nullptr; } // Traverse the list of instructions backwards until a non-debug instruction is diff --git a/lib/Target/Mips/MipsOptimizePICCall.cpp b/lib/Target/Mips/MipsOptimizePICCall.cpp index eead786f856..f06cadd8079 100644 --- a/lib/Target/Mips/MipsOptimizePICCall.cpp +++ b/lib/Target/Mips/MipsOptimizePICCall.cpp @@ -103,13 +103,13 @@ char OptimizePICCall::ID = 0; /// Return the first MachineOperand of MI if it is a used virtual register. static MachineOperand *getCallTargetRegOpnd(MachineInstr &MI) { if (MI.getNumOperands() == 0) - return 0; + return nullptr; MachineOperand &MO = MI.getOperand(0); if (!MO.isReg() || !MO.isUse() || !TargetRegisterInfo::isVirtualRegister(MO.getReg())) - return 0; + return nullptr; return &MO; } @@ -158,7 +158,7 @@ static void eraseGPOpnd(MachineInstr &MI) { llvm_unreachable(0); } -MBBInfo::MBBInfo(MachineDomTreeNode *N) : Node(N), HTScope(0) {} +MBBInfo::MBBInfo(MachineDomTreeNode *N) : Node(N), HTScope(nullptr) {} const MachineDomTreeNode *MBBInfo::getNode() const { return Node; } @@ -256,7 +256,7 @@ bool OptimizePICCall::isCallViaRegister(MachineInstr &MI, unsigned &Reg, // Get the instruction that loads the function address from the GOT. 
Reg = MO->getReg(); - Val = (Value*)0; + Val = (Value*)nullptr; MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); MachineInstr *DefMI = MRI.getVRegDef(Reg); diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp index b53e26e018f..d5385be7ddb 100644 --- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp @@ -413,7 +413,7 @@ bool MipsSEDAGToDAGISel::selectVSplat(SDNode *N, APInt &Imm) const { BuildVectorSDNode *Node = dyn_cast(N); - if (Node == NULL) + if (!Node) return false; APInt SplatValue, SplatUndef; @@ -814,16 +814,16 @@ std::pair MipsSEDAGToDAGISel::selectNode(SDNode *Node) { EVT ViaVecTy; if (!Subtarget.hasMSA() || !BVN->getValueType(0).is128BitVector()) - return std::make_pair(false, (SDNode*)NULL); + return std::make_pair(false, nullptr); if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs, 8, !Subtarget.isLittle())) - return std::make_pair(false, (SDNode*)NULL); + return std::make_pair(false, nullptr); switch (SplatBitSize) { default: - return std::make_pair(false, (SDNode*)NULL); + return std::make_pair(false, nullptr); case 8: LdiOp = Mips::LDI_B; ViaVecTy = MVT::v16i8; @@ -843,7 +843,7 @@ std::pair MipsSEDAGToDAGISel::selectNode(SDNode *Node) { } if (!SplatValue.isSignedIntN(10)) - return std::make_pair(false, (SDNode*)NULL); + return std::make_pair(false, nullptr); SDValue Imm = CurDAG->getTargetConstant(SplatValue, ViaVecTy.getVectorElementType()); @@ -869,7 +869,7 @@ std::pair MipsSEDAGToDAGISel::selectNode(SDNode *Node) { } - return std::make_pair(false, (SDNode*)NULL); + return std::make_pair(false, nullptr); } FunctionPass *llvm::createMipsSEISelDag(MipsTargetMachine &TM) { diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp index 628f0bdeae0..783b5c92c9b 100644 --- a/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/lib/Target/Mips/MipsSEISelLowering.cpp @@ -508,7 +508,7 @@ static SDValue 
performANDCombine(SDNode *N, SelectionDAG &DAG, static bool isVSplat(SDValue N, APInt &Imm, bool IsLittleEndian) { BuildVectorSDNode *Node = dyn_cast(N.getNode()); - if (Node == NULL) + if (!Node) return false; APInt SplatValue, SplatUndef; @@ -1356,7 +1356,7 @@ static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG, } } - if (Exp2Imm.getNode() == NULL) { + if (!Exp2Imm.getNode()) { // We couldnt constant fold, do a vector shift instead // Extend i32 to i64 if necessary. Sign or zero extend doesn't matter since diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp index 094ee296295..f6f364f1a36 100644 --- a/lib/Target/Mips/MipsSEInstrInfo.cpp +++ b/lib/Target/Mips/MipsSEInstrInfo.cpp @@ -368,7 +368,7 @@ void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount, if (isInt<16>(Amount))// addi sp, sp, amount BuildMI(MBB, I, DL, get(ADDiu), SP).addReg(SP).addImm(Amount); else { // Expand immediate that doesn't fit in 16-bit. - unsigned Reg = loadImmediate(Amount, MBB, I, DL, 0); + unsigned Reg = loadImmediate(Amount, MBB, I, DL, nullptr); BuildMI(MBB, I, DL, get(ADDu), SP).addReg(SP).addReg(Reg, RegState::Kill); } } diff --git a/lib/Target/Mips/MipsSERegisterInfo.cpp b/lib/Target/Mips/MipsSERegisterInfo.cpp index 3178ba92ddf..0af1a6b9e5c 100644 --- a/lib/Target/Mips/MipsSERegisterInfo.cpp +++ b/lib/Target/Mips/MipsSERegisterInfo.cpp @@ -189,7 +189,7 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II, *static_cast( MBB.getParent()->getTarget().getInstrInfo()); unsigned Reg = TII.loadImmediate(Offset, MBB, II, DL, - OffsetBitSize == 16 ? &NewImm : NULL); + OffsetBitSize == 16 ? 
&NewImm : nullptr); BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(FrameReg) .addReg(Reg, RegState::Kill); diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp index af405aa5027..158ca901e58 100644 --- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp +++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp @@ -66,7 +66,7 @@ static MCInstPrinter *createNVPTXMCInstPrinter(const Target &T, const MCSubtargetInfo &STI) { if (SyntaxVariant == 0) return new NVPTXInstPrinter(MAI, MII, MRI, STI); - return 0; + return nullptr; } // Force static initialization. diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp index af6fc094331..4ec575ff34c 100644 --- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -132,7 +132,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { return MCSymbolRefExpr::Create(AP.GetBlockAddressSymbol(BA), Ctx); const ConstantExpr *CE = dyn_cast(CV); - if (CE == 0) + if (!CE) llvm_unreachable("Unknown constant value to lower!"); switch (CE->getOpcode()) { @@ -150,7 +150,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { raw_string_ostream OS(S); OS << "Unsupported expression in static initializer: "; CE->printAsOperand(OS, /*PrintType=*/ false, - !AP.MF ? 0 : AP.MF->getFunction()->getParent()); + !AP.MF ? nullptr : AP.MF->getFunction()->getParent()); report_fatal_error(OS.str()); } case Instruction::AddrSpaceCast: { @@ -165,7 +165,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { raw_string_ostream OS(S); OS << "Unsupported expression in static initializer: "; CE->printAsOperand(OS, /*PrintType=*/ false, - !AP.MF ? 0 : AP.MF->getFunction()->getParent()); + !AP.MF ? 
nullptr : AP.MF->getFunction()->getParent()); report_fatal_error(OS.str()); } case Instruction::GetElementPtr: { @@ -1038,7 +1038,7 @@ static bool canDemoteGlobalVar(const GlobalVariable *gv, Function const *&f) { if (Pty->getAddressSpace() != llvm::ADDRESS_SPACE_SHARED) return false; - const Function *oneFunc = 0; + const Function *oneFunc = nullptr; bool flag = usedInOneFunc(gv, oneFunc); if (flag == false) @@ -1395,10 +1395,10 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar, if (llvm::isSampler(*GVar)) { O << ".global .samplerref " << llvm::getSamplerName(*GVar); - const Constant *Initializer = NULL; + const Constant *Initializer = nullptr; if (GVar->hasInitializer()) Initializer = GVar->getInitializer(); - const ConstantInt *CI = NULL; + const ConstantInt *CI = nullptr; if (Initializer) CI = dyn_cast(Initializer); if (CI) { @@ -1465,7 +1465,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar, return; } - const Function *demotedFunc = 0; + const Function *demotedFunc = nullptr; if (!processDemoted && canDemoteGlobalVar(GVar, demotedFunc)) { O << "// " << GVar->getName().str() << " has been demoted\n"; if (localDecls.find(demotedFunc) != localDecls.end()) @@ -1637,7 +1637,7 @@ NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, bool useB4PTR) const { return "u32"; } llvm_unreachable("unexpected type"); - return NULL; + return nullptr; } void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar, @@ -2447,7 +2447,7 @@ void NVPTXAsmPrinter::emitSrcInText(StringRef filename, unsigned line) { } LineReader *NVPTXAsmPrinter::getReader(std::string filename) { - if (reader == NULL) { + if (!reader) { reader = new LineReader(filename); } diff --git a/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp b/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp index 45f0734c16c..4209ba36024 100644 --- a/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp +++ b/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp @@ -88,7 +88,8 @@ bool 
GenericToNVVM::runOnModule(Module &M) { !GV->getName().startswith("llvm.")) { GlobalVariable *NewGV = new GlobalVariable( M, GV->getType()->getElementType(), GV->isConstant(), - GV->getLinkage(), GV->hasInitializer() ? GV->getInitializer() : NULL, + GV->getLinkage(), + GV->hasInitializer() ? GV->getInitializer() : nullptr, "", GV, GV->getThreadLocalMode(), llvm::ADDRESS_SPACE_GLOBAL); NewGV->copyAttributesFrom(GV); GVMap[GV] = NewGV; @@ -162,7 +163,7 @@ Value *GenericToNVVM::getOrInsertCVTA(Module *M, Function *F, GlobalVariable *GV, IRBuilder<> &Builder) { PointerType *GVType = GV->getType(); - Value *CVTA = NULL; + Value *CVTA = nullptr; // See if the address space conversion requires the operand to be bitcast // to i8 addrspace(n)* first. diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index b874bdbba9c..ada4c223876 100644 --- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -119,10 +119,10 @@ SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) { if (N->isMachineOpcode()) { N->setNodeId(-1); - return NULL; // Already selected. + return nullptr; // Already selected. 
} - SDNode *ResNode = NULL; + SDNode *ResNode = nullptr; switch (N->getOpcode()) { case ISD::LOAD: ResNode = SelectLoad(N); @@ -289,7 +289,7 @@ SDNode *NVPTXDAGToDAGISel::SelectIntrinsicNoChain(SDNode *N) { unsigned IID = cast(N->getOperand(0))->getZExtValue(); switch (IID) { default: - return NULL; + return nullptr; case Intrinsic::nvvm_texsurf_handle_internal: return SelectTexSurfHandle(N); } @@ -367,14 +367,14 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { SDLoc dl(N); LoadSDNode *LD = cast(N); EVT LoadedVT = LD->getMemoryVT(); - SDNode *NVPTXLD = NULL; + SDNode *NVPTXLD = nullptr; // do not support pre/post inc/dec if (LD->isIndexed()) - return NULL; + return nullptr; if (!LoadedVT.isSimple()) - return NULL; + return nullptr; // Address Space Setting unsigned int codeAddrSpace = getCodeAddrSpace(LD, Subtarget); @@ -397,7 +397,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { else if (num == 4) vecType = NVPTX::PTXLdStInstCode::V4; else - return NULL; + return nullptr; } // Type Setting: fromType + fromTypeWidth @@ -446,7 +446,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { Opcode = NVPTX::LD_f64_avar; break; default: - return NULL; + return nullptr; } SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace), getI32Imm(vecType), getI32Imm(fromType), @@ -475,7 +475,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { Opcode = NVPTX::LD_f64_asi; break; default: - return NULL; + return nullptr; } SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace), getI32Imm(vecType), getI32Imm(fromType), @@ -505,7 +505,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { Opcode = NVPTX::LD_f64_ari_64; break; default: - return NULL; + return nullptr; } } else { switch (TargetVT) { @@ -528,7 +528,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { Opcode = NVPTX::LD_f64_ari; break; default: - return NULL; + return nullptr; } } SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace), @@ -557,7 +557,7 @@ SDNode 
*NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { Opcode = NVPTX::LD_f64_areg_64; break; default: - return NULL; + return nullptr; } } else { switch (TargetVT) { @@ -580,7 +580,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { Opcode = NVPTX::LD_f64_areg; break; default: - return NULL; + return nullptr; } } SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace), @@ -589,7 +589,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops); } - if (NVPTXLD != NULL) { + if (NVPTXLD) { MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1); MemRefs0[0] = cast(N)->getMemOperand(); cast(NVPTXLD)->setMemRefs(MemRefs0, MemRefs0 + 1); @@ -610,7 +610,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { EVT LoadedVT = MemSD->getMemoryVT(); if (!LoadedVT.isSimple()) - return NULL; + return nullptr; // Address Space Setting unsigned int CodeAddrSpace = getCodeAddrSpace(MemSD, Subtarget); @@ -656,7 +656,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { VecType = NVPTX::PTXLdStInstCode::V4; break; default: - return NULL; + return nullptr; } EVT EltVT = N->getValueType(0); @@ -664,11 +664,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { if (SelectDirectAddr(Op1, Addr)) { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LoadV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::LDV_i8_v2_avar; break; @@ -692,7 +692,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::LDV_i8_v4_avar; break; @@ -718,11 +718,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { : SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LoadV2: switch 
(EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::LDV_i8_v2_asi; break; @@ -746,7 +746,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::LDV_i8_v4_asi; break; @@ -773,11 +773,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { if (Subtarget.is64Bit()) { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LoadV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::LDV_i8_v2_ari_64; break; @@ -801,7 +801,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::LDV_i8_v4_ari_64; break; @@ -820,11 +820,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { } else { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LoadV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::LDV_i8_v2_ari; break; @@ -848,7 +848,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::LDV_i8_v4_ari; break; @@ -875,11 +875,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { if (Subtarget.is64Bit()) { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LoadV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::LDV_i8_v2_areg_64; break; @@ -903,7 +903,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; 
case MVT::i8: Opcode = NVPTX::LDV_i8_v4_areg_64; break; @@ -922,11 +922,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { } else { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LoadV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::LDV_i8_v2_areg; break; @@ -950,7 +950,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { case NVPTXISD::LoadV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::LDV_i8_v4_areg; break; @@ -996,11 +996,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { if (SelectDirectAddr(Op1, Addr)) { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LDGV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_avar; break; @@ -1024,7 +1024,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDUV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_avar; break; @@ -1048,7 +1048,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDGV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_avar; break; @@ -1066,7 +1066,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDUV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_avar; break; @@ -1092,11 +1092,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { if (Subtarget.is64Bit()) { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LDGV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case 
MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_ari64; break; @@ -1120,7 +1120,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDUV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_ari64; break; @@ -1144,7 +1144,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDGV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_ari64; break; @@ -1162,7 +1162,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDUV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_ari64; break; @@ -1181,11 +1181,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { } else { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LDGV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_ari32; break; @@ -1209,7 +1209,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDUV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_ari32; break; @@ -1233,7 +1233,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDGV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_ari32; break; @@ -1251,7 +1251,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDUV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_ari32; break; @@ -1277,11 +1277,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { if 
(Subtarget.is64Bit()) { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LDGV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg64; break; @@ -1305,7 +1305,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDUV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg64; break; @@ -1329,7 +1329,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDGV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg64; break; @@ -1347,7 +1347,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDUV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg64; break; @@ -1366,11 +1366,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { } else { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LDGV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg32; break; @@ -1394,7 +1394,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDUV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg32; break; @@ -1418,7 +1418,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDGV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg32; break; @@ -1436,7 +1436,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { case NVPTXISD::LDUV4: switch 
(EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg32; break; @@ -1470,14 +1470,14 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) { SDLoc dl(N); StoreSDNode *ST = cast(N); EVT StoreVT = ST->getMemoryVT(); - SDNode *NVPTXST = NULL; + SDNode *NVPTXST = nullptr; // do not support pre/post inc/dec if (ST->isIndexed()) - return NULL; + return nullptr; if (!StoreVT.isSimple()) - return NULL; + return nullptr; // Address Space Setting unsigned int codeAddrSpace = getCodeAddrSpace(ST, Subtarget); @@ -1500,7 +1500,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) { else if (num == 4) vecType = NVPTX::PTXLdStInstCode::V4; else - return NULL; + return nullptr; } // Type Setting: toType + toTypeWidth @@ -1544,7 +1544,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) { Opcode = NVPTX::ST_f64_avar; break; default: - return NULL; + return nullptr; } SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace), getI32Imm(vecType), getI32Imm(toType), @@ -1573,7 +1573,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) { Opcode = NVPTX::ST_f64_asi; break; default: - return NULL; + return nullptr; } SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace), getI32Imm(vecType), getI32Imm(toType), @@ -1603,7 +1603,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) { Opcode = NVPTX::ST_f64_ari_64; break; default: - return NULL; + return nullptr; } } else { switch (SourceVT) { @@ -1626,7 +1626,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) { Opcode = NVPTX::ST_f64_ari; break; default: - return NULL; + return nullptr; } } SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace), @@ -1655,7 +1655,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) { Opcode = NVPTX::ST_f64_areg_64; break; default: - return NULL; + return nullptr; } } else { switch (SourceVT) { @@ -1678,7 +1678,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) { Opcode = 
NVPTX::ST_f64_areg; break; default: - return NULL; + return nullptr; } } SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace), @@ -1687,7 +1687,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) { NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops); } - if (NVPTXST != NULL) { + if (NVPTXST) { MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1); MemRefs0[0] = cast(N)->getMemOperand(); cast(NVPTXST)->setMemRefs(MemRefs0, MemRefs0 + 1); @@ -1754,7 +1754,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { N2 = N->getOperand(5); break; default: - return NULL; + return nullptr; } StOps.push_back(getI32Imm(IsVolatile)); @@ -1766,11 +1766,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { if (SelectDirectAddr(N2, Addr)) { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v2_avar; break; @@ -1794,7 +1794,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { case NVPTXISD::StoreV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v4_avar; break; @@ -1816,11 +1816,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { : SelectADDRsi(N2.getNode(), N2, Base, Offset)) { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v2_asi; break; @@ -1844,7 +1844,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { case NVPTXISD::StoreV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v4_asi; break; @@ -1868,11 +1868,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { if (Subtarget.is64Bit()) { switch (N->getOpcode()) { default: 
- return NULL; + return nullptr; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v2_ari_64; break; @@ -1896,7 +1896,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { case NVPTXISD::StoreV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v4_ari_64; break; @@ -1915,11 +1915,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { } else { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v2_ari; break; @@ -1943,7 +1943,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { case NVPTXISD::StoreV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v4_ari; break; @@ -1966,11 +1966,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { if (Subtarget.is64Bit()) { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v2_areg_64; break; @@ -1994,7 +1994,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { case NVPTXISD::StoreV4: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v4_areg_64; break; @@ -2013,11 +2013,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { } else { switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::StoreV2: switch (EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v2_areg; break; @@ -2041,7 +2041,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { case NVPTXISD::StoreV4: switch 
(EltVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i8: Opcode = NVPTX::STV_i8_v4_areg; break; @@ -2082,7 +2082,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) { unsigned VecSize; switch (Node->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::LoadParam: VecSize = 1; break; @@ -2101,11 +2101,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) { switch (VecSize) { default: - return NULL; + return nullptr; case 1: switch (MemVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i1: Opc = NVPTX::LoadParamMemI8; break; @@ -2132,7 +2132,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) { case 2: switch (MemVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i1: Opc = NVPTX::LoadParamMemV2I8; break; @@ -2159,7 +2159,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) { case 4: switch (MemVT.getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i1: Opc = NVPTX::LoadParamMemV4I8; break; @@ -2212,7 +2212,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) { unsigned NumElts = 1; switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::StoreRetval: NumElts = 1; break; @@ -2237,11 +2237,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) { unsigned Opcode = 0; switch (NumElts) { default: - return NULL; + return nullptr; case 1: switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i1: Opcode = NVPTX::StoreRetvalI8; break; @@ -2268,7 +2268,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) { case 2: switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i1: Opcode = NVPTX::StoreRetvalV2I8; break; @@ -2295,7 +2295,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) { case 4: switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { default: - 
return NULL; + return nullptr; case MVT::i1: Opcode = NVPTX::StoreRetvalV4I8; break; @@ -2338,7 +2338,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) { unsigned NumElts = 1; switch (N->getOpcode()) { default: - return NULL; + return nullptr; case NVPTXISD::StoreParamU32: case NVPTXISD::StoreParamS32: case NVPTXISD::StoreParam: @@ -2369,11 +2369,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) { default: switch (NumElts) { default: - return NULL; + return nullptr; case 1: switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i1: Opcode = NVPTX::StoreParamI8; break; @@ -2400,7 +2400,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) { case 2: switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i1: Opcode = NVPTX::StoreParamV2I8; break; @@ -2427,7 +2427,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) { case 4: switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) { default: - return NULL; + return nullptr; case MVT::i1: Opcode = NVPTX::StoreParamV4I8; break; @@ -2484,12 +2484,12 @@ SDNode *NVPTXDAGToDAGISel::SelectTextureIntrinsic(SDNode *N) { SDValue Chain = N->getOperand(0); SDValue TexRef = N->getOperand(1); SDValue SampRef = N->getOperand(2); - SDNode *Ret = NULL; + SDNode *Ret = nullptr; unsigned Opc = 0; SmallVector Ops; switch (N->getOpcode()) { - default: return NULL; + default: return nullptr; case NVPTXISD::Tex1DFloatI32: Opc = NVPTX::TEX_1D_F32_I32; break; @@ -2628,11 +2628,11 @@ SDNode *NVPTXDAGToDAGISel::SelectTextureIntrinsic(SDNode *N) { SDNode *NVPTXDAGToDAGISel::SelectSurfaceIntrinsic(SDNode *N) { SDValue Chain = N->getOperand(0); SDValue TexHandle = N->getOperand(1); - SDNode *Ret = NULL; + SDNode *Ret = nullptr; unsigned Opc = 0; SmallVector Ops; switch (N->getOpcode()) { - default: return NULL; + default: return nullptr; case NVPTXISD::Suld1DI8Trap: Opc = NVPTX::SULD_1D_I8_TRAP; Ops.push_back(TexHandle); 
@@ -3055,7 +3055,7 @@ bool NVPTXDAGToDAGISel::SelectADDRri64(SDNode *OpNode, SDValue Addr, bool NVPTXDAGToDAGISel::ChkMemSDNodeAddressSpace(SDNode *N, unsigned int spN) const { - const Value *Src = NULL; + const Value *Src = nullptr; // Even though MemIntrinsicSDNode is a subclas of MemSDNode, // the classof() for MemSDNode does not include MemIntrinsicSDNode // (See SelectionDAGNodes.h). So we need to check for both. diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp index aee1832d7fc..029dab30b32 100644 --- a/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -75,7 +75,7 @@ static bool IsPTXVectorType(MVT VT) { /// LowerCall, and LowerReturn. static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty, SmallVectorImpl &ValueVTs, - SmallVectorImpl *Offsets = 0, + SmallVectorImpl *Offsets = nullptr, uint64_t StartingOffset = 0) { SmallVector TempVTs; SmallVector TempOffsets; @@ -245,7 +245,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM) const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { default: - return 0; + return nullptr; case NVPTXISD::CALL: return "NVPTXISD::CALL"; case NVPTXISD::RET_FLAG: @@ -1539,7 +1539,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments( if (isImageOrSamplerVal( theArgs[i], (theArgs[i]->getParent() ? 
theArgs[i]->getParent()->getParent() - : 0))) { + : nullptr))) { assert(isKernel && "Only kernels can have image/sampler params"); InVals.push_back(DAG.getConstant(i + 1, MVT::i32)); continue; @@ -2265,7 +2265,7 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic( case Intrinsic::nvvm_tex_3d_grad_v4f32_f32: { Info.opc = getOpcForTextureInstr(Intrinsic); Info.memVT = MVT::f32; - Info.ptrVal = NULL; + Info.ptrVal = nullptr; Info.offset = 0; Info.vol = 0; Info.readMem = true; @@ -2295,7 +2295,7 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic( case Intrinsic::nvvm_tex_3d_grad_v4i32_f32: { Info.opc = getOpcForTextureInstr(Intrinsic); Info.memVT = MVT::i32; - Info.ptrVal = NULL; + Info.ptrVal = nullptr; Info.offset = 0; Info.vol = 0; Info.readMem = true; @@ -2320,7 +2320,7 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic( case Intrinsic::nvvm_suld_3d_v4i8_trap: { Info.opc = getOpcForSurfaceInstr(Intrinsic); Info.memVT = MVT::i8; - Info.ptrVal = NULL; + Info.ptrVal = nullptr; Info.offset = 0; Info.vol = 0; Info.readMem = true; @@ -2345,7 +2345,7 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic( case Intrinsic::nvvm_suld_3d_v4i16_trap: { Info.opc = getOpcForSurfaceInstr(Intrinsic); Info.memVT = MVT::i16; - Info.ptrVal = NULL; + Info.ptrVal = nullptr; Info.offset = 0; Info.vol = 0; Info.readMem = true; @@ -2370,7 +2370,7 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic( case Intrinsic::nvvm_suld_3d_v4i32_trap: { Info.opc = getOpcForSurfaceInstr(Intrinsic); Info.memVT = MVT::i32; - Info.ptrVal = NULL; + Info.ptrVal = nullptr; Info.offset = 0; Info.vol = 0; Info.readMem = true; diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/lib/Target/NVPTX/NVPTXInstrInfo.cpp index 6808f0e67cc..cdc80887dc2 100644 --- a/lib/Target/NVPTX/NVPTXInstrInfo.cpp +++ b/lib/Target/NVPTX/NVPTXInstrInfo.cpp @@ -257,7 +257,7 @@ unsigned NVPTXInstrInfo::InsertBranch( "NVPTX branch conditions have two components!"); // One-way branch. 
- if (FBB == 0) { + if (!FBB) { if (Cond.empty()) // Unconditional branch BuildMI(&MBB, DL, get(NVPTX::GOTO)).addMBB(TBB); else // Conditional branch diff --git a/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp index 47f2c385ee1..c2ae5d3adf7 100644 --- a/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp +++ b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp @@ -60,7 +60,7 @@ bool NVPTXPrologEpilogPass::runOnMachineFunction(MachineFunction &MF) { for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { if (!MI->getOperand(i).isFI()) continue; - TRI.eliminateFrameIndex(MI, 0, i, NULL); + TRI.eliminateFrameIndex(MI, 0, i, nullptr); Modified = true; } } diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp index 8be22104a8f..387e5a9f705 100644 --- a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp +++ b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp @@ -87,7 +87,7 @@ NVPTXRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { // NVPTX Callee Saved Reg Classes const TargetRegisterClass *const * NVPTXRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const { - static const TargetRegisterClass *const CalleeSavedRegClasses[] = { 0 }; + static const TargetRegisterClass *const CalleeSavedRegClasses[] = { nullptr }; return CalleeSavedRegClasses; } diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/lib/Target/NVPTX/NVPTXTargetMachine.cpp index 222662749b4..97dbde928b2 100644 --- a/lib/Target/NVPTX/NVPTXTargetMachine.cpp +++ b/lib/Target/NVPTX/NVPTXTargetMachine.cpp @@ -175,7 +175,7 @@ bool NVPTXPassConfig::addPostRegAlloc() { } FunctionPass *NVPTXPassConfig::createTargetRegisterAllocator(bool) { - return 0; // No reg alloc + return nullptr; // No reg alloc } void NVPTXPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) { diff --git a/lib/Target/NVPTX/NVPTXUtilities.cpp b/lib/Target/NVPTX/NVPTXUtilities.cpp index 082d32ec2f0..a9fd190b7ff 100644 --- a/lib/Target/NVPTX/NVPTXUtilities.cpp +++ 
b/lib/Target/NVPTX/NVPTXUtilities.cpp @@ -393,12 +393,12 @@ llvm::skipPointerTransfer(const Value *V, bool ignore_GEP_indices) { const Value * llvm::skipPointerTransfer(const Value *V, std::set &processed) { if (processed.find(V) != processed.end()) - return NULL; + return nullptr; processed.insert(V); const Value *V2 = V->stripPointerCasts(); if (V2 != V && processed.find(V2) != processed.end()) - return NULL; + return nullptr; processed.insert(V2); V = V2; @@ -414,20 +414,20 @@ llvm::skipPointerTransfer(const Value *V, std::set &processed) { continue; } else if (const PHINode *PN = dyn_cast(V)) { if (V != V2 && processed.find(V) != processed.end()) - return NULL; + return nullptr; processed.insert(PN); - const Value *common = 0; + const Value *common = nullptr; for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) { const Value *pv = PN->getIncomingValue(i); const Value *base = skipPointerTransfer(pv, processed); if (base) { - if (common == 0) + if (!common) common = base; else if (common != base) return PN; } } - if (common == 0) + if (!common) return PN; V = common; } @@ -445,7 +445,7 @@ BasicBlock *llvm::getParentBlock(Value *v) { if (Instruction *I = dyn_cast(v)) return I->getParent(); - return 0; + return nullptr; } Function *llvm::getParentFunction(Value *v) { @@ -458,13 +458,13 @@ Function *llvm::getParentFunction(Value *v) { if (BasicBlock *B = dyn_cast(v)) return B->getParent(); - return 0; + return nullptr; } // Dump a block by name void llvm::dumpBlock(Value *v, char *blockName) { Function *F = getParentFunction(v); - if (F == 0) + if (!F) return; for (Function::iterator it = F->begin(), ie = F->end(); it != ie; ++it) { @@ -479,8 +479,8 @@ void llvm::dumpBlock(Value *v, char *blockName) { // Find an instruction by name Instruction *llvm::getInst(Value *base, char *instName) { Function *F = getParentFunction(base); - if (F == 0) - return 0; + if (!F) + return nullptr; for (inst_iterator it = inst_begin(F), ie = inst_end(F); it != ie; ++it) { 
Instruction *I = &*it; @@ -489,7 +489,7 @@ Instruction *llvm::getInst(Value *base, char *instName) { } } - return 0; + return nullptr; } // Dump an instruction by nane diff --git a/lib/Target/NVPTX/NVVMReflect.cpp b/lib/Target/NVPTX/NVVMReflect.cpp index f270eac9c28..62413fb58d0 100644 --- a/lib/Target/NVPTX/NVVMReflect.cpp +++ b/lib/Target/NVPTX/NVVMReflect.cpp @@ -51,13 +51,13 @@ private: public: static char ID; - NVVMReflect() : ModulePass(ID), ReflectFunction(0) { + NVVMReflect() : ModulePass(ID), ReflectFunction(nullptr) { initializeNVVMReflectPass(*PassRegistry::getPassRegistry()); VarMap.clear(); } NVVMReflect(const StringMap &Mapping) - : ModulePass(ID), ReflectFunction(0) { + : ModulePass(ID), ReflectFunction(nullptr) { initializeNVVMReflectPass(*PassRegistry::getPassRegistry()); for (StringMap::const_iterator I = Mapping.begin(), E = Mapping.end(); I != E; ++I) { @@ -128,7 +128,7 @@ bool NVVMReflect::runOnModule(Module &M) { // If reflect function is not used, then there will be // no entry in the module. 
- if (ReflectFunction == 0) + if (!ReflectFunction) return false; // Validate _reflect function diff --git a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp index 6127cef24d2..c7a6431e8fc 100644 --- a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp +++ b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp @@ -1022,7 +1022,7 @@ ExtractModifierFromExpr(const MCExpr *E, switch (E->getKind()) { case MCExpr::Target: case MCExpr::Constant: - return 0; + return nullptr; case MCExpr::SymbolRef: { const MCSymbolRefExpr *SRE = cast(E); @@ -1050,7 +1050,7 @@ ExtractModifierFromExpr(const MCExpr *E, Variant = PPCMCExpr::VK_PPC_HIGHESTA; break; default: - return 0; + return nullptr; } return MCSymbolRefExpr::Create(&SRE->getSymbol(), Context); @@ -1060,7 +1060,7 @@ ExtractModifierFromExpr(const MCExpr *E, const MCUnaryExpr *UE = cast(E); const MCExpr *Sub = ExtractModifierFromExpr(UE->getSubExpr(), Variant); if (!Sub) - return 0; + return nullptr; return MCUnaryExpr::Create(UE->getOpcode(), Sub, Context); } @@ -1071,7 +1071,7 @@ ExtractModifierFromExpr(const MCExpr *E, const MCExpr *RHS = ExtractModifierFromExpr(BE->getRHS(), RHSVariant); if (!LHS && !RHS) - return 0; + return nullptr; if (!LHS) LHS = BE->getLHS(); if (!RHS) RHS = BE->getRHS(); @@ -1083,7 +1083,7 @@ ExtractModifierFromExpr(const MCExpr *E, else if (LHSVariant == RHSVariant) Variant = LHSVariant; else - return 0; + return nullptr; return MCBinaryExpr::Create(BE->getOpcode(), LHS, RHS, Context); } @@ -1594,6 +1594,6 @@ PPCAsmParser::applyModifierToExpr(const MCExpr *E, case MCSymbolRefExpr::VK_PPC_HIGHESTA: return PPCMCExpr::Create(PPCMCExpr::VK_PPC_HIGHESTA, E, false, Ctx); default: - return 0; + return nullptr; } } diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp index 18609e15b29..b95a2ac13e0 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp @@ 
-28,7 +28,7 @@ PPCMCAsmInfoDarwin::PPCMCAsmInfoDarwin(bool is64Bit, const Triple& T) { ExceptionsType = ExceptionHandling::DwarfCFI; if (!is64Bit) - Data64bitsDirective = 0; // We can't emit a 64-bit unit in PPC32 mode. + Data64bitsDirective = nullptr; // We can't emit a 64-bit unit in PPC32 mode. AssemblerDialect = 1; // New-Style mnemonics. SupportsDebugInformation= true; // Debug information. @@ -71,7 +71,7 @@ PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit, const Triple& T) { ExceptionsType = ExceptionHandling::DwarfCFI; ZeroDirective = "\t.space\t"; - Data64bitsDirective = is64Bit ? "\t.quad\t" : 0; + Data64bitsDirective = is64Bit ? "\t.quad\t" : nullptr; AssemblerDialect = 1; // New-Style mnemonics. if (T.getOS() == llvm::Triple::FreeBSD || diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp index 6e43f3a35cf..bbdc7744dfe 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp @@ -80,7 +80,7 @@ static MCAsmInfo *createPPCMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) { // Initial state of the frame pointer is R1. unsigned Reg = isPPC64 ? PPC::X1 : PPC::R1; MCCFIInstruction Inst = - MCCFIInstruction::createDefCfa(0, MRI.getDwarfRegNum(Reg, true), 0); + MCCFIInstruction::createDefCfa(nullptr, MRI.getDwarfRegNum(Reg, true), 0); MAI->addInitialFrameState(Inst); return MAI; diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp index 4b8f32523ca..503619d5a58 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp @@ -324,7 +324,7 @@ void PPCMachObjectWriter::RecordPPCRelocation( // this doesn't seem right for RIT_PPC_BR24 // Get the symbol data, if any. 
- const MCSymbolData *SD = 0; + const MCSymbolData *SD = nullptr; if (Target.getSymA()) SD = &Asm.getSymbolData(Target.getSymA()->getSymbol()); diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp index c236d7af79a..c564dfd4f2f 100644 --- a/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -181,7 +181,7 @@ void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo, MachineModuleInfoImpl::StubValueTy &StubSym = MMI->getObjFileInfo() .getGVStubEntry(SymToPrint); - if (StubSym.getPointer() == 0) + if (!StubSym.getPointer()) StubSym = MachineModuleInfoImpl:: StubValueTy(getSymbol(GV), !GV->hasInternalLinkage()); } else if (GV->isDeclaration() || GV->hasCommonLinkage() || @@ -191,7 +191,7 @@ void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo, MachineModuleInfoImpl::StubValueTy &StubSym = MMI->getObjFileInfo(). getHiddenGVStubEntry(SymToPrint); - if (StubSym.getPointer() == 0) + if (!StubSym.getPointer()) StubSym = MachineModuleInfoImpl:: StubValueTy(getSymbol(GV), !GV->hasInternalLinkage()); } else { @@ -289,9 +289,9 @@ MCSymbol *PPCAsmPrinter::lookUpOrCreateTOCEntry(MCSymbol *Sym) { MCSymbol *&TOCEntry = TOC[Sym]; // To avoid name clash check if the name already exists. 
- while (TOCEntry == 0) { + while (!TOCEntry) { if (OutContext.LookupSymbol(Twine(DL->getPrivateGlobalPrefix()) + - "C" + Twine(TOCLabelID++)) == 0) { + "C" + Twine(TOCLabelID++)) == nullptr) { TOCEntry = GetTempSymbol("C", TOCLabelID); } } @@ -343,7 +343,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { // Map symbol -> label of TOC entry assert(MO.isGlobal() || MO.isCPI() || MO.isJTI()); - MCSymbol *MOSymbol = 0; + MCSymbol *MOSymbol = nullptr; if (MO.isGlobal()) MOSymbol = getSymbol(MO.getGlobal()); else if (MO.isCPI()) @@ -373,7 +373,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const MachineOperand &MO = MI->getOperand(2); assert((MO.isGlobal() || MO.isCPI() || MO.isJTI()) && "Invalid operand for ADDIStocHA!"); - MCSymbol *MOSymbol = 0; + MCSymbol *MOSymbol = nullptr; bool IsExternal = false; bool IsFunction = false; bool IsCommon = false; @@ -417,7 +417,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { const MachineOperand &MO = MI->getOperand(1); assert((MO.isGlobal() || MO.isJTI() || MO.isCPI()) && "Invalid operand for LDtocL!"); - MCSymbol *MOSymbol = 0; + MCSymbol *MOSymbol = nullptr; if (MO.isJTI()) MOSymbol = lookUpOrCreateTOCEntry(GetJTISymbol(MO.getIndex())); @@ -457,7 +457,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { TmpInst.setOpcode(PPC::ADDI8); const MachineOperand &MO = MI->getOperand(2); assert((MO.isGlobal() || MO.isCPI()) && "Invalid operand for ADDItocL"); - MCSymbol *MOSymbol = 0; + MCSymbol *MOSymbol = nullptr; bool IsExternal = false; bool IsFunction = false; diff --git a/lib/Target/PowerPC/PPCBranchSelector.cpp b/lib/Target/PowerPC/PPCBranchSelector.cpp index 8ce3bf90da4..8a851d85255 100644 --- a/lib/Target/PowerPC/PPCBranchSelector.cpp +++ b/lib/Target/PowerPC/PPCBranchSelector.cpp @@ -113,7 +113,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) { unsigned MBBStartOffset = 0; for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; 
++I) { - MachineBasicBlock *Dest = 0; + MachineBasicBlock *Dest = nullptr; if (I->getOpcode() == PPC::BCC && !I->getOperand(2).isImm()) Dest = I->getOperand(2).getMBB(); else if ((I->getOpcode() == PPC::BC || I->getOpcode() == PPC::BCn) && diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp index c46482f7b1e..2ef405fd204 100644 --- a/lib/Target/PowerPC/PPCCTRLoops.cpp +++ b/lib/Target/PowerPC/PPCCTRLoops.cpp @@ -84,7 +84,7 @@ namespace { public: static char ID; - PPCCTRLoops() : FunctionPass(ID), TM(0) { + PPCCTRLoops() : FunctionPass(ID), TM(nullptr) { initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry()); } PPCCTRLoops(PPCTargetMachine &TM) : FunctionPass(ID), TM(&TM) { @@ -172,7 +172,7 @@ bool PPCCTRLoops::runOnFunction(Function &F) { SE = &getAnalysis(); DT = &getAnalysis().getDomTree(); DataLayoutPass *DLP = getAnalysisIfAvailable(); - DL = DLP ? &DLP->getDataLayout() : 0; + DL = DLP ? &DLP->getDataLayout() : nullptr; LibInfo = getAnalysisIfAvailable(); bool MadeChange = false; @@ -424,9 +424,9 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) { SmallVector ExitingBlocks; L->getExitingBlocks(ExitingBlocks); - BasicBlock *CountedExitBlock = 0; - const SCEV *ExitCount = 0; - BranchInst *CountedExitBranch = 0; + BasicBlock *CountedExitBlock = nullptr; + const SCEV *ExitCount = nullptr; + BranchInst *CountedExitBranch = nullptr; for (SmallVectorImpl::iterator I = ExitingBlocks.begin(), IE = ExitingBlocks.end(); I != IE; ++I) { const SCEV *EC = SE->getExitCount(L, *I); diff --git a/lib/Target/PowerPC/PPCCodeEmitter.cpp b/lib/Target/PowerPC/PPCCodeEmitter.cpp index 84fc888112f..c46a09416bf 100644 --- a/lib/Target/PowerPC/PPCCodeEmitter.cpp +++ b/lib/Target/PowerPC/PPCCodeEmitter.cpp @@ -102,7 +102,7 @@ bool PPCCodeEmitter::runOnMachineFunction(MachineFunction &MF) { MMI = &getAnalysis(); MCE.setModuleInfo(MMI); do { - MovePCtoLROffset = 0; + MovePCtoLROffset = nullptr; MCE.startFunction(MF); for (MachineFunction::iterator BB = 
MF.begin(), E = MF.end(); BB != E; ++BB) emitBasicBlock(*BB); diff --git a/lib/Target/PowerPC/PPCFastISel.cpp b/lib/Target/PowerPC/PPCFastISel.cpp index d886feda110..0a0eeeaeea2 100644 --- a/lib/Target/PowerPC/PPCFastISel.cpp +++ b/lib/Target/PowerPC/PPCFastISel.cpp @@ -283,7 +283,7 @@ bool PPCFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) { // Given a value Obj, create an Address object Addr that represents its // address. Return false if we can't handle it. bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) { - const User *U = NULL; + const User *U = nullptr; unsigned Opcode = Instruction::UserOp1; if (const Instruction *I = dyn_cast(Obj)) { // Don't walk into other basic blocks unless the object is an alloca from @@ -557,7 +557,7 @@ bool PPCFastISel::SelectLoad(const Instruction *I) { // to constrain RA from using R0/X0 when this is not legal. unsigned AssignedReg = FuncInfo.ValueMap[I]; const TargetRegisterClass *RC = - AssignedReg ? MRI.getRegClass(AssignedReg) : 0; + AssignedReg ? MRI.getRegClass(AssignedReg) : nullptr; unsigned ResultReg = 0; if (!PPCEmitLoad(VT, ResultReg, Addr, RC)) @@ -1013,7 +1013,7 @@ unsigned PPCFastISel::PPCMoveToIntReg(const Instruction *I, MVT VT, // to determine the required register class. unsigned AssignedReg = FuncInfo.ValueMap[I]; const TargetRegisterClass *RC = - AssignedReg ? MRI.getRegClass(AssignedReg) : 0; + AssignedReg ? 
MRI.getRegClass(AssignedReg) : nullptr; unsigned ResultReg = 0; if (!PPCEmitLoad(VT, ResultReg, Addr, RC, !IsSigned)) @@ -2150,7 +2150,7 @@ bool PPCFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, unsigned ResultReg = MI->getOperand(0).getReg(); - if (!PPCEmitLoad(VT, ResultReg, Addr, 0, IsZExt)) + if (!PPCEmitLoad(VT, ResultReg, Addr, nullptr, IsZExt)) return false; MI->eraseFromParent(); @@ -2262,6 +2262,6 @@ namespace llvm { if (Subtarget->isPPC64() && Subtarget->isSVR4ABI()) return new PPCFastISel(FuncInfo, LibInfo); - return 0; + return nullptr; } } diff --git a/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/lib/Target/PowerPC/PPCHazardRecognizers.cpp index 189c8e68b7a..7ca706b81c3 100644 --- a/lib/Target/PowerPC/PPCHazardRecognizers.cpp +++ b/lib/Target/PowerPC/PPCHazardRecognizers.cpp @@ -227,7 +227,7 @@ void PPCDispatchGroupSBHazardRecognizer::EmitNoop() { CurGroup.clear(); CurSlots = CurBranches = 0; } else { - CurGroup.push_back(0); + CurGroup.push_back(nullptr); ++CurSlots; } } diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index 24aca56460c..b9f26bacdd3 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -482,7 +482,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops); } } - return 0; + return nullptr; } /// SelectCC - Select a comparison of the specified values with the specified @@ -884,7 +884,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { } if (PPCSubTarget.useCRBits()) - return 0; + return nullptr; bool Inv; unsigned Idx = getCRIdxForSetCC(CC, Inv); @@ -894,7 +894,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { // Force the ccreg into CR7. SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32); - SDValue InFlag(0, 0); // Null incoming flag value. + SDValue InFlag(nullptr, 0); // Null incoming flag value. 
CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg, InFlag).getValue(1); @@ -919,7 +919,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) { SDLoc dl(N); if (N->isMachineOpcode()) { N->setNodeId(-1); - return NULL; // Already selected. + return nullptr; // Already selected. } switch (N->getOpcode()) { @@ -1187,7 +1187,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) { // AND X, 0 -> 0, not "rlwinm 32". if (isInt32Immediate(N->getOperand(1), Imm) && (Imm == 0)) { ReplaceUses(SDValue(N, 0), N->getOperand(1)); - return NULL; + return nullptr; } // ISD::OR doesn't get all the bitfield insertion fun. // (and (or x, c1), c2) where isRunOfOnes(~(c1^c2)) is a bitfield insert @@ -2204,8 +2204,8 @@ FunctionPass *llvm::createPPCISelDag(PPCTargetMachine &TM) { static void initializePassOnce(PassRegistry &Registry) { const char *Name = "PowerPC DAG->DAG Pattern Instruction Selection"; - PassInfo *PI = new PassInfo(Name, "ppc-codegen", &SelectionDAGISel::ID, 0, - false, false); + PassInfo *PI = new PassInfo(Name, "ppc-codegen", &SelectionDAGISel::ID, + nullptr, false, false); Registry.registerPass(*PI, true); } diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index fce7d2fbea2..4764bf8a529 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -758,7 +758,7 @@ unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const { const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { - default: return 0; + default: return nullptr; case PPCISD::FSEL: return "PPCISD::FSEL"; case PPCISD::FCFID: return "PPCISD::FCFID"; case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ"; @@ -1019,7 +1019,7 @@ unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) { /// the constant being splatted. The ByteSize field indicates the number of /// bytes of each element [124] -> [bhw]. 
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { - SDValue OpVal(0, 0); + SDValue OpVal(nullptr, 0); // If ByteSize of the splat is bigger than the element size of the // build_vector, then we have a case where we are checking for a splat where @@ -1038,7 +1038,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { if (!isa(N->getOperand(i))) return SDValue(); - if (UniquedVals[i&(Multiple-1)].getNode() == 0) + if (!UniquedVals[i&(Multiple-1)].getNode()) UniquedVals[i&(Multiple-1)] = N->getOperand(i); else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) return SDValue(); // no match. @@ -1053,21 +1053,21 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { bool LeadingZero = true; bool LeadingOnes = true; for (unsigned i = 0; i != Multiple-1; ++i) { - if (UniquedVals[i].getNode() == 0) continue; // Must have been undefs. + if (!UniquedVals[i].getNode()) continue; // Must have been undefs. LeadingZero &= cast(UniquedVals[i])->isNullValue(); LeadingOnes &= cast(UniquedVals[i])->isAllOnesValue(); } // Finally, check the least significant entry. if (LeadingZero) { - if (UniquedVals[Multiple-1].getNode() == 0) + if (!UniquedVals[Multiple-1].getNode()) return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef int Val = cast(UniquedVals[Multiple-1])->getZExtValue(); if (Val < 16) return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4) } if (LeadingOnes) { - if (UniquedVals[Multiple-1].getNode() == 0) + if (!UniquedVals[Multiple-1].getNode()) return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef int Val =cast(UniquedVals[Multiple-1])->getSExtValue(); if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) @@ -1080,13 +1080,13 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { // Check to see if this buildvec has a single non-undef value in its elements. 
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; - if (OpVal.getNode() == 0) + if (!OpVal.getNode()) OpVal = N->getOperand(i); else if (OpVal != N->getOperand(i)) return SDValue(); } - if (OpVal.getNode() == 0) return SDValue(); // All UNDEF: use implicit def. + if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. unsigned ValSizeInBytes = EltSize; uint64_t Value = 0; @@ -1439,7 +1439,8 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, /// GetLabelAccessInfo - Return true if we should reference labels using a /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags. static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags, - unsigned &LoOpFlags, const GlobalValue *GV = 0) { + unsigned &LoOpFlags, + const GlobalValue *GV = nullptr) { HiOpFlags = PPCII::MO_HA; LoOpFlags = PPCII::MO_LO; @@ -3174,12 +3175,12 @@ PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, /// 32-bit value is representable in the immediate field of a BxA instruction. static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { ConstantSDNode *C = dyn_cast(Op); - if (!C) return 0; + if (!C) return nullptr; int Addr = C->getZExtValue(); if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. SignExtend32<26>(Addr) != Addr) - return 0; // Top 6 bits have to be sext of immediate. + return nullptr; // Top 6 bits have to be sext of immediate. return DAG.getConstant((int)C->getZExtValue() >> 2, DAG.getTargetLoweringInfo().getPointerTy()).getNode(); @@ -3514,7 +3515,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, } Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps, - 2 + (InFlag.getNode() != 0)); + InFlag.getNode() ? 
3 : 2); InFlag = Chain.getValue(1); NodeTys.clear(); @@ -3522,7 +3523,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, NodeTys.push_back(MVT::Glue); Ops.push_back(Chain); CallOpc = PPCISD::BCTRL; - Callee.setNode(0); + Callee.setNode(nullptr); // Add use of X11 (holding environment pointer) if (isSVR4ABI && isPPC64) Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); @@ -7236,8 +7237,8 @@ static bool isConsecutiveLS(LSBaseSDNode *LS, LSBaseSDNode *Base, return true; const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - const GlobalValue *GV1 = NULL; - const GlobalValue *GV2 = NULL; + const GlobalValue *GV1 = nullptr; + const GlobalValue *GV2 = nullptr; int64_t Offset1 = 0; int64_t Offset2 = 0; bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1); @@ -7887,7 +7888,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, if (N->getOperand(1).getOpcode() == ISD::FSQRT) { SDValue RV = DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0), DCI); - if (RV.getNode() != 0) { + if (RV.getNode()) { DCI.AddToWorklist(RV.getNode()); return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), N->getOperand(0), RV); @@ -7897,7 +7898,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, SDValue RV = DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), DCI); - if (RV.getNode() != 0) { + if (RV.getNode()) { DCI.AddToWorklist(RV.getNode()); RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N->getOperand(1)), N->getValueType(0), RV); @@ -7910,7 +7911,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, SDValue RV = DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), DCI); - if (RV.getNode() != 0) { + if (RV.getNode()) { DCI.AddToWorklist(RV.getNode()); RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N->getOperand(1)), N->getValueType(0), RV, @@ -7922,7 +7923,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, } SDValue RV = DAGCombineFastRecip(N->getOperand(1), DCI); - if (RV.getNode() != 0) { + if 
(RV.getNode()) { DCI.AddToWorklist(RV.getNode()); return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), N->getOperand(0), RV); @@ -7937,10 +7938,10 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, // Compute this as 1/(1/sqrt(X)), which is the reciprocal of the // reciprocal sqrt. SDValue RV = DAGCombineFastRecipFSQRT(N->getOperand(0), DCI); - if (RV.getNode() != 0) { + if (RV.getNode()) { DCI.AddToWorklist(RV.getNode()); RV = DAGCombineFastRecip(RV, DCI); - if (RV.getNode() != 0) { + if (RV.getNode()) { // Unfortunately, RV is now NaN if the input was exactly 0. Select out // this case and force the answer to 0. @@ -8254,7 +8255,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, !N->getOperand(2).hasOneUse()) { // Scan all of the users of the LHS, looking for VCMPo's that match. - SDNode *VCMPoNode = 0; + SDNode *VCMPoNode = nullptr; SDNode *LHSN = N->getOperand(0).getNode(); for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); @@ -8275,9 +8276,9 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, // Look at the (necessarily single) use of the flag value. If it has a // chain, this transformation is more complex. Note that multiple things // could use the value result, which we should ignore. - SDNode *FlagUser = 0; + SDNode *FlagUser = nullptr; for (SDNode::use_iterator UI = VCMPoNode->use_begin(); - FlagUser == 0; ++UI) { + FlagUser == nullptr; ++UI) { assert(UI != VCMPoNode->use_end() && "Didn't find user!"); SDNode *User = *UI; for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { @@ -8497,7 +8498,7 @@ PPCTargetLowering::getSingleConstraintMatchWeight( Value *CallOperandVal = info.CallOperandVal; // If we don't have a value, we can't do a match, // but allow it at the lowest weight. 
- if (CallOperandVal == NULL) + if (!CallOperandVal) return CW_Default; Type *type = CallOperandVal->getType(); @@ -8603,7 +8604,7 @@ void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector&Ops, SelectionDAG &DAG) const { - SDValue Result(0,0); + SDValue Result; // Only support length 1 constraints. if (Constraint.length() > 1) return; diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp index f49d1e926c9..ae41fa0bd65 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -232,7 +232,7 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const { // Cannot commute if it has a non-zero rotate count. if (MI->getOperand(3).getImm() != 0) - return 0; + return nullptr; // If we have a zero rotate count, we have: // M = mask(MB,ME) @@ -541,7 +541,7 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); // One-way branch. - if (FBB == 0) { + if (!FBB) { if (Cond.empty()) // Unconditional branch BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB); else if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8) @@ -1401,10 +1401,10 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr, // There are two possible candidates which can be changed to set CR[01]. // One is MI, the other is a SUB instruction. // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1). - MachineInstr *Sub = NULL; + MachineInstr *Sub = nullptr; if (SrcReg2 != 0) // MI is not a candidate for CMPrr. - MI = NULL; + MI = nullptr; // FIXME: Conservatively refuse to convert an instruction which isn't in the // same BB as the comparison. 
This is to allow the check below to avoid calls // (and other explicit clobbers); instead we should really check for these diff --git a/lib/Target/PowerPC/PPCMCInstLower.cpp b/lib/Target/PowerPC/PPCMCInstLower.cpp index 029bb8a70a5..f8e84a5731c 100644 --- a/lib/Target/PowerPC/PPCMCInstLower.cpp +++ b/lib/Target/PowerPC/PPCMCInstLower.cpp @@ -96,7 +96,7 @@ static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){ (MO.getTargetFlags() & PPCII::MO_NLP_HIDDEN_FLAG) ? MachO.getHiddenGVStubEntry(Sym) : MachO.getGVStubEntry(Sym); - if (StubSym.getPointer() == 0) { + if (!StubSym.getPointer()) { assert(MO.isGlobal() && "Extern symbol not handled yet"); StubSym = MachineModuleInfoImpl:: StubValueTy(AP.getSymbol(MO.getGlobal()), diff --git a/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/lib/Target/PowerPC/PPCTargetTransformInfo.cpp index 910dde96faa..2cc1dfc02cc 100644 --- a/lib/Target/PowerPC/PPCTargetTransformInfo.cpp +++ b/lib/Target/PowerPC/PPCTargetTransformInfo.cpp @@ -42,7 +42,7 @@ class PPCTTI final : public ImmutablePass, public TargetTransformInfo { const PPCTargetLowering *TLI; public: - PPCTTI() : ImmutablePass(ID), ST(0), TLI(0) { + PPCTTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) { llvm_unreachable("This pass cannot be directly constructed"); } diff --git a/lib/Target/R600/AMDGPUFrameLowering.cpp b/lib/Target/R600/AMDGPUFrameLowering.cpp index 0325a00c177..e7e90d3184d 100644 --- a/lib/Target/R600/AMDGPUFrameLowering.cpp +++ b/lib/Target/R600/AMDGPUFrameLowering.cpp @@ -97,7 +97,7 @@ int AMDGPUFrameLowering::getFrameIndexOffset(const MachineFunction &MF, const TargetFrameLowering::SpillSlot * AMDGPUFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const { NumEntries = 0; - return 0; + return nullptr; } void AMDGPUFrameLowering::emitPrologue(MachineFunction &MF) const { diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp index 948533bc09f..97107a58e71 100644 --- 
a/lib/Target/R600/AMDGPUISelLowering.cpp +++ b/lib/Target/R600/AMDGPUISelLowering.cpp @@ -1265,7 +1265,7 @@ SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG, const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { - default: return 0; + default: return nullptr; // AMDIL DAG nodes NODE_NAME_CASE(CALL); NODE_NAME_CASE(UMUL); diff --git a/lib/Target/R600/AMDGPUInstrInfo.cpp b/lib/Target/R600/AMDGPUInstrInfo.cpp index 064590f24e9..4f3ab5f5443 100644 --- a/lib/Target/R600/AMDGPUInstrInfo.cpp +++ b/lib/Target/R600/AMDGPUInstrInfo.cpp @@ -84,7 +84,7 @@ AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const { // TODO: Implement this function - return NULL; + return nullptr; } bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter, MachineBasicBlock &MBB) const { @@ -175,7 +175,7 @@ AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, const SmallVectorImpl &Ops, int FrameIndex) const { // TODO: Implement this function - return 0; + return nullptr; } MachineInstr* AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, @@ -183,7 +183,7 @@ AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, const SmallVectorImpl &Ops, MachineInstr *LoadMI) const { // TODO: Implement this function - return 0; + return nullptr; } bool AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI, diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/R600/AMDGPUTargetMachine.cpp index 6794d74f660..402fdbfd2eb 100644 --- a/lib/Target/R600/AMDGPUTargetMachine.cpp +++ b/lib/Target/R600/AMDGPUTargetMachine.cpp @@ -108,7 +108,7 @@ public: const AMDGPUSubtarget &ST = TM->getSubtarget(); if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) return createR600MachineScheduler(C); - return 0; + return nullptr; } virtual bool addPreISel(); diff --git a/lib/Target/R600/AMDGPUTargetTransformInfo.cpp 
b/lib/Target/R600/AMDGPUTargetTransformInfo.cpp index 1a1bdb35458..96174e95f97 100644 --- a/lib/Target/R600/AMDGPUTargetTransformInfo.cpp +++ b/lib/Target/R600/AMDGPUTargetTransformInfo.cpp @@ -46,7 +46,7 @@ class AMDGPUTTI final : public ImmutablePass, public TargetTransformInfo { unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const; public: - AMDGPUTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) { + AMDGPUTTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) { llvm_unreachable("This pass cannot be directly constructed"); } diff --git a/lib/Target/R600/AMDILCFGStructurizer.cpp b/lib/Target/R600/AMDILCFGStructurizer.cpp index b60fd7e854b..ba43d8c31f3 100644 --- a/lib/Target/R600/AMDILCFGStructurizer.cpp +++ b/lib/Target/R600/AMDILCFGStructurizer.cpp @@ -135,7 +135,7 @@ public: static char ID; AMDGPUCFGStructurizer() : - MachineFunctionPass(ID), TII(NULL), TRI(NULL) { + MachineFunctionPass(ID), TII(nullptr), TRI(nullptr) { initializeAMDGPUCFGStructurizerPass(*PassRegistry::getPassRegistry()); } @@ -334,7 +334,7 @@ protected: MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I); void recordSccnum(MachineBasicBlock *MBB, int SCCNum); void retireBlock(MachineBasicBlock *MBB); - void setLoopLandBlock(MachineLoop *LoopRep, MachineBasicBlock *MBB = NULL); + void setLoopLandBlock(MachineLoop *LoopRep, MachineBasicBlock *MBB = nullptr); MachineBasicBlock *findNearestCommonPostDom(std::set&); /// This is work around solution for findNearestCommonDominator not avaiable @@ -361,7 +361,7 @@ MachineBasicBlock *AMDGPUCFGStructurizer::getLoopLandInfo(MachineLoop *LoopRep) const { LoopLandInfoMap::const_iterator It = LLInfoMap.find(LoopRep); if (It == LLInfoMap.end()) - return NULL; + return nullptr; return (*It).second; } @@ -632,7 +632,7 @@ MachineInstr *AMDGPUCFGStructurizer::getNormalBlockBranchInstr( MachineInstr *MI = &*It; if (MI && (isCondBranch(MI) || isUncondBranch(MI))) return MI; - return NULL; + return nullptr; } MachineInstr 
*AMDGPUCFGStructurizer::getLoopendBlockBranchInstr( @@ -648,7 +648,7 @@ MachineInstr *AMDGPUCFGStructurizer::getLoopendBlockBranchInstr( break; } } - return NULL; + return nullptr; } MachineInstr *AMDGPUCFGStructurizer::getReturnInstr(MachineBasicBlock *MBB) { @@ -658,7 +658,7 @@ MachineInstr *AMDGPUCFGStructurizer::getReturnInstr(MachineBasicBlock *MBB) { if (instr->getOpcode() == AMDGPU::RETURN) return instr; } - return NULL; + return nullptr; } MachineInstr *AMDGPUCFGStructurizer::getContinueInstr(MachineBasicBlock *MBB) { @@ -668,7 +668,7 @@ MachineInstr *AMDGPUCFGStructurizer::getContinueInstr(MachineBasicBlock *MBB) { if (MI->getOpcode() == AMDGPU::CONTINUE) return MI; } - return NULL; + return nullptr; } bool AMDGPUCFGStructurizer::isReturnBlock(MachineBasicBlock *MBB) { @@ -819,7 +819,7 @@ bool AMDGPUCFGStructurizer::run() { SmallVectorImpl::const_iterator SccBeginIter = It; - MachineBasicBlock *SccBeginMBB = NULL; + MachineBasicBlock *SccBeginMBB = nullptr; int SccNumBlk = 0; // The number of active blocks, init to a // maximum possible number. int SccNumIter; // Number of iteration in this SCC. @@ -874,7 +874,7 @@ bool AMDGPUCFGStructurizer::run() { } if (ContNextScc) - SccBeginMBB = NULL; + SccBeginMBB = nullptr; } //while, "one iteration" over the function. 
MachineBasicBlock *EntryMBB = @@ -1026,7 +1026,7 @@ int AMDGPUCFGStructurizer::ifPatternMatch(MachineBasicBlock *MBB) { } else if (TrueMBB->succ_size() == 1 && *TrueMBB->succ_begin() == FalseMBB) { // Triangle pattern, false is empty LandBlk = FalseMBB; - FalseMBB = NULL; + FalseMBB = nullptr; } else if (FalseMBB->succ_size() == 1 && *FalseMBB->succ_begin() == TrueMBB) { // Triangle pattern, true is empty @@ -1034,7 +1034,7 @@ int AMDGPUCFGStructurizer::ifPatternMatch(MachineBasicBlock *MBB) { std::swap(TrueMBB, FalseMBB); reversePredicateSetter(MBB->end()); LandBlk = FalseMBB; - FalseMBB = NULL; + FalseMBB = nullptr; } else if (FalseMBB->succ_size() == 1 && isSameloopDetachedContbreak(TrueMBB, FalseMBB)) { LandBlk = *FalseMBB->succ_begin(); @@ -1242,7 +1242,7 @@ int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB, DEBUG( dbgs() << " not working\n"; ); - DownBlk = (DownBlk->succ_size() == 1) ? (*DownBlk->succ_begin()) : NULL; + DownBlk = (DownBlk->succ_size() == 1) ? (*DownBlk->succ_begin()) : nullptr; } // walk down the postDomTree return Num; @@ -1721,11 +1721,11 @@ AMDGPUCFGStructurizer::normalizeInfiniteLoopExit(MachineLoop* LoopRep) { const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32); if (!LoopHeader || !LoopLatch) - return NULL; + return nullptr; MachineInstr *BranchMI = getLoopendBlockBranchInstr(LoopLatch); // Is LoopRep an infinite loop ? 
if (!BranchMI || !isUncondBranch(BranchMI)) - return NULL; + return nullptr; MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock(); FuncRep->push_back(DummyExitBlk); //insert to function @@ -1858,7 +1858,7 @@ AMDGPUCFGStructurizer::findNearestCommonPostDom(MachineBasicBlock *MBB1, return findNearestCommonPostDom(MBB1, *MBB2->succ_begin()); if (!Node1 || !Node2) - return NULL; + return nullptr; Node1 = Node1->getIDom(); while (Node1) { @@ -1867,7 +1867,7 @@ AMDGPUCFGStructurizer::findNearestCommonPostDom(MachineBasicBlock *MBB1, Node1 = Node1->getIDom(); } - return NULL; + return nullptr; } MachineBasicBlock * diff --git a/lib/Target/R600/AMDILIntrinsicInfo.cpp b/lib/Target/R600/AMDILIntrinsicInfo.cpp index 762ee39e469..fab4a3b8961 100644 --- a/lib/Target/R600/AMDILIntrinsicInfo.cpp +++ b/lib/Target/R600/AMDILIntrinsicInfo.cpp @@ -38,7 +38,7 @@ AMDGPUIntrinsicInfo::getName(unsigned int IntrID, Type **Tys, }; if (IntrID < Intrinsic::num_intrinsics) { - return 0; + return nullptr; } assert(IntrID < AMDGPUIntrinsic::num_AMDGPU_intrinsics && "Invalid intrinsic ID"); diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp index aee9bd1d664..78bbe0a163c 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp +++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp @@ -35,7 +35,7 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(StringRef &TT) : MCAsmInfo() { Data16bitsDirective = ".short\t"; Data32bitsDirective = ".long\t"; Data64bitsDirective = ".quad\t"; - GPRel32Directive = 0; + GPRel32Directive = nullptr; SunStyleELFSectionSwitchSyntax = true; UsesELFSectionDirectiveForBSS = true; @@ -58,5 +58,5 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(StringRef &TT) : MCAsmInfo() { const MCSection* AMDGPUMCAsmInfo::getNonexecutableStackSection(MCContext &CTX) const { - return 0; + return nullptr; } diff --git a/lib/Target/R600/R600ControlFlowFinalizer.cpp b/lib/Target/R600/R600ControlFlowFinalizer.cpp index 
b1be2b9ba2f..ef9b8cea010 100644 --- a/lib/Target/R600/R600ControlFlowFinalizer.cpp +++ b/lib/Target/R600/R600ControlFlowFinalizer.cpp @@ -469,7 +469,7 @@ private: public: R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID), - TII (0), TRI(0), + TII (nullptr), TRI(nullptr), ST(tm.getSubtarget()) { const AMDGPUSubtarget &ST = tm.getSubtarget(); MaxFetchInst = ST.getTexVTXClauseSize(); @@ -502,13 +502,13 @@ public: DEBUG(dbgs() << CfCount << ":"; I->dump();); FetchClauses.push_back(MakeFetchClause(MBB, I)); CfCount++; - LastAlu.back() = 0; + LastAlu.back() = nullptr; continue; } MachineBasicBlock::iterator MI = I; if (MI->getOpcode() != AMDGPU::ENDIF) - LastAlu.back() = 0; + LastAlu.back() = nullptr; if (MI->getOpcode() == AMDGPU::CF_ALU) LastAlu.back() = MI; I++; @@ -559,7 +559,7 @@ public: break; } case AMDGPU::IF_PREDICATE_SET: { - LastAlu.push_back(0); + LastAlu.push_back(nullptr); MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP)) .addImm(0) diff --git a/lib/Target/R600/R600EmitClauseMarkers.cpp b/lib/Target/R600/R600EmitClauseMarkers.cpp index 5bd793a3d35..660ce0c2db3 100644 --- a/lib/Target/R600/R600EmitClauseMarkers.cpp +++ b/lib/Target/R600/R600EmitClauseMarkers.cpp @@ -291,7 +291,7 @@ private: public: static char ID; - R600EmitClauseMarkers() : MachineFunctionPass(ID), TII(0), Address(0) { + R600EmitClauseMarkers() : MachineFunctionPass(ID), TII(nullptr), Address(0) { initializeR600EmitClauseMarkersPass(*PassRegistry::getPassRegistry()); } diff --git a/lib/Target/R600/R600ExpandSpecialInstrs.cpp b/lib/Target/R600/R600ExpandSpecialInstrs.cpp index ca1189dac95..0a130e13656 100644 --- a/lib/Target/R600/R600ExpandSpecialInstrs.cpp +++ b/lib/Target/R600/R600ExpandSpecialInstrs.cpp @@ -38,7 +38,7 @@ private: public: R600ExpandSpecialInstrsPass(TargetMachine &tm) : MachineFunctionPass(ID), - TII(0) { } + TII(nullptr) { } virtual bool runOnMachineFunction(MachineFunction &MF); diff --git 
a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp index a48afc9eaa1..b0d9ae3e70e 100644 --- a/lib/Target/R600/R600InstrInfo.cpp +++ b/lib/Target/R600/R600InstrInfo.cpp @@ -677,7 +677,7 @@ findFirstPredicateSetterFrom(MachineBasicBlock &MBB, return MI; } - return NULL; + return nullptr; } static @@ -797,7 +797,7 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB, DebugLoc DL) const { assert(TBB && "InsertBranch must not be told to insert a fallthrough"); - if (FBB == 0) { + if (!FBB) { if (Cond.empty()) { BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB); return 1; diff --git a/lib/Target/R600/R600MachineScheduler.cpp b/lib/Target/R600/R600MachineScheduler.cpp index 46e31556477..d1655d1ddc3 100644 --- a/lib/Target/R600/R600MachineScheduler.cpp +++ b/lib/Target/R600/R600MachineScheduler.cpp @@ -56,7 +56,7 @@ unsigned getWFCountLimitedByGPR(unsigned GPRCount) { } SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) { - SUnit *SU = 0; + SUnit *SU = nullptr; NextInstKind = IDOther; IsTopNode = false; @@ -316,7 +316,7 @@ int R600SchedStrategy::getInstKind(SUnit* SU) { SUnit *R600SchedStrategy::PopInst(std::vector &Q, bool AnyALU) { if (Q.empty()) - return NULL; + return nullptr; for (std::vector::reverse_iterator It = Q.rbegin(), E = Q.rend(); It != E; ++It) { SUnit *SU = *It; @@ -331,7 +331,7 @@ SUnit *R600SchedStrategy::PopInst(std::vector &Q, bool AnyALU) { InstructionsGroupCandidate.pop_back(); } } - return NULL; + return nullptr; } void R600SchedStrategy::LoadAlu() { @@ -448,11 +448,11 @@ SUnit* R600SchedStrategy::pickAlu() { } PrepareNextSlot(); } - return NULL; + return nullptr; } SUnit* R600SchedStrategy::pickOther(int QID) { - SUnit *SU = 0; + SUnit *SU = nullptr; std::vector &AQ = Available[QID]; if (AQ.empty()) { diff --git a/lib/Target/R600/R600OptimizeVectorRegisters.cpp b/lib/Target/R600/R600OptimizeVectorRegisters.cpp index 372946c30db..86c75613922 100644 --- a/lib/Target/R600/R600OptimizeVectorRegisters.cpp +++ 
b/lib/Target/R600/R600OptimizeVectorRegisters.cpp @@ -108,7 +108,7 @@ private: public: static char ID; R600VectorRegMerger(TargetMachine &tm) : MachineFunctionPass(ID), - TII(0) { } + TII(nullptr) { } void getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesCFG(); diff --git a/lib/Target/R600/SIAnnotateControlFlow.cpp b/lib/Target/R600/SIAnnotateControlFlow.cpp index 4d6db698920..1186a629565 100644 --- a/lib/Target/R600/SIAnnotateControlFlow.cpp +++ b/lib/Target/R600/SIAnnotateControlFlow.cpp @@ -118,7 +118,7 @@ bool SIAnnotateControlFlow::doInitialization(Module &M) { Void = Type::getVoidTy(Context); Boolean = Type::getInt1Ty(Context); Int64 = Type::getInt64Ty(Context); - ReturnStruct = StructType::get(Boolean, Int64, (Type *)0); + ReturnStruct = StructType::get(Boolean, Int64, (Type *)nullptr); BoolTrue = ConstantInt::getTrue(Context); BoolFalse = ConstantInt::getFalse(Context); @@ -126,25 +126,25 @@ bool SIAnnotateControlFlow::doInitialization(Module &M) { Int64Zero = ConstantInt::get(Int64, 0); If = M.getOrInsertFunction( - IfIntrinsic, ReturnStruct, Boolean, (Type *)0); + IfIntrinsic, ReturnStruct, Boolean, (Type *)nullptr); Else = M.getOrInsertFunction( - ElseIntrinsic, ReturnStruct, Int64, (Type *)0); + ElseIntrinsic, ReturnStruct, Int64, (Type *)nullptr); Break = M.getOrInsertFunction( - BreakIntrinsic, Int64, Int64, (Type *)0); + BreakIntrinsic, Int64, Int64, (Type *)nullptr); IfBreak = M.getOrInsertFunction( - IfBreakIntrinsic, Int64, Boolean, Int64, (Type *)0); + IfBreakIntrinsic, Int64, Boolean, Int64, (Type *)nullptr); ElseBreak = M.getOrInsertFunction( - ElseBreakIntrinsic, Int64, Int64, Int64, (Type *)0); + ElseBreakIntrinsic, Int64, Int64, Int64, (Type *)nullptr); Loop = M.getOrInsertFunction( - LoopIntrinsic, Boolean, Int64, (Type *)0); + LoopIntrinsic, Boolean, Int64, (Type *)nullptr); EndCf = M.getOrInsertFunction( - EndCfIntrinsic, Void, Int64, (Type *)0); + EndCfIntrinsic, Void, Int64, (Type *)nullptr); return false; } diff --git 
a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp index 12b9ae7637e..9844151cab5 100644 --- a/lib/Target/R600/SIISelLowering.cpp +++ b/lib/Target/R600/SIISelLowering.cpp @@ -700,7 +700,7 @@ static SDNode *findUser(SDValue Value, unsigned Opcode) { if (I->getOpcode() == Opcode) return *I; } - return 0; + return nullptr; } /// This transforms the control flow intrinsics to get the branch destination as @@ -712,7 +712,7 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, SDNode *Intr = BRCOND.getOperand(1).getNode(); SDValue Target = BRCOND.getOperand(2); - SDNode *BR = 0; + SDNode *BR = nullptr; if (Intr->getOpcode() == ISD::SETCC) { // As long as we negate the condition everything is fine @@ -1022,7 +1022,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N, SDValue Arg0 = N->getOperand(0); SDValue Arg1 = N->getOperand(1); SDValue CC = N->getOperand(2); - ConstantSDNode * C = NULL; + ConstantSDNode * C = nullptr; ISD::CondCode CCOp = dyn_cast(CC)->get(); // i1 setcc (sext(i1), 0, setne) -> i1 setcc(i1, 0, setne) @@ -1093,7 +1093,7 @@ bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate, MachineSDNode *Mov = dyn_cast(Operand); const SIInstrInfo *TII = static_cast(getTargetMachine().getInstrInfo()); - if (Mov == 0 || !TII->isMov(Mov->getMachineOpcode())) + if (!Mov || !TII->isMov(Mov->getMachineOpcode())) return false; const SDValue &Op = Mov->getOperand(0); @@ -1140,7 +1140,7 @@ const TargetRegisterClass *SITargetLowering::getRegClassForNode( } return TRI.getPhysRegClass(Reg); } - default: return NULL; + default: return nullptr; } } const MCInstrDesc &Desc = TII->get(Op->getMachineOpcode()); @@ -1244,14 +1244,14 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node, // Commuted opcode if available int OpcodeRev = Desc->isCommutable() ? TII->commuteOpcode(Opcode) : -1; - const MCInstrDesc *DescRev = OpcodeRev == -1 ? 0 : &TII->get(OpcodeRev); + const MCInstrDesc *DescRev = OpcodeRev == -1 ? 
nullptr : &TII->get(OpcodeRev); assert(!DescRev || DescRev->getNumDefs() == NumDefs); assert(!DescRev || DescRev->getNumOperands() == NumOps); // e64 version if available, -1 otherwise int OpcodeE64 = AMDGPU::getVOPe64(Opcode); - const MCInstrDesc *DescE64 = OpcodeE64 == -1 ? 0 : &TII->get(OpcodeE64); + const MCInstrDesc *DescE64 = OpcodeE64 == -1 ? nullptr : &TII->get(OpcodeE64); assert(!DescE64 || DescE64->getNumDefs() == NumDefs); assert(!DescE64 || DescE64->getNumOperands() == (NumOps + 4)); @@ -1324,7 +1324,7 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node, std::swap(Ops[0], Ops[1]); Desc = DescRev; - DescRev = 0; + DescRev = nullptr; continue; } } @@ -1345,7 +1345,7 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node, Immediate = -1; Promote2e64 = true; Desc = DescE64; - DescE64 = 0; + DescE64 = nullptr; } } } diff --git a/lib/Target/R600/SIInsertWaits.cpp b/lib/Target/R600/SIInsertWaits.cpp index 695ec407fdb..f48a55431a5 100644 --- a/lib/Target/R600/SIInsertWaits.cpp +++ b/lib/Target/R600/SIInsertWaits.cpp @@ -97,8 +97,8 @@ private: public: SIInsertWaits(TargetMachine &tm) : MachineFunctionPass(ID), - TII(0), - TRI(0), + TII(nullptr), + TRI(nullptr), ExpInstrTypesSeen(0) { } virtual bool runOnMachineFunction(MachineFunction &MF); diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp index 96eeea56279..4c57d16c7f3 100644 --- a/lib/Target/R600/SIInstrInfo.cpp +++ b/lib/Target/R600/SIInstrInfo.cpp @@ -247,18 +247,18 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI, MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo(); if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg()) - return 0; + return nullptr; // Cannot commute VOP2 if src0 is SGPR. 
if (isVOP2(MI->getOpcode()) && MI->getOperand(1).isReg() && RI.isSGPRClass(MRI.getRegClass(MI->getOperand(1).getReg()))) - return 0; + return nullptr; if (!MI->getOperand(2).isReg()) { // XXX: Commute instructions with FPImm operands if (NewMI || MI->getOperand(2).isFPImm() || (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) { - return 0; + return nullptr; } // XXX: Commute VOP3 instructions with abs and neg set. @@ -267,7 +267,7 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI, AMDGPU::OpName::abs)).getImm() || MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::neg)).getImm())) - return 0; + return nullptr; unsigned Reg = MI->getOperand(1).getReg(); unsigned SubReg = MI->getOperand(1).getSubReg(); @@ -755,7 +755,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const { // class of the output. if (MI->getOpcode() == AMDGPU::REG_SEQUENCE || MI->getOpcode() == AMDGPU::PHI) { - const TargetRegisterClass *RC = NULL, *SRC = NULL, *VRC = NULL; + const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr; for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) { if (!MI->getOperand(i).isReg() || !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg())) diff --git a/lib/Target/R600/SILowerControlFlow.cpp b/lib/Target/R600/SILowerControlFlow.cpp index c2f86964731..e3cbf02f29d 100644 --- a/lib/Target/R600/SILowerControlFlow.cpp +++ b/lib/Target/R600/SILowerControlFlow.cpp @@ -92,7 +92,7 @@ private: public: SILowerControlFlowPass(TargetMachine &tm) : - MachineFunctionPass(ID), TRI(0), TII(0) { } + MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { } virtual bool runOnMachineFunction(MachineFunction &MF); diff --git a/lib/Target/R600/SIRegisterInfo.cpp b/lib/Target/R600/SIRegisterInfo.cpp index 6cef1954935..5897fbca94c 100644 --- a/lib/Target/R600/SIRegisterInfo.cpp +++ b/lib/Target/R600/SIRegisterInfo.cpp @@ -77,7 +77,7 @@ const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned 
Reg) const { return BaseClasses[i]; } } - return NULL; + return nullptr; } bool SIRegisterInfo::isSGPRClass(const TargetRegisterClass *RC) const { @@ -113,7 +113,7 @@ const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass( } else if (getCommonSubClass(SRC, &AMDGPU::SReg_512RegClass)) { return &AMDGPU::VReg_512RegClass; } - return NULL; + return nullptr; } const TargetRegisterClass *SIRegisterInfo::getSubRegClass( diff --git a/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp index c29a5bf6fa8..bf3f7e9df25 100644 --- a/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp +++ b/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp @@ -265,7 +265,7 @@ public: void addExpr(MCInst &Inst, const MCExpr *Expr) const{ // Add as immediate when possible. Null MCExpr = 0. - if (Expr == 0) + if (!Expr) Inst.addOperand(MCOperand::CreateImm(0)); else if (const MCConstantExpr *CE = dyn_cast(Expr)) Inst.addOperand(MCOperand::CreateImm(CE->getValue())); @@ -324,7 +324,7 @@ public: assert(Op->Reg.Kind == rk_FloatReg); unsigned regIdx = Reg - Sparc::F0; if (regIdx % 2 || regIdx > 31) - return 0; + return nullptr; Op->Reg.RegNum = DoubleRegs[regIdx / 2]; Op->Reg.Kind = rk_DoubleReg; return Op; @@ -338,13 +338,13 @@ public: case rk_FloatReg: regIdx = Reg - Sparc::F0; if (regIdx % 4 || regIdx > 31) - return 0; + return nullptr; Reg = QuadFPRegs[regIdx / 4]; break; case rk_DoubleReg: regIdx = Reg - Sparc::D0; if (regIdx % 2 || regIdx > 31) - return 0; + return nullptr; Reg = QuadFPRegs[regIdx / 2]; break; } @@ -358,7 +358,7 @@ public: Op->Kind = k_MemoryReg; Op->Mem.Base = Base; Op->Mem.OffsetReg = offsetReg; - Op->Mem.Off = 0; + Op->Mem.Off = nullptr; return Op; } @@ -565,7 +565,7 @@ parseMEMOperand(SmallVectorImpl &Operands) case AsmToken::Comma: case AsmToken::RBrac: case AsmToken::EndOfStatement: - Operands.push_back(SparcOperand::CreateMEMri(BaseReg, 0, S, E)); + Operands.push_back(SparcOperand::CreateMEMri(BaseReg, nullptr, S, E)); return 
MatchOperand_Success; case AsmToken:: Plus: @@ -575,7 +575,7 @@ parseMEMOperand(SmallVectorImpl &Operands) break; } - SparcOperand *Offset = 0; + SparcOperand *Offset = nullptr; OperandMatchResultTy ResTy = parseSparcAsmOperand(Offset); if (ResTy != MatchOperand_Success || !Offset) return MatchOperand_NoMatch; @@ -637,7 +637,7 @@ parseOperand(SmallVectorImpl &Operands, return MatchOperand_Success; } - SparcOperand *Op = 0; + SparcOperand *Op = nullptr; ResTy = parseSparcAsmOperand(Op, (Mnemonic == "call")); if (ResTy != MatchOperand_Success || !Op) @@ -657,7 +657,7 @@ SparcAsmParser::parseSparcAsmOperand(SparcOperand *&Op, bool isCall) SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); const MCExpr *EVal; - Op = 0; + Op = nullptr; switch (getLexer().getKind()) { default: break; diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp index ef5f8ce1558..6875fc65354 100644 --- a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp +++ b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp @@ -32,7 +32,7 @@ SparcELFMCAsmInfo::SparcELFMCAsmInfo(StringRef TT) { Data16bitsDirective = "\t.half\t"; Data32bitsDirective = "\t.word\t"; // .xword is only supported by V9. - Data64bitsDirective = (isV9) ? "\t.xword\t" : 0; + Data64bitsDirective = (isV9) ? 
"\t.xword\t" : nullptr; ZeroDirective = "\t.skip\t"; CommentString = "!"; HasLEB128 = true; diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp index 393bee0eb9f..8a35c85cc53 100644 --- a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp +++ b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp @@ -37,7 +37,7 @@ static MCAsmInfo *createSparcMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) { MCAsmInfo *MAI = new SparcELFMCAsmInfo(TT); unsigned Reg = MRI.getDwarfRegNum(SP::O6, true); - MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 0); + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0); MAI->addInitialFrameState(Inst); return MAI; } @@ -46,7 +46,7 @@ static MCAsmInfo *createSparcV9MCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) { MCAsmInfo *MAI = new SparcELFMCAsmInfo(TT); unsigned Reg = MRI.getDwarfRegNum(SP::O6, true); - MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 2047); + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 2047); MAI->addInitialFrameState(Inst); return MAI; } diff --git a/lib/Target/Sparc/SparcAsmPrinter.cpp b/lib/Target/Sparc/SparcAsmPrinter.cpp index be992a5e91d..e185a7a5072 100644 --- a/lib/Target/Sparc/SparcAsmPrinter.cpp +++ b/lib/Target/Sparc/SparcAsmPrinter.cpp @@ -52,7 +52,7 @@ namespace { void printOperand(const MachineInstr *MI, int opNum, raw_ostream &OS); void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &OS, - const char *Modifier = 0); + const char *Modifier = nullptr); void printCCOperand(const MachineInstr *MI, int opNum, raw_ostream &OS); virtual void EmitFunctionBodyStart(); diff --git a/lib/Target/Sparc/SparcCodeEmitter.cpp b/lib/Target/Sparc/SparcCodeEmitter.cpp index 54efcf29501..4011ca5d5ee 100644 --- a/lib/Target/Sparc/SparcCodeEmitter.cpp +++ b/lib/Target/Sparc/SparcCodeEmitter.cpp @@ -49,8 +49,8 @@ class SparcCodeEmitter : public MachineFunctionPass { public: 
SparcCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) - : MachineFunctionPass(ID), JTI(0), II(0), TD(0), - TM(tm), MCE(mce), MCPEs(0), + : MachineFunctionPass(ID), JTI(nullptr), II(nullptr), TD(nullptr), + TM(tm), MCE(mce), MCPEs(nullptr), IsPIC(TM.getRelocationModel() == Reloc::PIC_) {} bool runOnMachineFunction(MachineFunction &MF); diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp index b012bfdb010..1946c0f9985 100644 --- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp +++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp @@ -143,7 +143,7 @@ SDNode *SparcDAGToDAGISel::Select(SDNode *N) { SDLoc dl(N); if (N->isMachineOpcode()) { N->setNodeId(-1); - return NULL; // Already selected. + return nullptr; // Already selected. } switch (N->getOpcode()) { diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index d44835a7b63..1b86e378c9e 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -663,7 +663,7 @@ static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee, if (CS) return CS->hasFnAttr(Attribute::ReturnsTwice); - const Function *CalleeFn = 0; + const Function *CalleeFn = nullptr; if (GlobalAddressSDNode *G = dyn_cast(Callee)) { CalleeFn = dyn_cast(G->getGlobal()); } else if (ExternalSymbolSDNode *E = @@ -961,9 +961,9 @@ static bool isFP128ABICall(const char *CalleeName) "_Q_sqrt", "_Q_neg", "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq", "_Q_lltoq", "_Q_ulltoq", - 0 + nullptr }; - for (const char * const *I = ABICalls; *I != 0; ++I) + for (const char * const *I = ABICalls; *I != nullptr; ++I) if (strcmp(CalleeName, *I) == 0) return true; return false; @@ -972,7 +972,7 @@ static bool isFP128ABICall(const char *CalleeName) unsigned SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const { - const Function *CalleeFn = 0; + const Function *CalleeFn = nullptr; if (GlobalAddressSDNode *G = dyn_cast(Callee)) { CalleeFn = 
dyn_cast(G->getGlobal()); } else if (ExternalSymbolSDNode *E = @@ -1263,7 +1263,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI, // Set inreg flag manually for codegen generated library calls that // return float. - if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == 0) + if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == nullptr) CLI.Ins[0].Flags.setInReg(); RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64); @@ -1677,7 +1677,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM) const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { - default: return 0; + default: return nullptr; case SPISD::CMPICC: return "SPISD::CMPICC"; case SPISD::CMPFCC: return "SPISD::CMPFCC"; case SPISD::BRICC: return "SPISD::BRICC"; @@ -2065,7 +2065,7 @@ SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS, SDLoc DL, SelectionDAG &DAG) const { - const char *LibCall = 0; + const char *LibCall = nullptr; bool is64Bit = Subtarget->is64Bit(); switch(SPCC) { default: llvm_unreachable("Unhandled conditional code!"); @@ -2174,7 +2174,7 @@ LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1); llvm_unreachable("fpextend with non-float operand!"); - return SDValue(0, 0); + return SDValue(); } static SDValue @@ -2192,7 +2192,7 @@ LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1); llvm_unreachable("fpround to non-float!"); - return SDValue(0, 0); + return SDValue(); } static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, @@ -2213,7 +2213,7 @@ static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, // Expand if the resulting type is illegal. if (!TLI.isTypeLegal(VT)) - return SDValue(0, 0); + return SDValue(); // Otherwise, Convert the fp value to integer in an FP register. 
if (VT == MVT::i32) @@ -2244,7 +2244,7 @@ static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, // Expand if the operand type is illegal. if (!TLI.isTypeLegal(OpVT)) - return SDValue(0, 0); + return SDValue(); // Otherwise, Convert the int value to FP in an FP register. SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0)); @@ -2262,7 +2262,7 @@ static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG, // quad floating point instructions and the resulting type is legal. if (Op.getOperand(0).getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(VT))) - return SDValue(0, 0); + return SDValue(); assert(VT == MVT::i32 || VT == MVT::i64); @@ -2283,7 +2283,7 @@ static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, // Expand if it does not involve f128 or the target has support for // quad floating point instructions and the operand type is legal. if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT))) - return SDValue(0, 0); + return SDValue(); return TLI.LowerF128Op(Op, DAG, TLI.getLibcallName(OpVT == MVT::i32 @@ -3092,7 +3092,7 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info, Value *CallOperandVal = info.CallOperandVal; // If we don't have a value, we can't do a match, // but allow it at the lowest weight. - if (CallOperandVal == NULL) + if (!CallOperandVal) return CW_Default; // Look at the constraint type. @@ -3117,7 +3117,7 @@ LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector &Ops, SelectionDAG &DAG) const { - SDValue Result(0, 0); + SDValue Result(nullptr, 0); // Only support length 1 constraints for now. 
if (Constraint.length() > 1) diff --git a/lib/Target/Sparc/SparcInstrInfo.cpp b/lib/Target/Sparc/SparcInstrInfo.cpp index 514d862e5aa..8b2e6bc5f32 100644 --- a/lib/Target/Sparc/SparcInstrInfo.cpp +++ b/lib/Target/Sparc/SparcInstrInfo.cpp @@ -161,10 +161,10 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, std::next(I)->eraseFromParent(); Cond.clear(); - FBB = 0; + FBB = nullptr; if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { - TBB = 0; + TBB = nullptr; I->eraseFromParent(); I = MBB.end(); UnCondBrIter = MBB.end(); @@ -284,7 +284,7 @@ void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB, bool KillSrc) const { unsigned numSubRegs = 0; unsigned movOpc = 0; - const unsigned *subRegIdx = 0; + const unsigned *subRegIdx = nullptr; const unsigned DFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd }; const unsigned QFP_DFP_SubRegsIdx[] = { SP::sub_even64, SP::sub_odd64 }; @@ -328,11 +328,11 @@ void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB, } else llvm_unreachable("Impossible reg-to-reg copy"); - if (numSubRegs == 0 || subRegIdx == 0 || movOpc == 0) + if (numSubRegs == 0 || subRegIdx == nullptr || movOpc == 0) return; const TargetRegisterInfo *TRI = &getRegisterInfo(); - MachineInstr *MovMI = 0; + MachineInstr *MovMI = nullptr; for (unsigned i = 0; i != numSubRegs; ++i) { unsigned Dst = TRI->getSubReg(DestReg, subRegIdx[i]); diff --git a/lib/Target/Sparc/SparcMCInstLower.cpp b/lib/Target/Sparc/SparcMCInstLower.cpp index 737e37875a1..9e94d2c3140 100644 --- a/lib/Target/Sparc/SparcMCInstLower.cpp +++ b/lib/Target/Sparc/SparcMCInstLower.cpp @@ -34,7 +34,7 @@ static MCOperand LowerSymbolOperand(const MachineInstr *MI, SparcMCExpr::VariantKind Kind = (SparcMCExpr::VariantKind)MO.getTargetFlags(); - const MCSymbol *Symbol = 0; + const MCSymbol *Symbol = nullptr; switch(MO.getType()) { default: llvm_unreachable("Unknown type in LowerSymbolOperand"); diff --git a/lib/Target/Sparc/SparcTargetObjectFile.cpp b/lib/Target/Sparc/SparcTargetObjectFile.cpp 
index f1630e0c319..32b2240f87e 100644 --- a/lib/Target/Sparc/SparcTargetObjectFile.cpp +++ b/lib/Target/Sparc/SparcTargetObjectFile.cpp @@ -28,7 +28,7 @@ const MCExpr *SparcELFTargetObjectFile::getTTypeGlobalReference( // Add information about the stub reference to ELFMMI so that the stub // gets emitted by the asmprinter. MachineModuleInfoImpl::StubValueTy &StubSym = ELFMMI.getGVStubEntry(SSym); - if (StubSym.getPointer() == 0) { + if (!StubSym.getPointer()) { MCSymbol *Sym = TM.getSymbol(GV, Mang); StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage()); } diff --git a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp index da98fa7dd9f..71de64fff87 100644 --- a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp +++ b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp @@ -110,7 +110,7 @@ private: void addExpr(MCInst &Inst, const MCExpr *Expr) const { // Add as immediates when possible. Null MCExpr = 0. - if (Expr == 0) + if (!Expr) Inst.addOperand(MCOperand::CreateImm(0)); else if (auto *CE = dyn_cast(Expr)) Inst.addOperand(MCOperand::CreateImm(CE->getValue())); @@ -208,7 +208,7 @@ public: return (Kind == KindMem && Mem.RegKind == RegKind && (MemKind == BDXMem || !Mem.Index) && - (MemKind == BDLMem) == (Mem.Length != 0)); + (MemKind == BDLMem) == (Mem.Length != nullptr)); } bool isMemDisp12(RegisterKind RegKind, MemoryKind MemKind) const { return isMem(RegKind, MemKind) && inRange(Mem.Disp, 0, 0xfff); @@ -527,7 +527,7 @@ bool SystemZAsmParser::parseAddress(unsigned &Base, const MCExpr *&Disp, // Parse the optional base and index. 
Index = 0; Base = 0; - Length = 0; + Length = nullptr; if (getLexer().is(AsmToken::LParen)) { Parser.Lex(); @@ -759,7 +759,7 @@ parseAccessReg(SmallVectorImpl &Operands) { return MatchOperand_NoMatch; Register Reg; - if (parseRegister(Reg, RegAccess, 0)) + if (parseRegister(Reg, RegAccess, nullptr)) return MatchOperand_ParseFail; Operands.push_back(SystemZOperand::createAccessReg(Reg.Num, diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp index 9858dac3345..cc94869eb5f 100644 --- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp +++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp @@ -98,7 +98,8 @@ static MCAsmInfo *createSystemZMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) { MCAsmInfo *MAI = new SystemZMCAsmInfo(TT); MCCFIInstruction Inst = - MCCFIInstruction::createDefCfa(0, MRI.getDwarfRegNum(SystemZ::R15D, true), + MCCFIInstruction::createDefCfa(nullptr, + MRI.getDwarfRegNum(SystemZ::R15D, true), SystemZMC::CFAOffsetFromInitialSP); MAI->addInitialFrameState(Inst); return MAI; diff --git a/lib/Target/SystemZ/SystemZElimCompare.cpp b/lib/Target/SystemZ/SystemZElimCompare.cpp index 8f3eef1c505..cb1789733c4 100644 --- a/lib/Target/SystemZ/SystemZElimCompare.cpp +++ b/lib/Target/SystemZ/SystemZElimCompare.cpp @@ -64,7 +64,7 @@ class SystemZElimCompare : public MachineFunctionPass { public: static char ID; SystemZElimCompare(const SystemZTargetMachine &tm) - : MachineFunctionPass(ID), TII(0), TRI(0) {} + : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr) {} const char *getPassName() const override { return "SystemZ Comparison Elimination"; diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp index 81f6338721a..c2600064744 100644 --- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp +++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp @@ -74,14 +74,14 @@ struct SystemZAddressingMode { errs() << "SystemZAddressingMode " << this << 
'\n'; errs() << " Base "; - if (Base.getNode() != 0) + if (Base.getNode()) Base.getNode()->dump(); else errs() << "null\n"; if (hasIndexField()) { errs() << " Index "; - if (Index.getNode() != 0) + if (Index.getNode()) Index.getNode()->dump(); else errs() << "null\n"; @@ -869,12 +869,12 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) { if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND) Count += 1; if (Count == 0) - return 0; + return nullptr; if (Count == 1) { // Prefer to use normal shift instructions over RISBG, since they can handle // all cases and are sometimes shorter. if (N->getOpcode() != ISD::AND) - return 0; + return nullptr; // Prefer register extensions like LLC over RISBG. Also prefer to start // out with normal ANDs if one instruction would be enough. We can convert @@ -891,7 +891,7 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) { N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), NewMask); return SelectCode(N); } - return 0; + return nullptr; } } @@ -929,7 +929,7 @@ SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) { // Do nothing if neither operand is suitable. if (Count[0] == 0 && Count[1] == 0) - return 0; + return nullptr; // Pick the deepest second operand. unsigned I = Count[0] > Count[1] ? 0 : 1; @@ -939,7 +939,7 @@ SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) { if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0) if (auto *Load = dyn_cast(Op0.getNode())) if (Load->getMemoryVT() == MVT::i8) - return 0; + return nullptr; // See whether we can avoid an AND in the first operand by converting // ROSBG to RISBG. 
@@ -1039,11 +1039,11 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) { if (Node->isMachineOpcode()) { DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n"); Node->setNodeId(-1); - return 0; + return nullptr; } unsigned Opcode = Node->getOpcode(); - SDNode *ResNode = 0; + SDNode *ResNode = nullptr; switch (Opcode) { case ISD::OR: if (Node->getOperand(1).getOpcode() != ISD::Constant) @@ -1116,7 +1116,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) { ResNode = SelectCode(Node); DEBUG(errs() << "=> "; - if (ResNode == NULL || ResNode == Node) + if (ResNode == nullptr || ResNode == Node) Node->dump(CurDAG); else ResNode->dump(CurDAG); diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index 685a4f07053..96b713fdf7c 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -424,7 +424,7 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info, Value *CallOperandVal = info.CallOperandVal; // If we don't have a value, we can't do a match, // but allow it at the lowest weight. - if (CallOperandVal == NULL) + if (!CallOperandVal) return CW_Default; Type *type = CallOperandVal->getType(); // Look at the constraint type. @@ -492,7 +492,7 @@ parseRegisterNumber(const std::string &Constraint, if (Index < 16 && Map[Index]) return std::make_pair(Map[Index], RC); } - return std::make_pair(0u, static_cast(0)); + return std::make_pair(0U, nullptr); } std::pair SystemZTargetLowering:: @@ -1489,7 +1489,7 @@ static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) { // Check whether the nonconstant input is an AND with a constant mask. 
Comparison NewC(C); uint64_t MaskVal; - ConstantSDNode *Mask = 0; + ConstantSDNode *Mask = nullptr; if (C.Op0.getOpcode() == ISD::AND) { NewC.Op0 = C.Op0.getOperand(0); NewC.Op1 = C.Op0.getOperand(1); @@ -2517,7 +2517,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { OPCODE(ATOMIC_CMP_SWAPW); OPCODE(PREFETCH); } - return NULL; + return nullptr; #undef OPCODE } @@ -3116,7 +3116,7 @@ SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI, // When generating more than one CLC, all but the last will need to // branch to the end when a difference is found. MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ? - splitBlockAfter(MI, MBB) : 0); + splitBlockAfter(MI, MBB) : nullptr); // Check for the loop form, in which operand 5 is the trip count. if (MI->getNumExplicitOperands() > 5) { diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp index 729c1436fb9..6a18b2dea9e 100644 --- a/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -284,11 +284,11 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, std::next(I)->eraseFromParent(); Cond.clear(); - FBB = 0; + FBB = nullptr; // Delete the JMP if it's equivalent to a fall-through. 
if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) { - TBB = 0; + TBB = nullptr; I->eraseFromParent(); I = MBB.end(); continue; @@ -418,7 +418,7 @@ bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI, static MachineInstr *getDef(unsigned Reg, const MachineRegisterInfo *MRI) { if (TargetRegisterInfo::isPhysicalRegister(Reg)) - return 0; + return nullptr; return MRI->getUniqueVRegDef(Reg); } @@ -442,7 +442,7 @@ static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) { static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg, const MachineRegisterInfo *MRI, const TargetRegisterInfo *TRI) { - MachineInstr *LGFR = 0; + MachineInstr *LGFR = nullptr; MachineInstr *RLL = getDef(SrcReg, MRI); if (RLL && RLL->getOpcode() == SystemZ::LGFR) { LGFR = RLL; @@ -740,7 +740,7 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, return finishConvertToThreeAddress(MI, MIB, LV); } } - return 0; + return nullptr; } MachineInstr * @@ -761,12 +761,12 @@ SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, .addFrameIndex(FrameIndex).addImm(0) .addImm(MI->getOperand(2).getImm()); } - return 0; + return nullptr; } // All other cases require a single operand. if (Ops.size() != 1) - return 0; + return nullptr; unsigned OpNum = Ops[0]; assert(Size == MF.getRegInfo() @@ -858,14 +858,14 @@ SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, } } - return 0; + return nullptr; } MachineInstr * SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI, const SmallVectorImpl &Ops, MachineInstr* LoadMI) const { - return 0; + return nullptr; } bool diff --git a/lib/Target/SystemZ/SystemZLongBranch.cpp b/lib/Target/SystemZ/SystemZLongBranch.cpp index 6534709c68e..760f115b1d8 100644 --- a/lib/Target/SystemZ/SystemZLongBranch.cpp +++ b/lib/Target/SystemZ/SystemZLongBranch.cpp @@ -111,7 +111,8 @@ struct TerminatorInfo { // otherwise it is zero. 
unsigned ExtraRelaxSize; - TerminatorInfo() : Branch(0), Size(0), TargetBlock(0), ExtraRelaxSize(0) {} + TerminatorInfo() : Branch(nullptr), Size(0), TargetBlock(0), + ExtraRelaxSize(0) {} }; // Used to keep track of the current position while iterating over the blocks. @@ -131,7 +132,7 @@ class SystemZLongBranch : public MachineFunctionPass { public: static char ID; SystemZLongBranch(const SystemZTargetMachine &tm) - : MachineFunctionPass(ID), TII(0) {} + : MachineFunctionPass(ID), TII(nullptr) {} const char *getPassName() const override { return "SystemZ Long Branch"; @@ -424,7 +425,7 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) { Terminator.Size += Terminator.ExtraRelaxSize; Terminator.ExtraRelaxSize = 0; - Terminator.Branch = 0; + Terminator.Branch = nullptr; ++LongBranches; } diff --git a/lib/Target/SystemZ/SystemZShortenInst.cpp b/lib/Target/SystemZ/SystemZShortenInst.cpp index f2e253e8a9f..05f3c2e0250 100644 --- a/lib/Target/SystemZ/SystemZShortenInst.cpp +++ b/lib/Target/SystemZ/SystemZShortenInst.cpp @@ -53,7 +53,7 @@ FunctionPass *llvm::createSystemZShortenInstPass(SystemZTargetMachine &TM) { } SystemZShortenInst::SystemZShortenInst(const SystemZTargetMachine &tm) - : MachineFunctionPass(ID), TII(0), LowGPRs(), HighGPRs() { + : MachineFunctionPass(ID), TII(nullptr), LowGPRs(), HighGPRs() { // Set up LowGPRs and HighGPRs. for (unsigned I = 0; I < 16; ++I) { LowGPRs[SystemZMC::GR32Regs[I]] |= 1 << I; diff --git a/lib/Target/TargetLoweringObjectFile.cpp b/lib/Target/TargetLoweringObjectFile.cpp index cf7abbc01aa..2f37ede4402 100644 --- a/lib/Target/TargetLoweringObjectFile.cpp +++ b/lib/Target/TargetLoweringObjectFile.cpp @@ -138,7 +138,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV, // Early exit - functions should be always in text sections. const GlobalVariable *GVar = dyn_cast(GV); - if (GVar == 0) + if (!GVar) return SectionKind::getText(); // Handle thread-local data first. 
@@ -284,10 +284,10 @@ TargetLoweringObjectFile::SelectSectionForGlobal(const GlobalValue *GV, if (Kind.isText()) return getTextSection(); - if (Kind.isBSS() && BSSSection != 0) + if (Kind.isBSS() && BSSSection != nullptr) return BSSSection; - if (Kind.isReadOnly() && ReadOnlySection != 0) + if (Kind.isReadOnly() && ReadOnlySection != nullptr) return ReadOnlySection; return getDataSection(); @@ -298,7 +298,7 @@ TargetLoweringObjectFile::SelectSectionForGlobal(const GlobalValue *GV, /// should be placed in. const MCSection * TargetLoweringObjectFile::getSectionForConstant(SectionKind Kind) const { - if (Kind.isReadOnly() && ReadOnlySection != 0) + if (Kind.isReadOnly() && ReadOnlySection != nullptr) return ReadOnlySection; return DataSection; diff --git a/lib/Target/TargetMachine.cpp b/lib/Target/TargetMachine.cpp index 7c40bcd559b..325381be88f 100644 --- a/lib/Target/TargetMachine.cpp +++ b/lib/Target/TargetMachine.cpp @@ -54,7 +54,7 @@ TargetMachine::TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, const TargetOptions &Options) : TheTarget(T), TargetTriple(TT), TargetCPU(CPU), TargetFS(FS), - CodeGenInfo(0), AsmInfo(0), + CodeGenInfo(nullptr), AsmInfo(nullptr), MCRelaxAll(false), MCNoExecStack(false), MCSaveTempLabels(false), diff --git a/lib/Target/TargetMachineC.cpp b/lib/Target/TargetMachineC.cpp index a2829d4c027..fa3e67a880a 100644 --- a/lib/Target/TargetMachineC.cpp +++ b/lib/Target/TargetMachineC.cpp @@ -62,7 +62,7 @@ inline LLVMTargetRef wrap(const Target * P) { LLVMTargetRef LLVMGetFirstTarget() { if(TargetRegistry::begin() == TargetRegistry::end()) { - return NULL; + return nullptr; } const Target* target = &*TargetRegistry::begin(); @@ -80,7 +80,7 @@ LLVMTargetRef LLVMGetTargetFromName(const char *Name) { return wrap(&*IT); } - return NULL; + return nullptr; } LLVMBool LLVMGetTargetFromTriple(const char* TripleStr, LLVMTargetRef *T, diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp 
index b84bcf9dd7e..d3e695e31ab 100644 --- a/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -259,7 +259,7 @@ private: public: IntelExprStateMachine(int64_t imm, bool stoponlbrac, bool addimmprefix) : State(IES_PLUS), PrevState(IES_ERROR), BaseReg(0), IndexReg(0), TmpReg(0), - Scale(1), Imm(imm), Sym(0), StopOnLBrac(stoponlbrac), + Scale(1), Imm(imm), Sym(nullptr), StopOnLBrac(stoponlbrac), AddImmPrefix(addimmprefix) { Info.clear(); } unsigned getBaseReg() { return BaseReg; } @@ -620,7 +620,7 @@ private: X86Operand *ErrorOperand(SMLoc Loc, StringRef Msg) { Error(Loc, Msg); - return 0; + return nullptr; } X86Operand *DefaultMemSIOperand(SMLoc Loc); @@ -714,7 +714,8 @@ public: X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser, const MCInstrInfo &mii, const MCTargetOptions &Options) - : MCTargetAsmParser(), STI(sti), Parser(parser), MII(mii), InstInfo(0) { + : MCTargetAsmParser(), STI(sti), Parser(parser), MII(mii), + InstInfo(nullptr) { // Initialize the set of available features. setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); @@ -1178,9 +1179,9 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start, // expression. IntelExprStateMachine SM(ImmDisp, /*StopOnLBrac=*/false, /*AddImmPrefix=*/true); if (ParseIntelExpression(SM, End)) - return 0; + return nullptr; - const MCExpr *Disp = 0; + const MCExpr *Disp = nullptr; if (const MCExpr *Sym = SM.getSym()) { // A symbolic displacement. Disp = Sym; @@ -1204,7 +1205,7 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start, if (Tok.getString().find('.') != StringRef::npos) { const MCExpr *NewDisp; if (ParseIntelDotOperator(Disp, NewDisp)) - return 0; + return nullptr; End = Tok.getEndLoc(); Parser.Lex(); // Eat the field. 
@@ -1225,7 +1226,7 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start, StringRef ErrMsg; if (CheckBaseRegAndIndexReg(BaseReg, IndexReg, ErrMsg)) { Error(StartInBrac, ErrMsg); - return 0; + return nullptr; } return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, Start, End, Size); @@ -1242,7 +1243,7 @@ bool X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val, InlineAsmIdentifierInfo &Info, bool IsUnevaluatedOperand, SMLoc &End) { assert (isParsingInlineAsm() && "Expected to be parsing inline assembly."); - Val = 0; + Val = nullptr; StringRef LineBuf(Identifier.data()); SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand); @@ -1314,7 +1315,7 @@ X86Operand *X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg, StringRef Identifier = Tok.getString(); if (ParseIntelIdentifier(Val, Identifier, Info, /*Unevaluated=*/false, End)) - return 0; + return nullptr; return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0,/*IndexReg=*/0, /*Scale=*/1, Start, End, Size, Identifier, Info); } @@ -1342,7 +1343,7 @@ X86Operand *X86AsmParser::ParseIntelMemOperand(int64_t ImmDisp, SMLoc Start, StringRef Identifier = Tok.getString(); if (ParseIntelIdentifier(Val, Identifier, Info, /*Unevaluated=*/false, End)) - return 0; + return nullptr; if (!getLexer().is(AsmToken::LBrac)) return CreateMemForInlineAsm(/*SegReg=*/0, Val, /*BaseReg=*/0, /*IndexReg=*/0, @@ -1354,19 +1355,19 @@ X86Operand *X86AsmParser::ParseIntelMemOperand(int64_t ImmDisp, SMLoc Start, IntelExprStateMachine SM(/*ImmDisp=*/0, /*StopOnLBrac=*/true, /*AddImmPrefix=*/false); if (ParseIntelExpression(SM, End)) - return 0; + return nullptr; if (SM.getSym()) { Error(Start, "cannot use more than one symbol in memory operand"); - return 0; + return nullptr; } if (SM.getBaseReg()) { Error(Start, "cannot use base register with variable reference"); - return 0; + return nullptr; } if (SM.getIndexReg()) { Error(Start, "cannot use index register with 
variable reference"); - return 0; + return nullptr; } const MCExpr *Disp = MCConstantExpr::Create(SM.getImm(), getContext()); @@ -1435,7 +1436,7 @@ X86Operand *X86AsmParser::ParseIntelOffsetOfOperator() { StringRef Identifier = Tok.getString(); if (ParseIntelIdentifier(Val, Identifier, Info, /*Unevaluated=*/false, End)) - return 0; + return nullptr; // Don't emit the offset operator. InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7)); @@ -1466,13 +1467,13 @@ X86Operand *X86AsmParser::ParseIntelOperator(unsigned OpKind) { SMLoc TypeLoc = Tok.getLoc(); Parser.Lex(); // Eat operator. - const MCExpr *Val = 0; + const MCExpr *Val = nullptr; InlineAsmIdentifierInfo Info; SMLoc Start = Tok.getLoc(), End; StringRef Identifier = Tok.getString(); if (ParseIntelIdentifier(Val, Identifier, Info, /*Unevaluated=*/true, End)) - return 0; + return nullptr; if (!Info.OpDecl) return ErrorOperand(Start, "unable to lookup expression"); @@ -1527,7 +1528,7 @@ X86Operand *X86AsmParser::ParseIntelOperand() { IntelExprStateMachine SM(/*Imm=*/0, /*StopOnLBrac=*/true, /*AddImmPrefix=*/false); if (ParseIntelExpression(SM, End)) - return 0; + return nullptr; int64_t Imm = SM.getImm(); if (isParsingInlineAsm()) { @@ -1585,11 +1586,11 @@ X86Operand *X86AsmParser::ParseATTOperand() { // Read the register. 
unsigned RegNo; SMLoc Start, End; - if (ParseRegister(RegNo, Start, End)) return 0; + if (ParseRegister(RegNo, Start, End)) return nullptr; if (RegNo == X86::EIZ || RegNo == X86::RIZ) { Error(Start, "%eiz and %riz can only be used as index registers", SMRange(Start, End)); - return 0; + return nullptr; } // If this is a segment register followed by a ':', then this is the start @@ -1606,7 +1607,7 @@ X86Operand *X86AsmParser::ParseATTOperand() { Parser.Lex(); const MCExpr *Val; if (getParser().parseExpression(Val, End)) - return 0; + return nullptr; return X86Operand::CreateImm(Val, Start, End); } } @@ -1635,7 +1636,7 @@ X86AsmParser::HandleAVX512Operand(SmallVectorImpl &Operands StringSwitch(getLexer().getTok().getIdentifier()) .Case("to8", "{1to8}") .Case("to16", "{1to16}") - .Default(0); + .Default(nullptr); if (!BroadcastPrimitive) return !ErrorAndEatStatement(getLexer().getLoc(), "Invalid memory broadcast primitive."); @@ -1690,7 +1691,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext()); if (getLexer().isNot(AsmToken::LParen)) { SMLoc ExprEnd; - if (getParser().parseExpression(Disp, ExprEnd)) return 0; + if (getParser().parseExpression(Disp, ExprEnd)) return nullptr; // After parsing the base expression we could either have a parenthesized // memory address or not. If not, return now. If so, eat the (. @@ -1717,7 +1718,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { // It must be an parenthesized expression, parse it now. if (getParser().parseParenExpression(Disp, ExprEnd)) - return 0; + return nullptr; // After parsing the base expression we could either have a parenthesized // memory address or not. If not, return now. If so, eat the (. 
@@ -1741,11 +1742,11 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { if (getLexer().is(AsmToken::Percent)) { SMLoc StartLoc, EndLoc; BaseLoc = Parser.getTok().getLoc(); - if (ParseRegister(BaseReg, StartLoc, EndLoc)) return 0; + if (ParseRegister(BaseReg, StartLoc, EndLoc)) return nullptr; if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) { Error(StartLoc, "eiz and riz can only be used as index registers", SMRange(StartLoc, EndLoc)); - return 0; + return nullptr; } } @@ -1761,7 +1762,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { // like "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this. if (getLexer().is(AsmToken::Percent)) { SMLoc L; - if (ParseRegister(IndexReg, L, L)) return 0; + if (ParseRegister(IndexReg, L, L)) return nullptr; if (getLexer().isNot(AsmToken::RParen)) { // Parse the scale amount: @@ -1769,7 +1770,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { if (getLexer().isNot(AsmToken::Comma)) { Error(Parser.getTok().getLoc(), "expected comma in scale expression"); - return 0; + return nullptr; } Parser.Lex(); // Eat the comma. @@ -1779,18 +1780,18 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { int64_t ScaleVal; if (getParser().parseAbsoluteExpression(ScaleVal)){ Error(Loc, "expected scale expression"); - return 0; + return nullptr; } // Validate the scale amount. 
if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) && ScaleVal != 1) { Error(Loc, "scale factor in 16-bit address must be 1"); - return 0; + return nullptr; } if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){ Error(Loc, "scale factor in address must be 1, 2, 4 or 8"); - return 0; + return nullptr; } Scale = (unsigned)ScaleVal; } @@ -1802,7 +1803,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { int64_t Value; if (getParser().parseAbsoluteExpression(Value)) - return 0; + return nullptr; if (Value != 1) Warning(Loc, "scale factor without index register is ignored"); @@ -1813,7 +1814,7 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { // Ok, we've eaten the memory operand, verify we have a ')' and eat it too. if (getLexer().isNot(AsmToken::RParen)) { Error(Parser.getTok().getLoc(), "unexpected token in memory operand"); - return 0; + return nullptr; } SMLoc MemEnd = Parser.getTok().getEndLoc(); Parser.Lex(); // Eat the ')'. @@ -1826,18 +1827,18 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { BaseReg != X86::SI && BaseReg != X86::DI)) && BaseReg != X86::DX) { Error(BaseLoc, "invalid 16-bit base register"); - return 0; + return nullptr; } if (BaseReg == 0 && X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) { Error(IndexLoc, "16-bit memory operand may not include only index register"); - return 0; + return nullptr; } StringRef ErrMsg; if (CheckBaseRegAndIndexReg(BaseReg, IndexReg, ErrMsg)) { Error(BaseLoc, ErrMsg); - return 0; + return nullptr; } return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, @@ -1856,7 +1857,7 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, PatchedName = PatchedName.substr(0, Name.size()-1); // FIXME: Hack to recognize cmp{ss,sd,ps,pd}. 
- const MCExpr *ExtraImmOp = 0; + const MCExpr *ExtraImmOp = nullptr; if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) && (PatchedName.endswith("ss") || PatchedName.endswith("sd") || PatchedName.endswith("ps") || PatchedName.endswith("pd"))) { @@ -2299,7 +2300,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, .Case("fstsw", "fnstsw") .Case("fstsww", "fnstsw") .Case("fclex", "fnclex") - .Default(0); + .Default(nullptr); assert(Repl && "Unknown wait-prefixed instruction"); delete Operands[0]; Operands[0] = X86Operand::CreateToken(Repl, IDLoc); diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp index c9426fb1a2f..5658cf8df54 100644 --- a/lib/Target/X86/Disassembler/X86Disassembler.cpp +++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp @@ -140,7 +140,7 @@ X86GenericDisassembler::getInstruction(MCInst &instr, dlog_t loggerFn = logger; if (&vStream == &nulls()) - loggerFn = 0; // Disable logging completely if it's going to nulls(). + loggerFn = nullptr; // Disable logging completely if it's going to nulls(). int ret = decodeInstruction(&internalInstr, regionReader, diff --git a/lib/Target/X86/InstPrinter/X86InstComments.cpp b/lib/Target/X86/InstPrinter/X86InstComments.cpp index db61fb03d4c..baf6507f824 100644 --- a/lib/Target/X86/InstPrinter/X86InstComments.cpp +++ b/lib/Target/X86/InstPrinter/X86InstComments.cpp @@ -32,7 +32,7 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS, const char *(*getRegName)(unsigned)) { // If this is a shuffle operation, the switch should fill in this state. SmallVector ShuffleMask; - const char *DestName = 0, *Src1Name = 0, *Src2Name = 0; + const char *DestName = nullptr, *Src1Name = nullptr, *Src2Name = nullptr; switch (MI->getOpcode()) { case X86::INSERTPSrr: @@ -492,7 +492,7 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS, // If this was a shuffle operation, print the shuffle mask. 
if (!ShuffleMask.empty()) { - if (DestName == 0) DestName = Src1Name; + if (!DestName) DestName = Src1Name; OS << (DestName ? DestName : "mem") << " = "; // If the two sources are the same, canonicalize the input elements to be diff --git a/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp b/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp index 4fa519c9ba1..b6793168fac 100644 --- a/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp +++ b/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp @@ -39,7 +39,7 @@ public: if (Sym->isVariable() == false) Sym->setVariableValue(MCConstantExpr::Create(SymAddr, Ctx)); - const MCExpr *Expr = 0; + const MCExpr *Expr = nullptr; // If hasAddend is true, then we need to add Addend (r_addend) to Expr. bool hasAddend = false; diff --git a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp index 724ea358f40..39480eaaac1 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp @@ -51,7 +51,7 @@ X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) { TextAlignFillValue = 0x90; if (!is64Bit) - Data64bitsDirective = 0; // we can't emit a 64-bit unit + Data64bitsDirective = nullptr; // we can't emit a 64-bit unit // Use ## as a comment string so that .s files generated by llvm can go // through the GCC preprocessor without causing an error. This is needed @@ -115,7 +115,7 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) { // into two .words. if ((T.getOS() == Triple::OpenBSD || T.getOS() == Triple::Bitrig) && T.getArch() == Triple::x86) - Data64bitsDirective = 0; + Data64bitsDirective = nullptr; // Always enable the integrated assembler by default. // Clang also enabled it when the OS is Solaris but that is redundant here. 
diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp index f593694ab0c..c9bbdade19d 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp @@ -286,7 +286,7 @@ enum GlobalOffsetTableExprKind { }; static GlobalOffsetTableExprKind StartsWithGlobalOffsetTable(const MCExpr *Expr) { - const MCExpr *RHS = 0; + const MCExpr *RHS = nullptr; if (Expr->getKind() == MCExpr::Binary) { const MCBinaryExpr *BE = static_cast(Expr); Expr = BE->getLHS(); @@ -317,7 +317,7 @@ void X86MCCodeEmitter:: EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size, MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS, SmallVectorImpl &Fixups, int ImmOffset) const { - const MCExpr *Expr = NULL; + const MCExpr *Expr = nullptr; if (DispOp.isImm()) { // If this is a simple integer displacement that doesn't require a // relocation, emit it now. diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp index a8e59a936a3..1a8ee9d7f89 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp @@ -287,13 +287,13 @@ static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) { // Initial state of the frame pointer is esp+stackGrowth. unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP; MCCFIInstruction Inst = MCCFIInstruction::createDefCfa( - 0, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth); + nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth); MAI->addInitialFrameState(Inst); // Add return address to move list unsigned InstPtr = is64Bit ? 
X86::RIP : X86::EIP; MCCFIInstruction Inst2 = MCCFIInstruction::createOffset( - 0, MRI.getDwarfRegNum(InstPtr, true), stackGrowth); + nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth); MAI->addInitialFrameState(Inst2); return MAI; @@ -377,7 +377,7 @@ static MCInstPrinter *createX86MCInstPrinter(const Target &T, return new X86ATTInstPrinter(MAI, MII, MRI); if (SyntaxVariant == 1) return new X86IntelInstPrinter(MAI, MII, MRI); - return 0; + return nullptr; } static MCRelocationInfo *createX86MCRelocationInfo(StringRef TT, diff --git a/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp index f2023e3b525..3b81d53e948 100644 --- a/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp @@ -40,7 +40,7 @@ public: // FIXME: check that the value is actually the same. if (Sym->isVariable() == false) Sym->setVariableValue(MCConstantExpr::Create(SymAddr, Ctx)); - const MCExpr *Expr = 0; + const MCExpr *Expr = nullptr; switch(RelType) { case X86_64_RELOC_TLV: diff --git a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp index 8bfb7d5e3b3..ead33383009 100644 --- a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp @@ -186,9 +186,9 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer, false); Value += Writer->getSymbolAddress(&A_SD, Layout) - - (A_Base == NULL ? 0 : Writer->getSymbolAddress(A_Base, Layout)); + (!A_Base ? 0 : Writer->getSymbolAddress(A_Base, Layout)); Value -= Writer->getSymbolAddress(&B_SD, Layout) - - (B_Base == NULL ? 0 : Writer->getSymbolAddress(B_Base, Layout)); + (!B_Base ? 
0 : Writer->getSymbolAddress(B_Base, Layout)); if (A_Base) { Index = A_Base->getIndex(); @@ -231,7 +231,7 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer, const MCSectionMachO &Section = static_cast( Fragment->getParent()->getSection()); if (Section.hasAttribute(MachO::S_ATTR_DEBUG)) - Base = 0; + Base = nullptr; } // x86_64 almost always uses external relocations, except when there is no @@ -525,7 +525,7 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer, } // Get the symbol data, if any. - const MCSymbolData *SD = 0; + const MCSymbolData *SD = nullptr; if (Target.getSymA()) SD = &Asm.getSymbolData(Target.getSymA()->getSymbol()); diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp index fb66acc084d..c0c6f2148f5 100644 --- a/lib/Target/X86/X86AsmPrinter.cpp +++ b/lib/Target/X86/X86AsmPrinter.cpp @@ -102,7 +102,7 @@ static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO, MCSymbol *Sym = P.getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr"); MachineModuleInfoImpl::StubValueTy &StubSym = P.MMI->getObjFileInfo().getGVStubEntry(Sym); - if (StubSym.getPointer() == 0) + if (!StubSym.getPointer()) StubSym = MachineModuleInfoImpl:: StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage()); } else if (MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE){ @@ -110,14 +110,14 @@ static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO, MachineModuleInfoImpl::StubValueTy &StubSym = P.MMI->getObjFileInfo().getHiddenGVStubEntry( Sym); - if (StubSym.getPointer() == 0) + if (!StubSym.getPointer()) StubSym = MachineModuleInfoImpl:: StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage()); } else if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB) { MCSymbol *Sym = P.getSymbolWithGlobalValueBase(GV, "$stub"); MachineModuleInfoImpl::StubValueTy &StubSym = P.MMI->getObjFileInfo().getFnStubEntry(Sym); - if (StubSym.getPointer() == 0) + if (!StubSym.getPointer()) 
StubSym = MachineModuleInfoImpl:: StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage()); } @@ -174,7 +174,7 @@ static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO, static void printOperand(X86AsmPrinter &P, const MachineInstr *MI, unsigned OpNo, raw_ostream &O, - const char *Modifier = 0, unsigned AsmVariant = 0); + const char *Modifier = nullptr, unsigned AsmVariant = 0); /// printPCRelImm - This is used to print an immediate value that ends up /// being encoded as a pc-relative value. These print slightly differently, for @@ -232,7 +232,7 @@ static void printOperand(X86AsmPrinter &P, const MachineInstr *MI, static void printLeaMemReference(X86AsmPrinter &P, const MachineInstr *MI, unsigned Op, raw_ostream &O, - const char *Modifier = NULL) { + const char *Modifier = nullptr) { const MachineOperand &BaseReg = MI->getOperand(Op+X86::AddrBaseReg); const MachineOperand &IndexReg = MI->getOperand(Op+X86::AddrIndexReg); const MachineOperand &DispSpec = MI->getOperand(Op+X86::AddrDisp); @@ -284,7 +284,7 @@ static void printLeaMemReference(X86AsmPrinter &P, const MachineInstr *MI, static void printMemReference(X86AsmPrinter &P, const MachineInstr *MI, unsigned Op, raw_ostream &O, - const char *Modifier = NULL) { + const char *Modifier = nullptr) { assert(isMem(MI, Op) && "Invalid memory reference!"); const MachineOperand &Segment = MI->getOperand(Op+X86::AddrSegmentReg); if (Segment.getReg()) { @@ -296,7 +296,7 @@ static void printMemReference(X86AsmPrinter &P, const MachineInstr *MI, static void printIntelMemReference(X86AsmPrinter &P, const MachineInstr *MI, unsigned Op, raw_ostream &O, - const char *Modifier = NULL, + const char *Modifier = nullptr, unsigned AsmVariant = 1) { const MachineOperand &BaseReg = MI->getOperand(Op+X86::AddrBaseReg); unsigned ScaleVal = MI->getOperand(Op+X86::AddrScaleAmt).getImm(); @@ -464,7 +464,7 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, } } - printOperand(*this, MI, OpNo, O, 
/*Modifier*/ 0, AsmVariant); + printOperand(*this, MI, OpNo, O, /*Modifier*/ nullptr, AsmVariant); return false; } diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp index 475eaa38b1a..0be680b182c 100644 --- a/lib/Target/X86/X86CodeEmitter.cpp +++ b/lib/Target/X86/X86CodeEmitter.cpp @@ -53,7 +53,7 @@ namespace { public: static char ID; explicit Emitter(X86TargetMachine &tm, CodeEmitter &mce) - : MachineFunctionPass(ID), II(0), TD(0), TM(tm), + : MachineFunctionPass(ID), II(nullptr), TD(nullptr), TM(tm), MCE(mce), PICBaseOffset(0), Is64BitMode(false), IsPIC(TM.getRelocationModel() == Reloc::PIC_) {} @@ -451,7 +451,7 @@ void Emitter::emitMemModRMByte(const MachineInstr &MI, intptr_t PCAdj) { const MachineOperand &Op3 = MI.getOperand(Op+3); int DispVal = 0; - const MachineOperand *DispForReloc = 0; + const MachineOperand *DispForReloc = nullptr; // Figure out what sort of displacement we have to handle here. if (Op3.isGlobal()) { diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index 79baca83040..571ecc2b415 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -183,7 +183,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &ResultReg) { // Get opcode and regclass of the output for the given load instruction. unsigned Opc = 0; - const TargetRegisterClass *RC = NULL; + const TargetRegisterClass *RC = nullptr; switch (VT.getSimpleVT().SimpleTy) { default: return false; case MVT::i1: @@ -406,7 +406,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) { } else { // Issue load from stub. unsigned Opc = 0; - const TargetRegisterClass *RC = NULL; + const TargetRegisterClass *RC = nullptr; X86AddressMode StubAM; StubAM.Base.Reg = AM.Base.Reg; StubAM.GV = GV; @@ -441,7 +441,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) { // Now construct the final address. 
Note that the Disp, Scale, // and Index values may already be set here. AM.Base.Reg = LoadReg; - AM.GV = 0; + AM.GV = nullptr; return true; } } @@ -467,7 +467,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) { bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) { SmallVector GEPs; redo_gep: - const User *U = NULL; + const User *U = nullptr; unsigned Opcode = Instruction::UserOp1; if (const Instruction *I = dyn_cast(V)) { // Don't walk into other basic blocks; it's possible we haven't @@ -626,7 +626,7 @@ redo_gep: /// X86SelectCallAddress - Attempt to fill in an address from the given value. /// bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) { - const User *U = NULL; + const User *U = nullptr; unsigned Opcode = Instruction::UserOp1; const Instruction *I = dyn_cast(V); // Record if the value is defined in the same basic block. @@ -1247,7 +1247,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { bool X86FastISel::X86SelectShift(const Instruction *I) { unsigned CReg = 0, OpReg = 0; - const TargetRegisterClass *RC = NULL; + const TargetRegisterClass *RC = nullptr; if (I->getType()->isIntegerTy(8)) { CReg = X86::CL; RC = &X86::GR8RegClass; @@ -1487,7 +1487,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) { if (!Subtarget->hasCMov()) return false; unsigned Opc = 0; - const TargetRegisterClass *RC = NULL; + const TargetRegisterClass *RC = nullptr; if (VT == MVT::i16) { Opc = X86::CMOVE16rr; RC = &X86::GR16RegClass; @@ -1865,7 +1865,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { if (cast(I)->isTailCall()) return false; - return DoSelectCall(I, 0); + return DoSelectCall(I, nullptr); } static unsigned computeBytesPoppedByCallee(const X86Subtarget &Subtarget, @@ -1936,8 +1936,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { if (!X86SelectCallAddress(Callee, CalleeAM)) return false; unsigned CalleeOp = 0; - const GlobalValue 
*GV = 0; - if (CalleeAM.GV != 0) { + const GlobalValue *GV = nullptr; + if (CalleeAM.GV != nullptr) { GV = CalleeAM.GV; } else if (CalleeAM.Base.Reg != 0) { CalleeOp = CalleeAM.Base.Reg; @@ -2387,7 +2387,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { // Get opcode and regclass of the output for the given load instruction. unsigned Opc = 0; - const TargetRegisterClass *RC = NULL; + const TargetRegisterClass *RC = nullptr; switch (VT.SimpleTy) { default: return 0; case MVT::i8: @@ -2437,7 +2437,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { // If the expression is just a basereg, then we're done, otherwise we need // to emit an LEA. if (AM.BaseType == X86AddressMode::RegBase && - AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == 0) + AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr) return AM.Base.Reg; Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r; @@ -2510,7 +2510,7 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) { // Get opcode and regclass for the given zero. 
unsigned Opc = 0; - const TargetRegisterClass *RC = NULL; + const TargetRegisterClass *RC = nullptr; switch (VT.SimpleTy) { default: return 0; case MVT::f32: @@ -2558,7 +2558,7 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, MachineInstr *Result = XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, Size, Alignment); - if (Result == 0) return false; + if (!Result) return false; FuncInfo.MBB->insert(FuncInfo.InsertPt, Result); MI->eraseFromParent(); diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp index a0283a33b1d..6886a65e4be 100644 --- a/lib/Target/X86/X86FixupLEAs.cpp +++ b/lib/Target/X86/X86FixupLEAs.cpp @@ -124,7 +124,7 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI, if (!MI->getOperand(2).isImm()) { // convertToThreeAddress will call getImm() // which requires isImm() to be true - return 0; + return nullptr; } break; case X86::ADD16rr: @@ -133,10 +133,10 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI, // if src1 != src2, then convertToThreeAddress will // need to create a Virtual register, which we cannot do // after register allocation. 
- return 0; + return nullptr; } } - return TII->convertToThreeAddress(MFI, MBBI, 0); + return TII->convertToThreeAddress(MFI, MBBI, nullptr); } FunctionPass *llvm::createX86FixupLEAs() { @@ -212,7 +212,7 @@ MachineBasicBlock::iterator FixupLEAPass::searchBackwards(MachineOperand& p, InstrDistance += TII->getInstrLatency(TM->getInstrItineraryData(), CurInst); Found = getPreviousInstr(CurInst, MFI); } - return 0; + return nullptr; } void FixupLEAPass::processInstruction(MachineBasicBlock::iterator& I, diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp index 754f567b295..c8a3ab3082a 100644 --- a/lib/Target/X86/X86FloatingPoint.cpp +++ b/lib/Target/X86/X86FloatingPoint.cpp @@ -431,7 +431,7 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) { if (FPInstClass == X86II::NotFP) continue; // Efficiently ignore non-fp insts! - MachineInstr *PrevMI = 0; + MachineInstr *PrevMI = nullptr; if (I != BB.begin()) PrevMI = std::prev(I); diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index 0a2f8eab476..71d4e4cfa73 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -182,7 +182,7 @@ void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, } } - MachineInstr *MI = NULL; + MachineInstr *MI = nullptr; if (UseLEA) { MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr), @@ -204,7 +204,7 @@ void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, /// mergeSPUpdatesUp - Merge two stack-manipulating instructions upper iterator. 
static void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, - unsigned StackPtr, uint64_t *NumBytes = NULL) { + unsigned StackPtr, uint64_t *NumBytes = nullptr) { if (MBBI == MBB.begin()) return; MachineBasicBlock::iterator PI = std::prev(MBBI); @@ -229,7 +229,7 @@ void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, static void mergeSPUpdatesDown(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, - unsigned StackPtr, uint64_t *NumBytes = NULL) { + unsigned StackPtr, uint64_t *NumBytes = nullptr) { // FIXME: THIS ISN'T RUN!!! return; @@ -269,7 +269,8 @@ static int mergeSPUpdates(MachineBasicBlock &MBB, return 0; MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI; - MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : std::next(MBBI); + MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr + : std::next(MBBI); unsigned Opc = PI->getOpcode(); int Offset = 0; @@ -366,7 +367,8 @@ void X86FrameLowering::emitCalleeSavedFrameMoves( unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true); unsigned CFIIndex = - MMI.addFrameInst(MCCFIInstruction::createOffset(0, DwarfReg, Offset)); + MMI.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg, + Offset)); BuildMI(MBB, MBBI, DL, TII.get(X86::CFI_INSTRUCTION)).addCFIIndex(CFIIndex); } } @@ -511,14 +513,15 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { // Define the current CFA rule to use the provided offset. assert(StackSize); unsigned CFIIndex = MMI.addFrameInst( - MCCFIInstruction::createDefCfaOffset(0, 2 * stackGrowth)); + MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth)); BuildMI(MBB, MBBI, DL, TII.get(X86::CFI_INSTRUCTION)) .addCFIIndex(CFIIndex); // Change the rule for the FramePtr to be an "offset" rule. 
unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true); CFIIndex = MMI.addFrameInst( - MCCFIInstruction::createOffset(0, DwarfFramePtr, 2 * stackGrowth)); + MCCFIInstruction::createOffset(nullptr, + DwarfFramePtr, 2 * stackGrowth)); BuildMI(MBB, MBBI, DL, TII.get(X86::CFI_INSTRUCTION)) .addCFIIndex(CFIIndex); } @@ -534,7 +537,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { // Define the current CFA to use the EBP/RBP register. unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true); unsigned CFIIndex = MMI.addFrameInst( - MCCFIInstruction::createDefCfaRegister(0, DwarfFramePtr)); + MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr)); BuildMI(MBB, MBBI, DL, TII.get(X86::CFI_INSTRUCTION)) .addCFIIndex(CFIIndex); } @@ -698,7 +701,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { // Define the current CFA rule to use the provided offset. assert(StackSize); unsigned CFIIndex = MMI.addFrameInst( - MCCFIInstruction::createDefCfaOffset(0, -StackSize + stackGrowth)); + MCCFIInstruction::createDefCfaOffset(nullptr, + -StackSize + stackGrowth)); BuildMI(MBB, MBBI, DL, TII.get(X86::CFI_INSTRUCTION)) .addCFIIndex(CFIIndex); @@ -1514,7 +1518,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign; - MachineInstr *New = 0; + MachineInstr *New = nullptr; if (Opcode == TII.getCallFrameSetupOpcode()) { New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)), StackPtr) diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index b8c6281567b..44b7b3dbbd5 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -71,17 +71,18 @@ namespace { X86ISelAddressMode() : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0), - Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0), 
- SymbolFlags(X86II::MO_NO_FLAG) { + Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr), + JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) { } bool hasSymbolicDisplacement() const { - return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0; + return GV != nullptr || CP != nullptr || ES != nullptr || + JT != -1 || BlockAddr != nullptr; } bool hasBaseOrIndexReg() const { return BaseType == FrameIndexBase || - IndexReg.getNode() != 0 || Base_Reg.getNode() != 0; + IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr; } /// isRIPRelative - Return true if this addressing mode is already RIP @@ -613,7 +614,7 @@ bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){ // gs:0 (or fs:0 on X86-64) contains its own address. // For more information see http://people.redhat.com/drepper/tls.pdf if (ConstantSDNode *C = dyn_cast(Address)) - if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 && + if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr && Subtarget->isTargetLinux()) switch (N->getPointerInfo().getAddrSpace()) { case 256: @@ -734,7 +735,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) { // a smaller encoding and avoids a scaled-index. 
if (AM.Scale == 2 && AM.BaseType == X86ISelAddressMode::RegBase && - AM.Base_Reg.getNode() == 0) { + AM.Base_Reg.getNode() == nullptr) { AM.Base_Reg = AM.IndexReg; AM.Scale = 1; } @@ -746,8 +747,8 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) { Subtarget->is64Bit() && AM.Scale == 1 && AM.BaseType == X86ISelAddressMode::RegBase && - AM.Base_Reg.getNode() == 0 && - AM.IndexReg.getNode() == 0 && + AM.Base_Reg.getNode() == nullptr && + AM.IndexReg.getNode() == nullptr && AM.SymbolFlags == X86II::MO_NO_FLAG && AM.hasSymbolicDisplacement()) AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64); @@ -1010,7 +1011,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, case ISD::FrameIndex: if (AM.BaseType == X86ISelAddressMode::RegBase && - AM.Base_Reg.getNode() == 0 && + AM.Base_Reg.getNode() == nullptr && (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) { AM.BaseType = X86ISelAddressMode::FrameIndexBase; AM.Base_FrameIndex = cast(N)->getIndex(); @@ -1019,7 +1020,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, break; case ISD::SHL: - if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) + if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break; if (ConstantSDNode @@ -1053,7 +1054,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, case ISD::SRL: { // Scale must not be used already. 
- if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break; + if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break; SDValue And = N.getOperand(0); if (And.getOpcode() != ISD::AND) break; @@ -1087,8 +1088,8 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, case X86ISD::MUL_IMM: // X*[3,5,9] -> X+X*[2,4,8] if (AM.BaseType == X86ISelAddressMode::RegBase && - AM.Base_Reg.getNode() == 0 && - AM.IndexReg.getNode() == 0) { + AM.Base_Reg.getNode() == nullptr && + AM.IndexReg.getNode() == nullptr) { if (ConstantSDNode *CN = dyn_cast(N.getNode()->getOperand(1))) if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 || @@ -1238,7 +1239,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, // with a constant to enable use of the scaled offset field. // Scale must not be used already. - if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break; + if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break; SDValue Shift = N.getOperand(0); if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break; @@ -1277,7 +1278,7 @@ bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) { // Is the base register already occupied? if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) { // If so, check to see if the scale index register is set. 
- if (AM.IndexReg.getNode() == 0) { + if (!AM.IndexReg.getNode()) { AM.IndexReg = N; AM.Scale = 1; return false; @@ -1568,7 +1569,7 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) - return NULL; + return nullptr; MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = cast(Node)->getMemOperand(); const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain}; @@ -1757,7 +1758,7 @@ static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG, SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) { if (Node->hasAnyUseOfValue(0)) - return 0; + return nullptr; SDLoc dl(Node); @@ -1769,13 +1770,13 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) { SDValue Val = Node->getOperand(2); SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) - return 0; + return nullptr; // Which index into the table. enum AtomicOpc Op; switch (Node->getOpcode()) { default: - return 0; + return nullptr; case ISD::ATOMIC_LOAD_OR: Op = OR; break; @@ -1796,7 +1797,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) { unsigned Opc = 0; switch (NVT.SimpleTy) { - default: return 0; + default: return nullptr; case MVT::i8: if (isCN) Opc = AtomicOpcTbl[Op][ConstantI8]; @@ -2028,7 +2029,7 @@ SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) { SDValue VMask = Node->getOperand(5); ConstantSDNode *Scale = dyn_cast(Node->getOperand(6)); if (!Scale) - return 0; + return nullptr; SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(), MVT::Other); @@ -2059,7 +2060,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { if (Node->isMachineOpcode()) { DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n'); Node->setNodeId(-1); - return NULL; // Already selected. + return nullptr; // Already selected. 
} switch (Opcode) { @@ -2109,7 +2110,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { SDNode *RetVal = SelectGather(Node, Opc); if (RetVal) // We already called ReplaceUses inside SelectGather. - return NULL; + return nullptr; break; } } @@ -2260,7 +2261,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0)); ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1)); ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2)); - return NULL; + return nullptr; } case ISD::SMUL_LOHI: @@ -2387,7 +2388,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { } // Copy the low half of the result, if it is needed. if (!SDValue(Node, 0).use_empty()) { - if (ResLo.getNode() == 0) { + if (!ResLo.getNode()) { assert(LoReg && "Register for low half is not defined!"); ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT, InFlag); @@ -2398,7 +2399,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { } // Copy the high half of the result, if it is needed. if (!SDValue(Node, 1).use_empty()) { - if (ResHi.getNode() == 0) { + if (!ResHi.getNode()) { assert(HiReg && "Register for high half is not defined!"); ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT, InFlag); @@ -2408,7 +2409,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n'); } - return NULL; + return nullptr; } case ISD::SDIVREM: @@ -2576,7 +2577,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { ReplaceUses(SDValue(Node, 1), Result); DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n'); } - return NULL; + return nullptr; } case X86ISD::CMP: @@ -2633,7 +2634,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { // one, do not call ReplaceAllUsesWith. ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), SDValue(NewNode, 0)); - return NULL; + return nullptr; } // For example, "testl %eax, $2048" to "testb %ah, $8". 
@@ -2670,7 +2671,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { // one, do not call ReplaceAllUsesWith. ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), SDValue(NewNode, 0)); - return NULL; + return nullptr; } // For example, "testl %eax, $32776" to "testw %ax, $32776". @@ -2692,7 +2693,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { // one, do not call ReplaceAllUsesWith. ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), SDValue(NewNode, 0)); - return NULL; + return nullptr; } // For example, "testq %rax, $268468232" to "testl %eax, $268468232". @@ -2714,7 +2715,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { // one, do not call ReplaceAllUsesWith. ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), SDValue(NewNode, 0)); - return NULL; + return nullptr; } } break; @@ -2741,7 +2742,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { SDValue StoredVal = StoreNode->getOperand(1); unsigned Opc = StoredVal->getOpcode(); - LoadSDNode *LoadNode = 0; + LoadSDNode *LoadNode = nullptr; SDValue InputChain; if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG, LoadNode, InputChain)) @@ -2791,7 +2792,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, case 'v': // not offsetable ?? default: return true; case 'm': // memory - if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4)) + if (!SelectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4)) return true; break; } diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index e59560ed88b..4b4c01fb732 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -266,10 +266,10 @@ void X86TargetLowering::resetOperationActions() { // The _ftol2 runtime function has an unusual calling conv, which // is modeled by a special pseudo-instruction. 
- setLibcallName(RTLIB::FPTOUINT_F64_I64, 0); - setLibcallName(RTLIB::FPTOUINT_F32_I64, 0); - setLibcallName(RTLIB::FPTOUINT_F64_I32, 0); - setLibcallName(RTLIB::FPTOUINT_F32_I32, 0); + setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr); + setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr); + setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr); + setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr); } if (Subtarget->isTargetDarwin()) { @@ -1498,9 +1498,9 @@ void X86TargetLowering::resetOperationActions() { if (!Subtarget->is64Bit()) { // These libcalls are not available in 32-bit. - setLibcallName(RTLIB::SHL_I128, 0); - setLibcallName(RTLIB::SRL_I128, 0); - setLibcallName(RTLIB::SRA_I128, 0); + setLibcallName(RTLIB::SHL_I128, nullptr); + setLibcallName(RTLIB::SRL_I128, nullptr); + setLibcallName(RTLIB::SRA_I128, nullptr); } // Combine sin / cos into one node or libcall if possible. @@ -1738,7 +1738,7 @@ getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, // FIXME: Why this routine is here? Move to RegInfo! std::pair X86TargetLowering::findRepresentativeClass(MVT VT) const{ - const TargetRegisterClass *RRC = 0; + const TargetRegisterClass *RRC = nullptr; uint8_t Cost = 1; switch (VT.SimpleTy) { default: @@ -2687,7 +2687,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, } } else if (!IsSibcall && (!isTailCall || isByVal)) { assert(VA.isMemLoc()); - if (StackPtr.getNode() == 0) + if (!StackPtr.getNode()) StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), getPointerTy()); MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, @@ -2776,7 +2776,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (Flags.isByVal()) { // Copy relative to framepointer. 
SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); - if (StackPtr.getNode() == 0) + if (!StackPtr.getNode()) StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), getPointerTy()); @@ -4681,7 +4681,7 @@ static bool ShouldXformToMOVHLPS(ArrayRef Mask, MVT VT) { /// isScalarLoadToVector - Returns true if the node is a scalar load that /// is promoted to a vector. It also returns the LoadSDNode by reference if /// required. -static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { +static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) { if (N->getOpcode() != ISD::SCALAR_TO_VECTOR) return false; N = N->getOperand(0).getNode(); @@ -5311,7 +5311,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, return SDValue(); SDLoc dl(Op); - SDValue V(0, 0); + SDValue V; bool First = true; for (unsigned i = 0; i < 16; ++i) { bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; @@ -5324,7 +5324,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, } if ((i & 1) != 0) { - SDValue ThisElt(0, 0), LastElt(0, 0); + SDValue ThisElt, LastElt; bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; if (LastIsNonZero) { LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, @@ -5359,7 +5359,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, return SDValue(); SDLoc dl(Op); - SDValue V(0, 0); + SDValue V; bool First = true; for (unsigned i = 0; i < 8; ++i) { bool isNonZero = (NonZeros & (1 << i)) != 0; @@ -5484,7 +5484,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl &Elts, EVT EltVT = VT.getVectorElementType(); unsigned NumElems = Elts.size(); - LoadSDNode *LDBase = NULL; + LoadSDNode *LDBase = nullptr; unsigned LastLoadedElt = -1U; // For each element in the initializer, see if we've found a load or an undef. 
@@ -5665,7 +5665,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget, unsigned ScalarSize = CVT.getSizeInBits(); if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)) { - const Constant *C = 0; + const Constant *C = nullptr; if (ConstantSDNode *CI = dyn_cast(Ld)) C = CI->getConstantIntValue(); else if (ConstantFPSDNode *CF = dyn_cast(Ld)) @@ -5787,10 +5787,10 @@ static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) { if (ExtractedFromVec.getValueType() != VT) return SDValue(); - if (VecIn1.getNode() == 0) + if (!VecIn1.getNode()) VecIn1 = ExtractedFromVec; else if (VecIn1 != ExtractedFromVec) { - if (VecIn2.getNode() == 0) + if (!VecIn2.getNode()) VecIn2 = ExtractedFromVec; else if (VecIn2 != ExtractedFromVec) // Quit if more than 2 vectors to shuffle @@ -5803,7 +5803,7 @@ static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) { Mask[i] = Idx + NumElems; } - if (VecIn1.getNode() == 0) + if (!VecIn1.getNode()) return SDValue(); VecIn2 = VecIn2.getNode() ? 
VecIn2 : DAG.getUNDEF(VT); @@ -6861,7 +6861,7 @@ static SDValue getVZextMovL(MVT VT, MVT OpVT, SDValue SrcOp, SelectionDAG &DAG, const X86Subtarget *Subtarget, SDLoc dl) { if (VT == MVT::v2f64 || VT == MVT::v4f32) { - LoadSDNode *LD = NULL; + LoadSDNode *LD = nullptr; if (!isScalarLoadToVector(SrcOp.getNode(), &LD)) LD = dyn_cast(SrcOp); if (!LD) { @@ -8418,7 +8418,7 @@ LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, static SDValue LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, const EVT PtrVT) { - return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, + return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX, X86II::MO_TLSGD); } @@ -8435,7 +8435,7 @@ static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, SDValue Base; if (is64Bit) { - Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX, + Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX, X86II::MO_TLSLD, /*LocalDynamic=*/true); } else { SDValue InFlag; @@ -9377,7 +9377,7 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, /*IsSigned=*/ true, /*IsReplace=*/ false); SDValue FIST = Vals.first, StackSlot = Vals.second; // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. - if (FIST.getNode() == 0) return Op; + if (!FIST.getNode()) return Op; if (StackSlot.getNode()) // Load the result. 
@@ -10629,7 +10629,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { Res = DAG.getNOT(DL, Res, Res.getValueType()); ConstantSDNode *N2C = dyn_cast(Op2); - if (N2C == 0 || !N2C->isNullValue()) + if (!N2C || !N2C->isNullValue()) Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y); return Res; } @@ -13243,7 +13243,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, uint64_t ShiftAmt = 0; for (unsigned i = 0; i != Ratio; ++i) { ConstantSDNode *C = dyn_cast(Amt.getOperand(i)); - if (C == 0) + if (!C) return SDValue(); // 6 == Log2(64) ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2))); @@ -13254,7 +13254,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, for (unsigned j = 0; j != Ratio; ++j) { ConstantSDNode *C = dyn_cast(Amt.getOperand(i + j)); - if (C == 0) + if (!C) return SDValue(); // 6 == Log2(64) ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2))); @@ -13336,7 +13336,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, BaseShAmt = InVec.getOperand(1); } } - if (BaseShAmt.getNode() == 0) + if (!BaseShAmt.getNode()) BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Amt, DAG.getIntPtrConstant(0)); } @@ -14191,10 +14191,10 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, std::pair Vals = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true); SDValue FIST = Vals.first, StackSlot = Vals.second; - if (FIST.getNode() != 0) { + if (FIST.getNode()) { EVT VT = N->getValueType(0); // Return a load from the stack slot. 
- if (StackSlot.getNode() != 0) + if (StackSlot.getNode()) Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, MachinePointerInfo(), false, false, false, 0)); @@ -14349,7 +14349,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { - default: return NULL; + default: return nullptr; case X86ISD::BSF: return "X86ISD::BSF"; case X86ISD::BSR: return "X86ISD::BSR"; case X86ISD::SHLD: return "X86ISD::SHLD"; @@ -14524,7 +14524,7 @@ bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, Reloc::Model R = getTargetMachine().getRelocationModel(); // X86 allows a sign-extended 32-bit immediate field as a displacement. - if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) + if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr)) return false; if (AM.BaseGV) { @@ -15650,7 +15650,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter( OffsetDestReg = 0; // unused OverflowDestReg = DestReg; - offsetMBB = NULL; + offsetMBB = nullptr; overflowMBB = thisMBB; endMBB = thisMBB; } else { @@ -17891,7 +17891,7 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { SDValue Op2 = Cmp.getOperand(1); SDValue SetCC; - const ConstantSDNode* C = 0; + const ConstantSDNode* C = nullptr; bool needOppositeCond = (CC == X86::COND_E); bool checkAgainstTrue = false; // Is it a comparison against 1? @@ -18142,7 +18142,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, // the DCI.xxxx conditions are provided to postpone the optimization as // late as possible. 
- ConstantSDNode *CmpAgainst = 0; + ConstantSDNode *CmpAgainst = nullptr; if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) && (CmpAgainst = dyn_cast(Cond.getOperand(1))) && !isa(Cond.getOperand(0))) { @@ -19150,7 +19150,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, !cast(St->getValue())->isVolatile() && St->getChain().hasOneUse() && !St->isVolatile()) { SDNode* LdVal = St->getValue().getNode(); - LoadSDNode *Ld = 0; + LoadSDNode *Ld = nullptr; int TokenFactorIndex = -1; SmallVector Ops; SDNode* ChainVal = St->getChain().getNode(); @@ -20265,7 +20265,7 @@ TargetLowering::ConstraintWeight Value *CallOperandVal = info.CallOperandVal; // If we don't have a value, we can't do a match, // but allow it at the lowest weight. - if (CallOperandVal == NULL) + if (!CallOperandVal) return CW_Default; Type *type = CallOperandVal->getType(); // Look at the constraint type. @@ -20383,7 +20383,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector&Ops, SelectionDAG &DAG) const { - SDValue Result(0, 0); + SDValue Result; // Only support length 1 constraints for now. if (Constraint.length() > 1) return; @@ -20466,7 +20466,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, // If we are in non-pic codegen mode, we allow the address of a global (with // an optional displacement) to be used with 'i'. - GlobalAddressSDNode *GA = 0; + GlobalAddressSDNode *GA = nullptr; int64_t Offset = 0; // Match either (GA), (GA+C), (GA+C1+C2), etc. @@ -20622,7 +20622,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); // Not found as a standard register? 
- if (Res.second == 0) { + if (!Res.second) { // Map st(0) -> st(7) -> ST0 if (Constraint.size() == 7 && Constraint[0] == '{' && tolower(Constraint[1]) == 's' && diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index ee59faf7b9c..cc38c393b13 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -2000,7 +2000,7 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc, unsigned Src2 = MI->getOperand(2).getReg(); bool isKill2 = MI->getOperand(2).isKill(); unsigned leaInReg2 = 0; - MachineInstr *InsMI2 = 0; + MachineInstr *InsMI2 = nullptr; if (Src == Src2) { // ADD16rr %reg1028, %reg1028 // just a single insert_subreg. @@ -2064,14 +2064,14 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, // convert them to equivalent lea if the condition code register def's // are dead! if (hasLiveCondCodeDef(MI)) - return 0; + return nullptr; MachineFunction &MF = *MI->getParent()->getParent(); // All instructions input are two-addr instructions. Get the known operands. const MachineOperand &Dest = MI->getOperand(0); const MachineOperand &Src = MI->getOperand(1); - MachineInstr *NewMI = NULL; + MachineInstr *NewMI = nullptr; // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When // we have better subtarget support, enable the 16-bit LEA generation here. // 16-bit LEA is also slow on Core2. 
@@ -2082,11 +2082,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, switch (MIOpc) { case X86::SHUFPSrri: { assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!"); - if (!TM.getSubtarget().hasSSE2()) return 0; + if (!TM.getSubtarget().hasSSE2()) return nullptr; unsigned B = MI->getOperand(1).getReg(); unsigned C = MI->getOperand(2).getReg(); - if (B != C) return 0; + if (B != C) return nullptr; unsigned M = MI->getOperand(3).getImm(); NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri)) .addOperand(Dest).addOperand(Src).addImm(M); @@ -2094,11 +2094,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, } case X86::SHUFPDrri: { assert(MI->getNumOperands() == 4 && "Unknown shufpd instruction!"); - if (!TM.getSubtarget().hasSSE2()) return 0; + if (!TM.getSubtarget().hasSSE2()) return nullptr; unsigned B = MI->getOperand(1).getReg(); unsigned C = MI->getOperand(2).getReg(); - if (B != C) return 0; + if (B != C) return nullptr; unsigned M = MI->getOperand(3).getImm(); // Convert to PSHUFD mask. @@ -2111,13 +2111,13 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, case X86::SHL64ri: { assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); unsigned ShAmt = getTruncatedShiftCount(MI, 2); - if (!isTruncatedShiftCountForLEA(ShAmt)) return 0; + if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; // LEA can't handle RSP. 
if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) && !MF.getRegInfo().constrainRegClass(Src.getReg(), &X86::GR64_NOSPRegClass)) - return 0; + return nullptr; NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r)) .addOperand(Dest) @@ -2127,7 +2127,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, case X86::SHL32ri: { assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); unsigned ShAmt = getTruncatedShiftCount(MI, 2); - if (!isTruncatedShiftCountForLEA(ShAmt)) return 0; + if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; @@ -2137,7 +2137,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill, isUndef, ImplicitOp)) - return 0; + return nullptr; MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) .addOperand(Dest) @@ -2153,10 +2153,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, case X86::SHL16ri: { assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); unsigned ShAmt = getTruncatedShiftCount(MI, 2); - if (!isTruncatedShiftCountForLEA(ShAmt)) return 0; + if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; if (DisableLEA16) - return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; + return is64Bit ? 
convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : nullptr; NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) .addOperand(Dest) .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0); @@ -2165,7 +2165,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, default: { switch (MIOpc) { - default: return 0; + default: return nullptr; case X86::INC64r: case X86::INC32r: case X86::INC64_32r: { @@ -2177,7 +2177,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill, isUndef, ImplicitOp)) - return 0; + return nullptr; MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) .addOperand(Dest) @@ -2191,7 +2191,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, case X86::INC16r: case X86::INC64_16r: if (DisableLEA16) - return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; + return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) + : nullptr; assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!"); NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) .addOperand(Dest).addOperand(Src), 1); @@ -2208,7 +2209,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill, isUndef, ImplicitOp)) - return 0; + return nullptr; MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) .addOperand(Dest) @@ -2223,7 +2224,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, case X86::DEC16r: case X86::DEC64_16r: if (DisableLEA16) - return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; + return is64Bit ? 
convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) + : nullptr; assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!"); NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) .addOperand(Dest).addOperand(Src), -1); @@ -2244,7 +2246,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true, SrcReg, isKill, isUndef, ImplicitOp)) - return 0; + return nullptr; const MachineOperand &Src2 = MI->getOperand(2); bool isKill2, isUndef2; @@ -2252,7 +2254,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false); if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false, SrcReg2, isKill2, isUndef2, ImplicitOp2)) - return 0; + return nullptr; MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) .addOperand(Dest); @@ -2274,7 +2276,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, case X86::ADD16rr: case X86::ADD16rr_DB: { if (DisableLEA16) - return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; + return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) + : nullptr; assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); unsigned Src2 = MI->getOperand(2).getReg(); bool isKill2 = MI->getOperand(2).isKill(); @@ -2313,7 +2316,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true, SrcReg, isKill, isUndef, ImplicitOp)) - return 0; + return nullptr; MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) .addOperand(Dest) @@ -2329,7 +2332,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, case X86::ADD16ri_DB: case X86::ADD16ri8_DB: if (DisableLEA16) - return is64Bit ? 
convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; + return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) + : nullptr; assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) .addOperand(Dest).addOperand(Src), @@ -2339,7 +2343,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, } } - if (!NewMI) return 0; + if (!NewMI) return nullptr; if (LV) { // Update live variables if (Src.isKill()) @@ -2791,11 +2795,11 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, std::next(I)->eraseFromParent(); Cond.clear(); - FBB = 0; + FBB = nullptr; // Delete the JMP if it's equivalent to a fall-through. if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { - TBB = 0; + TBB = nullptr; I->eraseFromParent(); I = MBB.end(); UnCondBrIter = MBB.end(); @@ -3623,7 +3627,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, // We are searching for an earlier instruction that can make CmpInstr // redundant and that instruction will be saved in Sub. - MachineInstr *Sub = NULL; + MachineInstr *Sub = nullptr; const TargetRegisterInfo *TRI = &getRegisterInfo(); // We iterate backward, starting from the instruction before CmpInstr and @@ -3636,7 +3640,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, RE = CmpInstr->getParent() == MI->getParent() ? MachineBasicBlock::reverse_iterator(++Def) /* points to MI */ : CmpInstr->getParent()->rend(); - MachineInstr *Movr0Inst = 0; + MachineInstr *Movr0Inst = nullptr; for (; RI != RE; ++RI) { MachineInstr *Instr = &*RI; // Check whether CmpInstr can be made redundant by the current instruction. 
@@ -3811,19 +3815,19 @@ optimizeLoadInstr(MachineInstr *MI, const MachineRegisterInfo *MRI, unsigned &FoldAsLoadDefReg, MachineInstr *&DefMI) const { if (FoldAsLoadDefReg == 0) - return 0; + return nullptr; // To be conservative, if there exists another load, clear the load candidate. if (MI->mayLoad()) { FoldAsLoadDefReg = 0; - return 0; + return nullptr; } // Check whether we can move DefMI here. DefMI = MRI->getVRegDef(FoldAsLoadDefReg); assert(DefMI); bool SawStore = false; - if (!DefMI->isSafeToMove(this, 0, SawStore)) - return 0; + if (!DefMI->isSafeToMove(this, nullptr, SawStore)) + return nullptr; // We try to commute MI if possible. unsigned IdxEnd = (MI->isCommutable()) ? 2 : 1; @@ -3840,12 +3844,12 @@ optimizeLoadInstr(MachineInstr *MI, const MachineRegisterInfo *MRI, continue; // Do not fold if we have a subreg use or a def or multiple uses. if (MO.getSubReg() || MO.isDef() || FoundSrcOperand) - return 0; + return nullptr; SrcOperandId = i; FoundSrcOperand = true; } - if (!FoundSrcOperand) return 0; + if (!FoundSrcOperand) return nullptr; // Check whether we can fold the def into SrcOperandId. SmallVector Ops; @@ -3859,22 +3863,22 @@ optimizeLoadInstr(MachineInstr *MI, const MachineRegisterInfo *MRI, if (Idx == 1) { // MI was changed but it didn't help, commute it back! commuteInstruction(MI, false); - return 0; + return nullptr; } // Check whether we can commute MI and enable folding. if (MI->isCommutable()) { MachineInstr *NewMI = commuteInstruction(MI, false); // Unable to commute. - if (!NewMI) return 0; + if (!NewMI) return nullptr; if (NewMI != MI) { // New instruction. It doesn't need to be kept. 
NewMI->eraseFromParent(); - return 0; + return nullptr; } } } - return 0; + return nullptr; } /// Expand2AddrUndef - Expand a single-def pseudo instruction to a two-addr @@ -4009,7 +4013,8 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, unsigned i, const SmallVectorImpl &MOs, unsigned Size, unsigned Align) const { - const DenseMap > *OpcodeTablePtr = 0; + const DenseMap > *OpcodeTablePtr = nullptr; bool isCallRegIndirect = TM.getSubtarget().callRegIndirect(); bool isTwoAddrFold = false; @@ -4017,7 +4022,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // when X86Subtarget is Atom. if (isCallRegIndirect && (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r)) { - return NULL; + return nullptr; } unsigned NumOps = MI->getDesc().getNumOperands(); @@ -4028,9 +4033,9 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding. if (MI->getOpcode() == X86::ADD32ri && MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS) - return NULL; + return nullptr; - MachineInstr *NewMI = NULL; + MachineInstr *NewMI = nullptr; // Folding a memory location into the two-address part of a two-address // instruction is different than folding it other places. It requires // replacing the *two* registers with the memory location. @@ -4065,7 +4070,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, unsigned Opcode = I->second.first; unsigned MinAlign = (I->second.second & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT; if (Align < MinAlign) - return NULL; + return nullptr; bool NarrowToMOV32rm = false; if (Size) { unsigned RCSize = getRegClass(MI->getDesc(), i, &RI, MF)->getSize(); @@ -4073,12 +4078,12 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // Check if it's safe to fold the load. If the size of the object is // narrower than the load width, then it's not. 
if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4) - return NULL; + return nullptr; // If this is a 64-bit load, but the spill slot is 32, then we can do // a 32-bit load which is implicitly zero-extended. This likely is due // to liveintervalanalysis remat'ing a load from stack slot. if (MI->getOperand(0).getSubReg() || MI->getOperand(1).getSubReg()) - return NULL; + return nullptr; Opcode = X86::MOV32rm; NarrowToMOV32rm = true; } @@ -4107,7 +4112,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // No fusion if (PrintFailedFusing && !MI->isCopy()) dbgs() << "We failed to fuse operand " << i << " in " << *MI; - return NULL; + return nullptr; } /// hasPartialRegUpdate - Return true for all instructions that only update @@ -4272,14 +4277,14 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, const SmallVectorImpl &Ops, int FrameIndex) const { // Check switch flag - if (NoFusing) return NULL; + if (NoFusing) return nullptr; // Unless optimizing for size, don't fold to avoid partial // register update stalls if (!MF.getFunction()->getAttributes(). hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) && hasPartialRegUpdate(MI->getOpcode())) - return 0; + return nullptr; const MachineFrameInfo *MFI = MF.getFrameInfo(); unsigned Size = MFI->getObjectSize(FrameIndex); @@ -4292,7 +4297,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, unsigned NewOpc = 0; unsigned RCSize = 0; switch (MI->getOpcode()) { - default: return NULL; + default: return nullptr; case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break; case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break; case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break; @@ -4301,12 +4306,12 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, // Check if it's safe to fold the load. If the size of the object is // narrower than the load width, then it's not. 
if (Size < RCSize) - return NULL; + return nullptr; // Change to CMPXXri r, 0 first. MI->setDesc(get(NewOpc)); MI->getOperand(1).ChangeToImmediate(0); } else if (Ops.size() != 1) - return NULL; + return nullptr; SmallVector MOs; MOs.push_back(MachineOperand::CreateFI(FrameIndex)); @@ -4324,14 +4329,14 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, return foldMemoryOperandImpl(MF, MI, Ops, FrameIndex); // Check switch flag - if (NoFusing) return NULL; + if (NoFusing) return nullptr; // Unless optimizing for size, don't fold to avoid partial // register update stalls if (!MF.getFunction()->getAttributes(). hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) && hasPartialRegUpdate(MI->getOpcode())) - return 0; + return nullptr; // Determine the alignment of the load. unsigned Alignment = 0; @@ -4354,12 +4359,12 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, Alignment = 4; break; default: - return 0; + return nullptr; } if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { unsigned NewOpc = 0; switch (MI->getOpcode()) { - default: return NULL; + default: return nullptr; case X86::TEST8rr: NewOpc = X86::CMP8ri; break; case X86::TEST16rr: NewOpc = X86::CMP16ri8; break; case X86::TEST32rr: NewOpc = X86::CMP32ri8; break; @@ -4369,12 +4374,12 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MI->setDesc(get(NewOpc)); MI->getOperand(1).ChangeToImmediate(0); } else if (Ops.size() != 1) - return NULL; + return nullptr; // Make sure the subregisters match. // Otherwise we risk changing the size of the load. if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg()) - return NULL; + return nullptr; SmallVector MOs; switch (LoadMI->getOpcode()) { @@ -4390,7 +4395,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // Medium and large mode can't fold loads this way. 
if (TM.getCodeModel() != CodeModel::Small && TM.getCodeModel() != CodeModel::Kernel) - return NULL; + return nullptr; // x86-32 PIC requires a PIC base register for constant pools. unsigned PICBase = 0; @@ -4402,7 +4407,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // This doesn't work for several reasons. // 1. GlobalBaseReg may have been spilled. // 2. It may not be live at MI. - return NULL; + return nullptr; } // Create a constant-pool entry. @@ -4438,14 +4443,14 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, > 4) // These instructions only load 32 bits, we can't fold them if the // destination register is wider than 32 bits (4 bytes). - return NULL; + return nullptr; if ((LoadMI->getOpcode() == X86::MOVSDrm || LoadMI->getOpcode() == X86::VMOVSDrm) && MF.getRegInfo().getRegClass(LoadMI->getOperand(0).getReg())->getSize() > 8) // These instructions only load 64 bits, we can't fold them if the // destination register is wider than 64 bits (8 bytes). - return NULL; + return nullptr; // Folding a normal load. Just copy the load's address operands. for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i) @@ -4491,7 +4496,8 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI, // Folding a memory location into the two-address part of a two-address // instruction is different than folding it other places. It requires // replacing the *two* registers with the memory location. - const DenseMap > *OpcodeTablePtr = 0; + const DenseMap > *OpcodeTablePtr = nullptr; if (isTwoAddr && NumOps >= 2 && OpNum < 2) { OpcodeTablePtr = &RegOp2MemOpTable2Addr; } else if (OpNum == 0) { // If operand 0 @@ -4673,7 +4679,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, AddrOps.push_back(Chain); // Emit the load instruction. 
- SDNode *Load = 0; + SDNode *Load = nullptr; if (FoldedLoad) { EVT VT = *RC->vt_begin(); std::pair VTs; - const TargetRegisterClass *DstRC = 0; + const TargetRegisterClass *DstRC = nullptr; if (MCID.getNumDefs() > 0) { DstRC = getRegClass(MCID, 0, &RI, MF); VTs.push_back(*DstRC->vt_begin()); @@ -5192,14 +5198,14 @@ static const uint16_t *lookup(unsigned opcode, unsigned domain) { for (unsigned i = 0, e = array_lengthof(ReplaceableInstrs); i != e; ++i) if (ReplaceableInstrs[i][domain-1] == opcode) return ReplaceableInstrs[i]; - return 0; + return nullptr; } static const uint16_t *lookupAVX2(unsigned opcode, unsigned domain) { for (unsigned i = 0, e = array_lengthof(ReplaceableInstrsAVX2); i != e; ++i) if (ReplaceableInstrsAVX2[i][domain-1] == opcode) return ReplaceableInstrsAVX2[i]; - return 0; + return nullptr; } std::pair diff --git a/lib/Target/X86/X86JITInfo.cpp b/lib/Target/X86/X86JITInfo.cpp index dcfed059170..e969ef2cf3d 100644 --- a/lib/Target/X86/X86JITInfo.cpp +++ b/lib/Target/X86/X86JITInfo.cpp @@ -443,7 +443,7 @@ X86JITInfo::getLazyResolverFunction(JITCompilerFn F) { X86JITInfo::X86JITInfo(X86TargetMachine &tm) : TM(tm) { Subtarget = &TM.getSubtarget(); useGOT = 0; - TLSOffset = 0; + TLSOffset = nullptr; } void *X86JITInfo::emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr, diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp index 6d7f3cbec60..0190080b935 100644 --- a/lib/Target/X86/X86MCInstLower.cpp +++ b/lib/Target/X86/X86MCInstLower.cpp @@ -120,7 +120,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const { case X86II::MO_DARWIN_NONLAZY_PIC_BASE: { MachineModuleInfoImpl::StubValueTy &StubSym = getMachOMMI().getGVStubEntry(Sym); - if (StubSym.getPointer() == 0) { + if (!StubSym.getPointer()) { assert(MO.isGlobal() && "Extern symbol not handled yet"); StubSym = MachineModuleInfoImpl:: @@ -132,7 +132,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const { case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: { 
MachineModuleInfoImpl::StubValueTy &StubSym = getMachOMMI().getHiddenGVStubEntry(Sym); - if (StubSym.getPointer() == 0) { + if (!StubSym.getPointer()) { assert(MO.isGlobal() && "Extern symbol not handled yet"); StubSym = MachineModuleInfoImpl:: @@ -168,7 +168,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const { // FIXME: We would like an efficient form for this, so we don't have to do a // lot of extra uniquing. - const MCExpr *Expr = 0; + const MCExpr *Expr = nullptr; MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None; switch (MO.getTargetFlags()) { @@ -223,7 +223,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO, break; } - if (Expr == 0) + if (!Expr) Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx); if (!MO.isJTI() && !MO.isMBB() && MO.getOffset()) diff --git a/lib/Target/X86/X86PadShortFunction.cpp b/lib/Target/X86/X86PadShortFunction.cpp index 76b318eca3c..84521ccee48 100644 --- a/lib/Target/X86/X86PadShortFunction.cpp +++ b/lib/Target/X86/X86PadShortFunction.cpp @@ -50,7 +50,7 @@ namespace { struct PadShortFunc : public MachineFunctionPass { static char ID; PadShortFunc() : MachineFunctionPass(ID) - , Threshold(4), TM(0), TII(0) {} + , Threshold(4), TM(nullptr), TII(nullptr) {} bool runOnMachineFunction(MachineFunction &MF) override; diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp index e1d50dfe10f..a83e1e487b8 100644 --- a/lib/Target/X86/X86RegisterInfo.cpp +++ b/lib/Target/X86/X86RegisterInfo.cpp @@ -129,7 +129,7 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A, if (!Is64Bit && SubIdx == X86::sub_8bit) { A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi); if (!A) - return 0; + return nullptr; } return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx); } diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp index 4c4f9bb6048..b422480dd6f 100644 --- 
a/lib/Target/X86/X86SelectionDAGInfo.cpp +++ b/lib/Target/X86/X86SelectionDAGInfo.cpp @@ -51,7 +51,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, ConstantSDNode *V = dyn_cast(Src); if (const char *bzeroEntry = V && - V->isNullValue() ? Subtarget->getBZeroEntry() : 0) { + V->isNullValue() ? Subtarget->getBZeroEntry() : nullptr) { EVT IntPtr = TLI.getPointerTy(); Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext()); TargetLowering::ArgListTy Args; @@ -78,7 +78,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, } uint64_t SizeVal = ConstantSize->getZExtValue(); - SDValue InFlag(0, 0); + SDValue InFlag; EVT AVT; SDValue Count; ConstantSDNode *ValC = dyn_cast(Src); @@ -226,7 +226,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Count = DAG.getIntPtrConstant(CountVal); unsigned BytesLeft = SizeVal % UBytes; - SDValue InFlag(0, 0); + SDValue InFlag; Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX : X86::ECX, Count, InFlag); diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp index 08edc613308..45bf979ed9a 100644 --- a/lib/Target/X86/X86Subtarget.cpp +++ b/lib/Target/X86/X86Subtarget.cpp @@ -154,7 +154,7 @@ const char *X86Subtarget::getBZeroEntry() const { !getTargetTriple().isMacOSXVersionLT(10, 6)) return "__bzero"; - return 0; + return nullptr; } bool X86Subtarget::hasSinCos() const { diff --git a/lib/Target/X86/X86TargetObjectFile.cpp b/lib/Target/X86/X86TargetObjectFile.cpp index 0a88e984c8b..b62cc257697 100644 --- a/lib/Target/X86/X86TargetObjectFile.cpp +++ b/lib/Target/X86/X86TargetObjectFile.cpp @@ -62,7 +62,7 @@ const MCExpr *X86WindowsTargetObjectFile::getExecutableRelativeSymbol( // operation. const SubOperator *Sub = dyn_cast(CE); if (!Sub) - return 0; + return nullptr; // Symbols must first be numbers before we can subtract them, we need to see a // ptrtoint on both subtraction operands. 
@@ -71,13 +71,13 @@ const MCExpr *X86WindowsTargetObjectFile::getExecutableRelativeSymbol( const PtrToIntOperator *SubRHS = dyn_cast<PtrToIntOperator>(Sub->getOperand(1)); if (!SubLHS || !SubRHS) - return 0; + return nullptr; // Our symbols should exist in address space zero, cowardly no-op if // otherwise. if (SubLHS->getPointerAddressSpace() != 0 || SubRHS->getPointerAddressSpace() != 0) - return 0; + return nullptr; // Both ptrtoint instructions must wrap global variables: // - Only global variables are eligible for image relative relocations. @@ -87,7 +87,7 @@ const MCExpr *X86WindowsTargetObjectFile::getExecutableRelativeSymbol( const GlobalVariable *GVRHS = dyn_cast<GlobalVariable>(SubRHS->getPointerOperand()); if (!GVLHS || !GVRHS) - return 0; + return nullptr; // We expect __ImageBase to be a global variable without a section, externally // defined. @@ -96,11 +96,11 @@ const MCExpr *X86WindowsTargetObjectFile::getExecutableRelativeSymbol( if (GVRHS->isThreadLocal() || GVRHS->getName() != "__ImageBase" || !GVRHS->hasExternalLinkage() || GVRHS->hasInitializer() || GVRHS->hasSection()) - return 0; + return nullptr; // An image-relative, thread-local, symbol makes no sense. 
if (GVLHS->isThreadLocal()) - return 0; + return nullptr; return MCSymbolRefExpr::Create(TM.getSymbol(GVLHS, Mang), MCSymbolRefExpr::VK_COFF_IMGREL32, diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp index 9d4391fefee..5158004cadb 100644 --- a/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/lib/Target/X86/X86TargetTransformInfo.cpp @@ -57,7 +57,7 @@ class X86TTI final : public ImmutablePass, public TargetTransformInfo { unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const; public: - X86TTI() : ImmutablePass(ID), ST(0), TLI(0) { + X86TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) { llvm_unreachable("This pass cannot be directly constructed"); } diff --git a/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp b/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp index f788c5957bb..56659117afc 100644 --- a/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp +++ b/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp @@ -17,7 +17,7 @@ XCoreMCAsmInfo::XCoreMCAsmInfo(StringRef TT) { SupportsDebugInformation = true; Data16bitsDirective = "\t.short\t"; Data32bitsDirective = "\t.long\t"; - Data64bitsDirective = 0; + Data64bitsDirective = nullptr; ZeroDirective = "\t.space\t"; CommentString = "#"; diff --git a/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp b/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp index e5dd99e86eb..27a6e62cf31 100644 --- a/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp +++ b/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp @@ -58,7 +58,7 @@ static MCAsmInfo *createXCoreMCAsmInfo(const MCRegisterInfo &MRI, MCAsmInfo *MAI = new XCoreMCAsmInfo(TT); // Initial state of the frame pointer is SP. 
- MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, XCore::SP, 0); + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, XCore::SP, 0); MAI->addInitialFrameState(Inst); return MAI; diff --git a/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/lib/Target/XCore/XCoreISelDAGToDAG.cpp index 5b0fcfa15e4..d9588d6f32f 100644 --- a/lib/Target/XCore/XCoreISelDAGToDAG.cpp +++ b/lib/Target/XCore/XCoreISelDAGToDAG.cpp @@ -89,14 +89,14 @@ FunctionPass *llvm::createXCoreISelDag(XCoreTargetMachine &TM, bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Addr, SDValue &Base, SDValue &Offset) { - FrameIndexSDNode *FIN = 0; + FrameIndexSDNode *FIN = nullptr; if ((FIN = dyn_cast<FrameIndexSDNode>(Addr))) { Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32); Offset = CurDAG->getTargetConstant(0, MVT::i32); return true; } if (Addr.getOpcode() == ISD::ADD) { - ConstantSDNode *CN = 0; + ConstantSDNode *CN = nullptr; if ((FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) && (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) && (CN->getSExtValue() % 4 == 0 && CN->getSExtValue() >= 0)) { @@ -237,10 +237,10 @@ SDNode *XCoreDAGToDAGISel::SelectBRIND(SDNode *N) { SDValue Chain = N->getOperand(0); SDValue Addr = N->getOperand(1); if (Addr->getOpcode() != ISD::INTRINSIC_W_CHAIN) - return 0; + return nullptr; unsigned IntNo = cast<ConstantSDNode>(Addr->getOperand(1))->getZExtValue(); if (IntNo != Intrinsic::xcore_checkevent) - return 0; + return nullptr; SDValue nextAddr = Addr->getOperand(2); SDValue CheckEventChainOut(Addr.getNode(), 1); if (!CheckEventChainOut.use_empty()) { @@ -252,7 +252,7 @@ SDNode *XCoreDAGToDAGISel::SelectBRIND(SDNode *N) { SDValue NewChain = replaceInChain(CurDAG, Chain, CheckEventChainOut, CheckEventChainIn); if (!NewChain.getNode()) - return 0; + return nullptr; Chain = NewChain; } // Enable events on the thread using setsr 1 and then disable them immediately diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp index 5cf0cb3ef5f..38dfaceb06a --- 
a/lib/Target/XCore/XCoreISelLowering.cpp +++ b/lib/Target/XCore/XCoreISelLowering.cpp @@ -64,7 +64,7 @@ getTargetNodeName(unsigned Opcode) const case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET"; case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN"; case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER"; - default : return NULL; + default : return nullptr; } } @@ -741,7 +741,7 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const if (N->getOpcode() == ISD::ADD) { SDValue Result = TryExpandADDWithMul(N, DAG); - if (Result.getNode() != 0) + if (Result.getNode()) return Result; } diff --git a/lib/Target/XCore/XCoreInstrInfo.cpp b/lib/Target/XCore/XCoreInstrInfo.cpp index 6dd12334a70..984f0cd9c4d 100644 --- a/lib/Target/XCore/XCoreInstrInfo.cpp +++ b/lib/Target/XCore/XCoreInstrInfo.cpp @@ -288,7 +288,7 @@ XCoreInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB, assert((Cond.size() == 2 || Cond.size() == 0) && "Unexpected number of components!"); - if (FBB == 0) { // One way branch. + if (!FBB) { // One way branch. if (Cond.empty()) { // Unconditional branch BuildMI(&MBB, DL, get(XCore::BRFU_lu6)).addMBB(TBB); diff --git a/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/lib/Target/XCore/XCoreLowerThreadLocal.cpp index b398c2d2f8b..b5aebbe0d51 100644 --- a/lib/Target/XCore/XCoreLowerThreadLocal.cpp +++ b/lib/Target/XCore/XCoreLowerThreadLocal.cpp @@ -189,13 +189,14 @@ bool XCoreLowerThreadLocal::lowerGlobal(GlobalVariable *GV) { // Create replacement global. 
ArrayType *NewType = createLoweredType(GV->getType()->getElementType()); - Constant *NewInitializer = 0; + Constant *NewInitializer = nullptr; if (GV->hasInitializer()) NewInitializer = createLoweredInitializer(NewType, GV->getInitializer()); GlobalVariable *NewGV = new GlobalVariable(*M, NewType, GV->isConstant(), GV->getLinkage(), - NewInitializer, "", 0, GlobalVariable::NotThreadLocal, + NewInitializer, "", nullptr, + GlobalVariable::NotThreadLocal, GV->getType()->getAddressSpace(), GV->isExternallyInitialized()); -- 2.34.1