From: Jim Grosbach Date: Tue, 11 Aug 2009 15:33:49 +0000 (+0000) Subject: Whitespace cleanup. Remove trailing whitespace. X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=764ab52dd80310a205c9888bf166d09dab858f90;p=oota-llvm.git Whitespace cleanup. Remove trailing whitespace. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78666 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Target/ARM/ARM.h b/lib/Target/ARM/ARM.h index df1a68f8fd0..e95dfc02b60 100644 --- a/lib/Target/ARM/ARM.h +++ b/lib/Target/ARM/ARM.h @@ -98,7 +98,7 @@ FunctionPass *createARMCodeEmitterPass(ARMBaseTargetMachine &TM, MachineCodeEmitter &MCE); FunctionPass *createARMJITCodeEmitterPass(ARMBaseTargetMachine &TM, JITCodeEmitter &JCE); -FunctionPass *createARMObjectCodeEmitterPass(ARMBaseTargetMachine &TM, +FunctionPass *createARMObjectCodeEmitterPass(ARMBaseTargetMachine &TM, ObjectCodeEmitter &OCE); FunctionPass *createARMLoadStoreOptimizationPass(bool PreAlloc = false); diff --git a/lib/Target/ARM/ARMAddressingModes.h b/lib/Target/ARM/ARMAddressingModes.h index f85cb54c88d..18391533519 100644 --- a/lib/Target/ARM/ARMAddressingModes.h +++ b/lib/Target/ARM/ARMAddressingModes.h @@ -20,7 +20,7 @@ #include namespace llvm { - + /// ARM_AM - ARM Addressing Mode Stuff namespace ARM_AM { enum ShiftOpc { @@ -31,11 +31,11 @@ namespace ARM_AM { ror, rrx }; - + enum AddrOpc { add = '+', sub = '-' }; - + static inline const char *getShiftOpcStr(ShiftOpc Op) { switch (Op) { default: llvm_unreachable("Unknown shift opc!"); @@ -46,7 +46,7 @@ namespace ARM_AM { case ARM_AM::rrx: return "rrx"; } } - + static inline ShiftOpc getShiftOpcForNode(SDValue N) { switch (N.getOpcode()) { default: return ARM_AM::no_shift; @@ -95,14 +95,14 @@ namespace ARM_AM { assert(Amt < 32 && "Invalid rotate amount"); return (Val >> Amt) | (Val << ((32-Amt)&31)); } - + /// rotl32 - Rotate a 32-bit unsigned value left by a specified # bits. /// static inline unsigned rotl32(unsigned Val, unsigned Amt) { assert(Amt < 32 && "Invalid rotate amount"); return (Val << Amt) | (Val >> ((32-Amt)&31)); } - + //===--------------------------------------------------------------------===// // Addressing Mode #1: shift_operand with registers //===--------------------------------------------------------------------===// @@ -137,7 +137,7 @@ namespace ARM_AM { static inline unsigned getSOImmValRot(unsigned Imm) { return (Imm >> 8) * 2; } - + /// getSOImmValRotate - Try to handle Imm with an immediate shifter operand, /// computing the rotate amount to use. If this immediate value cannot be /// handled with a single shifter-op, determine a good rotate amount that will @@ -146,14 +146,14 @@ namespace ARM_AM { // 8-bit (or less) immediates are trivially shifter_operands with a rotate // of zero. if ((Imm & ~255U) == 0) return 0; - + // Use CTZ to compute the rotate amount. unsigned TZ = CountTrailingZeros_32(Imm); - + // Rotate amount must be even. Something like 0x200 must be rotated 8 bits, // not 9. unsigned RotAmt = TZ & ~1; - + // If we can handle this spread, return it. if ((rotr32(Imm, RotAmt) & ~255U) == 0) return (32-RotAmt)&31; // HW rotates right, not left. @@ -166,16 +166,16 @@ namespace ARM_AM { // Restart the search for a high-order bit after the initial seconds of // ones. unsigned TZ2 = CountTrailingZeros_32(Imm & ~((1 << TrailingOnes)-1)); - + // Rotate amount must be even. unsigned RotAmt2 = TZ2 & ~1; - + // If this fits, use it. 
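
// The ARMAddressingModes.h comments above describe ARM's mode-1 "shifter operand"
// immediates: an 8-bit payload rotated right by an even amount (0, 2, ..., 30),
// which is why getSOImmValRotate insists the rotate amount be even. A minimal
// standalone sketch of that encodability rule (illustrative only, not LLVM's helpers):

#include <cstdint>
#include <cstdio>

// Rotate left; the hardware rotates the 8-bit payload right, so a value is
// encodable exactly when some even left-rotation brings it below 256.
static uint32_t rotl32(uint32_t v, unsigned amt) {
  return amt ? (v << amt) | (v >> (32 - amt)) : v;
}

static bool isARMSOImm(uint32_t v) {
  for (unsigned rot = 0; rot < 32; rot += 2)   // only even rotations exist
    if (rotl32(v, rot) <= 0xffu)
      return true;
  return false;
}

int main() {
  // 0xff000000 is 0xff rotated right by 8 -> encodable; 0x101 is not.
  printf("%d %d\n", isARMSOImm(0xff000000u), isARMSOImm(0x101u));
}
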
if (RotAmt2 != 32 && (rotr32(Imm, RotAmt2) & ~255U) == 0) return (32-RotAmt2)&31; // HW rotates right, not left. } } - + // Otherwise, we have no way to cover this span of bits with a single // shifter_op immediate. Return a chunk of bits that will be useful to // handle. @@ -189,17 +189,17 @@ namespace ARM_AM { // 8-bit (or less) immediates are trivially shifter_operands with a rotate // of zero. if ((Arg & ~255U) == 0) return Arg; - + unsigned RotAmt = getSOImmValRotate(Arg); // If this cannot be handled with a single shifter_op, bail out. if (rotr32(~255U, RotAmt) & Arg) return -1; - + // Encode this correctly. return rotl32(Arg, RotAmt) | ((RotAmt>>1) << 8); } - + /// isSOImmTwoPartVal - Return true if the specified value can be obtained by /// or'ing together two SOImmVal's. static inline bool isSOImmTwoPartVal(unsigned V) { @@ -207,12 +207,12 @@ namespace ARM_AM { V = rotr32(~255U, getSOImmValRotate(V)) & V; if (V == 0) return false; - + // If this can be handled with two shifter_op's, accept. V = rotr32(~255U, getSOImmValRotate(V)) & V; return V == 0; } - + /// getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, /// return the first chunk of it. static inline unsigned getSOImmTwoPartFirst(unsigned V) { @@ -222,14 +222,14 @@ namespace ARM_AM { /// getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, /// return the second chunk of it. static inline unsigned getSOImmTwoPartSecond(unsigned V) { - // Mask out the first hunk. + // Mask out the first hunk. V = rotr32(~255U, getSOImmValRotate(V)) & V; - + // Take what's left. assert(V == (rotr32(255U, getSOImmValRotate(V)) & V)); return V; } - + /// getThumbImmValShift - Try to handle Imm with a 8-bit immediate followed /// by a left shift. Returns the shift amount to use. static inline unsigned getThumbImmValShift(unsigned Imm) { @@ -244,7 +244,7 @@ namespace ARM_AM { /// isThumbImmShiftedVal - Return true if the specified value can be obtained /// by left shifting a 8-bit immediate. static inline bool isThumbImmShiftedVal(unsigned V) { - // If this can be handled with + // If this can be handled with V = (~255U << getThumbImmValShift(V)) & V; return V == 0; } @@ -260,10 +260,10 @@ namespace ARM_AM { return CountTrailingZeros_32(Imm); } - /// isThumbImm16ShiftedVal - Return true if the specified value can be + /// isThumbImm16ShiftedVal - Return true if the specified value can be /// obtained by left shifting a 16-bit immediate. static inline bool isThumbImm16ShiftedVal(unsigned V) { - // If this can be handled with + // If this can be handled with V = (~65535U << getThumbImm16ValShift(V)) & V; return V == 0; } @@ -287,9 +287,9 @@ namespace ARM_AM { static inline int getT2SOImmValSplatVal(unsigned V) { unsigned u, Vs, Imm; // control = 0 - if ((V & 0xffffff00) == 0) + if ((V & 0xffffff00) == 0) return V; - + // If the value is zeroes in the first byte, just shift those off Vs = ((V & 0xff) == 0) ? V >> 8 : V; // Any passing value only has 8 bits of payload, splatted across the word @@ -325,7 +325,7 @@ namespace ARM_AM { } /// getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit - /// into a Thumb-2 shifter_operand immediate operand, return the 12-bit + /// into a Thumb-2 shifter_operand immediate operand, return the 12-bit /// encoding for it. If not, return -1. /// See ARM Reference Manual A6.3.2. 
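
// isSOImmTwoPartVal / getSOImmTwoPartFirst / getSOImmTwoPartSecond above split a
// constant that is not a single shifter-operand immediate into two that are, so
// it can be materialized as MOV followed by ORR. A rough standalone sketch of
// that split (simplified: it peels 8 bits starting at the lowest set bit):

#include <cassert>
#include <cstdint>
#include <cstdio>

static uint32_t rotl32(uint32_t v, unsigned amt) {
  return amt ? (v << amt) | (v >> (32 - amt)) : v;
}

static unsigned ctz32(uint32_t v) {          // assumes v != 0
  unsigned n = 0;
  while (!(v & 1)) { v >>= 1; ++n; }
  return n;
}

// First chunk: up to 8 bits, aligned to an even rotation, covering the
// lowest set bits of V.
static uint32_t firstChunk(uint32_t v) {
  unsigned rot = ctz32(v) & ~1u;             // rotation amounts are even
  return v & rotl32(0xffu, rot);
}

int main() {
  uint32_t v = 0x00ff00ffu;                  // not a single SO immediate
  uint32_t a = firstChunk(v);                // 0x000000ff
  uint32_t b = v & ~a;                       // 0x00ff0000
  assert((a | b) == v);
  printf("mov #0x%x ; orr #0x%x\n", (unsigned)a, (unsigned)b);
}
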
static inline int getT2SOImmVal(unsigned Arg) { @@ -333,7 +333,7 @@ namespace ARM_AM { int Splat = getT2SOImmValSplatVal(Arg); if (Splat != -1) return Splat; - + // If 'Arg' can be handled with a single shifter_op return the value. int Rot = getT2SOImmValRotateVal(Arg); if (Rot != -1) @@ -341,7 +341,7 @@ namespace ARM_AM { return -1; } - + //===--------------------------------------------------------------------===// // Addressing Mode #2 @@ -359,7 +359,7 @@ namespace ARM_AM { // If this addressing mode is a frame index (before prolog/epilog insertion // and code rewriting), this operand will have the form: FI#, reg0, // with no shift amount for the frame offset. - // + // static inline unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO) { assert(Imm12 < (1 << 12) && "Imm too large!"); bool isSub = Opc == sub; @@ -374,8 +374,8 @@ namespace ARM_AM { static inline ShiftOpc getAM2ShiftOpc(unsigned AM2Opc) { return (ShiftOpc)(AM2Opc >> 13); } - - + + //===--------------------------------------------------------------------===// // Addressing Mode #3 //===--------------------------------------------------------------------===// @@ -388,7 +388,7 @@ namespace ARM_AM { // The first operand is always a Reg. The second operand is a reg if in // reg/reg form, otherwise it's reg#0. The third field encodes the operation // in bit 8, the immediate in bits 0-7. - + /// getAM3Opc - This function encodes the addrmode3 opc field. static inline unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset) { bool isSub = Opc == sub; @@ -400,7 +400,7 @@ namespace ARM_AM { static inline AddrOpc getAM3Op(unsigned AM3Opc) { return ((AM3Opc >> 8) & 1) ? sub : add; } - + //===--------------------------------------------------------------------===// // Addressing Mode #4 //===--------------------------------------------------------------------===// @@ -448,7 +448,7 @@ namespace ARM_AM { // // IA - Increment after // DB - Decrement before - + /// getAM5Opc - This function encodes the addrmode5 opc field. static inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) { bool isSub = Opc == sub; diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index 9dd89fd33cc..ff109edffe4 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -523,7 +523,7 @@ ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI, return false; } -unsigned +unsigned ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const { switch (MI->getOpcode()) { @@ -805,7 +805,7 @@ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, return NewMI; } -MachineInstr* +MachineInstr* ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI, const SmallVectorImpl &Ops, @@ -899,11 +899,11 @@ int llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, const TargetInstrDesc &Desc = MI.getDesc(); unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask); bool isSub = false; - + // Memory operands in inline assembly always use AddrMode2. if (Opcode == ARM::INLINEASM) AddrMode = ARMII::AddrMode2; - + if (Opcode == ARM::ADDri) { Offset += MI.getOperand(FrameRegIdx+1).getImm(); if (Offset == 0) { @@ -997,7 +997,7 @@ int llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, ImmOp.ChangeToImmediate(ImmedOffset); return 0; } - + // Otherwise, it didn't fit. Pull in what we can to simplify the immed. 
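
// getAM2Opc and its accessors above pack the addrmode2 operand as: the 12-bit
// offset in bits 0-11, the add/sub flag in bit 12, and the shift opcode in the
// bits above that. A self-contained round trip of that packing (the ShiftOpc
// enumerator values below are assumed, abbreviated from the header):

#include <cassert>
#include <cstdio>

enum ShiftOpc { no_shift = 0, asr, lsl, lsr, ror, rrx };
enum AddrOpc  { add = '+', sub = '-' };

static unsigned getAM2Opc(AddrOpc op, unsigned imm12, ShiftOpc so) {
  assert(imm12 < (1u << 12) && "Imm too large!");
  return imm12 | ((op == sub ? 1u : 0u) << 12) | (unsigned(so) << 13);
}
static unsigned getAM2Offset(unsigned opc)   { return opc & ((1u << 12) - 1); }
static AddrOpc  getAM2Op(unsigned opc)       { return ((opc >> 12) & 1) ? sub : add; }
static ShiftOpc getAM2ShiftOpc(unsigned opc) { return ShiftOpc(opc >> 13); }

int main() {
  unsigned opc = getAM2Opc(sub, 4095, lsl);
  assert(getAM2Offset(opc) == 4095 && getAM2Op(opc) == sub &&
         getAM2ShiftOpc(opc) == lsl);
  printf("packed AM2 opc: 0x%x\n", opc);
}
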
ImmedOffset = ImmedOffset & Mask; if (isSub) diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h index bc474f264b7..4810d3baabc 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/lib/Target/ARM/ARMBaseInstrInfo.h @@ -243,7 +243,7 @@ public: virtual bool canFoldMemoryOperand(const MachineInstr *MI, const SmallVectorImpl &Ops) const; - + virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI, const SmallVectorImpl &Ops, @@ -311,7 +311,7 @@ void emitT2RegPlusImmediate(MachineBasicBlock &MBB, const ARMBaseInstrInfo &TII); -/// rewriteARMFrameIndex / rewriteT2FrameIndex - +/// rewriteARMFrameIndex / rewriteT2FrameIndex - /// Rewrite MI to access 'Offset' bytes from the FP. Return the offset that /// could not be handled directly in MI. int rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 368a1770d1f..98a08e12d68 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -470,13 +470,13 @@ ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const { I != E; ++I) { for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { if (!I->getOperand(i).isFI()) continue; - + const TargetInstrDesc &Desc = TII.get(I->getOpcode()); unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask); if (AddrMode == ARMII::AddrMode3 || AddrMode == ARMII::AddrModeT2_i8) return (1 << 8) - 1; - + if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrModeT2_i8s4) Limit = std::min(Limit, ((1U << 8) - 1) * 4); @@ -1235,7 +1235,7 @@ static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) { } static bool isCSRestore(MachineInstr *MI, - const ARMBaseInstrInfo &TII, + const ARMBaseInstrInfo &TII, const unsigned *CSRegs) { return ((MI->getOpcode() == (int)ARM::FLDD || MI->getOpcode() == (int)ARM::LDR || @@ -1297,7 +1297,7 @@ emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const { ARMCC::AL, 0, TII); } else { // Thumb2 or ARM. - if (isARM) + if (isARM) BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP) .addReg(FramePtr) .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); diff --git a/lib/Target/ARM/ARMCodeEmitter.cpp b/lib/Target/ARM/ARMCodeEmitter.cpp index 7b497a76904..d5f9cd1314f 100644 --- a/lib/Target/ARM/ARMCodeEmitter.cpp +++ b/lib/Target/ARM/ARMCodeEmitter.cpp @@ -204,10 +204,10 @@ bool Emitter::runOnMachineFunction(MachineFunction &MF) { JTI->Initialize(MF, IsPIC); do { - DEBUG(errs() << "JITTing function '" + DEBUG(errs() << "JITTing function '" << MF.getFunction()->getName() << "'\n"); MCE.startFunction(MF); - for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); + for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); MBB != E; ++MBB) { MCE.StartMachineBasicBlock(MBB); for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end(); @@ -300,7 +300,7 @@ void Emitter::emitConstPoolAddress(unsigned CPI, /// be emitted to the current location in the function, and allow it to be PC /// relative. template -void Emitter::emitJumpTableAddress(unsigned JTIndex, +void Emitter::emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) { MCE.addRelocation(MachineRelocation::getJumpTable(MCE.getCurrentPCOffset(), Reloc, JTIndex, 0, true)); @@ -408,7 +408,7 @@ void Emitter::emitConstPoolInstruction(const MachineInstr &MI) { unsigned CPI = MI.getOperand(0).getImm(); // CP instruction index. 
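
// estimateRSStackSizeLimit above walks every frame reference and keeps the
// tightest immediate-offset ceiling its addressing mode allows, so frame
// lowering can tell when offsets may go out of range. A stand-in sketch of
// those ceilings (the enum below is illustrative, not ARMII's real one):

#include <algorithm>
#include <cstdio>

enum AddrMode { AddrMode2, AddrMode3, AddrMode5, AddrModeT2_i8, AddrModeT2_i8s4 };

// Largest frame offset a load/store using this mode can reach directly.
static unsigned maxDirectOffset(AddrMode am) {
  switch (am) {
  case AddrMode3:
  case AddrModeT2_i8:   return (1u << 8) - 1;        // +/- 255 bytes
  case AddrMode5:
  case AddrModeT2_i8s4: return ((1u << 8) - 1) * 4;  // 8-bit count, scaled by 4
  default:              return (1u << 12) - 1;       // e.g. addrmode2's imm12
  }
}

int main() {
  unsigned limit = (1u << 12) - 1;                   // start from the widest form
  const AddrMode seen[] = {AddrMode2, AddrMode5, AddrMode3};
  for (AddrMode am : seen)
    limit = std::min(limit, maxDirectOffset(am));    // keep the tightest bound
  printf("stack-size limit: %u\n", limit);           // 255
}
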
unsigned CPIndex = MI.getOperand(1).getIndex(); // Actual cp entry index. const MachineConstantPoolEntry &MCPE = (*MCPEs)[CPIndex]; - + // Remember the CONSTPOOL_ENTRY address for later relocation. JTI->addConstantPoolEntryAddr(CPI, MCE.getCurrentPCValue()); @@ -428,7 +428,7 @@ void Emitter::emitConstPoolInstruction(const MachineInstr &MI) { MCE.addRelocation(MachineRelocation::getIndirectSymbol( MCE.getCurrentPCOffset(), ARM::reloc_arm_machine_cp_entry, GV, (intptr_t)ACPV, false)); - else + else emitGlobalAddress(GV, ARM::reloc_arm_machine_cp_entry, ACPV->isStub() || isa(GV), (intptr_t)ACPV); } else { @@ -515,7 +515,7 @@ void Emitter::emitMOVi2piecesInstruction(const MachineInstr &MI) { template void Emitter::emitLEApcrelJTInstruction(const MachineInstr &MI) { // It's basically add r, pc, (LJTI - $+8) - + const TargetInstrDesc &TID = MI.getDesc(); // Emit the 'add' instruction. @@ -1117,7 +1117,7 @@ void Emitter::emitMiscArithInstruction(const MachineInstr &MI) { unsigned ShiftAmt = MI.getOperand(OpIdx).getImm(); assert(ShiftAmt < 32 && "shift_imm range is 0 to 31!"); Binary |= ShiftAmt << ARMII::ShiftShift; - + emitWordLE(Binary); } @@ -1194,7 +1194,7 @@ void Emitter::emitMiscBranchInstruction(const MachineInstr &MI) { if (TID.Opcode == ARM::BX_RET) // The return register is LR. Binary |= ARMRegisterInfo::getRegisterNumbering(ARM::LR); - else + else // otherwise, set the return register Binary |= getMachineOpValue(MI, 0); @@ -1279,7 +1279,7 @@ void Emitter::emitVFPArithInstruction(const MachineInstr &MI) { // Encode Dm / Sm. Binary |= encodeVFPRm(MI, OpIdx); - + emitWordLE(Binary); } diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp index fe7593ee965..d88d5e7d8e3 100644 --- a/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -907,7 +907,7 @@ static inline unsigned getUnconditionalBrDisp(int Opc) { default: break; } - + return ((1<<23)-1)*4; } diff --git a/lib/Target/ARM/ARMConstantPoolValue.h b/lib/Target/ARM/ARMConstantPoolValue.h index bb5232a53bb..77516e58e0d 100644 --- a/lib/Target/ARM/ARMConstantPoolValue.h +++ b/lib/Target/ARM/ARMConstantPoolValue.h @@ -73,7 +73,7 @@ public: return 2; } - + virtual int getExistingMachineCPValue(MachineConstantPool *CP, unsigned Alignment); @@ -91,7 +91,7 @@ inline std::ostream &operator<<(std::ostream &O, V.print(O); return O; } - + inline raw_ostream &operator<<(raw_ostream &O, const ARMConstantPoolValue &V) { V.print(O); return O; diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index 0924b9d9a55..d236945f534 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -152,7 +152,7 @@ bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue Op, // Don't match base register only case. That is matched to a separate // lower complexity pattern with explicit register operand. if (ShOpcVal == ARM_AM::no_shift) return false; - + BaseReg = N.getOperand(0); unsigned ShImmVal = 0; if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { @@ -206,7 +206,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N, EVT::i32); return true; } - + // Match simple R +/- imm12 operands. if (N.getOpcode() == ISD::ADD) if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { @@ -231,15 +231,15 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N, return true; } } - + // Otherwise this is R +/- [possibly shifted] R ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? 
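
// emitLEApcrelJTInstruction above notes it is "basically add r, pc, (LJTI - $+8)":
// in ARM mode a read of PC yields the current instruction's address plus 8, so
// the emitted immediate must subtract that slack. A tiny illustration with
// made-up addresses:

#include <cstdint>
#include <cstdio>

static int32_t pcRelImm(uint32_t instrAddr, uint32_t jumpTableAddr) {
  return int32_t(jumpTableAddr - (instrAddr + 8));   // ARM mode: PC reads as "$ + 8"
}

int main() {
  // Hypothetical layout: the ADD sits at 0x8000 and the jump table at 0x8040.
  printf("add r0, pc, #%d\n", pcRelImm(0x8000, 0x8040));   // prints #56
}
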
ARM_AM::add:ARM_AM::sub; ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1)); unsigned ShAmt = 0; - + Base = N.getOperand(0); Offset = N.getOperand(1); - + if (ShOpcVal != ARM_AM::no_shift) { // Check to see if the RHS of the shift is a constant, if not, we can't fold // it. @@ -251,7 +251,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N, ShOpcVal = ARM_AM::no_shift; } } - + // Try matching (R shl C) + (R). if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) { ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0)); @@ -268,7 +268,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N, } } } - + Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal), EVT::i32); return true; @@ -323,7 +323,7 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N, Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),EVT::i32); return true; } - + if (N.getOpcode() != ISD::ADD) { Base = N; if (N.getOpcode() == ISD::FrameIndex) { @@ -334,7 +334,7 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N, Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),EVT::i32); return true; } - + // If the RHS is +/- imm8, fold into addr mode. if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { int RHSC = (int)RHS->getZExtValue(); @@ -356,7 +356,7 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N, return true; } } - + Base = N.getOperand(0); Offset = N.getOperand(1); Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), EVT::i32); @@ -406,7 +406,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N, EVT::i32); return true; } - + // If the RHS is +/- imm8, fold into addr mode. if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { int RHSC = (int)RHS->getZExtValue(); @@ -431,7 +431,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N, } } } - + Base = N; Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0), EVT::i32); @@ -579,7 +579,7 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue Op, SDValue N, } } } - + return false; } @@ -659,7 +659,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue Op, SDValue N, int RHSC = (int)RHS->getSExtValue(); if (N.getOpcode() == ISD::SUB) RHSC = -RHSC; - + if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative) Base = N.getOperand(0); if (Base.getOpcode() == ISD::FrameIndex) { @@ -747,8 +747,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue Op, SDValue N, ShOpcVal = ARM_AM::getShiftOpcForNode(Base); if (ShOpcVal == ARM_AM::lsl) std::swap(Base, OffReg); - } - + } + if (ShOpcVal == ARM_AM::lsl) { // Check to see if the RHS of the shift is a constant, if not, we can't fold // it. @@ -763,7 +763,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue Op, SDValue N, ShOpcVal = ARM_AM::no_shift; } } - + ShImm = CurDAG->getTargetConstant(ShAmt, EVT::i32); return true; @@ -901,7 +901,7 @@ SDNode *ARMDAGToDAGISel::SelectDYN_ALLOC(SDValue Op) { // Use tADDrSPr since Thumb1 does not have a sub r, sp, r. ARMISelLowering // should have negated the size operand already. FIXME: We can't insert // new target independent node at this stage so we are forced to negate - // it earlier. Is there a better solution? + // it earlier. Is there a better solution? 
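
// The Thumb-2 selectors above choose among three load/store address forms: a
// positive 12-bit byte offset, a small negative 8-bit offset (note the
// "(RHSC >= -255) && (RHSC < 0)" check), or register plus shifted register.
// A rough, illustrative classifier for a constant offset (the 4096 bound for
// the imm12 form is an assumption, not shown in this hunk):

#include <cstdio>

enum class T2AddrForm { Imm12, Imm8, SoReg };

static T2AddrForm classifyT2Offset(int offset) {
  if (offset >= 0 && offset < 4096)
    return T2AddrForm::Imm12;     // t2 i12: positive byte offsets only
  if (offset < 0 && offset >= -255)
    return T2AddrForm::Imm8;      // t2 i8: small negative offsets
  return T2AddrForm::SoReg;       // out of range here: reg+reg form or extra code
}

int main() {
  printf("%d %d %d\n", (int)classifyT2Offset(4095),
         (int)classifyT2Offset(-16), (int)classifyT2Offset(-300));
}
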
return CurDAG->SelectNodeTo(N, ARM::tADDspr_, VT, EVT::Other, SP, Size, Chain); } else if (Subtarget->isThumb2()) { @@ -964,7 +964,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { Ops, 4); } else { SDValue Ops[] = { - CPIdx, + CPIdx, CurDAG->getRegister(0, EVT::i32), CurDAG->getTargetConstant(0, EVT::i32), getAL(CurDAG), @@ -977,7 +977,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { ReplaceUses(Op, SDValue(ResNode, 0)); return NULL; } - + // Other cases are autogenerated. break; } @@ -1096,7 +1096,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc) // Pattern complexity = 6 cost = 1 size = 0 - unsigned Opc = Subtarget->isThumb() ? + unsigned Opc = Subtarget->isThumb() ? ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc; SDValue Chain = Op.getOperand(0); SDValue N1 = Op.getOperand(1); @@ -1111,7 +1111,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { cast(N2)->getZExtValue()), EVT::i32); SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag }; - SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, EVT::Other, + SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, EVT::Other, EVT::Flag, Ops, 5); Chain = SDValue(ResNode, 0); if (Op.getNode()->getNumValues() == 2) { @@ -1233,7 +1233,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { break; case EVT::f64: Opc = ARM::FCPYDcc; - break; + break; } return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5); } @@ -1297,7 +1297,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { ReplaceUses(Op.getValue(0), Chain); return NULL; } - + SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(), TLI.getPointerTy()); SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy()); @@ -1519,7 +1519,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, SDValue Base, Offset, Opc; if (!SelectAddrMode2(Op, Op, Base, Offset, Opc)) return true; - + OutOps.push_back(Base); OutOps.push_back(Offset); OutOps.push_back(Opc); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 1a662d9d872..316a6df93a9 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -66,7 +66,7 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT, PromotedLdStVT.getSimpleVT()); setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote); - AddPromotedToType (ISD::STORE, VT.getSimpleVT(), + AddPromotedToType (ISD::STORE, VT.getSimpleVT(), PromotedLdStVT.getSimpleVT()); } @@ -91,10 +91,10 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT, AddPromotedToType (ISD::AND, VT.getSimpleVT(), PromotedBitwiseVT.getSimpleVT()); setOperationAction(ISD::OR, VT.getSimpleVT(), Promote); - AddPromotedToType (ISD::OR, VT.getSimpleVT(), + AddPromotedToType (ISD::OR, VT.getSimpleVT(), PromotedBitwiseVT.getSimpleVT()); setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote); - AddPromotedToType (ISD::XOR, VT.getSimpleVT(), + AddPromotedToType (ISD::XOR, VT.getSimpleVT(), PromotedBitwiseVT.getSimpleVT()); } } @@ -3317,7 +3317,7 @@ ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, if (Subtarget->isThumb() && Subtarget->hasThumb2()) isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, Offset, isInc, DAG); - else + else isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, Offset, isInc, DAG); if (!isLegal) @@ -3354,7 +3354,7 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, if (Subtarget->isThumb() && Subtarget->hasThumb2()) isLegal = 
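
// getPreIndexedAddressParts / getPostIndexedAddressParts above decide whether a
// load/store plus a base-pointer update can be folded into a single writeback
// (pre- or post-indexed) access. A simplified legality check on the update
// amount; the imm8 / imm12 ranges are the usual Thumb-2 / ARM widths and are
// assumptions here, not read from this patch:

#include <cstdio>
#include <cstdlib>

static bool isLegalIndexedOffset(int update, bool isThumb2) {
  unsigned mag = (unsigned)std::abs(update);
  return isThumb2 ? mag < 256      // Thumb-2 writeback forms carry an imm8
                  : mag < 4096;    // ARM addrmode2 carries an imm12
}

int main() {
  // "ldr r0, [r1], #260" fits ARM's imm12 but not Thumb-2's imm8.
  printf("%d %d\n", isLegalIndexedOffset(260, true), isLegalIndexedOffset(260, false));
}
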
getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, isInc, DAG); - else + else isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, isInc, DAG); if (!isLegal) diff --git a/lib/Target/ARM/ARMJITInfo.cpp b/lib/Target/ARM/ARMJITInfo.cpp index 50a798f14c1..10c765cec2c 100644 --- a/lib/Target/ARM/ARMJITInfo.cpp +++ b/lib/Target/ARM/ARMJITInfo.cpp @@ -47,7 +47,7 @@ static TargetJITInfo::JITCompilerFn JITCompilerFunction; // CompilationCallback stub - We can't use a C function with inline assembly in // it, because we the prolog/epilog inserted by GCC won't work for us (we need // to preserve more context and manipulate the stack directly). Instead, -// write our own wrapper, which does things our way, so we have complete +// write our own wrapper, which does things our way, so we have complete // control over register saving and restoring. extern "C" { #if defined(__arm__) @@ -79,11 +79,11 @@ extern "C" { // order for the registers. // +--------+ // 0 | LR | Original return address - // +--------+ + // +--------+ // 1 | LR | Stub address (start of stub) // 2-5 | R3..R0 | Saved registers (we need to preserve all regs) // 6-20 | D0..D7 | Saved VFP registers - // +--------+ + // +--------+ // #ifndef __SOFTFP__ // Restore VFP caller-saved registers. @@ -110,9 +110,9 @@ extern "C" { #endif } -/// ARMCompilationCallbackC - This is the target-specific function invoked -/// by the function stub when we did not know the real target of a call. -/// This function must locate the start of the stub or call site and pass +/// ARMCompilationCallbackC - This is the target-specific function invoked +/// by the function stub when we did not know the real target of a call. +/// This function must locate the start of the stub or call site and pass /// it into the JIT compiler function. extern "C" void ARMCompilationCallbackC(intptr_t StubAddr) { // Get the address of the compiled code for this function. @@ -161,7 +161,7 @@ void *ARMJITInfo::emitFunctionStub(const Function* F, void *Fn, // In PIC mode, the function stub is loading a lazy-ptr. LazyPtr= (intptr_t)emitGlobalValueIndirectSym((GlobalValue*)F, Fn, JCE); DEBUG(if (F) - errs() << "JIT: Indirect symbol emitted at [" << LazyPtr + errs() << "JIT: Indirect symbol emitted at [" << LazyPtr << "] for GV '" << F->getName() << "'\n"; else errs() << "JIT: Stub emitted at [" << LazyPtr @@ -184,7 +184,7 @@ void *ARMJITInfo::emitFunctionStub(const Function* F, void *Fn, } } else { // The compilation callback will overwrite the first two words of this - // stub with indirect branch instructions targeting the compiled code. + // stub with indirect branch instructions targeting the compiled code. // This stub sets the return address to restart the stub, so that // the new branch will be invoked when we come back. // diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index 3c2bdc756f6..11d4887e259 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -162,7 +162,7 @@ static bool isi32Store(unsigned Opc) { /// MergeOps - Create and insert a LDM or STM with Base as base register and /// registers in Regs as the register operands that would be loaded / stored. -/// It returns true if the transformation is done. +/// It returns true if the transformation is done. 
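
// The ARMJITInfo code above implements lazy compilation: a call lands in a small
// stub, the stub enters ARMCompilationCallbackC, the callback compiles the real
// function, and the stub is then overwritten to branch straight to the compiled
// code so later calls skip the callback. A heavily simplified model of that flow
// (the types, map, and addresses below are all illustrative):

#include <cstdint>
#include <cstdio>
#include <map>

static std::map<uintptr_t, uintptr_t> stubTargets;   // stub address -> compiled entry

static uintptr_t compileFunctionFor(uintptr_t stubAddr) {
  return stubAddr + 0x1000;                          // pretend code was emitted here
}

extern "C" uintptr_t compilationCallback(uintptr_t stubAddr) {
  uintptr_t entry = compileFunctionFor(stubAddr);
  stubTargets[stubAddr] = entry;   // the real JIT rewrites the stub's first words
  return entry;                    // execution then resumes at the compiled code
}

int main() {
  printf("0x%lx\n", (unsigned long)compilationCallback(0x10000));
}
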
bool ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, @@ -968,7 +968,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) { if (MergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII,Advance,MBBI)) ++NumMerges; - // RS may be pointing to an instruction that's deleted. + // RS may be pointing to an instruction that's deleted. RS->skipTo(prior(MBBI)); } else if (NumMemOps == 1) { // Try folding preceeding/trailing base inc/dec into the single diff --git a/lib/Target/ARM/ARMMachineFunctionInfo.h b/lib/Target/ARM/ARMMachineFunctionInfo.h index 1f8c58f01ab..7ae287d56b3 100644 --- a/lib/Target/ARM/ARMMachineFunctionInfo.h +++ b/lib/Target/ARM/ARMMachineFunctionInfo.h @@ -1,10 +1,10 @@ //====- ARMMachineFuctionInfo.h - ARM machine function info -----*- C++ -*-===// -// +// // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. -// +// //===----------------------------------------------------------------------===// // // This file declares ARM-specific per-machine-function information. @@ -140,7 +140,7 @@ public: unsigned getFramePtrSpillOffset() const { return FramePtrSpillOffset; } void setFramePtrSpillOffset(unsigned o) { FramePtrSpillOffset = o; } - + unsigned getGPRCalleeSavedArea1Offset() const { return GPRCS1Offset; } unsigned getGPRCalleeSavedArea2Offset() const { return GPRCS2Offset; } unsigned getDPRCalleeSavedAreaOffset() const { return DPRCSOffset; } diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h index b56b55e200a..ca0a98ea782 100644 --- a/lib/Target/ARM/ARMSubtarget.h +++ b/lib/Target/ARM/ARMSubtarget.h @@ -65,7 +65,7 @@ protected: /// Selected instruction itineraries (one entry per itinerary class.) InstrItineraryData InstrItins; - + public: enum { isELF, isDarwin @@ -103,9 +103,9 @@ protected: bool hasVFP2() const { return ARMFPUType >= VFPv2; } bool hasVFP3() const { return ARMFPUType >= VFPv3; } bool hasNEON() const { return ARMFPUType >= NEON; } - bool useNEONForSinglePrecisionFP() const { + bool useNEONForSinglePrecisionFP() const { return hasNEON() && UseNEONForSinglePrecisionFP; } - + bool isTargetDarwin() const { return TargetType == isDarwin; } bool isTargetELF() const { return TargetType == isELF; } @@ -121,7 +121,7 @@ protected: const std::string & getCPUString() const { return CPUString; } - /// getInstrItins - Return the instruction itineraies based on subtarget + /// getInstrItins - Return the instruction itineraies based on subtarget /// selection. const InstrItineraryData &getInstrItineraryData() const { return InstrItins; } diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp index 6d76193c800..fd6e765d938 100644 --- a/lib/Target/ARM/ARMTargetMachine.cpp +++ b/lib/Target/ARM/ARMTargetMachine.cpp @@ -27,7 +27,7 @@ static cl::opt DisableLdStOpti("disable-arm-loadstore-opti", cl::Hidden, static cl::opt DisableIfConversion("disable-arm-if-conversion",cl::Hidden, cl::desc("Disable if-conversion pass")); -extern "C" void LLVMInitializeARMTarget() { +extern "C" void LLVMInitializeARMTarget() { // Register the target. 
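
// The ARMSubtarget queries above rely on the FPU enum being ordered so that each
// level implies the ones below it (VFPv2 < VFPv3 < NEON), with NEON-for-single-
// precision as an explicit opt-in on top of hasNEON(). A standalone mirror of
// that idea (the enum and field names here are stand-ins):

#include <cstdio>

enum ARMFPEnum { NoFPU, VFPv2, VFPv3, NEON };

struct SubtargetSketch {
  ARMFPEnum fpu;
  bool useNEONForSP;
  bool hasVFP2() const { return fpu >= VFPv2; }
  bool hasVFP3() const { return fpu >= VFPv3; }
  bool hasNEON() const { return fpu >= NEON; }
  bool useNEONForSinglePrecisionFP() const { return hasNEON() && useNEONForSP; }
};

int main() {
  SubtargetSketch s = {VFPv3, true};
  printf("%d %d %d\n", s.hasVFP2(), s.hasNEON(), s.useNEONForSinglePrecisionFP());
}
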
RegisterTargetMachine X(TheARMTarget); RegisterTargetMachine Y(TheThumbTarget); diff --git a/lib/Target/ARM/ARMTargetObjectFile.h b/lib/Target/ARM/ARMTargetObjectFile.h index 9a9f5bb28d3..13fa7e202a0 100644 --- a/lib/Target/ARM/ARMTargetObjectFile.h +++ b/lib/Target/ARM/ARMTargetObjectFile.h @@ -13,14 +13,14 @@ #include "llvm/Target/TargetLoweringObjectFile.h" namespace llvm { - + class ARMElfTargetObjectFile : public TargetLoweringObjectFileELF { public: ARMElfTargetObjectFile() : TargetLoweringObjectFileELF() {} - + void Initialize(MCContext &Ctx, const TargetMachine &TM) { TargetLoweringObjectFileELF::Initialize(Ctx, TM); - + // FIXME: Add new attribute/flag to MCSection for init_array/fini_array. // That will allow not treating these as "directives". if (TM.getSubtarget().isAAPCS_ABI()) { diff --git a/lib/Target/ARM/Thumb1InstrInfo.h b/lib/Target/ARM/Thumb1InstrInfo.h index 646f4298e36..13cc5787b5b 100644 --- a/lib/Target/ARM/Thumb1InstrInfo.h +++ b/lib/Target/ARM/Thumb1InstrInfo.h @@ -69,7 +69,7 @@ public: MachineInstr* MI, const SmallVectorImpl &Ops, int FrameIndex) const; - + MachineInstr* foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI, const SmallVectorImpl &Ops, diff --git a/lib/Target/ARM/Thumb1RegisterInfo.cpp b/lib/Target/ARM/Thumb1RegisterInfo.cpp index f862114d3aa..727581445a7 100644 --- a/lib/Target/ARM/Thumb1RegisterInfo.cpp +++ b/lib/Target/ARM/Thumb1RegisterInfo.cpp @@ -389,7 +389,7 @@ static void removeOperands(MachineInstr &MI, unsigned i) { int Thumb1RegisterInfo:: rewriteFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, int Offset, - unsigned MOVOpc, unsigned ADDriOpc, unsigned SUBriOpc) const + unsigned MOVOpc, unsigned ADDriOpc, unsigned SUBriOpc) const { // if/when eliminateFrameIndex() conforms with ARMBaseRegisterInfo // version then can pull out Thumb1 specific parts here diff --git a/lib/Target/ARM/Thumb2InstrInfo.cpp b/lib/Target/ARM/Thumb2InstrInfo.cpp index ecfd085ccf8..d37f61e490c 100644 --- a/lib/Target/ARM/Thumb2InstrInfo.cpp +++ b/lib/Target/ARM/Thumb2InstrInfo.cpp @@ -330,7 +330,7 @@ int llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, // Memory operands in inline assembly always use AddrModeT2_i12. if (Opcode == ARM::INLINEASM) AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2? - + if (Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) { Offset += MI.getOperand(FrameRegIdx+1).getImm(); @@ -389,7 +389,7 @@ int llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false); return Offset; } - + MI.RemoveOperand(FrameRegIdx+1); MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0); NewOpc = immediateOffsetOpcode(Opcode); @@ -444,13 +444,13 @@ int llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, if (AddrMode == ARMII::AddrMode5) // FIXME: Not consistent. ImmedOffset |= 1 << NumBits; - else + else ImmedOffset = -ImmedOffset; } ImmOp.ChangeToImmediate(ImmedOffset); return 0; } - + // Otherwise, offset doesn't fit. Pull in what we can to simplify ImmedOffset = ImmedOffset & Mask; if (isSub) {
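
// rewriteARMFrameIndex / rewriteT2FrameIndex above handle an out-of-range frame
// offset by keeping the low bits the immediate field can encode ("ImmedOffset &
// Mask") and returning the remainder to be folded into a separate add/sub of the
// frame register. A standalone sketch of that split (the field width is a
// parameter; scaled modes such as AddrMode5 are ignored here):

#include <cassert>
#include <cstdio>

struct SplitOffset { unsigned encoded; int leftover; };

static SplitOffset splitFrameOffset(int offset, unsigned numBits) {
  unsigned mask = (1u << numBits) - 1;
  bool isSub = offset < 0;
  unsigned mag = isSub ? (unsigned)-offset : (unsigned)offset;
  unsigned enc = mag & mask;               // stays in the instruction
  int rest = (int)(mag & ~mask);           // needs an extra ADDri/SUBri
  return { enc, isSub ? -rest : rest };
}

int main() {
  SplitOffset s = splitFrameOffset(0x1234, 8);   // an 8-bit immediate field
  assert(s.encoded == 0x34 && s.leftover == 0x1200);
  printf("imm=%u rest=%d\n", s.encoded, s.leftover);
}
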