#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/CFG.h"
using namespace llvm;
unsigned emitIntegerCast (MachineBasicBlock *BB,
MachineBasicBlock::iterator IP,
const Type *oldTy, unsigned SrcReg,
- const Type *newTy, unsigned DestReg);
+ const Type *newTy, unsigned DestReg,
+ bool castToLong = false);
void emitFPToIntegerCast (MachineBasicBlock *BB,
MachineBasicBlock::iterator IP, const Type *oldTy,
unsigned SrcReg, const Type *newTy,
if (C->getType() == Type::BoolTy) {
Val = (C == ConstantBool::True);
} else {
- ConstantInt *CI = cast<ConstantInt> (C);
+ ConstantIntegral *CI = cast<ConstantIntegral> (C);
Val = CI->getRawValue ();
}
- switch (Class) {
- case cByte: Val = (int8_t) Val; break;
- case cShort: Val = (int16_t) Val; break;
- case cInt: Val = (int32_t) Val; break;
- default:
- std::cerr << "Offending constant: " << *C << "\n";
- assert (0 && "Can't copy this kind of constant into register yet");
- return;
+ if (C->getType()->isSigned()) {
+ switch (Class) {
+ case cByte: Val = (int8_t) Val; break;
+ case cShort: Val = (int16_t) Val; break;
+ case cInt: Val = (int32_t) Val; break;
+ }
+ } else {
+ switch (Class) {
+ case cByte: Val = (uint8_t) Val; break;
+ case cShort: Val = (uint16_t) Val; break;
+ case cInt: Val = (uint32_t) Val; break;
+ }
}
if (Val == 0) {
BuildMI (*MBB, IP, V8::ORrr, 2, R).addReg (V8::G0).addReg(V8::G0);
unsigned V8ISel::emitIntegerCast (MachineBasicBlock *BB,
MachineBasicBlock::iterator IP, const Type *oldTy,
unsigned SrcReg, const Type *newTy,
- unsigned DestReg) {
- if (oldTy == newTy) {
+ unsigned DestReg, bool castToLong) {
+ unsigned shiftWidth = 32 - (8 * TM.getTargetData ().getTypeSize (newTy));
+ if (oldTy == newTy || (!castToLong && shiftWidth == 0)) {
// No-op cast - just emit a copy; assume the reg. allocator will zap it.
BuildMI (*BB, IP, V8::ORrr, 2, DestReg).addReg (V8::G0).addReg(SrcReg);
return SrcReg;
}
// Emit left-shift, then right-shift to sign- or zero-extend.
unsigned TmpReg = makeAnotherReg (newTy);
- unsigned shiftWidth = 32 - (8 * TM.getTargetData ().getTypeSize (newTy));
BuildMI (*BB, IP, V8::SLLri, 2, TmpReg).addZImm (shiftWidth).addReg(SrcReg);
if (newTy->isSigned ()) { // sign-extend with SRA
BuildMI(*BB, IP, V8::SRAri, 2, DestReg).addZImm (shiftWidth).addReg(TmpReg);
const Type *OldHalfTy = oldTy->isSigned() ? Type::IntTy : Type::UIntTy;
const Type *NewHalfTy = newTy->isSigned() ? Type::IntTy : Type::UIntTy;
unsigned TempReg = emitIntegerCast (BB, IP, OldHalfTy, SrcReg,
- NewHalfTy, DestReg+1);
+ NewHalfTy, DestReg+1, true);
if (newTy->isSigned ()) {
BuildMI (*BB, IP, V8::SRAri, 2, DestReg).addReg (TempReg)
.addZImm (31);
return I != BB->getParent()->end() ? &*I : 0;
}
+/// canFoldSetCCIntoBranch - Return the setcc instruction if we can fold it
+/// into the conditional branch which is the only user of the cc instruction.
+/// This is the case if the conditional branch is the only user of the setcc.
+///
+static SetCondInst *canFoldSetCCIntoBranch(Value *V) {
+  //return 0; // disable.
+  if (SetCondInst *SCI = dyn_cast<SetCondInst>(V))
+    if (SCI->hasOneUse()) {
+      // The single use must be a conditional branch whose condition is this
+      // setcc.
+      BranchInst *User = dyn_cast<BranchInst>(SCI->use_back());
+      // Require the branch to be the instruction immediately after the setcc
+      // (presumably so nothing can be emitted between the compare and the
+      // conditional branch that would clobber the condition codes -- NOTE
+      // (review): confirm against the emitter).  long/ulong compares are
+      // excluded: the branch-folding path asserts on cLong operands.
+      if (User
+          && (SCI->getNext() == User)
+          && (getClassB(SCI->getOperand(0)->getType()) != cLong)
+          && User->isConditional() && (User->getCondition() == V))
+        return SCI;
+    }
+  return 0;
+}
+
/// visitBranchInst - Handles conditional and unconditional branches.
///
void V8ISel::visitBranchInst(BranchInst &I) {
MachineBasicBlock *notTakenSuccMBB = MBBMap[notTakenSucc];
BB->addSuccessor (notTakenSuccMBB);
- // CondReg=(<condition>);
- // If (CondReg==0) goto notTakenSuccMBB;
- unsigned CondReg = getReg (I.getCondition ());
- BuildMI (BB, V8::CMPri, 2).addSImm (0).addReg (CondReg);
- BuildMI (BB, V8::BE, 1).addMBB (notTakenSuccMBB);
+ // See if we can fold a previous setcc instr into this branch.
+ SetCondInst *SCI = canFoldSetCCIntoBranch(I.getCondition());
+ if (SCI == 0) {
+ // The condition did not come from a setcc which we could fold.
+ // CondReg=(<condition>);
+ // If (CondReg==0) goto notTakenSuccMBB;
+ unsigned CondReg = getReg (I.getCondition ());
+ BuildMI (BB, V8::CMPri, 2).addSImm (0).addReg (CondReg);
+ BuildMI (BB, V8::BE, 1).addMBB (notTakenSuccMBB);
+ BuildMI (BB, V8::BA, 1).addMBB (takenSuccMBB);
+ return;
+ }
+
+ // Fold the setCC instr into the branch.
+ unsigned Op0Reg = getReg (SCI->getOperand (0));
+ unsigned Op1Reg = getReg (SCI->getOperand (1));
+ const Type *Ty = SCI->getOperand (0)->getType ();
+
+ // Compare the two values.
+ if (getClass (Ty) < cLong) {
+ BuildMI(BB, V8::SUBCCrr, 2, V8::G0).addReg(Op0Reg).addReg(Op1Reg);
+ } else if (getClass (Ty) == cLong) {
+ assert (0 && "Can't fold setcc long/ulong into branch");
+ } else if (getClass (Ty) == cFloat) {
+ BuildMI(BB, V8::FCMPS, 2).addReg(Op0Reg).addReg(Op1Reg);
+ } else if (getClass (Ty) == cDouble) {
+ BuildMI(BB, V8::FCMPD, 2).addReg(Op0Reg).addReg(Op1Reg);
+ }
+
+ unsigned BranchIdx;
+ switch (SCI->getOpcode()) {
+ default: assert(0 && "Unknown setcc instruction!");
+ case Instruction::SetEQ: BranchIdx = 0; break;
+ case Instruction::SetNE: BranchIdx = 1; break;
+ case Instruction::SetLT: BranchIdx = 2; break;
+ case Instruction::SetGT: BranchIdx = 3; break;
+ case Instruction::SetLE: BranchIdx = 4; break;
+ case Instruction::SetGE: BranchIdx = 5; break;
+ }
+
+ unsigned Column = 0;
+ if (Ty->isSigned() && !Ty->isFloatingPoint()) Column = 1;
+ if (Ty->isFloatingPoint()) Column = 2;
+ static unsigned OpcodeTab[3*6] = {
+ // LLVM SparcV8
+ // unsigned signed fp
+ V8::BE, V8::BE, V8::FBE, // seteq = be be fbe
+ V8::BNE, V8::BNE, V8::FBNE, // setne = bne bne fbne
+ V8::BCS, V8::BL, V8::FBL, // setlt = bcs bl fbl
+ V8::BGU, V8::BG, V8::FBG, // setgt = bgu bg fbg
+ V8::BLEU, V8::BLE, V8::FBLE, // setle = bleu ble fble
+ V8::BCC, V8::BGE, V8::FBGE // setge = bcc bge fbge
+ };
+ unsigned Opcode = OpcodeTab[3*BranchIdx + Column];
+ BuildMI (BB, Opcode, 1).addMBB (takenSuccMBB);
+ BuildMI (BB, V8::BA, 1).addMBB (notTakenSuccMBB);
+ } else {
+ // goto takenSuccMBB;
+ BuildMI (BB, V8::BA, 1).addMBB (takenSuccMBB);
}
- // goto takenSuccMBB;
- BuildMI (BB, V8::BA, 1).addMBB (takenSuccMBB);
}
/// emitGEPOperation - Common code shared between visitGetElementPtrInst and
unsigned memberOffset =
TD.getStructLayout (StTy)->MemberOffsets[fieldIndex];
// Emit an ADD to add memberOffset to the basePtr.
- // We might have to copy memberOffset into a register first, if it's
- // big.
+ // We might have to copy memberOffset into a register first, if
+ // it's big.
if (memberOffset + 4096 < 8191) {
BuildMI (*MBB, IP, V8::ADDri, 2,
nextBasePtrReg).addReg (basePtrReg).addSImm (memberOffset);
} else {
unsigned offsetReg = makeAnotherReg (Type::IntTy);
copyConstantToRegister (MBB, IP,
- ConstantInt::get(Type::IntTy, memberOffset), offsetReg);
+ ConstantSInt::get(Type::IntTy, memberOffset), offsetReg);
BuildMI (*MBB, IP, V8::ADDrr, 2,
nextBasePtrReg).addReg (basePtrReg).addReg (offsetReg);
}
// type is the type of the elements in the array).
Ty = SqTy->getElementType ();
unsigned elementSize = TD.getTypeSize (Ty);
- unsigned idxReg = getReg (idx, MBB, IP);
- unsigned OffsetReg = makeAnotherReg (Type::IntTy);
- unsigned elementSizeReg = makeAnotherReg (Type::UIntTy);
- copyConstantToRegister (MBB, IP,
- ConstantUInt::get(Type::UIntTy, elementSize), elementSizeReg);
- // Emit a SMUL to multiply the register holding the index by
- // elementSize, putting the result in OffsetReg.
- BuildMI (*MBB, IP, V8::SMULrr, 2,
- OffsetReg).addReg (elementSizeReg).addReg (idxReg);
- // Emit an ADD to add OffsetReg to the basePtr.
- BuildMI (*MBB, IP, V8::ADDrr, 2,
- nextBasePtrReg).addReg (basePtrReg).addReg (OffsetReg);
+ unsigned OffsetReg = ~0U;
+ int64_t Offset = -1;
+ bool addImmed = false;
+ if (isa<ConstantIntegral> (idx)) {
+ // If idx is a constant, we don't have to emit the multiply.
+ int64_t Val = cast<ConstantIntegral> (idx)->getRawValue ();
+ if ((Val * elementSize) + 4096 < 8191) {
+ // (Val * elementSize) is constant and fits in an immediate field.
+ // emit: nextBasePtrReg = ADDri basePtrReg, (Val * elementSize)
+ addImmed = true;
+ Offset = Val * elementSize;
+ } else {
+ // (Val * elementSize) is constant, but doesn't fit in an immediate
+ // field. emit: OffsetReg = (Val * elementSize)
+ // nextBasePtrReg = ADDrr OffsetReg, basePtrReg
+ OffsetReg = makeAnotherReg (Type::IntTy);
+ copyConstantToRegister (MBB, IP,
+ ConstantSInt::get(Type::IntTy, Val * elementSize), OffsetReg);
+ }
+ } else {
+ // idx is not constant, we have to shift or multiply.
+ OffsetReg = makeAnotherReg (Type::IntTy);
+ unsigned idxReg = getReg (idx, MBB, IP);
+ switch (elementSize) {
+ case 1:
+ BuildMI (*MBB, IP, V8::ORrr, 2, OffsetReg).addReg (V8::G0).addReg (idxReg);
+ break;
+ case 2:
+ BuildMI (*MBB, IP, V8::SLLri, 2, OffsetReg).addReg (idxReg).addZImm (1);
+ break;
+ case 4:
+ BuildMI (*MBB, IP, V8::SLLri, 2, OffsetReg).addReg (idxReg).addZImm (2);
+ break;
+ case 8:
+ BuildMI (*MBB, IP, V8::SLLri, 2, OffsetReg).addReg (idxReg).addZImm (3);
+ break;
+ default: {
+ if (elementSize + 4096 < 8191) {
+ // Emit a SMUL to multiply the register holding the index by
+ // elementSize, putting the result in OffsetReg.
+ BuildMI (*MBB, IP, V8::SMULri, 2,
+ OffsetReg).addReg (idxReg).addSImm (elementSize);
+ } else {
+ unsigned elementSizeReg = makeAnotherReg (Type::UIntTy);
+ copyConstantToRegister (MBB, IP,
+ ConstantUInt::get(Type::UIntTy, elementSize), elementSizeReg);
+ // Emit a SMUL to multiply the register holding the index by
+ // the register w/ elementSize, putting the result in OffsetReg.
+ BuildMI (*MBB, IP, V8::SMULrr, 2,
+ OffsetReg).addReg (idxReg).addReg (elementSizeReg);
+ }
+ break;
+ }
+ }
+ }
+ if (addImmed) {
+ // Emit an ADD to add the constant immediate Offset to the basePtr.
+ BuildMI (*MBB, IP, V8::ADDri, 2,
+ nextBasePtrReg).addReg (basePtrReg).addSImm (Offset);
+ } else {
+ // Emit an ADD to add OffsetReg to the basePtr.
+ BuildMI (*MBB, IP, V8::ADDrr, 2,
+ nextBasePtrReg).addReg (basePtrReg).addReg (OffsetReg);
+ }
}
basePtrReg = nextBasePtrReg;
}
}
void V8ISel::visitSetCondInst(SetCondInst &I) {
+ if (canFoldSetCCIntoBranch(&I))
+ return; // Fold this into a branch.
+
unsigned Op0Reg = getReg (I.getOperand (0));
unsigned Op1Reg = getReg (I.getOperand (1));
unsigned DestReg = getReg (I);