public:
explicit X86FastISel(MachineFunction &mf,
MachineModuleInfo *mmi,
+ DwarfWriter *dw,
DenseMap<const Value *, unsigned> &vm,
DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
DenseMap<const AllocaInst *, int> &am
, SmallSet<Instruction*, 8> &cil
#endif
)
- : FastISel(mf, mmi, vm, bm, am
+ : FastISel(mf, mmi, dw, vm, bm, am
#ifndef NDEBUG
, cil
#endif
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
Disp += SL->getElementOffset(Idx);
} else {
- uint64_t S = TD.getABITypeSize(GTI.getIndexedType());
+ uint64_t S = TD.getTypePaddedSize(GTI.getIndexedType());
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
// Constant-offset addressing.
Disp += CI->getSExtValue() * S;
// %obit = extractvalue { i32, i1 } %t, 1
// br i1 %obit, label %overflow, label %normal
//
- // The %sum and %obit are converted in an ADD and a SETO/SETC before
+ // The %sum and %obit are converted into an ADD and a SETO/SETB before
// reaching the branch. Therefore, we search backwards through the MBB
- // looking for the SETO/SETC instruction. If an instruction modifies the
- // EFLAGS register before we reach the SETO/SETC instruction, then we can't
- // convert the branch into a JO/JC instruction.
+ // looking for the SETO/SETB instruction. If an instruction modifies the
+ // EFLAGS register before we reach the SETO/SETB instruction, then we can't
+ // convert the branch into a JO/JB instruction.
Value *Agg = EI->getAggregateOperand();
const MachineInstr &MI = *RI;
if (MI.modifiesRegister(Reg)) {
- unsigned Src, Dst;
+ unsigned Src, Dst, SrcSR, DstSR;
- if (getInstrInfo()->isMoveInstr(MI, Src, Dst)) {
+ if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) {
Reg = Src;
continue;
}
if (SetMI) {
unsigned OpCode = SetMI->getOpcode();
- if (OpCode == X86::SETOr || OpCode == X86::SETCr) {
+ if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
BuildMI(MBB, TII.get((OpCode == X86::SETOr) ?
- X86::JO : X86::JC)).addMBB(TrueMBB);
+ X86::JO : X86::JB)).addMBB(TrueMBB);
FastEmitBranch(FalseMBB);
MBB->addSuccessor(TrueMBB);
return true;
BuildMI(MBB, TII.get(CopyOpc), CopyReg).addReg(InputReg);
// Then issue an extract_subreg.
- unsigned ResultReg = FastEmitInst_extractsubreg(CopyReg, X86::SUBREG_8BIT);
+ unsigned ResultReg = FastEmitInst_extractsubreg(DstVT.getSimpleVT(),
+ CopyReg, X86::SUBREG_8BIT);
if (!ResultReg)
return false;
ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
BuildMI(MBB, TII.get((Intrinsic == Intrinsic::sadd_with_overflow) ?
- X86::SETOr : X86::SETCr), ResultReg);
+ X86::SETOr : X86::SETBr), ResultReg);
return true;
}
}
unsigned Align = TD.getPreferredTypeAlignmentShift(C->getType());
if (Align == 0) {
// Alignment of vector types. FIXME!
- Align = TD.getABITypeSize(C->getType());
+ Align = TD.getTypePaddedSize(C->getType());
Align = Log2_64(Align);
}
namespace llvm {
llvm::FastISel *X86::createFastISel(MachineFunction &mf,
MachineModuleInfo *mmi,
+ DwarfWriter *dw,
DenseMap<const Value *, unsigned> &vm,
DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
DenseMap<const AllocaInst *, int> &am
, SmallSet<Instruction*, 8> &cil
#endif
) {
- return new X86FastISel(mf, mmi, vm, bm, am
+ return new X86FastISel(mf, mmi, dw, vm, bm, am
#ifndef NDEBUG
, cil
#endif