BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg())
.addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB);
- unsigned tt1 = F->getRegInfo().createVirtualRegister(RC);
- unsigned tt2 = F->getRegInfo().createVirtualRegister(RC);
+ // The subsequent operations should use the destination registers of
+ // the PHI instructions.
if (invSrc) {
- MIB = BuildMI(newMBB, dl, TII->get(NotOpc), tt1).addReg(t1);
- MIB = BuildMI(newMBB, dl, TII->get(NotOpc), tt2).addReg(t2);
+ t1 = F->getRegInfo().createVirtualRegister(RC);
+ t2 = F->getRegInfo().createVirtualRegister(RC);
+ MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t1).addReg(dest1Oper.getReg());
+ MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t2).addReg(dest2Oper.getReg());
} else {
- tt1 = t1;
- tt2 = t2;
+ t1 = dest1Oper.getReg();
+ t2 = dest2Oper.getReg();
}
int valArgIndx = lastAddrIndx + 1;
else
MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5);
if (regOpcL != X86::MOV32rr)
- MIB.addReg(tt1);
+ MIB.addReg(t1);
(*MIB).addOperand(*argOpers[valArgIndx]);
assert(argOpers[valArgIndx + 1]->isReg() ==
argOpers[valArgIndx]->isReg());
else
MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6);
if (regOpcH != X86::MOV32rr)
- MIB.addReg(tt2);
+ MIB.addReg(t2);
(*MIB).addOperand(*argOpers[valArgIndx + 1]);
MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::EAX);
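
Reviewer note: the invariant this hunk restores, as a minimal sketch against the same BuildMI / MachineRegisterInfo APIs used above. The names loopMBB, entryMBB, loInEntry, loFromCmpxchg, and incLo are illustrative placeholders, not identifiers from this file:

    // Loop-carried values must flow through the PHI, and every later use
    // must read the PHI's def. Feeding the ALU ops from other vregs (as the
    // removed tt1/tt2 code did) leaves the PHI defs without users, so the
    // PHIs are dead and the cmpxchg8b retry loop recomputes from stale values.
    MachineRegisterInfo &MRI = F->getRegInfo();
    unsigned phiLo = MRI.createVirtualRegister(RC);
    BuildMI(loopMBB, dl, TII->get(X86::PHI), phiLo)
      .addReg(loInEntry).addMBB(entryMBB)     // value on the first iteration
      .addReg(loFromCmpxchg).addMBB(loopMBB); // value reloaded on each retry
    unsigned sumLo = MRI.createVirtualRegister(RC);
    BuildMI(loopMBB, dl, TII->get(X86::ADD32rr), sumLo)
      .addReg(phiLo)                          // correct: consume the PHI def
      .addReg(incLo);
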
--- /dev/null
+; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s
+; rdar://7512579
+
+; PHI defs in the atomic loop should be used by the add / adc
+; instructions. They should not be dead.
+
+define void @t(i64* nocapture %p) nounwind ssp {
+entry:
+; CHECK: t:
+; CHECK: movl $1
+; CHECK: movl (%ebp), %eax
+; CHECK: movl 4(%ebp), %edx
+; CHECK: LBB1_1:
+; CHECK-NOT: movl $1
+; CHECK-NOT: movl $0
+; CHECK: addl
+; CHECK: adcl
+; CHECK: lock
+; CHECK: cmpxchg8b
+; CHECK: jne
+ tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
+ tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+ ret void
+}
+
+declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
+
+declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
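
For reference, the loop the CHECK lines above pin down looks roughly like this on i386-apple-darwin; the register assignments for the hoisted increment are illustrative, the test only asserts the instruction pattern:

        movl    $1, %esi            ## increment, low half, hoisted out of the loop
        movl    $0, %edi            ## increment, high half
        movl    (%ebp), %eax        ## current value, low half
        movl    4(%ebp), %edx       ## current value, high half
    LBB1_1:
        movl    %eax, %ebx
        movl    %edx, %ecx
        addl    %esi, %ebx          ## add/adc consume the loop-carried value
        adcl    %edi, %ecx          ## (the PHI defs), not re-materialized constants
        lock
        cmpxchg8b (%ebp)            ## on failure, %edx:%eax is reloaded; retry
        jne     LBB1_1
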