From e61e516a51f1211c1385a3043523552f4e56003d Mon Sep 17 00:00:00 2001
From: Evan Cheng
Date: Tue, 9 Oct 2012 23:48:33 +0000
Subject: [PATCH] When expanding atomic load arith instructions, do not lose
 target flags.

rdar://12453106

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165568 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp      |   7 +-
 test/CodeGen/X86/atomic-minmax-i6432.ll | 100 ++++++++++++++----
 2 files changed, 63 insertions(+), 44 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 5e0b9c0034a..7e43e5432d8 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -12389,9 +12389,12 @@ X86TargetLowering::EmitAtomicLoadArith6432(MachineInstr *MI,
   // Hi
   MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), X86::EDX);
   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
-    if (i == X86::AddrDisp)
+    if (i == X86::AddrDisp) {
       MIB.addDisp(MI->getOperand(MemOpndSlot + i), 4); // 4 == sizeof(i32)
-    else
+      // Don't forget to transfer the target flag.
+      MachineOperand &MO = MIB->getOperand(MIB->getNumOperands()-1);
+      MO.setTargetFlags(MI->getOperand(MemOpndSlot + i).getTargetFlags());
+    } else
       MIB.addOperand(MI->getOperand(MemOpndSlot + i));
   }
   MIB.setMemRefs(MMOBegin, MMOEnd);
diff --git a/test/CodeGen/X86/atomic-minmax-i6432.ll b/test/CodeGen/X86/atomic-minmax-i6432.ll
index 01a926489be..e3ef605f7f1 100644
--- a/test/CodeGen/X86/atomic-minmax-i6432.ll
+++ b/test/CodeGen/X86/atomic-minmax-i6432.ll
@@ -1,51 +1,67 @@
-; RUN: llc -march=x86 -mattr=+cmov -mtriple=i386-pc-linux < %s | FileCheck %s
+; RUN: llc -march=x86 -mattr=+cmov -mtriple=i386-pc-linux < %s | FileCheck %s -check-prefix=LINUX
+; RUN: llc -march=x86 -mtriple=i386-macosx -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC
+
 @sc64 = external global i64
 
 define void @atomic_maxmin_i6432() {
-; CHECK: atomic_maxmin_i6432
+; LINUX: atomic_maxmin_i6432
   %1 = atomicrmw max i64* @sc64, i64 5 acquire
-; CHECK: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; CHECK: cmpl
-; CHECK: setl
-; CHECK: cmpl
-; CHECK: setl
-; CHECK: cmovne
-; CHECK: cmovne
-; CHECK: lock
-; CHECK-NEXT: cmpxchg8b
-; CHECK: jne [[LABEL]]
+; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
+; LINUX: cmpl
+; LINUX: setl
+; LINUX: cmpl
+; LINUX: setl
+; LINUX: cmovne
+; LINUX: cmovne
+; LINUX: lock
+; LINUX-NEXT: cmpxchg8b
+; LINUX: jne [[LABEL]]
   %2 = atomicrmw min i64* @sc64, i64 6 acquire
-; CHECK: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; CHECK: cmpl
-; CHECK: setg
-; CHECK: cmpl
-; CHECK: setg
-; CHECK: cmovne
-; CHECK: cmovne
-; CHECK: lock
-; CHECK-NEXT: cmpxchg8b
-; CHECK: jne [[LABEL]]
+; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
+; LINUX: cmpl
+; LINUX: setg
+; LINUX: cmpl
+; LINUX: setg
+; LINUX: cmovne
+; LINUX: cmovne
+; LINUX: lock
+; LINUX-NEXT: cmpxchg8b
+; LINUX: jne [[LABEL]]
   %3 = atomicrmw umax i64* @sc64, i64 7 acquire
-; CHECK: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; CHECK: cmpl
-; CHECK: setb
-; CHECK: cmpl
-; CHECK: setb
-; CHECK: cmovne
-; CHECK: cmovne
-; CHECK: lock
-; CHECK-NEXT: cmpxchg8b
-; CHECK: jne [[LABEL]]
+; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
+; LINUX: cmpl
+; LINUX: setb
+; LINUX: cmpl
+; LINUX: setb
+; LINUX: cmovne
+; LINUX: cmovne
+; LINUX: lock
+; LINUX-NEXT: cmpxchg8b
+; LINUX: jne [[LABEL]]
   %4 = atomicrmw umin i64* @sc64, i64 8 acquire
-; CHECK: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; CHECK: cmpl
-; CHECK: seta
-; CHECK: cmpl
-; CHECK: seta
-; CHECK: cmovne
-; CHECK: cmovne
-; CHECK: lock
-; CHECK-NEXT: cmpxchg8b
-; CHECK: jne [[LABEL]]
+; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
+; LINUX: cmpl
+; LINUX: seta
+; LINUX: cmpl
+; LINUX: seta
+; LINUX: cmovne
+; LINUX: cmovne
+; LINUX: lock
+; LINUX-NEXT: cmpxchg8b
+; LINUX: jne [[LABEL]]
+  ret void
+}
+
+; rdar://12453106
+@id = internal global i64 0, align 8
+
+define void @tf_bug(i8* %ptr) nounwind {
+; PIC: tf_bug:
+; PIC: movl _id-L1$pb(
+; PIC: movl (_id-L1$pb)+4(
+  %tmp1 = atomicrmw add i64* @id, i64 1 seq_cst
+  %tmp2 = add i64 %tmp1, 1
+  %tmp3 = bitcast i8* %ptr to i64*
+  store i64 %tmp2, i64* %tmp3, align 4
   ret void
 }
-- 
2.34.1
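
For context (a sketch, not part of the original patch): EmitAtomicLoadArith6432 expands a 64-bit atomicrmw on x86-32 into a cmpxchg8b loop, loading the old value as two 32-bit halves. The hi-half load reuses the lo-half address operands with the displacement bumped by 4 via MachineInstrBuilder::addDisp, which at this revision copies the operand but not its target flags (e.g. X86II::MO_PIC_BASE_OFFSET on Darwin PIC code), so the hi half lost its picbase reference. Below is a minimal restatement of the fixed operand-copy loop, assuming the in-function context from the hunk above (MI, MIB, and MemOpndSlot in scope); it is not a drop-in fragment on its own:

    // Mirrors the patched loop: copy the five x86 address operands
    // (base, scale, index, disp, segment) from the pseudo MI onto the
    // hi-half load being built by MIB.
    for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
      const MachineOperand &Src = MI->getOperand(MemOpndSlot + i);
      if (i == X86::AddrDisp) {
        // Copy the displacement operand with +4 to address the hi dword.
        MIB.addDisp(Src, 4); // 4 == sizeof(i32)
        // addDisp does not carry over Src's target flags at this revision,
        // so re-apply them to the operand just appended.
        MIB->getOperand(MIB->getNumOperands() - 1)
            .setTargetFlags(Src.getTargetFlags());
      } else
        MIB.addOperand(Src); // base, scale, index, segment pass through
    }

Without the setTargetFlags() call, the PIC test above would emit "movl _id+4(" for the hi half instead of the expected "movl (_id-L1$pb)+4(".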