; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-machineinstrs | FileCheck %s

; Make sure that flags are properly preserved despite atomic optimizations.
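
; Background for readers of this test (a sketch of the mechanism, not
; something FileCheck verifies): on x86, a "load atomic; add; store atomic"
; sequence on the same address can be folded into a single memory-operand
; RMW instruction such as "incb (%mem)" or "addb $imm, (%mem)". Those
; instructions rewrite EFLAGS, so if an earlier comparison's flags are still
; live across the fold, the backend must rematerialize the comparison.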

define i32 @atomic_and_flags_1(i8* %p, i32 %a, i32 %b) {
; CHECK-LABEL: atomic_and_flags_1:

  ; Generate flags value, and use it.
  ; CHECK:      cmpl
  ; CHECK-NEXT: jne
  %cmp = icmp eq i32 %a, %b
  br i1 %cmp, label %L1, label %L2

L1:
  ; The following pattern will get folded.
  ; CHECK: incb
  %1 = load atomic i8, i8* %p seq_cst, align 1
  %2 = add i8 %1, 1 ; This forces the INC instruction to be generated.
  store atomic i8 %2, i8* %p release, align 1
  ; Use the comparison result again. We need to rematerialize the comparison
  ; somehow. This test checks that cmpl gets emitted again, but any
  ; rematerialization would work (the optimizer used to clobber the flags with
  ; the add).
  ; CHECK:      cmpl
  ; CHECK-NEXT: jne
  br i1 %cmp, label %L3, label %L4

L2:
  ret i32 2
L3:
  ret i32 3
L4:
  ret i32 4
}
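
; Roughly the x86-64 codegen the function above should produce (register
; names and block labels here are illustrative, assuming the SysV ABI;
; FileCheck only pins the mnemonics checked above):
;   cmpl %edx, %esi   # flags from %a == %b
;   jne  .LBB0_2
;   incb (%rdi)       # folded atomic load/inc/store; rewrites EFLAGS
;   cmpl %edx, %esi   # comparison rematerialized, flags were clobbered
;   jne  .LBB0_4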

; Same as above, but using 2 as the immediate to avoid the INC instruction.
define i32 @atomic_and_flags_2(i8* %p, i32 %a, i32 %b) {
; CHECK-LABEL: atomic_and_flags_2:
  ; CHECK:      cmpl
  ; CHECK-NEXT: jne
  %cmp = icmp eq i32 %a, %b
  br i1 %cmp, label %L1, label %L2

L1:
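  ; As in the first function, the load/add/store below is expected to fold
  ; into a single memory-operand add (an ADDB with immediate 2), which also
  ; rewrites EFLAGS.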
  ; CHECK: addb
  %1 = load atomic i8, i8* %p seq_cst, align 1
  %2 = add i8 %1, 2
  store atomic i8 %2, i8* %p release, align 1

  ; CHECK:      cmpl
  ; CHECK-NEXT: jne
  br i1 %cmp, label %L3, label %L4

L2:
  ret i32 2
L3:
  ret i32 3
L4:
  ret i32 4
}