; RUN: llc < %s -march=x86-64 | FileCheck %s
; The or immediate 2147483648 (0x80000000) does not fit in a sign-extended
; 32-bit field, so it must be materialized in a register before the locked or.
define void @t1(i64* %p, i32 %b) nounwind {
entry:
  %p.addr = alloca i64*, align 8
  store i64* %p, i64** %p.addr, align 8
  %tmp = load i64** %p.addr, align 8
  call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
; CHECK: t1:
; CHECK: movl $2147483648, %eax
; CHECK: lock
; CHECK-NEXT: orq %r{{.*}}, (%r{{.*}})
  %0 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %tmp, i64 2147483648)
  call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
  ret void
}
; The or immediate 2147483644 fits in a sign-extended 32-bit field, so the
; locked or can use it directly with no intermediate register.
define void @t2(i64* %p, i32 %b) nounwind {
entry:
  %p.addr = alloca i64*, align 8
  store i64* %p, i64** %p.addr, align 8
  %tmp = load i64** %p.addr, align 8
  call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
; CHECK: t2:
; CHECK: lock
; CHECK-NEXT: orq $2147483644, (%r{{.*}})
  %0 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %tmp, i64 2147483644)
  call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
  ret void
}
; Legacy (pre-LLVM-3.0) atomic intrinsics used by the functions above;
; later LLVM versions replace these with the atomicrmw/fence instructions.
declare i64 @llvm.atomic.load.or.i64.p0i64(i64* nocapture, i64) nounwind
declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind