1 ; RUN: llc < %s -march=x86-64 | FileCheck %s
5 define void @sub1(i32* nocapture %p, i32 %v) nounwind ssp {
9 %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0]
13 define void @inc4(i64* nocapture %p) nounwind ssp {
17 %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
; NOTE(review): legacy (pre-LLVM 3.0) atomic read-modify-write add intrinsic
; on a 64-bit location — superseded by `atomicrmw add` in modern IR; confirm
; against the LLVM version this test targets.
21 declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
23 define void @add8(i64* nocapture %p) nounwind ssp {
27 %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0]
31 define void @add4(i64* nocapture %p, i32 %v) nounwind ssp {
35 %0 = sext i32 %v to i64 ; <i64> [#uses=1]
36 %1 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0]
40 define void @inc3(i8* nocapture %p) nounwind ssp {
44 %0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0]
; NOTE(review): legacy atomic add intrinsic, 8-bit variant — superseded by
; `atomicrmw add` in modern IR; confirm against the targeted LLVM version.
48 declare i8 @llvm.atomic.load.add.i8.p0i8(i8* nocapture, i8) nounwind
50 define void @add7(i8* nocapture %p) nounwind ssp {
54 %0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0]
58 define void @add3(i8* nocapture %p, i32 %v) nounwind ssp {
62 %0 = trunc i32 %v to i8 ; <i8> [#uses=1]
63 %1 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0]
67 define void @inc2(i16* nocapture %p) nounwind ssp {
71 %0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0]
; NOTE(review): legacy atomic add intrinsic, 16-bit variant — superseded by
; `atomicrmw add` in modern IR; confirm against the targeted LLVM version.
75 declare i16 @llvm.atomic.load.add.i16.p0i16(i16* nocapture, i16) nounwind
77 define void @add6(i16* nocapture %p) nounwind ssp {
81 %0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0]
85 define void @add2(i16* nocapture %p, i32 %v) nounwind ssp {
89 %0 = trunc i32 %v to i16 ; <i16> [#uses=1]
90 %1 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0]
94 define void @inc1(i32* nocapture %p) nounwind ssp {
98 %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0]
; NOTE(review): legacy atomic add intrinsic, 32-bit variant — superseded by
; `atomicrmw add` in modern IR; confirm against the targeted LLVM version.
102 declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
104 define void @add5(i32* nocapture %p) nounwind ssp {
108 %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0]
112 define void @add1(i32* nocapture %p, i32 %v) nounwind ssp {
116 %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0]
120 define void @dec4(i64* nocapture %p) nounwind ssp {
124 %0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
; NOTE(review): legacy atomic subtract intrinsic, 64-bit variant — superseded
; by `atomicrmw sub` in modern IR; confirm against the targeted LLVM version.
128 declare i64 @llvm.atomic.load.sub.i64.p0i64(i64* nocapture, i64) nounwind
130 define void @sub8(i64* nocapture %p) nounwind ssp {
134 %0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0]
138 define void @sub4(i64* nocapture %p, i32 %v) nounwind ssp {
142 %0 = sext i32 %v to i64 ; <i64> [#uses=1]
143 %1 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0]
147 define void @dec3(i8* nocapture %p) nounwind ssp {
151 %0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0]
; NOTE(review): legacy atomic subtract intrinsic, 8-bit variant — superseded
; by `atomicrmw sub` in modern IR; confirm against the targeted LLVM version.
155 declare i8 @llvm.atomic.load.sub.i8.p0i8(i8* nocapture, i8) nounwind
157 define void @sub7(i8* nocapture %p) nounwind ssp {
161 %0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0]
165 define void @sub3(i8* nocapture %p, i32 %v) nounwind ssp {
169 %0 = trunc i32 %v to i8 ; <i8> [#uses=1]
170 %1 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0]
174 define void @dec2(i16* nocapture %p) nounwind ssp {
178 %0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0]
; NOTE(review): legacy atomic subtract intrinsic, 16-bit variant — superseded
; by `atomicrmw sub` in modern IR; confirm against the targeted LLVM version.
182 declare i16 @llvm.atomic.load.sub.i16.p0i16(i16* nocapture, i16) nounwind
184 define void @sub6(i16* nocapture %p) nounwind ssp {
188 %0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0]
192 define void @sub2(i16* nocapture %p, i32 %v) nounwind ssp {
196 %0 = trunc i32 %v to i16 ; <i16> [#uses=1]
197 %1 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0]
201 define void @dec1(i32* nocapture %p) nounwind ssp {
205 %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0]
; NOTE(review): legacy atomic subtract intrinsic, 32-bit variant — superseded
; by `atomicrmw sub` in modern IR; confirm against the targeted LLVM version.
209 declare i32 @llvm.atomic.load.sub.i32.p0i32(i32* nocapture, i32) nounwind
211 define void @sub5(i32* nocapture %p) nounwind ssp {
215 %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0]