1 ; RUN: llc -march=mipsel -mcpu=mips2 < %s | FileCheck %s
4 declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
5 declare i32 @llvm.atomic.load.nand.i32.p0i32(i32* nocapture, i32) nounwind
6 declare i32 @llvm.atomic.swap.i32.p0i32(i32* nocapture, i32) nounwind
7 declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* nocapture, i32, i32) nounwind
9 declare i8 @llvm.atomic.load.add.i8.p0i8(i8* nocapture, i8) nounwind
10 declare i8 @llvm.atomic.load.sub.i8.p0i8(i8* nocapture, i8) nounwind
11 declare i8 @llvm.atomic.load.nand.i8.p0i8(i8* nocapture, i8) nounwind
12 declare i8 @llvm.atomic.swap.i8.p0i8(i8* nocapture, i8) nounwind
13 declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* nocapture, i8, i8) nounwind
; Shared 32-bit cell operated on by all of the i32 atomic tests below.
16 @x = common global i32 0, align 4
; Atomic add of %incr into @x; on MIPS2 this lowers to an ll/sc retry loop.
; NOTE(review): the function's ret/closing brace are not visible in this chunk.
18 define i32 @AtomicLoadAdd32(i32 %incr) nounwind {
20 %0 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* @x, i32 %incr)
; Expected lowering: fetch &x from the GOT, then loop — load-linked the old
; value, addu the increment ($4 = first integer arg), store-conditional the
; sum, and branch back to the loop head if the sc failed (wrote zero).
23 ; CHECK: AtomicLoadAdd32:
24 ; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
25 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
26 ; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
27 ; CHECK: addu $[[R2:[0-9]+]], $[[R1]], $4
28 ; CHECK: sc $[[R2]], 0($[[R0]])
29 ; CHECK: beq $[[R2]], $zero, $[[BB0]]
; Atomic nand of %incr into @x: new value is ~(old & incr), expressed as
; and followed by nor with $zero inside the ll/sc loop.
; NOTE(review): the function's ret/closing brace are not visible in this chunk.
32 define i32 @AtomicLoadNand32(i32 %incr) nounwind {
34 %0 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* @x, i32 %incr)
; Same loop shape as AtomicLoadAdd32, with and+nor computing the nand.
37 ; CHECK: AtomicLoadNand32:
38 ; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
39 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
40 ; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
41 ; CHECK: and $[[R3:[0-9]+]], $[[R1]], $4
42 ; CHECK: nor $[[R2:[0-9]+]], $zero, $[[R3]]
43 ; CHECK: sc $[[R2]], 0($[[R0]])
44 ; CHECK: beq $[[R2]], $zero, $[[BB0]]
; Atomic exchange: store %newval into @x, returning the previous value.
; %newval is round-tripped through a stack slot (alloca/store/load) first.
; NOTE(review): the function's ret/closing brace are not visible in this chunk.
47 define i32 @AtomicSwap32(i32 %newval) nounwind {
49 %newval.addr = alloca i32, align 4
50 store i32 %newval, i32* %newval.addr, align 4
51 %tmp = load i32* %newval.addr, align 4
52 %0 = call i32 @llvm.atomic.swap.i32.p0i32(i32* @x, i32 %tmp)
; The ll destination register is not pinned here; the test only requires
; that the sc result register drives the retry branch.
55 ; CHECK: AtomicSwap32:
56 ; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
57 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
58 ; CHECK: ll ${{[0-9]+}}, 0($[[R0]])
59 ; CHECK: sc $[[R2:[0-9]+]], 0($[[R0]])
60 ; CHECK: beq $[[R2]], $zero, $[[BB0]]
; Atomic compare-and-swap: if @x == %oldval, store %newval; returns the
; value loaded from @x either way.
; NOTE(review): the function's ret/closing brace are not visible in this chunk.
63 define i32 @AtomicCmpSwap32(i32 %oldval, i32 %newval) nounwind {
65 %newval.addr = alloca i32, align 4
66 store i32 %newval, i32* %newval.addr, align 4
67 %tmp = load i32* %newval.addr, align 4
68 %0 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* @x, i32 %oldval, i32 %tmp)
; ll loads directly into $2 (the i32 return register); bne exits the loop
; when the loaded value differs from %oldval ($4), otherwise sc attempts
; the store and the loop retries on sc failure.
71 ; CHECK: AtomicCmpSwap32:
72 ; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
73 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
74 ; CHECK: ll $2, 0($[[R0]])
75 ; CHECK: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
76 ; CHECK: sc $[[R2:[0-9]+]], 0($[[R0]])
77 ; CHECK: beq $[[R2]], $zero, $[[BB0]]
; Shared byte cell operated on by all of the i8 atomic tests below.
83 @y = common global i8 0, align 1
; Atomic i8 add. MIPS has no byte-wide ll/sc, so the expansion operates on
; the aligned word containing @y and isolates the addressed byte lane.
; NOTE(review): the function's ret/closing brace are not visible in this chunk.
85 define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind {
87 %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @y, i8 %incr)
; Setup: R2 = &y & ~3 (aligned word address); R4 = 8*(&y & 3), the byte's
; bit offset; R6 = 0xff << offset selects the lane; R7 = ~R6; R9 = the
; increment ($4) shifted into the lane.
90 ; CHECK: AtomicLoadAdd8:
91 ; CHECK: lw $[[R0:[0-9]+]], %got(y)($gp)
92 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
93 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
94 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
95 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
96 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
97 ; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
98 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
99 ; CHECK: sll $[[R9:[0-9]+]], $4, $[[R4]]
; Loop: ll the word, add the shifted increment, splice the new byte into
; the old word under the lane mask, sc, and retry on sc failure.
101 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
102 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
103 ; CHECK: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
104 ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
105 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
106 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
107 ; CHECK: sc $[[R14]], 0($[[R2]])
108 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
; Result: extract the original byte and sign-extend it via sll/sra by 24.
110 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
111 ; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
112 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
113 ; CHECK: sra $2, $[[R17]], 24
; Atomic i8 subtract — identical word-lane expansion to AtomicLoadAdd8,
; with subu instead of addu inside the ll/sc loop.
; NOTE(review): the function's ret/closing brace are not visible in this chunk.
116 define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind {
118 %0 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @y, i8 %incr)
; Setup: aligned word address, byte bit-offset, lane mask and its inverse,
; and the operand shifted into the lane.
121 ; CHECK: AtomicLoadSub8:
122 ; CHECK: lw $[[R0:[0-9]+]], %got(y)($gp)
123 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
124 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
125 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
126 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
127 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
128 ; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
129 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
130 ; CHECK: sll $[[R9:[0-9]+]], $4, $[[R4]]
; Loop: ll, subu the shifted operand, merge the new byte under the mask,
; sc, retry on failure.
132 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
133 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
134 ; CHECK: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
135 ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
136 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
137 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
138 ; CHECK: sc $[[R14]], 0($[[R2]])
139 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
; Result: extract the original byte and sign-extend via sll/sra by 24.
141 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
142 ; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
143 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
144 ; CHECK: sra $2, $[[R17]], 24
; Atomic i8 nand — same word-lane expansion, with the nand computed as
; and followed by nor with $zero inside the ll/sc loop.
; NOTE(review): the function's ret/closing brace are not visible in this chunk.
147 define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind {
149 %0 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @y, i8 %incr)
; Setup: aligned word address, byte bit-offset, lane mask and its inverse,
; and the operand shifted into the lane.
152 ; CHECK: AtomicLoadNand8:
153 ; CHECK: lw $[[R0:[0-9]+]], %got(y)($gp)
154 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
155 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
156 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
157 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
158 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
159 ; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
160 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
161 ; CHECK: sll $[[R9:[0-9]+]], $4, $[[R4]]
; Loop: ll, and+nor to form ~(old & operand) in the lane, merge under the
; mask, sc, retry on failure.
163 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
164 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
165 ; CHECK: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
166 ; CHECK: nor $[[R11:[0-9]+]], $zero, $[[R18]]
167 ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
168 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
169 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
170 ; CHECK: sc $[[R14]], 0($[[R2]])
171 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
; Result: extract the original byte and sign-extend via sll/sra by 24.
173 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
174 ; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
175 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
176 ; CHECK: sra $2, $[[R17]], 24
; Atomic i8 exchange — word-lane expansion; the loop simply replaces the
; addressed byte lane with the shifted new value (no arithmetic on old).
; NOTE(review): the function's ret/closing brace are not visible in this chunk.
179 define signext i8 @AtomicSwap8(i8 signext %newval) nounwind {
181 %0 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @y, i8 %newval)
; Setup: aligned word address, byte bit-offset, lane mask and its inverse,
; and the new value shifted into the lane.
184 ; CHECK: AtomicSwap8:
185 ; CHECK: lw $[[R0:[0-9]+]], %got(y)($gp)
186 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
187 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
188 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
189 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
190 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
191 ; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
192 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
193 ; CHECK: sll $[[R9:[0-9]+]], $4, $[[R4]]
; Loop: ll the word, clear the lane from the old word, or in the shifted
; new byte, sc, retry on failure.
195 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
196 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
197 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
198 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R9]]
199 ; CHECK: sc $[[R14]], 0($[[R2]])
200 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
; Result: extract the original byte and sign-extend via sll/sra by 24.
202 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
203 ; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
204 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
205 ; CHECK: sra $2, $[[R17]], 24
; Atomic i8 compare-and-swap — word-lane expansion: compare only the
; addressed byte lane, and if equal splice in the new byte.
; NOTE(review): the function's ret/closing brace are not visible in this chunk.
208 define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
210 %0 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @y, i8 %oldval, i8 %newval)
; Setup: aligned word address, byte bit-offset, lane mask and its inverse;
; both %oldval ($4) and %newval ($5) are truncated to 8 bits (andi 255)
; and shifted into the lane before the loop.
213 ; CHECK: AtomicCmpSwap8:
214 ; CHECK: lw $[[R0:[0-9]+]], %got(y)($gp)
215 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
216 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
217 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
218 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
219 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
220 ; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
221 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
222 ; CHECK: andi $[[R8:[0-9]+]], $4, 255
223 ; CHECK: sll $[[R9:[0-9]+]], $[[R8]], $[[R4]]
224 ; CHECK: andi $[[R10:[0-9]+]], $5, 255
225 ; CHECK: sll $[[R11:[0-9]+]], $[[R10]], $[[R4]]
; Loop: ll the word, mask out the lane, bne exits when the lane differs
; from the shifted %oldval; otherwise splice in the new byte, sc, and
; retry on sc failure.
227 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
228 ; CHECK: ll $[[R12:[0-9]+]], 0($[[R2]])
229 ; CHECK: and $[[R13:[0-9]+]], $[[R12]], $[[R6]]
230 ; CHECK: bne $[[R13]], $[[R9]], $[[BB1:[A-Z_0-9]+]]
232 ; CHECK: and $[[R14:[0-9]+]], $[[R12]], $[[R7]]
233 ; CHECK: or $[[R15:[0-9]+]], $[[R14]], $[[R11]]
234 ; CHECK: sc $[[R15]], 0($[[R2]])
235 ; CHECK: beq $[[R15]], $zero, $[[BB0]]
; Result: shift the loaded lane back down and sign-extend via sll/sra 24.
238 ; CHECK: srl $[[R16:[0-9]+]], $[[R13]], $[[R4]]
239 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
240 ; CHECK: sra $2, $[[R17]], 24