1 ; RUN: llc -march=mipsel --disable-machine-licm < %s | FileCheck %s
; Shared i32 operated on by all of the 32-bit atomic tests below.
3 @x = common global i32 0, align 4
; atomicrmw add on an i32: must lower to a load-linked/store-conditional
; retry loop -- ll, addu of the incoming value ($4), sc, then a branch back
; to the loop head when sc reports failure (result register equal to $zero).
5 define i32 @AtomicLoadAdd32(i32 %incr) nounwind {
7 %0 = atomicrmw add i32* @x, i32 %incr monotonic
10 ; CHECK: AtomicLoadAdd32:
11 ; CHECK: lw $[[R0:[0-9]+]], %got(x)
12 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
13 ; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
14 ; CHECK: addu $[[R2:[0-9]+]], $[[R1]], $4
15 ; CHECK: sc $[[R2]], 0($[[R0]])
16 ; CHECK: beq $[[R2]], $zero, $[[BB0]]
; atomicrmw nand on an i32.  Mips has no nand instruction, so the loop body
; must synthesize it as and followed by nor-with-$zero (bitwise complement)
; before the sc.
19 define i32 @AtomicLoadNand32(i32 %incr) nounwind {
21 %0 = atomicrmw nand i32* @x, i32 %incr monotonic
24 ; CHECK: AtomicLoadNand32:
25 ; CHECK: lw $[[R0:[0-9]+]], %got(x)
26 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
27 ; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
28 ; CHECK: and $[[R3:[0-9]+]], $[[R1]], $4
29 ; CHECK: nor $[[R2:[0-9]+]], $zero, $[[R3]]
30 ; CHECK: sc $[[R2]], 0($[[R0]])
31 ; CHECK: beq $[[R2]], $zero, $[[BB0]]
; atomicrmw xchg on an i32.  The new value is first spilled through an
; alloca (store + load) so the swapped-in operand reaches the loop via a
; register the pattern does not pin down; only the ll/sc/beq skeleton is
; checked here.
34 define i32 @AtomicSwap32(i32 %newval) nounwind {
36 %newval.addr = alloca i32, align 4
37 store i32 %newval, i32* %newval.addr, align 4
38 %tmp = load i32* %newval.addr, align 4
39 %0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
42 ; CHECK: AtomicSwap32:
43 ; CHECK: lw $[[R0:[0-9]+]], %got(x)
44 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
45 ; CHECK: ll ${{[0-9]+}}, 0($[[R0]])
46 ; CHECK: sc $[[R2:[0-9]+]], 0($[[R0]])
47 ; CHECK: beq $[[R2]], $zero, $[[BB0]]
; cmpxchg on an i32.  The loaded value must land in $2 (the Mips return
; register, since the old value is the function result), be compared to the
; expected value in $4, and branch out of the loop on mismatch before the
; conditional store.
50 define i32 @AtomicCmpSwap32(i32 %oldval, i32 %newval) nounwind {
52 %newval.addr = alloca i32, align 4
53 store i32 %newval, i32* %newval.addr, align 4
54 %tmp = load i32* %newval.addr, align 4
55 %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic
58 ; CHECK: AtomicCmpSwap32:
59 ; CHECK: lw $[[R0:[0-9]+]], %got(x)
60 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
61 ; CHECK: ll $2, 0($[[R0]])
62 ; CHECK: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
63 ; CHECK: sc $[[R2:[0-9]+]], 0($[[R0]])
64 ; CHECK: beq $[[R2]], $zero, $[[BB0]]
; Shared i8 operated on by the subword (8-bit) atomic tests below.
70 @y = common global i8 0, align 1
; atomicrmw add on an i8.  ll/sc only operate on aligned words, so the
; lowering must: mask the address down to its containing word (and with -4),
; compute the byte's bit offset (low 2 address bits * 8), build a 0xff mask
; shifted into position plus its complement, and shift the operand into the
; byte lane -- all hoisted before the loop.
72 define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind {
74 %0 = atomicrmw add i8* @y, i8 %incr monotonic
77 ; CHECK: AtomicLoadAdd8:
78 ; CHECK: lw $[[R0:[0-9]+]], %got(y)
79 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
80 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
81 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
82 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
83 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
84 ; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
85 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
86 ; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
; Loop: add in the shifted lane, splice the new byte into the untouched
; bytes of the word, and retry until sc succeeds.
88 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
89 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
90 ; CHECK: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
91 ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
92 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
93 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
94 ; CHECK: sc $[[R14]], 0($[[R2]])
95 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
; After the loop: extract the original byte and sign-extend it into $2
; (shift left 24 then arithmetic shift right 24) for the signext i8 return.
97 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
98 ; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
99 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
100 ; CHECK: sra $2, $[[R17]], 24
; atomicrmw sub on an i8.  Identical word-masking setup and extraction as
; AtomicLoadAdd8; the only difference is subu instead of addu inside the
; ll/sc loop.
103 define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind {
105 %0 = atomicrmw sub i8* @y, i8 %incr monotonic
108 ; CHECK: AtomicLoadSub8:
109 ; CHECK: lw $[[R0:[0-9]+]], %got(y)
110 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
111 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
112 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
113 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
114 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
115 ; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
116 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
117 ; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
119 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
120 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
121 ; CHECK: subu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
122 ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
123 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
124 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
125 ; CHECK: sc $[[R14]], 0($[[R2]])
126 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
; Extract and sign-extend the original byte into the $2 return register.
128 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
129 ; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
130 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
131 ; CHECK: sra $2, $[[R17]], 24
; atomicrmw nand on an i8.  Same subword masking scheme as the other 8-bit
; tests; inside the loop the nand is synthesized as and followed by
; nor-with-$zero before the byte is spliced back into the word.
134 define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind {
136 %0 = atomicrmw nand i8* @y, i8 %incr monotonic
139 ; CHECK: AtomicLoadNand8:
140 ; CHECK: lw $[[R0:[0-9]+]], %got(y)
141 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
142 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
143 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
144 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
145 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
146 ; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
147 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
148 ; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
150 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
151 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
152 ; CHECK: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
153 ; CHECK: nor $[[R11:[0-9]+]], $zero, $[[R18]]
154 ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
155 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
156 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
157 ; CHECK: sc $[[R14]], 0($[[R2]])
158 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
; Extract and sign-extend the original byte into the $2 return register.
160 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
161 ; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
162 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
163 ; CHECK: sra $2, $[[R17]], 24
; atomicrmw xchg on an i8.  No arithmetic in the loop: the pre-shifted new
; byte (masked into its lane) is simply or-ed with the other three bytes of
; the loaded word and stored conditionally.
166 define signext i8 @AtomicSwap8(i8 signext %newval) nounwind {
168 %0 = atomicrmw xchg i8* @y, i8 %newval monotonic
171 ; CHECK: AtomicSwap8:
172 ; CHECK: lw $[[R0:[0-9]+]], %got(y)
173 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
174 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
175 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
176 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
177 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
178 ; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
179 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
180 ; CHECK: sllv $[[R9:[0-9]+]], $4, $[[R4]]
182 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
183 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
184 ; CHECK: and $[[R18:[0-9]+]], $[[R9]], $[[R6]]
185 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
186 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R18]]
187 ; CHECK: sc $[[R14]], 0($[[R2]])
188 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
; Extract and sign-extend the original byte into the $2 return register.
190 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
191 ; CHECK: srlv $[[R16:[0-9]+]], $[[R15]], $[[R4]]
192 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
193 ; CHECK: sra $2, $[[R17]], 24
; cmpxchg on an i8.  Both the expected ($4) and new ($5) values are
; truncated to 8 bits (andi ..., 255) and shifted into the byte lane before
; the loop; inside it the loaded byte is masked out and compared, branching
; past the sc on mismatch.
196 define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
198 %0 = cmpxchg i32* @y, i8 %oldval, i8 %newval monotonic
201 ; CHECK: AtomicCmpSwap8:
202 ; CHECK: lw $[[R0:[0-9]+]], %got(y)
203 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
204 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
205 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
206 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
207 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
208 ; CHECK: sllv $[[R6:[0-9]+]], $[[R5]], $[[R4]]
209 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
210 ; CHECK: andi $[[R8:[0-9]+]], $4, 255
211 ; CHECK: sllv $[[R9:[0-9]+]], $[[R8]], $[[R4]]
212 ; CHECK: andi $[[R10:[0-9]+]], $5, 255
213 ; CHECK: sllv $[[R11:[0-9]+]], $[[R10]], $[[R4]]
215 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
216 ; CHECK: ll $[[R12:[0-9]+]], 0($[[R2]])
217 ; CHECK: and $[[R13:[0-9]+]], $[[R12]], $[[R6]]
218 ; CHECK: bne $[[R13]], $[[R9]], $[[BB1:[A-Z_0-9]+]]
220 ; CHECK: and $[[R14:[0-9]+]], $[[R12]], $[[R7]]
221 ; CHECK: or $[[R15:[0-9]+]], $[[R14]], $[[R11]]
222 ; CHECK: sc $[[R15]], 0($[[R2]])
223 ; CHECK: beq $[[R15]], $zero, $[[BB0]]
; Extract and sign-extend the original byte into the $2 return register.
226 ; CHECK: srlv $[[R16:[0-9]+]], $[[R13]], $[[R4]]
227 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
228 ; CHECK: sra $2, $[[R17]], 24
; i32 used by the seq_cst ordering test below.
231 @countsint = common global i32 0, align 4
; seq_cst atomicrmw add -- unlike the monotonic tests above this requires
; memory-barrier instructions around the ll/sc loop.
; NOTE(review): the CHECK lines for this function are not visible in this
; chunk of the file -- presumably they verify sync placement; confirm
; against the full test.
233 define i32 @CheckSync(i32 %v) nounwind noinline {
235 %0 = atomicrmw add i32* @countsint, i32 %v seq_cst
246 ; make sure that this assertion in
247 ; TwoAddressInstructionPass::TryInstructionTransform does not fail:
249 ; line 1203: assert(TargetRegisterInfo::isVirtualRegister(regB) &&
251 ; it failed when MipsDAGToDAGISel::ReplaceUsesWithZeroReg replaced an
252 ; operand of an atomic instruction with register $zero.
; External (zero-address-unknown) i32 used by the regression test that
; follows; being external forces a GOT load rather than a known constant.
253 @a = external global i32
255 define i32 @zeroreg() nounwind {
257 %0 = cmpxchg i32* @a, i32 1, i32 0 seq_cst
258 %1 = icmp eq i32 %0, 1
259 %conv = zext i1 %1 to i32