; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
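
; This file checks that shl/lshr/ashr feeding an add, sub, cmp or cmn are
; folded into the shifted-register operand form of the corresponding AArch64
; instruction, and that the fold is skipped where the shifted-register form
; cannot express the operation or would change the flag results.

@var32 = global i32 0
@var64 = global i64 0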

define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsl_arith:

  %rhs1 = load volatile i32* @var32
  %shift1 = shl i32 %rhs1, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18

  %rhs2 = load volatile i32* @var32
  %shift2 = shl i32 %rhs2, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31

  %rhs3 = load volatile i32* @var32
  %shift3 = shl i32 %rhs3, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5

; Subtraction is not commutative!
  %rhs4 = load volatile i32* @var32
  %shift4 = shl i32 %rhs4, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19
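; (The shifted-register forms of add/sub only allow the shift on the second
; source register, so a shift feeding the minuend cannot be folded; the same
; applies to the lshr/ashr cases later in this file.)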

  %lhs4a = load volatile i32* @var32
  %shift4a = shl i32 %lhs4a, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK-AARCH64: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
; CHECK-ARM64: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsl #15
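; (neg is the standard alias for sub with wzr/xzr as the first source register,
; so the two RUN configurations only differ in whether the alias is printed.)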

  %rhs5 = load volatile i64* @var64
  %shift5 = shl i64 %rhs5, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18

  %rhs6 = load volatile i64* @var64
  %shift6 = shl i64 %rhs6, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31

  %rhs7 = load volatile i64* @var64
  %shift7 = shl i64 %rhs7, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5

; Subtraction is not commutative!
  %rhs8 = load volatile i64* @var64
  %shift8 = shl i64 %rhs8, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19

  %lhs8a = load volatile i64* @var64
  %shift8a = shl i64 %lhs8a, 60
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK-AARCH64: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
; CHECK-ARM64: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsl #60

  ret void
}

define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsr_arith:

  %shift1 = lshr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18

  %shift2 = lshr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31

  %shift3 = lshr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5

; Subtraction is not commutative!
  %shift4 = lshr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19

  %shift4a = lshr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK-AARCH64: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
; CHECK-ARM64: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsr #15

  %shift5 = lshr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18

  %shift6 = lshr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31

  %shift7 = lshr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5

; Subtraction is not commutative!
  %shift8 = lshr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19

  %shift8a = lshr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK-AARCH64: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
; CHECK-ARM64: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsr #45

  ret void
}

define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_asr_arith:

  %shift1 = ashr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18

  %shift2 = ashr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31

  %shift3 = ashr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5

; Subtraction is not commutative!
  %shift4 = ashr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19

  %shift4a = ashr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK-AARCH64: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15
; CHECK-ARM64: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, asr #15

  %shift5 = ashr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18

  %shift6 = ashr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31

  %shift7 = ashr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5

; Subtraction is not commutative!
  %shift8 = ashr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19

  %shift8a = ashr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK-AARCH64: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45
; CHECK-ARM64: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, asr #45

  ret void
}

define i32 @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_cmp:
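; Each icmp below compares against a shifted value, so the shift should fold
; into the cmp's shifted-register operand; the branches just keep the results
; of the comparisons live.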

  %shift1 = shl i32 %rhs32, 13
  %tst1 = icmp uge i32 %lhs32, %shift1
  br i1 %tst1, label %t2, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13

t2:
  %shift2 = lshr i32 %rhs32, 20
  %tst2 = icmp ne i32 %lhs32, %shift2
  br i1 %tst2, label %t3, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20

t3:
  %shift3 = ashr i32 %rhs32, 9
  %tst3 = icmp ne i32 %lhs32, %shift3
  br i1 %tst3, label %t4, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9

t4:
  %shift4 = shl i64 %rhs64, 43
  %tst4 = icmp uge i64 %lhs64, %shift4
  br i1 %tst4, label %t5, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43

t5:
  %shift5 = lshr i64 %rhs64, 20
  %tst5 = icmp ne i64 %lhs64, %shift5
  br i1 %tst5, label %t6, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20

t6:
  %shift6 = ashr i64 %rhs64, 59
  %tst6 = icmp ne i64 %lhs64, %shift6
  br i1 %tst6, label %t7, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59

define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_cmn:
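; cmn a, b is the alias for adds wzr/xzr, a, b: it sets the flags for a + b,
; which is why a comparison against a negated, shifted value can usually be
; done in a single instruction.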

  %shift1 = shl i32 %rhs32, 13
  %val1 = sub i32 0, %shift1
  %tst1 = icmp uge i32 %lhs32, %val1
  br i1 %tst1, label %t2, label %end
; Important that this isn't lowered to a cmn instruction because if %rhs32 ==
; 0 then the results will differ.
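; (For a zero operand, cmp sets the carry flag because a - 0 produces no
; borrow, while cmn clears it because a + 0 produces no carry, so an unsigned
; predicate like uge would read different flags.)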
; CHECK-AARCH64: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
; CHECK-ARM64: sub [[RHS:w[0-9]+]], wzr, {{w[0-9]+}}, lsl #13
; CHECK: cmp {{w[0-9]+}}, [[RHS]]

t2:
  %shift2 = lshr i32 %rhs32, 20
  %val2 = sub i32 0, %shift2
  %tst2 = icmp ne i32 %lhs32, %val2
  br i1 %tst2, label %t3, label %end
; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, lsr #20

t3:
  %shift3 = ashr i32 %rhs32, 9
  %val3 = sub i32 0, %shift3
  %tst3 = icmp eq i32 %lhs32, %val3
  br i1 %tst3, label %t4, label %end
; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, asr #9

t4:
  %shift4 = shl i64 %rhs64, 43
  %val4 = sub i64 0, %shift4
  %tst4 = icmp slt i64 %lhs64, %val4
  br i1 %tst4, label %t5, label %end
; Again, it's important that cmn isn't used here in case %rhs64 == 0.
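; (slt reads the N and V flags, which a cmn-based sequence does not set
; identically in all corner cases, so a separate negate and cmp is expected.)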
; CHECK-AARCH64: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
; CHECK-ARM64: sub [[RHS:x[0-9]+]], xzr, {{x[0-9]+}}, lsl #43
; CHECK: cmp {{x[0-9]+}}, [[RHS]]

t5:
  %shift5 = lshr i64 %rhs64, 20
  %val5 = sub i64 0, %shift5
  %tst5 = icmp ne i64 %lhs64, %val5
  br i1 %tst5, label %t6, label %end
; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, lsr #20

t6:
  %shift6 = ashr i64 %rhs64, 59
  %val6 = sub i64 0, %shift6
  %tst6 = icmp ne i64 %lhs64, %val6
  br i1 %tst6, label %t7, label %end
; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, asr #59