1 ; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
6 define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
7 ; CHECK-LABEL: test_lsl_arith:
; Each add/sub below consumes the result of an shl, so the backend should fold
; the shift into the instruction's shifted-register operand form ("lsl #imm").
; The volatile loads/stores of @var32/@var64 pin each operation in place so
; none of them can be merged or optimized away.
; NOTE(review): @var32/@var64 declarations and this function's terminating
; "ret void"/"}" lie outside this excerpt (gap in the original line numbers).
9 %rhs1 = load volatile i32* @var32
10 %shift1 = shl i32 %rhs1, 18
11 %val1 = add i32 %lhs32, %shift1
12 store volatile i32 %val1, i32* @var32
13 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18
; Shifted operand on the left of a commutative add must fold too.
15 %rhs2 = load volatile i32* @var32
16 %shift2 = shl i32 %rhs2, 31
17 %val2 = add i32 %shift2, %lhs32
18 store volatile i32 %val2, i32* @var32
19 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
21 %rhs3 = load volatile i32* @var32
22 %shift3 = shl i32 %rhs3, 5
23 %val3 = sub i32 %lhs32, %shift3
24 store volatile i32 %val3, i32* @var32
25 ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5
27 ; Subtraction is not commutative!
; (shift4 - lhs32) must NOT become "sub lhs32, shift4"; the shifted-register
; form only places the shift on the right-hand operand.
28 %rhs4 = load volatile i32* @var32
29 %shift4 = shl i32 %rhs4, 19
30 %val4 = sub i32 %shift4, %lhs32
31 store volatile i32 %val4, i32* @var32
32 ; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19
; "sub 0, x" with a shifted operand should select the neg alias instead.
34 %lhs4a = load volatile i32* @var32
35 %shift4a = shl i32 %lhs4a, 15
36 %val4a = sub i32 0, %shift4a
37 store volatile i32 %val4a, i32* @var32
38 ; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
; Same set of patterns for the 64-bit (x-register) forms.
40 %rhs5 = load volatile i64* @var64
41 %shift5 = shl i64 %rhs5, 18
42 %val5 = add i64 %lhs64, %shift5
43 store volatile i64 %val5, i64* @var64
44 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18
46 %rhs6 = load volatile i64* @var64
47 %shift6 = shl i64 %rhs6, 31
48 %val6 = add i64 %shift6, %lhs64
49 store volatile i64 %val6, i64* @var64
50 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31
52 %rhs7 = load volatile i64* @var64
53 %shift7 = shl i64 %rhs7, 5
54 %val7 = sub i64 %lhs64, %shift7
55 store volatile i64 %val7, i64* @var64
56 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5
58 ; Subtraction is not commutative!
59 %rhs8 = load volatile i64* @var64
60 %shift8 = shl i64 %rhs8, 19
61 %val8 = sub i64 %shift8, %lhs64
62 store volatile i64 %val8, i64* @var64
63 ; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19
65 %lhs8a = load volatile i64* @var64
66 %shift8a = shl i64 %lhs8a, 60
67 %val8a = sub i64 0, %shift8a
68 store volatile i64 %val8a, i64* @var64
69 ; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
75 define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
76 ; CHECK-LABEL: test_lsr_arith:
; Same folding tests as test_lsl_arith, but with logical shift right (lshr),
; which should select the "lsr #imm" shifted-register operand form. Operands
; come straight from the arguments here, so no volatile loads are needed to
; feed the shifts; the volatile stores still keep each result live.
; NOTE(review): this function's "ret void"/"}" lie outside this excerpt.
78 %shift1 = lshr i32 %rhs32, 18
79 %val1 = add i32 %lhs32, %shift1
80 store volatile i32 %val1, i32* @var32
81 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18
; Shifted operand on the left of a commutative add must fold too.
83 %shift2 = lshr i32 %rhs32, 31
84 %val2 = add i32 %shift2, %lhs32
85 store volatile i32 %val2, i32* @var32
86 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31
88 %shift3 = lshr i32 %rhs32, 5
89 %val3 = sub i32 %lhs32, %shift3
90 store volatile i32 %val3, i32* @var32
91 ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5
93 ; Subtraction is not commutative!
; (shift4 - lhs32) must NOT be emitted as a shifted-register sub.
94 %shift4 = lshr i32 %rhs32, 19
95 %val4 = sub i32 %shift4, %lhs32
96 store volatile i32 %val4, i32* @var32
97 ; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19
; "sub 0, x" with a shifted operand should select the neg alias.
99 %shift4a = lshr i32 %lhs32, 15
100 %val4a = sub i32 0, %shift4a
101 store volatile i32 %val4a, i32* @var32
102 ; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
; 64-bit (x-register) variants of the same patterns.
104 %shift5 = lshr i64 %rhs64, 18
105 %val5 = add i64 %lhs64, %shift5
106 store volatile i64 %val5, i64* @var64
107 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18
109 %shift6 = lshr i64 %rhs64, 31
110 %val6 = add i64 %shift6, %lhs64
111 store volatile i64 %val6, i64* @var64
112 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31
114 %shift7 = lshr i64 %rhs64, 5
115 %val7 = sub i64 %lhs64, %shift7
116 store volatile i64 %val7, i64* @var64
117 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5
119 ; Subtraction is not commutative!
120 %shift8 = lshr i64 %rhs64, 19
121 %val8 = sub i64 %shift8, %lhs64
122 store volatile i64 %val8, i64* @var64
123 ; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19
125 %shift8a = lshr i64 %lhs64, 45
126 %val8a = sub i64 0, %shift8a
127 store volatile i64 %val8a, i64* @var64
128 ; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
134 define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
135 ; CHECK-LABEL: test_asr_arith:
; Same folding tests again, now with arithmetic shift right (ashr), which
; should select the "asr #imm" shifted-register operand form of add/sub/neg.
; NOTE(review): this function's "ret void"/"}" lie outside this excerpt.
137 %shift1 = ashr i32 %rhs32, 18
138 %val1 = add i32 %lhs32, %shift1
139 store volatile i32 %val1, i32* @var32
140 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18
; Shifted operand on the left of a commutative add must fold too.
142 %shift2 = ashr i32 %rhs32, 31
143 %val2 = add i32 %shift2, %lhs32
144 store volatile i32 %val2, i32* @var32
145 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31
147 %shift3 = ashr i32 %rhs32, 5
148 %val3 = sub i32 %lhs32, %shift3
149 store volatile i32 %val3, i32* @var32
150 ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5
152 ; Subtraction is not commutative!
; (shift4 - lhs32) must NOT be emitted as a shifted-register sub.
153 %shift4 = ashr i32 %rhs32, 19
154 %val4 = sub i32 %shift4, %lhs32
155 store volatile i32 %val4, i32* @var32
156 ; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19
; "sub 0, x" with a shifted operand should select the neg alias.
158 %shift4a = ashr i32 %lhs32, 15
159 %val4a = sub i32 0, %shift4a
160 store volatile i32 %val4a, i32* @var32
161 ; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15
; 64-bit (x-register) variants of the same patterns.
163 %shift5 = ashr i64 %rhs64, 18
164 %val5 = add i64 %lhs64, %shift5
165 store volatile i64 %val5, i64* @var64
166 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18
168 %shift6 = ashr i64 %rhs64, 31
169 %val6 = add i64 %shift6, %lhs64
170 store volatile i64 %val6, i64* @var64
171 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31
173 %shift7 = ashr i64 %rhs64, 5
174 %val7 = sub i64 %lhs64, %shift7
175 store volatile i64 %val7, i64* @var64
176 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5
178 ; Subtraction is not commutative!
179 %shift8 = ashr i64 %rhs64, 19
180 %val8 = sub i64 %shift8, %lhs64
181 store volatile i64 %val8, i64* @var64
182 ; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19
184 %shift8a = ashr i64 %lhs64, 45
185 %val8a = sub i64 0, %shift8a
186 store volatile i64 %val8a, i64* @var64
187 ; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45
193 define i32 @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
194 ; CHECK-LABEL: test_cmp:
; Each icmp compares against a shifted value and feeds a conditional branch,
; so the backend should select a cmp with a shifted-register second operand
; (lsl/lsr/asr #imm) rather than materializing the shift separately.
; NOTE(review): the branch-target blocks (t2..t7, end) and this function's
; return are outside this excerpt (gaps in the original line numbers).
196 %shift1 = shl i32 %rhs32, 13
197 %tst1 = icmp uge i32 %lhs32, %shift1
198 br i1 %tst1, label %t2, label %end
199 ; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13
202 %shift2 = lshr i32 %rhs32, 20
203 %tst2 = icmp ne i32 %lhs32, %shift2
204 br i1 %tst2, label %t3, label %end
205 ; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20
208 %shift3 = ashr i32 %rhs32, 9
209 %tst3 = icmp ne i32 %lhs32, %shift3
210 br i1 %tst3, label %t4, label %end
211 ; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9
; 64-bit (x-register) variants of the same comparisons.
214 %shift4 = shl i64 %rhs64, 43
215 %tst4 = icmp uge i64 %lhs64, %shift4
216 br i1 %tst4, label %t5, label %end
217 ; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43
220 %shift5 = lshr i64 %rhs64, 20
221 %tst5 = icmp ne i64 %lhs64, %shift5
222 br i1 %tst5, label %t6, label %end
223 ; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20
226 %shift6 = ashr i64 %rhs64, 59
227 %tst6 = icmp ne i64 %lhs64, %shift6
228 br i1 %tst6, label %t7, label %end
229 ; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59
239 define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
240 ; CHECK-LABEL: test_cmn:
; Here the comparison operand is negated ("sub 0, shift"), so comparisons
; against it should become cmn (compare-negative) with a shifted-register
; operand — except for the lsl cases below, where folding would be unsound.
; NOTE(review): the branch-target blocks (t2..t7, end) and this function's
; return are outside this excerpt (gaps in the original line numbers).
242 %shift1 = shl i32 %rhs32, 13
243 %val1 = sub i32 0, %shift1
244 %tst1 = icmp uge i32 %lhs32, %val1
245 br i1 %tst1, label %t2, label %end
246 ; Important that this isn't lowered to a cmn instruction because if %rhs32 ==
247 ; 0 then the results will differ.
248 ; CHECK: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
249 ; CHECK: cmp {{w[0-9]+}}, [[RHS]]
; Unsigned/equality predicates on a negated lsr value can safely use cmn.
252 %shift2 = lshr i32 %rhs32, 20
253 %val2 = sub i32 0, %shift2
254 %tst2 = icmp ne i32 %lhs32, %val2
255 br i1 %tst2, label %t3, label %end
256 ; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, lsr #20
259 %shift3 = ashr i32 %rhs32, 9
260 %val3 = sub i32 0, %shift3
261 %tst3 = icmp eq i32 %lhs32, %val3
262 br i1 %tst3, label %t4, label %end
263 ; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, asr #9
; 64-bit (x-register) variants of the same patterns.
266 %shift4 = shl i64 %rhs64, 43
267 %val4 = sub i64 0, %shift4
268 %tst4 = icmp slt i64 %lhs64, %val4
269 br i1 %tst4, label %t5, label %end
270 ; Again, it's important that cmn isn't used here in case %rhs64 == 0.
271 ; CHECK: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
272 ; CHECK: cmp {{x[0-9]+}}, [[RHS]]
275 %shift5 = lshr i64 %rhs64, 20
276 %val5 = sub i64 0, %shift5
277 %tst5 = icmp ne i64 %lhs64, %val5
278 br i1 %tst5, label %t6, label %end
279 ; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, lsr #20
282 %shift6 = ashr i64 %rhs64, 59
283 %val6 = sub i64 0, %shift6
284 %tst6 = icmp ne i64 %lhs64, %val6
285 br i1 %tst6, label %t7, label %end
286 ; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, asr #59