; Regression test: AArch64 register-offset addressing modes.
; Verifies that llc folds sign/zero-extended 32-bit offsets (sxtw/uxtw),
; shifted 64-bit offsets (lsl #N), and plain register offsets directly
; into load/store addressing modes instead of emitting separate adds.
; NOTE(review): each line carries a leading integer that looks like a
; fused original line number — confirm against the upstream test file.
1 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
; Globals of each access width used as volatile load/store sinks so the
; memory operations cannot be optimised away.
3 @var_8bit = global i8 0
4 @var_16bit = global i16 0
5 @var_32bit = global i32 0
6 @var_64bit = global i64 0
8 @var_float = global float 0.0
9 @var_double = global double 0.0
; 8-bit accesses: element size 1, so no shift amount appears in the
; addressing mode — only the extend (sxtw/uxtw) or a bare x-register.
11 define void @ldst_8bit(i8* %base, i32 %off32, i64 %off64) {
; GEP with a signed 32-bit index: sext should fold as "sxtw".
14 %addr8_sxtw = getelementptr i8* %base, i32 %off32
15 %val8_sxtw = load volatile i8* %addr8_sxtw
16 %val32_signed = sext i8 %val8_sxtw to i32
17 store volatile i32 %val32_signed, i32* @var_32bit
18 ; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
; GEP with a 64-bit index: plain register-register addressing.
20 %addr_lsl = getelementptr i8* %base, i64 %off64
21 %val8_lsl = load volatile i8* %addr_lsl
22 %val32_unsigned = zext i8 %val8_lsl to i32
23 store volatile i32 %val32_unsigned, i32* @var_32bit
24 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
; Address built manually via ptrtoint + zext + add: the zero-extension
; of the 32-bit offset should fold as "uxtw".
26 %addrint_uxtw = ptrtoint i8* %base to i64
27 %offset_uxtw = zext i32 %off32 to i64
28 %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
29 %addr_uxtw = inttoptr i64 %addrint1_uxtw to i8*
30 %val8_uxtw = load volatile i8* %addr_uxtw
31 %newval8 = add i8 %val8_uxtw, 1
32 store volatile i8 %newval8, i8* @var_8bit
33 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
; 16-bit accesses: element size 2, so extended/shifted offsets may carry
; a "#1" shift when the index is scaled by the element size.
39 define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) {
; GEP index is scaled by 2 implicitly: expect "sxtw #1".
42 %addr8_sxtwN = getelementptr i16* %base, i32 %off32
43 %val8_sxtwN = load volatile i16* %addr8_sxtwN
44 %val32_signed = sext i16 %val8_sxtwN to i32
45 store volatile i32 %val32_signed, i32* @var_32bit
46 ; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #1]
; 64-bit GEP index: expect shifted register offset "lsl #1".
48 %addr_lslN = getelementptr i16* %base, i64 %off64
49 %val8_lslN = load volatile i16* %addr_lslN
50 %val32_unsigned = zext i16 %val8_lslN to i32
51 store volatile i32 %val32_unsigned, i32* @var_32bit
52 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #1]
; Manual byte-offset add (no scaling): zext folds as "uxtw", no shift.
54 %addrint_uxtw = ptrtoint i16* %base to i64
55 %offset_uxtw = zext i32 %off32 to i64
56 %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
57 %addr_uxtw = inttoptr i64 %addrint1_uxtw to i16*
58 %val8_uxtw = load volatile i16* %addr_uxtw
59 %newval8 = add i16 %val8_uxtw, 1
60 store volatile i16 %newval8, i16* @var_16bit
61 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
; Manual byte-offset add with sext, result widened to 64 bits:
; expect the 64-bit-destination form ldrsh x, with "sxtw".
63 %base_sxtw = ptrtoint i16* %base to i64
64 %offset_sxtw = sext i32 %off32 to i64
65 %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
66 %addr_sxtw = inttoptr i64 %addrint_sxtw to i16*
67 %val16_sxtw = load volatile i16* %addr_sxtw
68 %val64_signed = sext i16 %val16_sxtw to i64
69 store volatile i64 %val64_signed, i64* @var_64bit
70 ; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
; Plain 64-bit register offset, zero-extended result.
73 %base_lsl = ptrtoint i16* %base to i64
74 %addrint_lsl = add i64 %base_lsl, %off64
75 %addr_lsl = inttoptr i64 %addrint_lsl to i16*
76 %val16_lsl = load volatile i16* %addr_lsl
77 %val64_unsigned = zext i16 %val16_lsl to i64
78 store volatile i64 %val64_unsigned, i64* @var_64bit
79 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
; zext + explicit shl 1 (manual scaling) on the store side:
; expect "uxtw #1" folded into strh.
81 %base_uxtwN = ptrtoint i16* %base to i64
82 %offset_uxtwN = zext i32 %off32 to i64
83 %offset2_uxtwN = shl i64 %offset_uxtwN, 1
84 %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
85 %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i16*
86 %val32 = load volatile i32* @var_32bit
87 %val16_trunc32 = trunc i32 %val32 to i16
88 store volatile i16 %val16_trunc32, i16* %addr_uxtwN
89 ; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #1]
; 32-bit accesses: element size 4, scaled offsets use shift "#2".
93 define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) {
; Scaled GEP with signed 32-bit index: expect "sxtw #2".
96 %addr_sxtwN = getelementptr i32* %base, i32 %off32
97 %val_sxtwN = load volatile i32* %addr_sxtwN
98 store volatile i32 %val_sxtwN, i32* @var_32bit
99 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #2]
; Scaled GEP with 64-bit index: expect "lsl #2".
101 %addr_lslN = getelementptr i32* %base, i64 %off64
102 %val_lslN = load volatile i32* %addr_lslN
103 store volatile i32 %val_lslN, i32* @var_32bit
104 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #2]
; Manual unscaled byte offset via zext: expect "uxtw".
106 %addrint_uxtw = ptrtoint i32* %base to i64
107 %offset_uxtw = zext i32 %off32 to i64
108 %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
109 %addr_uxtw = inttoptr i64 %addrint1_uxtw to i32*
110 %val_uxtw = load volatile i32* %addr_uxtw
111 %newval8 = add i32 %val_uxtw, 1
112 store volatile i32 %newval8, i32* @var_32bit
113 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
; Manual sext offset with 64-bit widening of the loaded value:
; expect load-signed-word ldrsw with "sxtw".
116 %base_sxtw = ptrtoint i32* %base to i64
117 %offset_sxtw = sext i32 %off32 to i64
118 %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
119 %addr_sxtw = inttoptr i64 %addrint_sxtw to i32*
120 %val16_sxtw = load volatile i32* %addr_sxtw
121 %val64_signed = sext i32 %val16_sxtw to i64
122 store volatile i64 %val64_signed, i64* @var_64bit
123 ; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
; Plain 64-bit register offset; zext of a w-reg load is implicit.
126 %base_lsl = ptrtoint i32* %base to i64
127 %addrint_lsl = add i64 %base_lsl, %off64
128 %addr_lsl = inttoptr i64 %addrint_lsl to i32*
129 %val16_lsl = load volatile i32* %addr_lsl
130 %val64_unsigned = zext i32 %val16_lsl to i64
131 store volatile i64 %val64_unsigned, i64* @var_64bit
132 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
; zext + explicit shl 2 on the store side: expect "uxtw #2".
134 %base_uxtwN = ptrtoint i32* %base to i64
135 %offset_uxtwN = zext i32 %off32 to i64
136 %offset2_uxtwN = shl i64 %offset_uxtwN, 2
137 %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
138 %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i32*
139 %val32 = load volatile i32* @var_32bit
140 store volatile i32 %val32, i32* %addr_uxtwN
141 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
; 64-bit accesses: element size 8, scaled offsets use shift "#3".
145 define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) {
; Scaled GEP with signed 32-bit index: expect "sxtw #3".
148 %addr_sxtwN = getelementptr i64* %base, i32 %off32
149 %val_sxtwN = load volatile i64* %addr_sxtwN
150 store volatile i64 %val_sxtwN, i64* @var_64bit
151 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #3]
; Scaled GEP with 64-bit index: expect "lsl #3".
153 %addr_lslN = getelementptr i64* %base, i64 %off64
154 %val_lslN = load volatile i64* %addr_lslN
155 store volatile i64 %val_lslN, i64* @var_64bit
156 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #3]
; Manual unscaled byte offset via zext: expect "uxtw".
158 %addrint_uxtw = ptrtoint i64* %base to i64
159 %offset_uxtw = zext i32 %off32 to i64
160 %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
161 %addr_uxtw = inttoptr i64 %addrint1_uxtw to i64*
162 %val8_uxtw = load volatile i64* %addr_uxtw
163 %newval8 = add i64 %val8_uxtw, 1
164 store volatile i64 %newval8, i64* @var_64bit
165 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
; Manual unscaled byte offset via sext: expect "sxtw".
167 %base_sxtw = ptrtoint i64* %base to i64
168 %offset_sxtw = sext i32 %off32 to i64
169 %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
170 %addr_sxtw = inttoptr i64 %addrint_sxtw to i64*
171 %val64_sxtw = load volatile i64* %addr_sxtw
172 store volatile i64 %val64_sxtw, i64* @var_64bit
173 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
; Plain 64-bit register offset.
175 %base_lsl = ptrtoint i64* %base to i64
176 %addrint_lsl = add i64 %base_lsl, %off64
177 %addr_lsl = inttoptr i64 %addrint_lsl to i64*
178 %val64_lsl = load volatile i64* %addr_lsl
179 store volatile i64 %val64_lsl, i64* @var_64bit
180 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
; zext + explicit shl 3 on the store side: expect "uxtw #3".
182 %base_uxtwN = ptrtoint i64* %base to i64
183 %offset_uxtwN = zext i32 %off32 to i64
184 %offset2_uxtwN = shl i64 %offset_uxtwN, 3
185 %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
186 %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i64*
187 %val64 = load volatile i64* @var_64bit
188 store volatile i64 %val64, i64* %addr_uxtwN
189 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
; FP single-precision accesses: same folds as the 32-bit integer case
; but through the s-registers (element size 4, shift "#2").
193 define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
; Scaled GEP with signed 32-bit index: expect "sxtw #2".
196 %addr_sxtwN = getelementptr float* %base, i32 %off32
197 %val_sxtwN = load volatile float* %addr_sxtwN
198 store volatile float %val_sxtwN, float* @var_float
199 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #2]
; Scaled GEP with 64-bit index: expect "lsl #2".
201 %addr_lslN = getelementptr float* %base, i64 %off64
202 %val_lslN = load volatile float* %addr_lslN
203 store volatile float %val_lslN, float* @var_float
204 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #2]
; Manual unscaled byte offset via zext: expect "uxtw".
206 %addrint_uxtw = ptrtoint float* %base to i64
207 %offset_uxtw = zext i32 %off32 to i64
208 %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
209 %addr_uxtw = inttoptr i64 %addrint1_uxtw to float*
210 %val_uxtw = load volatile float* %addr_uxtw
211 store volatile float %val_uxtw, float* @var_float
212 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
; Manual unscaled byte offset via sext: expect "sxtw".
214 %base_sxtw = ptrtoint float* %base to i64
215 %offset_sxtw = sext i32 %off32 to i64
216 %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
217 %addr_sxtw = inttoptr i64 %addrint_sxtw to float*
218 %val64_sxtw = load volatile float* %addr_sxtw
219 store volatile float %val64_sxtw, float* @var_float
220 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
; Plain 64-bit register offset.
222 %base_lsl = ptrtoint float* %base to i64
223 %addrint_lsl = add i64 %base_lsl, %off64
224 %addr_lsl = inttoptr i64 %addrint_lsl to float*
225 %val64_lsl = load volatile float* %addr_lsl
226 store volatile float %val64_lsl, float* @var_float
227 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
; zext + explicit shl 2 on the store side: expect "uxtw #2".
229 %base_uxtwN = ptrtoint float* %base to i64
230 %offset_uxtwN = zext i32 %off32 to i64
231 %offset2_uxtwN = shl i64 %offset_uxtwN, 2
232 %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
233 %addr_uxtwN = inttoptr i64 %addrint_uxtwN to float*
234 %val64 = load volatile float* @var_float
235 store volatile float %val64, float* %addr_uxtwN
236 ; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
; FP double-precision accesses: same folds through the d-registers
; (element size 8, shift "#3").
240 define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
241 ; CHECK: ldst_double:
; Scaled GEP with signed 32-bit index: expect "sxtw #3".
243 %addr_sxtwN = getelementptr double* %base, i32 %off32
244 %val_sxtwN = load volatile double* %addr_sxtwN
245 store volatile double %val_sxtwN, double* @var_double
246 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #3]
; Scaled GEP with 64-bit index: expect "lsl #3".
248 %addr_lslN = getelementptr double* %base, i64 %off64
249 %val_lslN = load volatile double* %addr_lslN
250 store volatile double %val_lslN, double* @var_double
251 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #3]
; Manual unscaled byte offset via zext: expect "uxtw".
253 %addrint_uxtw = ptrtoint double* %base to i64
254 %offset_uxtw = zext i32 %off32 to i64
255 %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
256 %addr_uxtw = inttoptr i64 %addrint1_uxtw to double*
257 %val_uxtw = load volatile double* %addr_uxtw
258 store volatile double %val_uxtw, double* @var_double
259 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
; Manual unscaled byte offset via sext: expect "sxtw".
261 %base_sxtw = ptrtoint double* %base to i64
262 %offset_sxtw = sext i32 %off32 to i64
263 %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
264 %addr_sxtw = inttoptr i64 %addrint_sxtw to double*
265 %val64_sxtw = load volatile double* %addr_sxtw
266 store volatile double %val64_sxtw, double* @var_double
267 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
; Plain 64-bit register offset.
269 %base_lsl = ptrtoint double* %base to i64
270 %addrint_lsl = add i64 %base_lsl, %off64
271 %addr_lsl = inttoptr i64 %addrint_lsl to double*
272 %val64_lsl = load volatile double* %addr_lsl
273 store volatile double %val64_lsl, double* @var_double
274 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
; zext + explicit shl 3 on the store side: expect "uxtw #3".
276 %base_uxtwN = ptrtoint double* %base to i64
277 %offset_uxtwN = zext i32 %off32 to i64
278 %offset2_uxtwN = shl i64 %offset_uxtwN, 3
279 %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
280 %addr_uxtwN = inttoptr i64 %addrint_uxtwN to double*
281 %val64 = load volatile double* @var_double
282 store volatile double %val64, double* %addr_uxtwN
283 ; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
288 define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
289 ; CHECK: ldst_128bit:
291 %addr_sxtwN = getelementptr fp128* %base, i32 %off32
292 %val_sxtwN = load volatile fp128* %addr_sxtwN
293 store volatile fp128 %val_sxtwN, fp128* %base
294 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
296 %addr_lslN = getelementptr fp128* %base, i64 %off64
297 %val_lslN = load volatile fp128* %addr_lslN
298 store volatile fp128 %val_lslN, fp128* %base
299 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #4]
301 %addrint_uxtw = ptrtoint fp128* %base to i64
302 %offset_uxtw = zext i32 %off32 to i64
303 %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
304 %addr_uxtw = inttoptr i64 %addrint1_uxtw to fp128*
305 %val_uxtw = load volatile fp128* %addr_uxtw
306 store volatile fp128 %val_uxtw, fp128* %base
307 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
309 %base_sxtw = ptrtoint fp128* %base to i64
310 %offset_sxtw = sext i32 %off32 to i64
311 %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
312 %addr_sxtw = inttoptr i64 %addrint_sxtw to fp128*
313 %val64_sxtw = load volatile fp128* %addr_sxtw
314 store volatile fp128 %val64_sxtw, fp128* %base
315 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
317 %base_lsl = ptrtoint fp128* %base to i64
318 %addrint_lsl = add i64 %base_lsl, %off64
319 %addr_lsl = inttoptr i64 %addrint_lsl to fp128*
320 %val64_lsl = load volatile fp128* %addr_lsl
321 store volatile fp128 %val64_lsl, fp128* %base
322 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
324 %base_uxtwN = ptrtoint fp128* %base to i64
325 %offset_uxtwN = zext i32 %off32 to i64
326 %offset2_uxtwN = shl i64 %offset_uxtwN, 4
327 %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
328 %addr_uxtwN = inttoptr i64 %addrint_uxtwN to fp128*
329 %val64 = load volatile fp128* %base
330 store volatile fp128 %val64, fp128* %addr_uxtwN
331 ; CHECK: str {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #4]