1 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
; Scratch globals of every width under test; the functions below load from /
; store to these so extending and truncating memory accesses are exercised.
3 @var_8bit = global i8 0
4 @var_16bit = global i16 0
5 @var_32bit = global i32 0
6 @var_64bit = global i64 0
8 @var_float = global float 0.0
9 @var_double = global double 0.0
; Base pointer for all the negative / unaligned offsets used in the tests.
11 @varptr = global i8* null
13 define void @ldst_8bit() {
14 ; CHECK-LABEL: ldst_8bit:
; 8-bit accesses at negative offsets must select the unscaled-immediate
; forms: ldursb/ldurb for loads, sturb for stores.
16 ; No architectural support for loads to 16-bit or 8-bit since we
17 ; promote i8 during lowering.
18 %addr_8bit = load i8** @varptr
20 ; match a sign-extending load 8-bit -> 32-bit
21 %addr_sext32 = getelementptr i8* %addr_8bit, i64 -256
22 %val8_sext32 = load volatile i8* %addr_sext32
23 %val32_signed = sext i8 %val8_sext32 to i32
24 store volatile i32 %val32_signed, i32* @var_32bit
25 ; CHECK: ldursb {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
27 ; match a zero-extending volatile load 8-bit -> 32-bit
28 %addr_zext32 = getelementptr i8* %addr_8bit, i64 -12
29 %val8_zext32 = load volatile i8* %addr_zext32
30 %val32_unsigned = zext i8 %val8_zext32 to i32
31 store volatile i32 %val32_unsigned, i32* @var_32bit
32 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-12]
34 ; match an any-extending volatile load 8-bit -> 32-bit
35 %addr_anyext = getelementptr i8* %addr_8bit, i64 -1
36 %val8_anyext = load volatile i8* %addr_anyext
37 %newval8 = add i8 %val8_anyext, 1
38 store volatile i8 %newval8, i8* @var_8bit
39 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
41 ; match a sign-extending volatile load 8-bit -> 64-bit
42 %addr_sext64 = getelementptr i8* %addr_8bit, i64 -5
43 %val8_sext64 = load volatile i8* %addr_sext64
44 %val64_signed = sext i8 %val8_sext64 to i64
45 store volatile i64 %val64_signed, i64* @var_64bit
46 ; CHECK: ldursb {{x[0-9]+}}, [{{x[0-9]+}}, #-5]
48 ; match a zero-extending volatile load 8-bit -> 64-bit.
49 ; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
50 ; of x0 so it's identical to loading to 32-bits.
51 %addr_zext64 = getelementptr i8* %addr_8bit, i64 -9
52 %val8_zext64 = load volatile i8* %addr_zext64
53 %val64_unsigned = zext i8 %val8_zext64 to i64
54 store volatile i64 %val64_unsigned, i64* @var_64bit
55 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-9]
57 ; truncating volatile store 32-bits to 8-bits
58 %addr_trunc32 = getelementptr i8* %addr_8bit, i64 -256
59 %val32 = load volatile i32* @var_32bit
60 %val8_trunc32 = trunc i32 %val32 to i8
61 store volatile i8 %val8_trunc32, i8* %addr_trunc32
62 ; CHECK: sturb {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
64 ; truncating volatile store 64-bits to 8-bits
65 %addr_trunc64 = getelementptr i8* %addr_8bit, i64 -1
66 %val64 = load volatile i64* @var_64bit
67 %val8_trunc64 = trunc i64 %val64 to i8
68 store volatile i8 %val8_trunc64, i8* %addr_trunc64
69 ; CHECK: sturb {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
74 define void @ldst_16bit() {
75 ; CHECK-LABEL: ldst_16bit:
; 16-bit accesses at negative or unaligned offsets must select the unscaled
; forms: ldursh/ldurh for loads, sturh for stores.
77 ; No architectural support for loads to 8-bit or 16-bit since we
78 ; promote i16 during lowering.
79 %addr_8bit = load i8** @varptr
81 ; match a sign-extending load 16-bit -> 32-bit
82 %addr8_sext32 = getelementptr i8* %addr_8bit, i64 -256
83 %addr_sext32 = bitcast i8* %addr8_sext32 to i16*
84 %val16_sext32 = load volatile i16* %addr_sext32
85 %val32_signed = sext i16 %val16_sext32 to i32
86 store volatile i32 %val32_signed, i32* @var_32bit
87 ; CHECK: ldursh {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
89 ; match a zero-extending volatile load 16-bit -> 32-bit, with an offset that would be unaligned.
90 %addr8_zext32 = getelementptr i8* %addr_8bit, i64 15
91 %addr_zext32 = bitcast i8* %addr8_zext32 to i16*
92 %val16_zext32 = load volatile i16* %addr_zext32
93 %val32_unsigned = zext i16 %val16_zext32 to i32
94 store volatile i32 %val32_unsigned, i32* @var_32bit
95 ; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #15]
97 ; match an any-extending volatile load 16-bit -> 32-bit
98 %addr8_anyext = getelementptr i8* %addr_8bit, i64 -1
99 %addr_anyext = bitcast i8* %addr8_anyext to i16*
100 %val16_anyext = load volatile i16* %addr_anyext
101 %newval16 = add i16 %val16_anyext, 1
102 store volatile i16 %newval16, i16* @var_16bit
103 ; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
105 ; match a sign-extending volatile load 16-bit -> 64-bit
106 %addr8_sext64 = getelementptr i8* %addr_8bit, i64 -5
107 %addr_sext64 = bitcast i8* %addr8_sext64 to i16*
108 %val16_sext64 = load volatile i16* %addr_sext64
109 %val64_signed = sext i16 %val16_sext64 to i64
110 store volatile i64 %val64_signed, i64* @var_64bit
111 ; CHECK: ldursh {{x[0-9]+}}, [{{x[0-9]+}}, #-5]
113 ; match a zero-extending volatile load 16-bit -> 64-bit.
114 ; This uses the fact that ldrh w0, [x0] will zero out the high 32-bits
115 ; of x0 so it's identical to loading to 32-bits.
116 %addr8_zext64 = getelementptr i8* %addr_8bit, i64 9
117 %addr_zext64 = bitcast i8* %addr8_zext64 to i16*
118 %val16_zext64 = load volatile i16* %addr_zext64
119 %val64_unsigned = zext i16 %val16_zext64 to i64
120 store volatile i64 %val64_unsigned, i64* @var_64bit
121 ; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #9]
123 ; truncating volatile store 32-bits to 16-bits
124 %addr8_trunc32 = getelementptr i8* %addr_8bit, i64 -256
125 %addr_trunc32 = bitcast i8* %addr8_trunc32 to i16*
126 %val32 = load volatile i32* @var_32bit
127 %val16_trunc32 = trunc i32 %val32 to i16
128 store volatile i16 %val16_trunc32, i16* %addr_trunc32
129 ; CHECK: sturh {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
131 ; truncating volatile store 64-bits to 16-bits
132 %addr8_trunc64 = getelementptr i8* %addr_8bit, i64 -1
133 %addr_trunc64 = bitcast i8* %addr8_trunc64 to i16*
134 %val64 = load volatile i64* @var_64bit
135 %val16_trunc64 = trunc i64 %val64 to i16
136 store volatile i16 %val16_trunc64, i16* %addr_trunc64
137 ; CHECK: sturh {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
142 define void @ldst_32bit() {
143 ; CHECK-LABEL: ldst_32bit:
; 32-bit accesses at offsets that are negative or not a multiple of 4 must
; use the unscaled ldur/stur forms (plus ldursw for the sign-extending load);
; accesses to the aligned global still use the scaled str.
145 %addr_8bit = load i8** @varptr
147 ; Straight 32-bit load/store, offset #1 is unaligned for a 4-byte access
148 %addr32_8_noext = getelementptr i8* %addr_8bit, i64 1
149 %addr32_noext = bitcast i8* %addr32_8_noext to i32*
150 %val32_noext = load volatile i32* %addr32_noext
151 store volatile i32 %val32_noext, i32* %addr32_noext
152 ; CHECK: ldur {{w[0-9]+}}, [{{x[0-9]+}}, #1]
153 ; CHECK: stur {{w[0-9]+}}, [{{x[0-9]+}}, #1]
155 ; Zero-extension to 64-bits
156 %addr32_8_zext = getelementptr i8* %addr_8bit, i64 -256
157 %addr32_zext = bitcast i8* %addr32_8_zext to i32*
158 %val32_zext = load volatile i32* %addr32_zext
159 %val64_unsigned = zext i32 %val32_zext to i64
160 store volatile i64 %val64_unsigned, i64* @var_64bit
161 ; CHECK: ldur {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
162 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
164 ; Sign-extension to 64-bits
165 %addr32_8_sext = getelementptr i8* %addr_8bit, i64 -12
166 %addr32_sext = bitcast i8* %addr32_8_sext to i32*
167 %val32_sext = load volatile i32* %addr32_sext
168 %val64_signed = sext i32 %val32_sext to i64
169 store volatile i64 %val64_signed, i64* @var_64bit
170 ; CHECK: ldursw {{x[0-9]+}}, [{{x[0-9]+}}, #-12]
171 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_64bit]
173 ; Truncation from 64-bits
174 %addr64_8_trunc = getelementptr i8* %addr_8bit, i64 255
175 %addr64_trunc = bitcast i8* %addr64_8_trunc to i64*
176 %addr32_8_trunc = getelementptr i8* %addr_8bit, i64 -20
177 %addr32_trunc = bitcast i8* %addr32_8_trunc to i32*
179 %val64_trunc = load volatile i64* %addr64_trunc
180 %val32_trunc = trunc i64 %val64_trunc to i32
181 store volatile i32 %val32_trunc, i32* %addr32_trunc
182 ; CHECK: ldur {{x[0-9]+}}, [{{x[0-9]+}}, #255]
183 ; CHECK: stur {{w[0-9]+}}, [{{x[0-9]+}}, #-20]
188 define void @ldst_float() {
189 ; CHECK-LABEL: ldst_float:
; float access at a negative, unaligned offset must use unscaled ldur/stur
; with an s-register.
191 %addr_8bit = load i8** @varptr
192 %addrfp_8 = getelementptr i8* %addr_8bit, i64 -5
193 %addrfp = bitcast i8* %addrfp_8 to float*
195 %valfp = load volatile float* %addrfp
196 ; CHECK: ldur {{s[0-9]+}}, [{{x[0-9]+}}, #-5]
198 store volatile float %valfp, float* %addrfp
199 ; CHECK: stur {{s[0-9]+}}, [{{x[0-9]+}}, #-5]
204 define void @ldst_double() {
205 ; CHECK-LABEL: ldst_double:
; double access at offset #4 (not 8-byte scaled) must use unscaled ldur/stur
; with a d-register.
207 %addr_8bit = load i8** @varptr
208 %addrfp_8 = getelementptr i8* %addr_8bit, i64 4
209 %addrfp = bitcast i8* %addrfp_8 to double*
211 %valfp = load volatile double* %addrfp
212 ; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #4]
214 store volatile double %valfp, double* %addrfp
215 ; CHECK: stur {{d[0-9]+}}, [{{x[0-9]+}}, #4]