1 ; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK %s
2 ; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=R600-CHECK %s
3 ; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI-CHECK %s
5 ;===------------------------------------------------------------------------===;
7 ;===------------------------------------------------------------------------===;
9 ; Load an i8 value from the global address space.
10 ; R600-CHECK: @load_i8
11 ; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
14 ; SI-CHECK: BUFFER_LOAD_UBYTE VGPR{{[0-9]+}},
15 define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
16 %1 = load i8 addrspace(1)* %in
; Zero extension: the checks above expect an unsigned byte load
; (VTX_READ_8 on R600, BUFFER_LOAD_UBYTE on SI).
17 %2 = zext i8 %1 to i32
18 store i32 %2, i32 addrspace(1)* %out
; Load an i8 value from the global address space and sign-extend it to i32.
; On R600 the sign extension is expected as a LSHL/ASHR shift pair after the
; byte read; SI is expected to use a signed byte load directly.
22 ; R600-CHECK: @load_i8_sext
23 ; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
24 ; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
26 ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
28 ; SI-CHECK: @load_i8_sext
29 ; SI-CHECK: BUFFER_LOAD_SBYTE
30 define void @load_i8_sext(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
32 %0 = load i8 addrspace(1)* %in
33 %1 = sext i8 %0 to i32
34 store i32 %1, i32 addrspace(1)* %out
38 ; Load an i16 value from the global address space.
39 ; R600-CHECK: @load_i16
40 ; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
42 ; SI-CHECK: BUFFER_LOAD_USHORT
43 define void @load_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
45 %0 = load i16 addrspace(1)* %in
; Zero extension: the checks above expect an unsigned halfword load.
46 %1 = zext i16 %0 to i32
47 store i32 %1, i32 addrspace(1)* %out
; Load an i16 value from the global address space and sign-extend it to i32.
; As with the i8 case, R600 is expected to sign-extend with a LSHL/ASHR pair;
; SI is expected to use a signed halfword load.
51 ; R600-CHECK: @load_i16_sext
52 ; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
53 ; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
55 ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
57 ; SI-CHECK: @load_i16_sext
58 ; SI-CHECK: BUFFER_LOAD_SSHORT
59 define void @load_i16_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
61 %0 = load i16 addrspace(1)* %in
62 %1 = sext i16 %0 to i32
63 store i32 %1, i32 addrspace(1)* %out
67 ; Load an i32 value from the global address space.
68 ; R600-CHECK: @load_i32
69 ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
72 ; SI-CHECK: BUFFER_LOAD_DWORD VGPR{{[0-9]+}}
73 define void @load_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
75 %0 = load i32 addrspace(1)* %in
76 store i32 %0, i32 addrspace(1)* %out
80 ; Load an f32 value from the global address space.
; Expected to produce the same 32-bit load as the i32 case above.
81 ; R600-CHECK: @load_f32
82 ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
85 ; SI-CHECK: BUFFER_LOAD_DWORD VGPR{{[0-9]+}}
86 define void @load_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
88 %0 = load float addrspace(1)* %in
89 store float %0, float addrspace(1)* %out
93 ; Load a v2f32 value from the global address space.
; A 2 x f32 vector is 64 bits wide, so a single 64-bit load is expected.
94 ; R600-CHECK: @load_v2f32
95 ; R600-CHECK: VTX_READ_64
97 ; SI-CHECK: @load_v2f32
98 ; SI-CHECK: BUFFER_LOAD_DWORDX2
99 define void @load_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) {
101 %0 = load <2 x float> addrspace(1)* %in
102 store <2 x float> %0, <2 x float> addrspace(1)* %out
; Load an i64 value from the global address space.
; On SI a single 64-bit (two-dword) buffer load is expected.
106 ; R600-CHECK: @load_i64
110 ; SI-CHECK: @load_i64
111 ; SI-CHECK: BUFFER_LOAD_DWORDX2
112 define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
114 %0 = load i64 addrspace(1)* %in
115 store i64 %0, i64 addrspace(1)* %out
; Load an i32 value from the global address space and sign-extend it to i64.
; SI is expected to widen via a 64-bit shift-left/arithmetic-shift-right pair
; (by 32); R600 is expected to produce the high word with an ASHR by a literal.
119 ; R600-CHECK: @load_i64_sext
122 ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, literal.x
124 ; SI-CHECK: @load_i64_sext
125 ; SI-CHECK: BUFFER_LOAD_DWORDX2 [[VAL:VGPR[0-9]_VGPR[0-9]]]
126 ; SI-CHECK: V_LSHL_B64 [[LSHL:VGPR[0-9]_VGPR[0-9]]], [[VAL]], 32
127 ; SI-CHECK: V_ASHR_I64 VGPR{{[0-9]}}_VGPR{{[0-9]}}, [[LSHL]], 32
129 define void @load_i64_sext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
131 %0 = load i32 addrspace(1)* %in
132 %1 = sext i32 %0 to i64
133 store i64 %1, i64 addrspace(1)* %out
; Load an i32 value from the global address space and zero-extend it to i64.
137 ; R600-CHECK: @load_i64_zext
140 define void @load_i64_zext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
142 %0 = load i32 addrspace(1)* %in
143 %1 = zext i32 %0 to i64
144 store i64 %1, i64 addrspace(1)* %out
148 ;===------------------------------------------------------------------------===;
149 ; CONSTANT ADDRESS SPACE
150 ;===------------------------------------------------------------------------===;
152 ; Load a sign-extended i8 value
; Same expectations as the global-address-space sext case, but reading from
; the constant address space (addrspace 2).
153 ; R600-CHECK: @load_const_i8_sext
154 ; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
155 ; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
157 ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
159 ; SI-CHECK: @load_const_i8_sext
160 ; SI-CHECK: BUFFER_LOAD_SBYTE VGPR{{[0-9]+}},
161 define void @load_const_i8_sext(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
163 %0 = load i8 addrspace(2)* %in
164 %1 = sext i8 %0 to i32
165 store i32 %1, i32 addrspace(1)* %out
169 ; Load an aligned i8 value
170 ; R600-CHECK: @load_const_i8_aligned
171 ; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
172 ; SI-CHECK: @load_const_i8_aligned
173 ; SI-CHECK: BUFFER_LOAD_UBYTE VGPR{{[0-9]+}},
174 define void @load_const_i8_aligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
; Reads directly through %in (offset 0), so the access is aligned.
176 %0 = load i8 addrspace(2)* %in
177 %1 = zext i8 %0 to i32
178 store i32 %1, i32 addrspace(1)* %out
182 ; Load an un-aligned i8 value
183 ; R600-CHECK: @load_const_i8_unaligned
184 ; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
185 ; SI-CHECK: @load_const_i8_unaligned
186 ; SI-CHECK: BUFFER_LOAD_UBYTE VGPR{{[0-9]+}},
187 define void @load_const_i8_unaligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
; Offset the pointer by one byte so the byte read happens at an odd address.
189 %0 = getelementptr i8 addrspace(2)* %in, i32 1
190 %1 = load i8 addrspace(2)* %0
191 %2 = zext i8 %1 to i32
192 store i32 %2, i32 addrspace(1)* %out
196 ; Load a sign-extended i16 value
; Same expectations as the global-address-space i16 sext case, but reading
; from the constant address space (addrspace 2).
197 ; R600-CHECK: @load_const_i16_sext
198 ; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
199 ; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
201 ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
203 ; SI-CHECK: @load_const_i16_sext
204 ; SI-CHECK: BUFFER_LOAD_SSHORT
205 define void @load_const_i16_sext(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
207 %0 = load i16 addrspace(2)* %in
208 %1 = sext i16 %0 to i32
209 store i32 %1, i32 addrspace(1)* %out
213 ; Load an aligned i16 value
214 ; R600-CHECK: @load_const_i16_aligned
215 ; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
216 ; SI-CHECK: @load_const_i16_aligned
217 ; SI-CHECK: BUFFER_LOAD_USHORT
218 define void @load_const_i16_aligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
; Reads directly through %in (offset 0), so the access is aligned.
220 %0 = load i16 addrspace(2)* %in
221 %1 = zext i16 %0 to i32
222 store i32 %1, i32 addrspace(1)* %out
226 ; Load an un-aligned i16 value
227 ; R600-CHECK: @load_const_i16_unaligned
228 ; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
229 ; SI-CHECK: @load_const_i16_unaligned
230 ; SI-CHECK: BUFFER_LOAD_USHORT
231 define void @load_const_i16_unaligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
; Offset the pointer by one element (2 bytes) past %in; with no alignment
; guarantee on %in the load cannot be assumed dword-aligned.
233 %0 = getelementptr i16 addrspace(2)* %in, i32 1
234 %1 = load i16 addrspace(2)* %0
235 %2 = zext i16 %1 to i32
236 store i32 %2, i32 addrspace(1)* %out
240 ; Load an i32 value from the constant address space.
; On SI a scalar load (S_LOAD_DWORD into an SGPR) is expected for
; constant-address-space reads, unlike the vector buffer loads above.
241 ; R600-CHECK: @load_const_addrspace_i32
242 ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
244 ; SI-CHECK: @load_const_addrspace_i32
245 ; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]+}}
246 define void @load_const_addrspace_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
248 %0 = load i32 addrspace(2)* %in
249 store i32 %0, i32 addrspace(1)* %out
253 ; Load a f32 value from the constant address space.
; Same expectation as the i32 constant-address-space case: a VTX_READ_32 on
; R600 and a scalar S_LOAD_DWORD on SI.
254 ; R600-CHECK: @load_const_addrspace_f32
255 ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
257 ; SI-CHECK: @load_const_addrspace_f32
258 ; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]+}}
259 define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
260 %1 = load float addrspace(2)* %in
261 store float %1, float addrspace(1)* %out