1 ; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK %s
2 ; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=R600-CHECK %s
3 ; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI-CHECK %s
5 ;===------------------------------------------------------------------------===;
7 ;===------------------------------------------------------------------------===;
9 ; Load an i8 value from the global address space.
10 ; R600-CHECK: @load_i8
11 ; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
14 ; SI-CHECK: BUFFER_LOAD_UBYTE VGPR{{[0-9]+}},
define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
  %1 = load i8 addrspace(1)* %in
  %2 = zext i8 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}
22 ; R600-CHECK: @load_i8_sext
23 ; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
24 ; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
26 ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
28 ; SI-CHECK: @load_i8_sext
29 ; SI-CHECK: BUFFER_LOAD_SBYTE
define void @load_i8_sext(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
  %0 = load i8 addrspace(1)* %in
  %1 = sext i8 %0 to i32
  store i32 %1, i32 addrspace(1)* %out
  ret void
}
38 ; R600-CHECK: @load_v2i8
39 ; R600-CHECK: VTX_READ_8
40 ; R600-CHECK: VTX_READ_8
41 ; SI-CHECK: @load_v2i8
42 ; SI-CHECK: BUFFER_LOAD_UBYTE
43 ; SI-CHECK: BUFFER_LOAD_UBYTE
define void @load_v2i8(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
entry:
  %0 = load <2 x i8> addrspace(1)* %in
  %1 = zext <2 x i8> %0 to <2 x i32>
  store <2 x i32> %1, <2 x i32> addrspace(1)* %out
  ret void
}
52 ; R600-CHECK: @load_v2i8_sext
53 ; R600-CHECK-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
54 ; R600-CHECK-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
55 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
57 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
59 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
61 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
63 ; SI-CHECK: @load_v2i8_sext
64 ; SI-CHECK: BUFFER_LOAD_SBYTE
65 ; SI-CHECK: BUFFER_LOAD_SBYTE
define void @load_v2i8_sext(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
entry:
  %0 = load <2 x i8> addrspace(1)* %in
  %1 = sext <2 x i8> %0 to <2 x i32>
  store <2 x i32> %1, <2 x i32> addrspace(1)* %out
  ret void
}
74 ; R600-CHECK: @load_v4i8
75 ; R600-CHECK: VTX_READ_8
76 ; R600-CHECK: VTX_READ_8
77 ; R600-CHECK: VTX_READ_8
78 ; R600-CHECK: VTX_READ_8
79 ; SI-CHECK: @load_v4i8
80 ; SI-CHECK: BUFFER_LOAD_UBYTE
81 ; SI-CHECK: BUFFER_LOAD_UBYTE
82 ; SI-CHECK: BUFFER_LOAD_UBYTE
83 ; SI-CHECK: BUFFER_LOAD_UBYTE
define void @load_v4i8(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) {
entry:
  %0 = load <4 x i8> addrspace(1)* %in
  %1 = zext <4 x i8> %0 to <4 x i32>
  store <4 x i32> %1, <4 x i32> addrspace(1)* %out
  ret void
}
92 ; R600-CHECK: @load_v4i8_sext
93 ; R600-CHECK-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
94 ; R600-CHECK-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
95 ; R600-CHECK-DAG: VTX_READ_8 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
96 ; R600-CHECK-DAG: VTX_READ_8 [[DST_W:T[0-9]\.[XYZW]]], [[DST_W]]
97 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
99 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
101 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
103 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
105 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Z_CHAN:[XYZW]]], [[DST_Z]]
107 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Z_CHAN]]
109 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_W_CHAN:[XYZW]]], [[DST_W]]
111 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_W_CHAN]]
113 ; SI-CHECK: @load_v4i8_sext
114 ; SI-CHECK: BUFFER_LOAD_SBYTE
115 ; SI-CHECK: BUFFER_LOAD_SBYTE
116 ; SI-CHECK: BUFFER_LOAD_SBYTE
117 ; SI-CHECK: BUFFER_LOAD_SBYTE
define void @load_v4i8_sext(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) {
entry:
  %0 = load <4 x i8> addrspace(1)* %in
  %1 = sext <4 x i8> %0 to <4 x i32>
  store <4 x i32> %1, <4 x i32> addrspace(1)* %out
  ret void
}
126 ; Load an i16 value from the global address space.
127 ; R600-CHECK: @load_i16
128 ; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
129 ; SI-CHECK: @load_i16
130 ; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
entry:
  %0 = load i16 addrspace(1)* %in
  %1 = zext i16 %0 to i32
  store i32 %1, i32 addrspace(1)* %out
  ret void
}
139 ; R600-CHECK: @load_i16_sext
140 ; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
141 ; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
143 ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
145 ; SI-CHECK: @load_i16_sext
146 ; SI-CHECK: BUFFER_LOAD_SSHORT
define void @load_i16_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
entry:
  %0 = load i16 addrspace(1)* %in
  %1 = sext i16 %0 to i32
  store i32 %1, i32 addrspace(1)* %out
  ret void
}
155 ; R600-CHECK: @load_v2i16
156 ; R600-CHECK: VTX_READ_16
157 ; R600-CHECK: VTX_READ_16
158 ; SI-CHECK: @load_v2i16
159 ; SI-CHECK: BUFFER_LOAD_USHORT
160 ; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_v2i16(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
entry:
  %0 = load <2 x i16> addrspace(1)* %in
  %1 = zext <2 x i16> %0 to <2 x i32>
  store <2 x i32> %1, <2 x i32> addrspace(1)* %out
  ret void
}
169 ; R600-CHECK: @load_v2i16_sext
170 ; R600-CHECK-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
171 ; R600-CHECK-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
172 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
174 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
176 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
178 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
180 ; SI-CHECK: @load_v2i16_sext
181 ; SI-CHECK: BUFFER_LOAD_SSHORT
182 ; SI-CHECK: BUFFER_LOAD_SSHORT
define void @load_v2i16_sext(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
entry:
  %0 = load <2 x i16> addrspace(1)* %in
  %1 = sext <2 x i16> %0 to <2 x i32>
  store <2 x i32> %1, <2 x i32> addrspace(1)* %out
  ret void
}
191 ; R600-CHECK: @load_v4i16
192 ; R600-CHECK: VTX_READ_16
193 ; R600-CHECK: VTX_READ_16
194 ; R600-CHECK: VTX_READ_16
195 ; R600-CHECK: VTX_READ_16
196 ; SI-CHECK: @load_v4i16
197 ; SI-CHECK: BUFFER_LOAD_USHORT
198 ; SI-CHECK: BUFFER_LOAD_USHORT
199 ; SI-CHECK: BUFFER_LOAD_USHORT
200 ; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_v4i16(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
entry:
  %0 = load <4 x i16> addrspace(1)* %in
  %1 = zext <4 x i16> %0 to <4 x i32>
  store <4 x i32> %1, <4 x i32> addrspace(1)* %out
  ret void
}
209 ; R600-CHECK: @load_v4i16_sext
210 ; R600-CHECK-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
211 ; R600-CHECK-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
212 ; R600-CHECK-DAG: VTX_READ_16 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
213 ; R600-CHECK-DAG: VTX_READ_16 [[DST_W:T[0-9]\.[XYZW]]], [[DST_W]]
214 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
216 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
218 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
220 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
222 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Z_CHAN:[XYZW]]], [[DST_Z]]
224 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Z_CHAN]]
226 ; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_W_CHAN:[XYZW]]], [[DST_W]]
228 ; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_W_CHAN]]
230 ; SI-CHECK: @load_v4i16_sext
231 ; SI-CHECK: BUFFER_LOAD_SSHORT
232 ; SI-CHECK: BUFFER_LOAD_SSHORT
233 ; SI-CHECK: BUFFER_LOAD_SSHORT
234 ; SI-CHECK: BUFFER_LOAD_SSHORT
define void @load_v4i16_sext(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
entry:
  %0 = load <4 x i16> addrspace(1)* %in
  %1 = sext <4 x i16> %0 to <4 x i32>
  store <4 x i32> %1, <4 x i32> addrspace(1)* %out
  ret void
}
; Load an i32 value from the global address space.
244 ; R600-CHECK: @load_i32
245 ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
247 ; SI-CHECK: @load_i32
248 ; SI-CHECK: BUFFER_LOAD_DWORD VGPR{{[0-9]+}}
define void @load_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
  %0 = load i32 addrspace(1)* %in
  store i32 %0, i32 addrspace(1)* %out
  ret void
}
; Load an f32 value from the global address space.
257 ; R600-CHECK: @load_f32
258 ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
260 ; SI-CHECK: @load_f32
261 ; SI-CHECK: BUFFER_LOAD_DWORD VGPR{{[0-9]+}}
define void @load_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
entry:
  %0 = load float addrspace(1)* %in
  store float %0, float addrspace(1)* %out
  ret void
}
; Load a v2f32 value from the global address space.
270 ; R600-CHECK: @load_v2f32
271 ; R600-CHECK: VTX_READ_64
273 ; SI-CHECK: @load_v2f32
274 ; SI-CHECK: BUFFER_LOAD_DWORDX2
define void @load_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) {
entry:
  %0 = load <2 x float> addrspace(1)* %in
  store <2 x float> %0, <2 x float> addrspace(1)* %out
  ret void
}
282 ; R600-CHECK: @load_i64
283 ; R600-CHECK: MEM_RAT
284 ; R600-CHECK: MEM_RAT
286 ; SI-CHECK: @load_i64
287 ; SI-CHECK: BUFFER_LOAD_DWORDX2
define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
entry:
  %0 = load i64 addrspace(1)* %in
  store i64 %0, i64 addrspace(1)* %out
  ret void
}
295 ; R600-CHECK: @load_i64_sext
296 ; R600-CHECK: MEM_RAT
297 ; R600-CHECK: MEM_RAT
298 ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, literal.x
300 ; SI-CHECK: @load_i64_sext
301 ; SI-CHECK: BUFFER_LOAD_DWORDX2 [[VAL:VGPR[0-9]_VGPR[0-9]]]
302 ; SI-CHECK: V_LSHL_B64 [[LSHL:VGPR[0-9]_VGPR[0-9]]], [[VAL]], 32
303 ; SI-CHECK: V_ASHR_I64 VGPR{{[0-9]}}_VGPR{{[0-9]}}, [[LSHL]], 32
define void @load_i64_sext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
  %0 = load i32 addrspace(1)* %in
  %1 = sext i32 %0 to i64
  store i64 %1, i64 addrspace(1)* %out
  ret void
}
313 ; R600-CHECK: @load_i64_zext
314 ; R600-CHECK: MEM_RAT
315 ; R600-CHECK: MEM_RAT
define void @load_i64_zext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
  %0 = load i32 addrspace(1)* %in
  %1 = zext i32 %0 to i64
  store i64 %1, i64 addrspace(1)* %out
  ret void
}
324 ;===------------------------------------------------------------------------===;
325 ; CONSTANT ADDRESS SPACE
326 ;===------------------------------------------------------------------------===;
328 ; Load a sign-extended i8 value
329 ; R600-CHECK: @load_const_i8_sext
330 ; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
331 ; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
333 ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
335 ; SI-CHECK: @load_const_i8_sext
336 ; SI-CHECK: BUFFER_LOAD_SBYTE VGPR{{[0-9]+}},
define void @load_const_i8_sext(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
  %0 = load i8 addrspace(2)* %in
  %1 = sext i8 %0 to i32
  store i32 %1, i32 addrspace(1)* %out
  ret void
}
345 ; Load an aligned i8 value
346 ; R600-CHECK: @load_const_i8_aligned
347 ; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
348 ; SI-CHECK: @load_const_i8_aligned
349 ; SI-CHECK: BUFFER_LOAD_UBYTE VGPR{{[0-9]+}},
define void @load_const_i8_aligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
  %0 = load i8 addrspace(2)* %in
  %1 = zext i8 %0 to i32
  store i32 %1, i32 addrspace(1)* %out
  ret void
}
358 ; Load an un-aligned i8 value
359 ; R600-CHECK: @load_const_i8_unaligned
360 ; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
361 ; SI-CHECK: @load_const_i8_unaligned
362 ; SI-CHECK: BUFFER_LOAD_UBYTE VGPR{{[0-9]+}},
define void @load_const_i8_unaligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
  %0 = getelementptr i8 addrspace(2)* %in, i32 1
  %1 = load i8 addrspace(2)* %0
  %2 = zext i8 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}
372 ; Load a sign-extended i16 value
373 ; R600-CHECK: @load_const_i16_sext
374 ; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
375 ; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
377 ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
379 ; SI-CHECK: @load_const_i16_sext
380 ; SI-CHECK: BUFFER_LOAD_SSHORT
define void @load_const_i16_sext(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
  %0 = load i16 addrspace(2)* %in
  %1 = sext i16 %0 to i32
  store i32 %1, i32 addrspace(1)* %out
  ret void
}
389 ; Load an aligned i16 value
390 ; R600-CHECK: @load_const_i16_aligned
391 ; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
392 ; SI-CHECK: @load_const_i16_aligned
393 ; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_const_i16_aligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
  %0 = load i16 addrspace(2)* %in
  %1 = zext i16 %0 to i32
  store i32 %1, i32 addrspace(1)* %out
  ret void
}
402 ; Load an un-aligned i16 value
403 ; R600-CHECK: @load_const_i16_unaligned
404 ; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
405 ; SI-CHECK: @load_const_i16_unaligned
406 ; SI-CHECK: BUFFER_LOAD_USHORT
define void @load_const_i16_unaligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
  %0 = getelementptr i16 addrspace(2)* %in, i32 1
  %1 = load i16 addrspace(2)* %0
  %2 = zext i16 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}
416 ; Load an i32 value from the constant address space.
417 ; R600-CHECK: @load_const_addrspace_i32
418 ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
420 ; SI-CHECK: @load_const_addrspace_i32
421 ; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]+}}
define void @load_const_addrspace_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
  %0 = load i32 addrspace(2)* %in
  store i32 %0, i32 addrspace(1)* %out
  ret void
}
429 ; Load a f32 value from the constant address space.
430 ; R600-CHECK: @load_const_addrspace_f32
431 ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
433 ; SI-CHECK: @load_const_addrspace_f32
434 ; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]+}}
435 define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
436 %1 = load float addrspace(2)* %in
437 store float %1, float addrspace(1)* %out