; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s
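
; These tests check that immediates and SGPR values are folded directly into
; VALU instructions instead of first being copied into VGPRs.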

; CHECK-LABEL: {{^}}fold_sgpr:
; CHECK: v_add_i32_e32 v{{[0-9]+}}, s
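; The VOP2 encoding of v_add_i32 can read one SGPR as its first source
; operand, so the uniform %fold value is used directly without a v_mov.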
define void @fold_sgpr(i32 addrspace(1)* %out, i32 %fold) {
entry:
  %tmp0 = icmp ne i32 %fold, 0
  br i1 %tmp0, label %if, label %endif

if:
  %id = call i32 @llvm.r600.read.tidig.x()
  %offset = add i32 %fold, %id
  %tmp1 = getelementptr i32 addrspace(1)* %out, i32 %offset
  store i32 0, i32 addrspace(1)* %tmp1
  br label %endif

endif:
  ret void
}

; CHECK-LABEL: {{^}}fold_imm:
; CHECK: v_or_b32_e32 v{{[0-9]+}}, 5
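; 3 + 2 constant-folds to 5, which is an SI inline constant and is encoded
; directly in the v_or instruction.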
define void @fold_imm(i32 addrspace(1)* %out, i32 %cmp) {
entry:
  %fold = add i32 3, 2
  %tmp0 = icmp ne i32 %cmp, 0
  br i1 %tmp0, label %if, label %endif

if:
  %id = call i32 @llvm.r600.read.tidig.x()
  %val = or i32 %id, %fold
  store i32 %val, i32 addrspace(1)* %out
  br label %endif

endif:
  ret void
}

; CHECK-LABEL: {{^}}fold_64bit_constant_add:
; CHECK-NOT: s_mov_b64
; FIXME: It would be better if we could use v_add here and drop the extra
; v_mov_b32 instructions.
; CHECK-DAG: s_add_u32 [[LO:s[0-9]+]], s{{[0-9]+}}, 1
; CHECK-DAG: s_addc_u32 [[HI:s[0-9]+]], s{{[0-9]+}}, 0
; CHECK-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], [[LO]]
; CHECK-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[HI]]
; CHECK: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}},
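; The 64-bit add is split into a 32-bit add plus an add-with-carry on the
; scalar unit; the constant folds as 1 into the low half and 0 (plus carry)
; into the high half, so no s_mov_b64 of the constant is needed.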

define void @fold_64bit_constant_add(i64 addrspace(1)* %out, i32 %cmp, i64 %val) {
entry:
  %tmp0 = add i64 %val, 1
  store i64 %tmp0, i64 addrspace(1)* %out
  ret void
}

; Inline constants should always be folded.
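; On SI, integers from -16 to 64 encode as inline operands at no cost, so a
; value like 5 never needs a register, even with multiple uses.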

; CHECK-LABEL: {{^}}vector_inline:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}

define void @vector_inline(<4 x i32> addrspace(1)* %out) {
entry:
  %tmp0 = call i32 @llvm.r600.read.tidig.x()
  %tmp1 = add i32 %tmp0, 1
  %tmp2 = add i32 %tmp0, 2
  %tmp3 = add i32 %tmp0, 3
  %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
  %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
  %vec2 = insertelement <4 x i32> %vec1, i32 %tmp2, i32 2
  %vec3 = insertelement <4 x i32> %vec2, i32 %tmp3, i32 3
  %tmp4 = xor <4 x i32> <i32 5, i32 5, i32 5, i32 5>, %vec3
  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %out
  ret void
}

; Immediates with one use should be folded
; CHECK-LABEL: {{^}}imm_one_use:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0x64, v{{[0-9]+}}
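; 0x64 (100) is outside the inline constant range, but a VOP instruction can
; carry one 32-bit literal, so a single-use immediate is still folded.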
define void @imm_one_use(i32 addrspace(1)* %out) {
entry:
  %tmp0 = call i32 @llvm.r600.read.tidig.x()
  %tmp1 = xor i32 %tmp0, 100
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
; CHECK-LABEL: {{^}}vector_imm:
; CHECK: s_movk_i32 [[IMM:s[0-9]+]], 0x64
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
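; With four uses, materializing 0x64 once in an SGPR via s_movk_i32 is
; smaller than encoding the 32-bit literal in every xor.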

define void @vector_imm(<4 x i32> addrspace(1)* %out) {
entry:
  %tmp0 = call i32 @llvm.r600.read.tidig.x()
  %tmp1 = add i32 %tmp0, 1
  %tmp2 = add i32 %tmp0, 2
  %tmp3 = add i32 %tmp0, 3
  %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
  %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
  %vec2 = insertelement <4 x i32> %vec1, i32 %tmp2, i32 2
  %vec3 = insertelement <4 x i32> %vec2, i32 %tmp3, i32 3
  %tmp4 = xor <4 x i32> <i32 100, i32 100, i32 100, i32 100>, %vec3
  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %out
  ret void
}

declare i32 @llvm.r600.read.tidig.x() #0
attributes #0 = { readnone }