; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s

declare i1 @llvm.AMDGPU.class.f32(float, i32) #1
declare i1 @llvm.AMDGPU.class.f64(double, i32) #1
declare i32 @llvm.r600.read.tidig.x() #1
declare float @llvm.fabs.f32(float) #1
declare double @llvm.fabs.f64(double) #1

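; The class mask selects which IEEE classes to test for. On GCN the bits are:
; 0 = signaling NaN, 1 = quiet NaN, 2 = -inf, 3 = negative normal,
; 4 = negative denormal, 5 = -0, 6 = +0, 7 = positive denormal,
; 8 = positive normal, 9 = +inf. The result is true if the value falls in any
; selected class.
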
; SI-LABEL: {{^}}test_class_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

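; fabs/fneg on the compared value are expected to fold into source modifiers
; on v_cmp_class rather than being emitted as separate instructions; source
; modifiers need the VOP3 (e64) encoding, so the result goes to an SGPR pair
; instead of vcc in the tests below.
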
; SI-LABEL: {{^}}test_class_fabs_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
  %a.fabs = call float @llvm.fabs.f32(float %a) #1
  %result = call i1 @llvm.AMDGPU.class.f32(float %a.fabs, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_fneg_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -[[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_fneg_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
  %a.fneg = fsub float -0.0, %a
  %result = call i1 @llvm.AMDGPU.class.f32(float %a.fneg, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_fneg_fabs_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_fneg_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
  %a.fabs = call float @llvm.fabs.f32(float %a) #1
  %a.fneg.fabs = fsub float -0.0, %a.fabs
  %result = call i1 @llvm.AMDGPU.class.f32(float %a.fneg.fabs, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

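; Small masks fit in an inline constant (integers -16..64 on GCN), so the
; mask operand of v_cmp_class_f32_e64 can be used directly without a v_mov.
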
; SI-LABEL: {{^}}test_class_1_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_cmp_class_f32_e64 [[COND:s\[[0-9]+:[0-9]+\]]], [[SA]], 1{{$}}
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_1_f32(i32 addrspace(1)* %out, float %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_64_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_cmp_class_f32_e64 [[COND:s\[[0-9]+:[0-9]+\]]], [[SA]], 64{{$}}
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_64_f32(i32 addrspace(1)* %out, float %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 64) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

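; Larger masks are not inline constants; they are expected to be materialized
; in a VGPR with v_mov_b32, and the VOPC e32 form writing vcc is used instead.
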
; Set all 10 bits of mask
; SI-LABEL: {{^}}test_class_full_mask_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x3ff{{$}}
; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_full_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1023) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_9bit_mask_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_9bit_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}v_test_class_full_mask_f32:
; SI-DAG: buffer_load_dword [[VA:v[0-9]+]]
; SI-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f32_e32 vcc, [[VA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
define void @v_test_class_full_mask_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

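; With a dynamic mask, the compared value can still be an inline
; floating-point constant (1.0 below), so only the mask is loaded into a VGPR.
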
; SI-LABEL: {{^}}test_class_inline_imm_constant_dynamic_mask_f32:
; SI-DAG: buffer_load_dword [[VB:v[0-9]+]]
; SI: v_cmp_class_f32_e32 vcc, 1.0, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
define void @test_class_inline_imm_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f32(float 1.0, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

; FIXME: Why isn't this using a literal constant operand?
; SI-LABEL: {{^}}test_class_lit_constant_dynamic_mask_f32:
; SI-DAG: buffer_load_dword [[VB:v[0-9]+]]
; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
; SI: v_cmp_class_f32_e32 vcc, [[VK]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
define void @test_class_lit_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f32(float 1024.0, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

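; The f64 tests below mirror the f32 cases: the compared value occupies a
; 64-bit register pair, while the class mask remains a single 32-bit operand.
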
; SI-LABEL: {{^}}test_class_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e32 vcc, [[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_fabs_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
  %a.fabs = call double @llvm.fabs.f64(double %a) #1
  %result = call i1 @llvm.AMDGPU.class.f64(double %a.fabs, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_fneg_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -[[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_fneg_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
  %a.fneg = fsub double -0.0, %a
  %result = call i1 @llvm.AMDGPU.class.f64(double %a.fneg, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_fneg_fabs_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_fneg_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
  %a.fabs = call double @llvm.fabs.f64(double %a) #1
  %a.fneg.fabs = fsub double -0.0, %a.fabs
  %result = call i1 @llvm.AMDGPU.class.f64(double %a.fneg.fabs, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_1_f64:
; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 1{{$}}
define void @test_class_1_f64(i32 addrspace(1)* %out, double %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 1) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_64_f64:
; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 64{{$}}
define void @test_class_64_f64(i32 addrspace(1)* %out, double %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 64) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; Set all 9 bits of mask
; SI-LABEL: {{^}}test_class_full_mask_f64:
; SI: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f64_e32 vcc, [[SA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
define void @test_class_full_mask_f64(i32 addrspace(1)* %out, double %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}v_test_class_full_mask_f64:
; SI-DAG: buffer_load_dwordx2 [[VA:v\[[0-9]+:[0-9]+\]]]
; SI-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f64_e32 vcc, [[VA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
define void @v_test_class_full_mask_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load double, double addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_inline_imm_constant_dynamic_mask_f64:
; XSI: v_cmp_class_f64_e32 vcc, 1.0,
; SI: v_cmp_class_f64_e32 vcc,
define void @test_class_inline_imm_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f64(double 1.0, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_lit_constant_dynamic_mask_f64:
; SI: v_cmp_class_f64_e32 vcc, s{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}
define void @test_class_lit_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f64(double 1024.0, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

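; Multiple class tests of the same value combined with 'or' should fold into
; a single v_cmp_class using the union of the masks, so only one compare is
; expected in each of the fold tests below.
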
; SI-LABEL: {{^}}test_fold_or_class_f32_0:
; SI-NOT: v_cmp_class
; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 3{{$}}
; SI-NOT: v_cmp_class
define void @test_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 3) #1
  %or = or i1 %class0, %class1

  %sext = sext i1 %or to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_fold_or3_class_f32_0:
; SI-NOT: v_cmp_class
; SI: v_cmp_class_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 7{{$}}
; SI-NOT: v_cmp_class
define void @test_fold_or3_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 2) #1
  %class2 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
  %or.0 = or i1 %class0, %class1
  %or.1 = or i1 %or.0, %class2

  %sext = sext i1 %or.1 to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_fold_or_all_tests_class_f32_0:
; SI-NOT: v_cmp_class
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x3ff{{$}}
; SI: v_cmp_class_f32_e32 vcc, v{{[0-9]+}}, [[MASK]]{{$}}
; SI-NOT: v_cmp_class
define void @test_fold_or_all_tests_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 2) #1
  %class2 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
  %class3 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 8) #1
  %class4 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 16) #1
  %class5 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 32) #1
  %class6 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 64) #1
  %class7 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 128) #1
  %class8 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 256) #1
  %class9 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 512) #1
  %or.0 = or i1 %class0, %class1
  %or.1 = or i1 %or.0, %class2
  %or.2 = or i1 %or.1, %class3
  %or.3 = or i1 %or.2, %class4
  %or.4 = or i1 %or.3, %class5
  %or.5 = or i1 %or.4, %class6
  %or.6 = or i1 %or.5, %class7
  %or.7 = or i1 %or.6, %class8
  %or.8 = or i1 %or.7, %class9
  %sext = sext i1 %or.8 to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_fold_or_class_f32_1:
; SI-NOT: v_cmp_class
; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 12{{$}}
; SI-NOT: v_cmp_class
define void @test_fold_or_class_f32_1(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 8) #1
  %or = or i1 %class0, %class1

  %sext = sext i1 %or to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_fold_or_class_f32_2:
; SI-NOT: v_cmp_class
; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 7{{$}}
; SI-NOT: v_cmp_class
define void @test_fold_or_class_f32_2(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 7) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 7) #1
  %or = or i1 %class0, %class1

  %sext = sext i1 %or to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

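; Class tests of two different values cannot be combined, so both compares
; are expected to survive in the next test.
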
; SI-LABEL: {{^}}test_no_fold_or_class_f32_0:
; SI-DAG: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 4{{$}}
; SI-DAG: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}, 8{{$}}
define void @test_no_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in, float %b) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %b, i32 8) #1
  %or = or i1 %class0, %class1

  %sext = sext i1 %or to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

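; A zero class mask can never match anything, so the compare should fold away
; to a constant false (0) at compile time.
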
; SI-LABEL: {{^}}test_class_0_f32:
; SI-NOT: v_cmp_class
; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
; SI: buffer_store_dword [[RESULT]]
define void @test_class_0_f32(i32 addrspace(1)* %out, float %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 0) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_0_f64:
; SI-NOT: v_cmp_class
; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
; SI: buffer_store_dword [[RESULT]]
define void @test_class_0_f64(i32 addrspace(1)* %out, double %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 0) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }