; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix PTX
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix PTX
; RUN: opt < %s -S -nvptx-favor-non-generic -dce | FileCheck %s --check-prefix IR

@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
@scalar = internal addrspace(3) global float 0.000000e+00, align 4

; Verifies that nvptx-favor-non-generic correctly optimizes generic address
; space usage to non-generic address space usage for the patterns we claim to
; handle:
;   1. load cast
;   2. store cast
;   3. load gep cast
;   4. store gep cast
; The gep and the cast can each be an instruction or a constant expression;
; this function tries all possible combinations.
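;
; For example (an illustrative sketch, not a literal CHECK pattern below), the
; pass rewrites the generic load
;   %x = load float, float* addrspacecast (float addrspace(3)* @scalar to float*)
; into the non-generic form
;   %x = load float, float addrspace(3)* @scalar
; so that llc can emit ld.shared.f32 instead of a generic load.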
define float @ld_st_shared_f32(i32 %i, float %v) {
; IR-LABEL: @ld_st_shared_f32
; IR-NOT: addrspacecast
; PTX-LABEL: ld_st_shared_f32(
  ; load cast
  %1 = load float, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
  ; store cast
  store float %v, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
  ; use syncthreads to keep optimizations from merging accesses across sections
  call void @llvm.cuda.syncthreads()

  ; cast; load
  %2 = addrspacecast float addrspace(3)* @scalar to float*
  %3 = load float, float* %2, align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
  ; cast; store
  store float %v, float* %2, align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()

  ; load gep cast
  %4 = load float, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
  ; store gep cast
  store float %v, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()

  ; gep cast; load
  %5 = getelementptr inbounds [10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5
  %6 = load float, float* %5, align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
  ; gep cast; store
  store float %v, float* %5, align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()

  ; cast; gep; load/store with a variable index
  %7 = addrspacecast [10 x float] addrspace(3)* @array to [10 x float]*
  %8 = getelementptr inbounds [10 x float], [10 x float]* %7, i32 0, i32 %i
  %9 = load float, float* %8, align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [%{{(r|rl|rd)[0-9]+}}];
  store float %v, float* %8, align 4
; PTX: st.shared.f32 [%{{(r|rl|rd)[0-9]+}}], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()

  %sum2 = fadd float %1, %3
  %sum3 = fadd float %sum2, %4
  %sum4 = fadd float %sum3, %6
  %sum5 = fadd float %sum4, %9
  ret float %sum5
}

; When hoisting an addrspacecast between different pointer types, replace the
; addrspacecast with a bitcast.
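; For example (an illustrative sketch matching the IR check below), the load
;   %1 = load i32, i32* addrspacecast (float addrspace(3)* @scalar to i32*)
; is rewritten to
;   %1 = load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*)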
define i32 @ld_int_from_float() {
; IR-LABEL: @ld_int_from_float
; IR: load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*)
; PTX-LABEL: ld_int_from_float(
; PTX: ld.shared.u{{(32|64)}}
  %1 = load i32, i32* addrspacecast (float addrspace(3)* @scalar to i32*), align 4
  ret i32 %1
}

declare void @llvm.cuda.syncthreads() #3

attributes #3 = { noduplicate nounwind }