From 85dc7da6f3eeee02bc25fc0e366de6d43fb6b88c Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Wed, 6 Aug 2014 00:29:49 +0000
Subject: [PATCH] R600: Increase nearby load scheduling threshold.

This partially fixes the weird-looking load scheduling in the memcpy test.
The load clustering doesn't seem particularly smart, but this method seems
to be partially deprecated, so it might not be worth trying to fix.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@214943 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/R600/AMDGPUInstrInfo.cpp | 29 +++++++++----
 test/CodeGen/R600/llvm.memcpy.ll    | 65 ++++++++++++++++-------
 2 files changed, 55 insertions(+), 39 deletions(-)

diff --git a/lib/Target/R600/AMDGPUInstrInfo.cpp b/lib/Target/R600/AMDGPUInstrInfo.cpp
index ac12d14cd45..ef3bdb17e99 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.cpp
+++ b/lib/Target/R600/AMDGPUInstrInfo.cpp
@@ -218,15 +218,26 @@ bool AMDGPUInstrInfo::enableClusterLoads() const {
   return true;
 }
 
-bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
-                                              int64_t Offset1, int64_t Offset2,
-                                              unsigned NumLoads) const {
-  assert(Offset2 > Offset1
-         && "Second offset should be larger than first offset!");
-  // If we have less than 16 loads in a row, and the offsets are within 16,
-  // then schedule together.
-  // TODO: Make the loads schedule near if it fits in a cacheline
-  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
+// FIXME: This behaves strangely. If, for example, you have 32 loads + stores,
+// the first 16 loads will be interleaved with the stores, and the next 16 will
+// be clustered as expected. It should really split into two 16-store batches.
+//
+// Loads are clustered until this returns false, rather than trying to schedule
+// groups of stores. This also means it ends up saying that loads from
+// different address spaces should be clustered, including ones which might
+// cause bank conflicts.
+//
+// This hook might be deprecated, so it might not be worth much effort to fix.
+bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
+                                              int64_t Offset0, int64_t Offset1,
+                                              unsigned NumLoads) const {
+  assert(Offset1 > Offset0 &&
+         "Second offset should be larger than first offset!");
+  // If we have 16 or fewer loads in a row, and the offsets are within 64
+  // bytes, then schedule together.
+
+  // A cacheline is 64 bytes (for global memory).
+ return (NumLoads <= 16 && (Offset1 - Offset0) < 64); } bool diff --git a/test/CodeGen/R600/llvm.memcpy.ll b/test/CodeGen/R600/llvm.memcpy.ll index cd8b532a792..68a4050ce37 100644 --- a/test/CodeGen/R600/llvm.memcpy.ll +++ b/test/CodeGen/R600/llvm.memcpy.ll @@ -15,17 +15,18 @@ declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace ; SI: DS_WRITE_B8 ; SI: DS_READ_U8 ; SI: DS_WRITE_B8 + ; SI: DS_READ_U8 ; SI: DS_WRITE_B8 ; SI: DS_READ_U8 ; SI: DS_WRITE_B8 ; SI: DS_READ_U8 ; SI: DS_WRITE_B8 - ; SI: DS_READ_U8 ; SI: DS_WRITE_B8 ; SI: DS_READ_U8 ; SI: DS_WRITE_B8 + ; SI: DS_READ_U8 ; SI: DS_WRITE_B8 ; SI: DS_READ_U8 @@ -35,9 +36,8 @@ declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace ; SI: DS_READ_U8 ; SI: DS_WRITE_B8 ; SI: DS_READ_U8 -; SI: DS_WRITE_B8 ; SI: DS_READ_U8 -; SI: DS_WRITE_B8 + ; SI: DS_READ_U8 ; SI: DS_READ_U8 @@ -47,6 +47,7 @@ declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace ; SI: DS_READ_U8 ; SI: DS_READ_U8 ; SI: DS_READ_U8 + ; SI: DS_READ_U8 ; SI: DS_READ_U8 ; SI: DS_READ_U8 @@ -63,6 +64,9 @@ declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace ; SI: DS_WRITE_B8 ; SI: DS_WRITE_B8 ; SI: DS_WRITE_B8 +; SI: DS_WRITE_B8 +; SI: DS_WRITE_B8 + ; SI: DS_WRITE_B8 ; SI: DS_WRITE_B8 ; SI: DS_WRITE_B8 @@ -83,21 +87,13 @@ define void @test_small_memcpy_i64_lds_to_lds_align1(i64 addrspace(3)* noalias % ; FUNC-LABEL: @test_small_memcpy_i64_lds_to_lds_align2 ; SI: DS_READ_U16 -; SI: DS_WRITE_B16 ; SI: DS_READ_U16 -; SI: DS_WRITE_B16 ; SI: DS_READ_U16 -; SI: DS_WRITE_B16 ; SI: DS_READ_U16 -; SI: DS_WRITE_B16 ; SI: DS_READ_U16 -; SI: DS_WRITE_B16 ; SI: DS_READ_U16 -; SI: DS_WRITE_B16 ; SI: DS_READ_U16 -; SI: DS_WRITE_B16 ; SI: DS_READ_U16 -; SI: DS_WRITE_B16 ; SI: DS_READ_U16 ; SI: DS_READ_U16 @@ -117,6 +113,15 @@ define void @test_small_memcpy_i64_lds_to_lds_align1(i64 addrspace(3)* noalias % ; SI: DS_WRITE_B16 ; SI: DS_WRITE_B16 +; SI: DS_WRITE_B16 +; SI: DS_WRITE_B16 +; SI: DS_WRITE_B16 +; SI: DS_WRITE_B16 +; SI: DS_WRITE_B16 +; SI: DS_WRITE_B16 +; SI: DS_WRITE_B16 +; SI: DS_WRITE_B16 + ; SI: S_ENDPGM define void @test_small_memcpy_i64_lds_to_lds_align2(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(3)* %in to i8 addrspace(3)* @@ -278,37 +283,37 @@ define void @test_small_memcpy_i64_global_to_global_align1(i64 addrspace(1)* noa ; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align2 ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT - ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT -; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_LOAD_USHORT + +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: 
BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT +; SI-DAG: BUFFER_STORE_SHORT ; SI-DAG: BUFFER_STORE_SHORT ; SI: S_ENDPGM @@ -321,9 +326,9 @@ define void @test_small_memcpy_i64_global_to_global_align2(i64 addrspace(1)* noa ; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align4 ; SI: BUFFER_LOAD_DWORDX4 -; SI: BUFFER_STORE_DWORDX4 ; SI: BUFFER_LOAD_DWORDX4 ; SI: BUFFER_STORE_DWORDX4 +; SI: BUFFER_STORE_DWORDX4 ; SI: S_ENDPGM define void @test_small_memcpy_i64_global_to_global_align4(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)* @@ -334,9 +339,9 @@ define void @test_small_memcpy_i64_global_to_global_align4(i64 addrspace(1)* noa ; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align8 ; SI: BUFFER_LOAD_DWORDX4 -; SI: BUFFER_STORE_DWORDX4 ; SI: BUFFER_LOAD_DWORDX4 ; SI: BUFFER_STORE_DWORDX4 +; SI: BUFFER_STORE_DWORDX4 ; SI: S_ENDPGM define void @test_small_memcpy_i64_global_to_global_align8(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)* @@ -347,9 +352,9 @@ define void @test_small_memcpy_i64_global_to_global_align8(i64 addrspace(1)* noa ; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align16 ; SI: BUFFER_LOAD_DWORDX4 -; SI: BUFFER_STORE_DWORDX4 ; SI: BUFFER_LOAD_DWORDX4 ; SI: BUFFER_STORE_DWORDX4 +; SI: BUFFER_STORE_DWORDX4 ; SI: S_ENDPGM define void @test_small_memcpy_i64_global_to_global_align16(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)* -- 2.34.1
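
Below is a small, self-contained C++ sketch (not part of the patch) of the clustering behavior the new threshold aims for: loads sorted by offset are grouped while no more than 16 have been clustered and the distance from the cluster's first load stays under 64 bytes, one cacheline of global memory. The helper name clusterLoadOffsets and the driver are hypothetical illustrations and only roughly mirror how the SelectionDAG scheduler queries shouldScheduleLoadsNear.

// Illustrative sketch only -- not part of the patch. Groups a run of loads,
// sorted by offset, into clusters under the patch's constants: at most 16
// loads per cluster, all within a 64-byte window.
#include <cstdint>
#include <iostream>
#include <vector>

static std::vector<std::vector<int64_t>>
clusterLoadOffsets(const std::vector<int64_t> &SortedOffsets,
                   unsigned MaxLoads, int64_t WindowBytes) {
  std::vector<std::vector<int64_t>> Clusters;
  for (int64_t Offset : SortedOffsets) {
    // Start a new cluster when the current one is full or when this offset is
    // too far from the cluster's first load (mirrors Offset1 - Offset0 < 64).
    if (Clusters.empty() || Clusters.back().size() >= MaxLoads ||
        Offset - Clusters.back().front() >= WindowBytes)
      Clusters.emplace_back();
    Clusters.back().push_back(Offset);
  }
  return Clusters;
}

int main() {
  // Sixteen 4-byte loads at offsets 0, 4, ..., 60. A 16-byte window breaks
  // them into clusters of four; the 64-byte window keeps all 16 together.
  std::vector<int64_t> Offsets;
  for (int64_t I = 0; I < 16; ++I)
    Offsets.push_back(I * 4);
  for (int64_t Window : {int64_t(16), int64_t(64)}) {
    std::cout << "window = " << Window << " bytes:\n";
    for (const auto &Cluster : clusterLoadOffsets(Offsets, 16, Window))
      std::cout << "  cluster of " << Cluster.size()
                << " loads starting at offset " << Cluster.front() << "\n";
  }
  return 0;
}

With the old 16-byte window the sixteen short loads split into four clusters of four; with the 64-byte window they stay in a single cluster of sixteen, consistent with the reordered checks in llvm.memcpy.ll, where the loads now come before the stores.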