From 67278b1774c364930a0cb834100dfda12c6c080a Mon Sep 17 00:00:00 2001
From: Mark Heffernan
Date: Tue, 11 Aug 2015 22:16:34 +0000
Subject: [PATCH] Use 32-bit divides instead of 64-bit divides where possible.

For NVPTX, try to use 32-bit division instead of 64-bit division when the
dividend and divisor fit in 32 bits. This speeds up some internal benchmarks
significantly. The underlying reason is that many index computations are
carried out in 64-bits but never actually exceed the capacity of a 32-bit
word.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@244684 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/NVPTX/NVPTXISelLowering.cpp |  4 ++
 test/CodeGen/NVPTX/bypass-div.ll       | 80 ++++++++++++++++++++++++++
 2 files changed, 84 insertions(+)
 create mode 100644 test/CodeGen/NVPTX/bypass-div.ll

diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 98ecd3b782a..724d7bce16a 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -124,6 +124,10 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   // condition branches.
   setJumpIsExpensive(true);
 
+  // Wide divides are _very_ slow. Try to reduce the width of the divide if
+  // possible.
+  addBypassSlowDiv(64, 32);
+
   // By default, use the Source scheduling
   if (sched4reg)
     setSchedulingPreference(Sched::RegPressure);
diff --git a/test/CodeGen/NVPTX/bypass-div.ll b/test/CodeGen/NVPTX/bypass-div.ll
new file mode 100644
index 00000000000..bd98c9a5b0b
--- /dev/null
+++ b/test/CodeGen/NVPTX/bypass-div.ll
@@ -0,0 +1,80 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_35 | FileCheck %s
+
+; 64-bit divides and rems should be split into a fast and slow path where
+; the fast path uses a 32-bit operation.
+
+define void @sdiv64(i64 %a, i64 %b, i64* %retptr) {
+; CHECK-LABEL: sdiv64(
+; CHECK: div.s64
+; CHECK: div.u32
+; CHECK: ret
+  %d = sdiv i64 %a, %b
+  store i64 %d, i64* %retptr
+  ret void
+}
+
+define void @udiv64(i64 %a, i64 %b, i64* %retptr) {
+; CHECK-LABEL: udiv64(
+; CHECK: div.u64
+; CHECK: div.u32
+; CHECK: ret
+  %d = udiv i64 %a, %b
+  store i64 %d, i64* %retptr
+  ret void
+}
+
+define void @srem64(i64 %a, i64 %b, i64* %retptr) {
+; CHECK-LABEL: srem64(
+; CHECK: rem.s64
+; CHECK: rem.u32
+; CHECK: ret
+  %d = srem i64 %a, %b
+  store i64 %d, i64* %retptr
+  ret void
+}
+
+define void @urem64(i64 %a, i64 %b, i64* %retptr) {
+; CHECK-LABEL: urem64(
+; CHECK: rem.u64
+; CHECK: rem.u32
+; CHECK: ret
+  %d = urem i64 %a, %b
+  store i64 %d, i64* %retptr
+  ret void
+}
+
+define void @sdiv32(i32 %a, i32 %b, i32* %retptr) {
+; CHECK-LABEL: sdiv32(
+; CHECK: div.s32
+; CHECK-NOT: div.
+  %d = sdiv i32 %a, %b
+  store i32 %d, i32* %retptr
+  ret void
+}
+
+define void @udiv32(i32 %a, i32 %b, i32* %retptr) {
+; CHECK-LABEL: udiv32(
+; CHECK: div.u32
+; CHECK-NOT: div.
+  %d = udiv i32 %a, %b
+  store i32 %d, i32* %retptr
+  ret void
+}
+
+define void @srem32(i32 %a, i32 %b, i32* %retptr) {
+; CHECK-LABEL: srem32(
+; CHECK: rem.s32
+; CHECK-NOT: rem.
+  %d = srem i32 %a, %b
+  store i32 %d, i32* %retptr
+  ret void
+}
+
+define void @urem32(i32 %a, i32 %b, i32* %retptr) {
+; CHECK-LABEL: urem32(
+; CHECK: rem.u32
+; CHECK-NOT: rem.
+  %d = urem i32 %a, %b
+  store i32 %d, i32* %retptr
+  ret void
+}
-- 
2.34.1
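
Note (not part of the patch itself): addBypassSlowDiv(64, 32) asks CodeGenPrepare's
slow-division bypass to rewrite each 64-bit divide/rem into a runtime check plus a
fast 32-bit path and a slow 64-bit path. The LLVM IR below is a hand-written sketch
of roughly what that expansion looks like for a 64-bit udiv; the function name and
value names are invented for illustration, and the IR the pass actually emits may
differ in detail.

; Sketch only: approximate shape of the bypass expansion for a 64-bit udiv.
define i64 @udiv64_bypassed(i64 %a, i64 %b) {
entry:
  ; If the upper 32 bits of both operands are zero, a 32-bit divide suffices.
  %or = or i64 %a, %b
  %hi = lshr i64 %or, 32
  %fits32 = icmp eq i64 %hi, 0
  br i1 %fits32, label %fast, label %slow

fast:                                    ; would lower to div.u32 in PTX
  %a32 = trunc i64 %a to i32
  %b32 = trunc i64 %b to i32
  %q32 = udiv i32 %a32, %b32
  %qfast = zext i32 %q32 to i64
  br label %done

slow:                                    ; would lower to div.u64 in PTX
  %qslow = udiv i64 %a, %b
  br label %done

done:
  %q = phi i64 [ %qfast, %fast ], [ %qslow, %slow ]
  ret i64 %q
}

Because the fast path is taken only when the upper 32 bits of both operands are
zero, both values are non-negative, so an unsigned 32-bit divide is safe even for
sdiv/srem. That is why the 64-bit tests above expect the signed 64-bit opcode on
the slow path but div.u32/rem.u32 on the fast path.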