From 29f97a6c46c3a8323eaf2a2a0bfe10e790972d27 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Sat, 21 Feb 2015 21:29:10 +0000 Subject: [PATCH] R600/SI: Use v_madmk_f32 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@230149 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/R600/SIInstrInfo.cpp | 55 +++++++++- test/CodeGen/R600/madmk.ll | 181 ++++++++++++++++++++++++++++++++ test/CodeGen/R600/uint_to_fp.ll | 2 +- 3 files changed, 233 insertions(+), 5 deletions(-) create mode 100644 test/CodeGen/R600/madmk.ll diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp index f2e0364e408..4f1e5ad91ba 100644 --- a/lib/Target/R600/SIInstrInfo.cpp +++ b/lib/Target/R600/SIInstrInfo.cpp @@ -892,10 +892,57 @@ bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI, MachineOperand *Src1 = getNamedOperand(*UseMI, AMDGPU::OpName::src1); MachineOperand *Src2 = getNamedOperand(*UseMI, AMDGPU::OpName::src2); - // The VOP2 src0 can't be an SGPR since the constant bus use will be the - // literal constant. - if (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))) - return false; + // Multiplied part is the constant: Use v_madmk_f32 + // We should only expect these to be on src0 due to canonicalizations. + if (Src0->isReg() && Src0->getReg() == Reg) { + if (!Src1->isReg() || + (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) + return false; + + if (!Src2->isReg() || + (Src2->isReg() && RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))) + return false; + + // We need to do some weird looking operand shuffling since the madmk + // operands are out of the normal expected order with the multiplied + // constant as the last operand. + // + // v_mad_f32 src0, src1, src2 -> v_madmk_f32 src0 * src2K + src1 + // src0 -> src2 K + // src1 -> src0 + // src2 -> src1 + + const int64_t Imm = DefMI->getOperand(1).getImm(); + + // FIXME: This would be a lot easier if we could return a new instruction + // instead of having to modify in place. + + // Remove these first since they are at the end. 
+ UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32, + AMDGPU::OpName::omod)); + UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32, + AMDGPU::OpName::clamp)); + + unsigned Src1Reg = Src1->getReg(); + unsigned Src1SubReg = Src1->getSubReg(); + unsigned Src2Reg = Src2->getReg(); + unsigned Src2SubReg = Src2->getSubReg(); + Src0->setReg(Src1Reg); + Src0->setSubReg(Src1SubReg); + Src1->setReg(Src2Reg); + Src1->setSubReg(Src2SubReg); + + Src2->ChangeToImmediate(Imm); + + removeModOperands(*UseMI); + UseMI->setDesc(get(AMDGPU::V_MADMK_F32)); + + bool DeleteDef = MRI->hasOneNonDBGUse(Reg); + if (DeleteDef) + DefMI->eraseFromParent(); + + return true; + } // Added part is the constant: Use v_madak_f32 if (Src2->isReg() && Src2->getReg() == Reg) { diff --git a/test/CodeGen/R600/madmk.ll b/test/CodeGen/R600/madmk.ll new file mode 100644 index 00000000000..249e48e4ce6 --- /dev/null +++ b/test/CodeGen/R600/madmk.ll @@ -0,0 +1,181 @@ +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s +; XUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s + +declare i32 @llvm.r600.read.tidig.x() nounwind readnone +declare float @llvm.fabs.f32(float) nounwind readnone + +; GCN-LABEL: {{^}}madmk_f32: +; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} +; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 +; GCN: v_madmk_f32 {{v[0-9]+}}, [[VA]], [[VB]], 0x41200000 +define void @madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind { + %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone + %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid + %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1 + %out.gep = getelementptr float addrspace(1)* %out, i32 %tid + + %a = load float addrspace(1)* %gep.0, align 4 + %b = load float addrspace(1)* %gep.1, align 4 + + %mul = fmul float %a, 10.0 + %madmk = fadd float %mul, %b + store float %madmk, float addrspace(1)* %out.gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}madmk_2_use_f32: +; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} +; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 +; GCN-DAG: buffer_load_dword [[VC:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8 +; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000 +; GCN-DAG: v_mad_f32 {{v[0-9]+}}, [[VA]], [[VK]], [[VB]] +; GCN-DAG: v_mad_f32 {{v[0-9]+}}, [[VA]], [[VK]], [[VC]] +; GCN: s_endpgm +define void @madmk_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind { + %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone + + %in.gep.0 = getelementptr float addrspace(1)* %in, i32 %tid + %in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1 + %in.gep.2 = getelementptr float addrspace(1)* %in.gep.0, i32 2 + + %out.gep.0 = getelementptr float addrspace(1)* %out, i32 %tid + %out.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1 + + %a = load float addrspace(1)* %in.gep.0, align 4 + %b = load float addrspace(1)* %in.gep.1, align 4 + %c = load float addrspace(1)* %in.gep.2, align 4 + + %mul0 = fmul float %a, 10.0 + %mul1 = fmul float %a, 10.0 + %madmk0 = fadd float %mul0, %b + %madmk1 = fadd float %mul1, %c + + store float 
%madmk0, float addrspace(1)* %out.gep.0, align 4 + store float %madmk1, float addrspace(1)* %out.gep.1, align 4 + ret void +} + +; We don't get any benefit if the constant is an inline immediate. +; GCN-LABEL: {{^}}madmk_inline_imm_f32: +; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} +; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 +; GCN: v_mad_f32 {{v[0-9]+}}, 4.0, [[VA]], [[VB]] +define void @madmk_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind { + %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone + %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid + %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1 + %out.gep = getelementptr float addrspace(1)* %out, i32 %tid + + %a = load float addrspace(1)* %gep.0, align 4 + %b = load float addrspace(1)* %gep.1, align 4 + + %mul = fmul float %a, 4.0 + %madmk = fadd float %mul, %b + store float %madmk, float addrspace(1)* %out.gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}s_s_madmk_f32: +; GCN-NOT: v_madmk_f32 +; GCN: v_mad_f32 +; GCN: s_endpgm +define void @s_s_madmk_f32(float addrspace(1)* noalias %out, float %a, float %b) nounwind { + %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone + %out.gep = getelementptr float addrspace(1)* %out, i32 %tid + + %mul = fmul float %a, 10.0 + %madmk = fadd float %mul, %b + store float %madmk, float addrspace(1)* %out.gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}v_s_madmk_f32: +; GCN-NOT: v_madmk_f32 +; GCN: v_mad_f32 +; GCN: s_endpgm +define void @v_s_madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in, float %b) nounwind { + %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone + %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid + %out.gep = getelementptr float addrspace(1)* %out, i32 %tid + %a = load float addrspace(1)* %gep.0, align 4 + + %mul = fmul float %a, 10.0 + %madmk = fadd float %mul, %b + store float %madmk, float addrspace(1)* %out.gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}scalar_vector_madmk_f32: +; GCN-NOT: v_madmk_f32 +; GCN: v_mad_f32 +; GCN: s_endpgm +define void @scalar_vector_madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in, float %a) nounwind { + %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone + %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid + %out.gep = getelementptr float addrspace(1)* %out, i32 %tid + %b = load float addrspace(1)* %gep.0, align 4 + + %mul = fmul float %a, 10.0 + %madmk = fadd float %mul, %b + store float %madmk, float addrspace(1)* %out.gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}no_madmk_src0_modifier_f32: +; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} +; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 +; GCN: v_mad_f32 {{v[0-9]+}}, |{{v[0-9]+}}|, {{v[0-9]+}}, {{[sv][0-9]+}} +define void @no_madmk_src0_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind { + %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone + %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid + %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1 + %out.gep = getelementptr float addrspace(1)* %out, i32 %tid + + %a = load float addrspace(1)* %gep.0, align 4 + %b = load float addrspace(1)* %gep.1, align 4 + + 
%a.fabs = call float @llvm.fabs.f32(float %a) nounwind readnone + + %mul = fmul float %a.fabs, 10.0 + %madmk = fadd float %mul, %b + store float %madmk, float addrspace(1)* %out.gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}no_madmk_src2_modifier_f32: +; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} +; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 +; GCN: v_mad_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, |{{[sv][0-9]+}}| +define void @no_madmk_src2_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind { + %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone + %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid + %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1 + %out.gep = getelementptr float addrspace(1)* %out, i32 %tid + + %a = load float addrspace(1)* %gep.0, align 4 + %b = load float addrspace(1)* %gep.1, align 4 + + %b.fabs = call float @llvm.fabs.f32(float %b) nounwind readnone + + %mul = fmul float %a, 10.0 + %madmk = fadd float %mul, %b.fabs + store float %madmk, float addrspace(1)* %out.gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}madmk_add_inline_imm_f32: +; GCN: buffer_load_dword [[A:v[0-9]+]] +; GCN: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000 +; GCN: v_mad_f32 {{v[0-9]+}}, [[VK]], [[A]], 2.0 +define void @madmk_add_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind { + %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone + %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid + %out.gep = getelementptr float addrspace(1)* %out, i32 %tid + + %a = load float addrspace(1)* %gep.0, align 4 + + %mul = fmul float %a, 10.0 + %madmk = fadd float %mul, 2.0 + store float %madmk, float addrspace(1)* %out.gep, align 4 + ret void +} diff --git a/test/CodeGen/R600/uint_to_fp.ll b/test/CodeGen/R600/uint_to_fp.ll index cf14c25759f..1c8a1751d39 100644 --- a/test/CodeGen/R600/uint_to_fp.ll +++ b/test/CodeGen/R600/uint_to_fp.ll @@ -50,7 +50,7 @@ define void @uint_to_fp_v4i32_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i32> ; R600: MULADD_IEEE ; SI: v_cvt_f32_u32_e32 ; SI: v_cvt_f32_u32_e32 -; SI: v_mad_f32 +; SI: v_madmk_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, 0x4f800000 ; SI: s_endpgm define void @uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) { entry: -- 2.34.1
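
The operand shuffle performed in FoldImmediate above is the subtle part of this patch: v_mad_f32 computes src0 * src1 + src2, while v_madmk_f32 computes src0 * K + src1 with the literal K as the trailing operand, so folding the constant multiplicand requires rotating the surviving register operands. Below is a minimal standalone C++ sketch (not LLVM code; madF32 and madmkF32 are hypothetical helpers for illustration only) showing why that remapping preserves the computed value under the assumed instruction semantics.

    // Illustrative sketch of the value-preservation argument behind the
    // operand shuffle in SIInstrInfo::FoldImmediate:
    //
    //   v_mad_f32   dst, src0, src1, src2   ; dst = src0 * src1 + src2
    //   v_madmk_f32 dst, src0, src1, K      ; dst = src0 * K    + src1
    //
    #include <cassert>
    #include <cstdio>

    // v_mad_f32 semantics (assumed for this sketch).
    static float madF32(float src0, float src1, float src2) {
      return src0 * src1 + src2;
    }

    // v_madmk_f32 semantics: the multiplied constant K is the last operand.
    static float madmkF32(float src0, float src1, float k) {
      return src0 * k + src1;
    }

    int main() {
      const float K = 10.0f;  // literal defined by the v_mov_b32 (DefMI)
      const float a = 3.5f;   // old src1 (VGPR)
      const float b = -2.25f; // old src2 (VGPR)

      // Original form: v_mad_f32 dst, K, a, b
      const float before = madF32(K, a, b);

      // Folded form after the shuffle:
      //   old src1 -> new src0, old src2 -> new src1, K -> trailing immediate.
      const float after = madmkF32(a, b, K);

      assert(before == after);
      std::printf("v_mad_f32(K, a, b) = %f, v_madmk_f32(a, b, K) = %f\n",
                  before, after);
      return 0;
    }

This also explains the bail-out conditions earlier in the hunk: both surviving operands must be VGPRs, since after the fold the literal constant occupies the single constant-bus slot, and src0/src2 modifiers block the fold because v_madmk_f32 has no room to encode them (see the no_madmk_src*_modifier_f32 tests).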