From 60178b180f31468f143ad5451c2ba24bdac81df6 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Wed, 6 Aug 2014 20:27:55 +0000
Subject: [PATCH] R600: Cleanup fadd and fsub tests

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@214991 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/R600/fadd.ll | 85 +++++++++++++++++------------------
 test/CodeGen/R600/fsub.ll | 93 +++++++++++++++++++++++++--------------
 2 files changed, 100 insertions(+), 78 deletions(-)

diff --git a/test/CodeGen/R600/fadd.ll b/test/CodeGen/R600/fadd.ll
index 5d2b806039a..3a87c892d7b 100644
--- a/test/CodeGen/R600/fadd.ll
+++ b/test/CodeGen/R600/fadd.ll
@@ -1,66 +1,63 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
 
 ; FUNC-LABEL: @fadd_f32
-; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
-; SI-CHECK: V_ADD_F32
+; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
+; SI: V_ADD_F32
 define void @fadd_f32(float addrspace(1)* %out, float %a, float %b) {
-entry:
-  %0 = fadd float %a, %b
-  store float %0, float addrspace(1)* %out
+  %add = fadd float %a, %b
+  store float %add, float addrspace(1)* %out, align 4
   ret void
 }
 
 ; FUNC-LABEL: @fadd_v2f32
-; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
-; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
-; SI-CHECK: V_ADD_F32
-; SI-CHECK: V_ADD_F32
+; R600-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
+; R600-DAG: ADD {{\** *}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
+; SI: V_ADD_F32
+; SI: V_ADD_F32
 define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
-entry:
-  %0 = fadd <2 x float> %a, %b
-  store <2 x float> %0, <2 x float> addrspace(1)* %out
+  %add = fadd <2 x float> %a, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out, align 8
   ret void
 }
 
 ; FUNC-LABEL: @fadd_v4f32
-; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI-CHECK: V_ADD_F32
-; SI-CHECK: V_ADD_F32
-; SI-CHECK: V_ADD_F32
-; SI-CHECK: V_ADD_F32
+; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; SI: V_ADD_F32
+; SI: V_ADD_F32
+; SI: V_ADD_F32
+; SI: V_ADD_F32
 define void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
-  %a = load <4 x float> addrspace(1) * %in
-  %b = load <4 x float> addrspace(1) * %b_ptr
+  %a = load <4 x float> addrspace(1)* %in, align 16
+  %b = load <4 x float> addrspace(1)* %b_ptr, align 16
   %result = fadd <4 x float> %a, %b
-  store <4 x float> %result, <4 x float> addrspace(1)* %out
+  store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
   ret void
 }
 
 ; FUNC-LABEL: @fadd_v8f32
-; R600-CHECK: ADD
-; R600-CHECK: ADD
-; R600-CHECK: ADD
-; R600-CHECK: ADD
-; R600-CHECK: ADD
-; R600-CHECK: ADD
-; R600-CHECK: ADD
-; R600-CHECK: ADD
-; SI-CHECK: V_ADD_F32
-; SI-CHECK: V_ADD_F32
-; SI-CHECK: V_ADD_F32
-; SI-CHECK: V_ADD_F32
-; SI-CHECK: V_ADD_F32
-; SI-CHECK: V_ADD_F32
-; SI-CHECK: V_ADD_F32
-; SI-CHECK: V_ADD_F32
+; R600: ADD
+; R600: ADD
+; R600: ADD
+; R600: ADD
+; R600: ADD
+; R600: ADD
+; R600: ADD
+; R600: ADD
+; SI: V_ADD_F32
+; SI: V_ADD_F32
+; SI: V_ADD_F32
+; SI: V_ADD_F32
+; SI: V_ADD_F32
+; SI: V_ADD_F32
+; SI: V_ADD_F32
+; SI: V_ADD_F32
 define void @fadd_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, <8 x float> %b) {
-entry:
-  %0 = fadd <8 x float> %a, %b
-  store <8 x float> %0, <8 x float> addrspace(1)* %out
+  %add = fadd <8 x float> %a, %b
+  store <8 x float> %add, <8 x float> addrspace(1)* %out, align 32
   ret void
 }
diff --git a/test/CodeGen/R600/fsub.ll b/test/CodeGen/R600/fsub.ll
index 5fb9ff6056d..22c6268e296 100644
--- a/test/CodeGen/R600/fsub.ll
+++ b/test/CodeGen/R600/fsub.ll
@@ -1,14 +1,25 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
-
-; R600-CHECK: @fsub_f32
-; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, -KC0[2].W
-; SI-CHECK: @fsub_f32
-; SI-CHECK: V_SUB_F32
-define void @fsub_f32(float addrspace(1)* %out, float %a, float %b) {
-entry:
-  %0 = fsub float %a, %b
-  store float %0, float addrspace(1)* %out
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+
+; FUNC-LABEL: @v_fsub_f32
+; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+define void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+  %b_ptr = getelementptr float addrspace(1)* %in, i32 1
+  %a = load float addrspace(1)* %in, align 4
+  %b = load float addrspace(1)* %b_ptr, align 4
+  %result = fsub float %a, %b
+  store float %result, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: @s_fsub_f32
+; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, -KC0[2].W
+
+; SI: V_SUB_F32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
+define void @s_fsub_f32(float addrspace(1)* %out, float %a, float %b) {
+  %sub = fsub float %a, %b
+  store float %sub, float addrspace(1)* %out, align 4
   ret void
 }
 
@@ -16,34 +27,48 @@ declare float @llvm.R600.load.input(i32) readnone
 declare void @llvm.AMDGPU.store.output(float, i32)
 
-; R600-CHECK: @fsub_v2f32
-; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
-; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y
-; SI-CHECK: @fsub_v2f32
-; SI-CHECK: V_SUBREV_F32
-; SI-CHECK: V_SUBREV_F32
+; FUNC-LABEL: @fsub_v2f32
+; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
+; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y
+
+; FIXME: Should be using SGPR directly for first operand
+; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
 define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
-entry:
-  %0 = fsub <2 x float> %a, %b
-  store <2 x float> %0, <2 x float> addrspace(1)* %out
+  %sub = fsub <2 x float> %a, %b
+  store <2 x float> %sub, <2 x float> addrspace(1)* %out, align 8
   ret void
 }
 
-; R600-CHECK: @fsub_v4f32
-; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-; SI-CHECK: @fsub_v4f32
-; SI-CHECK: V_SUBREV_F32
-; SI-CHECK: V_SUBREV_F32
-; SI-CHECK: V_SUBREV_F32
-; SI-CHECK: V_SUBREV_F32
-define void @fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+; FUNC-LABEL: @v_fsub_v4f32
+; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+
+; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+define void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
-  %a = load <4 x float> addrspace(1) * %in
-  %b = load <4 x float> addrspace(1) * %b_ptr
+  %a = load <4 x float> addrspace(1)* %in, align 16
+  %b = load <4 x float> addrspace(1)* %b_ptr, align 16
+  %result = fsub <4 x float> %a, %b
+  store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
+  ret void
+}
+
+; FIXME: Should be using SGPR directly for first operand
+
+; FUNC-LABEL: @s_fsub_v4f32
+; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: V_SUBREV_F32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: S_ENDPGM
+define void @s_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) {
   %result = fsub <4 x float> %a, %b
-  store <4 x float> %result, <4 x float> addrspace(1)* %out
+  store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
   ret void
 }
 
-- 
2.34.1