From 1c8de8dde489a828e4c79a1e47928f015c835d15 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Sat, 8 Aug 2015 19:08:20 +0000
Subject: [PATCH] [x86] enable machine combiner reassociations for 128-bit
 vector single/double adds

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@244403 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrInfo.cpp      |  4 +++
 test/CodeGen/X86/machine-combiner.ll | 44 ++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 01d59f9a8b4..9a6e860135d 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -6371,8 +6371,12 @@ static bool isAssociativeAndCommutative(const MachineInstr &Inst) {
   case X86::IMUL32rr:
   case X86::IMUL64rr:
     return true;
+  case X86::ADDPDrr:
+  case X86::ADDPSrr:
   case X86::ADDSDrr:
   case X86::ADDSSrr:
+  case X86::VADDPDrr:
+  case X86::VADDPSrr:
   case X86::VADDSDrr:
   case X86::VADDSSrr:
   case X86::MULSDrr:
diff --git a/test/CodeGen/X86/machine-combiner.ll b/test/CodeGen/X86/machine-combiner.ll
index 0943bebbb09..af1c661417c 100644
--- a/test/CodeGen/X86/machine-combiner.ll
+++ b/test/CodeGen/X86/machine-combiner.ll
@@ -210,3 +210,47 @@ define double @reassociate_muls_double(double %x0, double %x1, double %x2, doubl
   ret double %t2
 }
 
+; Verify that SSE and AVX 128-bit vector single-precision adds are reassociated.
+
+define <4 x float> @reassociate_adds_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
+; SSE-LABEL: reassociate_adds_v4f32:
+; SSE: # BB#0:
+; SSE-NEXT: mulps %xmm1, %xmm0
+; SSE-NEXT: addps %xmm3, %xmm2
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: reassociate_adds_v4f32:
+; AVX: # BB#0:
+; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vaddps %xmm3, %xmm2, %xmm1
+; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+  %t0 = fmul <4 x float> %x0, %x1
+  %t1 = fadd <4 x float> %x2, %t0
+  %t2 = fadd <4 x float> %x3, %t1
+  ret <4 x float> %t2
+}
+
+; Verify that SSE and AVX 128-bit vector double-precision adds are reassociated.
+
+define <2 x double> @reassociate_adds_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
+; SSE-LABEL: reassociate_adds_v2f64:
+; SSE: # BB#0:
+; SSE-NEXT: mulpd %xmm1, %xmm0
+; SSE-NEXT: addpd %xmm3, %xmm2
+; SSE-NEXT: addpd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: reassociate_adds_v2f64:
+; AVX: # BB#0:
+; AVX-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vaddpd %xmm3, %xmm2, %xmm1
+; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+  %t0 = fmul <2 x double> %x0, %x1
+  %t1 = fadd <2 x double> %x2, %t0
+  %t2 = fadd <2 x double> %x3, %t1
+  ret <2 x double> %t2
+}
+
-- 
2.34.1
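
Note (editorial, not part of the patch): below is a hedged C-level sketch of the kind of code this change affects. The function names and the intrinsics-based source are invented for illustration; the patch itself only adds the packed-add opcodes to isAssociativeAndCommutative so the existing machine-combiner reassociation can fire on them. Reassociating FP operations requires relaxed floating-point semantics, which at the C level roughly corresponds to -ffast-math. Under that assumption, the serial chain (x0*x1 + x2) + x3 may be rewritten as (x0*x1) + (x2 + x3), so the x2+x3 add no longer waits on the multiply.

// Hypothetical example mirroring the new reassociate_adds_v4f32 test.
#include <immintrin.h>

// Serial form: each addps depends on the previous result, so the chain is
// mulps -> addps -> addps with no overlap.
__m128 serial_chain(__m128 x0, __m128 x1, __m128 x2, __m128 x3) {
  __m128 t0 = _mm_mul_ps(x0, x1); // mulps
  __m128 t1 = _mm_add_ps(x2, t0); // addps, waits for the multiply
  __m128 t2 = _mm_add_ps(x3, t1); // addps, waits for the previous add
  return t2;
}

// Reassociated form: x2 + x3 is independent of the multiply and can issue
// in parallel with it; only the final addps joins the two partial results.
__m128 reassociated(__m128 x0, __m128 x1, __m128 x2, __m128 x3) {
  __m128 prod = _mm_mul_ps(x0, x1); // mulps
  __m128 sum  = _mm_add_ps(x2, x3); // addps, independent of the multiply
  return _mm_add_ps(prod, sum);     // final addps
}

The new CHECK lines in machine-combiner.ll verify exactly this shape: addps %xmm3, %xmm2 is formed independently of the mulps and only then added to the product.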