From: Sanjay Patel
Date: Thu, 9 Jul 2015 22:48:54 +0000 (+0000)
Subject: [x86] enable machine combiner reassociations for scalar double-precision adds
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=39f09b515025615cab2da9ee48152c1f94c4a47a;p=oota-llvm.git

[x86] enable machine combiner reassociations for scalar double-precision adds

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@241871 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 5484ae91855..fdfdac90033 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -6408,7 +6408,9 @@ static bool hasReassocSibling(const MachineInstr &Inst, bool &Commuted) {
 //       2. Other math / logic operations (and, or)
 static bool isAssociativeAndCommutative(unsigned Opcode) {
   switch (Opcode) {
+  case X86::ADDSDrr:
   case X86::ADDSSrr:
+  case X86::VADDSDrr:
   case X86::VADDSSrr:
   case X86::MULSSrr:
   case X86::VMULSSrr:
diff --git a/test/CodeGen/X86/machine-combiner.ll b/test/CodeGen/X86/machine-combiner.ll
index 2286da7e94d..ae059a1ed08 100644
--- a/test/CodeGen/X86/machine-combiner.ll
+++ b/test/CodeGen/X86/machine-combiner.ll
@@ -144,7 +144,7 @@ define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
   ret float %t2
 }
 
-; Verify that SSE and AVX scalar single precison multiplies are reassociated.
+; Verify that SSE and AVX scalar single-precision multiplies are reassociated.
 
 define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_muls1:
@@ -165,3 +165,25 @@ define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
   %t2 = fmul float %x3, %t1
   ret float %t2
 }
+
+; Verify that SSE and AVX scalar double-precision adds are reassociated.
+
+define double @reassociate_adds_double(double %x0, double %x1, double %x2, double %x3) {
+; SSE-LABEL: reassociate_adds_double:
+; SSE:       # BB#0:
+; SSE-NEXT:    divsd %xmm1, %xmm0
+; SSE-NEXT:    addsd %xmm3, %xmm2
+; SSE-NEXT:    addsd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_adds_double:
+; AVX:       # BB#0:
+; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vaddsd %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %t0 = fdiv double %x0, %x1
+  %t1 = fadd double %x2, %t0
+  %t2 = fadd double %x3, %t1
+  ret double %t2
+}
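
For context on what the new reassociate_adds_double test checks: by listing ADDSDrr/VADDSDrr as associative and commutative, the machine combiner may rewrite the double-precision add chain so that the add of %x2 and %x3 no longer waits on the long-latency divide. The standalone C++ sketch below (not part of the commit; the function names and the latency remarks in the comments are illustrative assumptions) mirrors the before/after dataflow that the CHECK lines encode.

// Plain C++ illustration of the reassociation the machine combiner performs
// on the scalar double-precision add chain in reassociate_adds_double.
// This is a sketch for explanation only, not LLVM code.
#include <cstdio>

// Association as written in the test's IR:
//   t0 = x0 / x1   (long-latency divide)
//   t1 = x2 + t0   (waits on the divide)
//   t2 = x3 + t1   (waits on t1, and therefore on the divide)
// Critical path: divide -> add -> add.
static double adds_before(double x0, double x1, double x2, double x3) {
  double t0 = x0 / x1;
  double t1 = x2 + t0;
  double t2 = x3 + t1;
  return t2;
}

// Reassociated form matching the expected asm (addsd %xmm3, %xmm2 first):
//   t0 = x0 / x1   (divide starts)
//   s  = x2 + x3   (independent of the divide; can execute in parallel)
//   t2 = t0 + s    (only one add remains after the divide completes)
// Critical path: divide -> add.
static double adds_after(double x0, double x1, double x2, double x3) {
  double t0 = x0 / x1;
  double s  = x2 + x3;
  return t0 + s;
}

int main() {
  // The two orders are interchangeable only under relaxed floating-point
  // semantics; exact IEEE evaluation may round differently.
  printf("%f %f\n", adds_before(1.0, 3.0, 5.0, 7.0),
         adds_after(1.0, 3.0, 5.0, 7.0));
  return 0;
}

Because the rewrite changes rounding in general, the machine combiner only applies it under relaxed (unsafe/fast) floating-point math, which is the mode the machine-combiner tests run in.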