From: Sanjay Patel <spatel@rotateright.com>
Date: Sat, 12 Sep 2015 14:58:04 +0000 (+0000)
Subject: [x86] enable machine combiner reassociations for 128-bit vector logical integer insts
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=e6c453bf2d6c6f45b8009909e0556a809674a0d3;p=oota-llvm.git

[x86] enable machine combiner reassociations for 128-bit vector logical integer insts
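
Adding these opcodes to isAssociativeAndCommutative() lets the machine
combiner rebalance chains of these instructions to shorten the critical
path. For example, the first function in the new test below feeds in:

  %t0 = add <4 x i32> %x0, %x1
  %t1 = and <4 x i32> %x2, %t0
  %t2 = and <4 x i32> %x3, %t1

Instead of a serial paddd -> pand -> pand chain in which each
instruction waits on the previous result, the combiner now produces:

  paddd %xmm1, %xmm0    # t0 = x0 + x1
  pand  %xmm3, %xmm2    # x2 & x3, independent of the paddd
  pand  %xmm2, %xmm0    # combine the two halves

so the add and the first pand can execute in parallel.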


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@247506 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 8b883162999..446d4bce155 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -6408,6 +6408,12 @@ static bool isAssociativeAndCommutative(const MachineInstr &Inst) {
   case X86::IMUL16rr:
   case X86::IMUL32rr:
   case X86::IMUL64rr:
+  case X86::PANDrr:
+  case X86::PORrr:
+  case X86::PXORrr:
+  case X86::VPANDrr:
+  case X86::VPORrr:
+  case X86::VPXORrr:
   // Normal min/max instructions are not commutative because of NaN and signed
   // zero semantics, but these are. Thus, there's no need to check for global
   // relaxed math; the instructions themselves have the properties we need.
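
Unlike the FP min/max entries that the comment above introduces, the
bitwise ops added here need no special justification: integer AND, OR,
and XOR are associative and commutative for every bit pattern. Plain
SSE FP min/max, by contrast, are not commutative: minps returns its
second source operand whenever the inputs are unordered (or both are
zero), so min(x, NaN) = NaN while min(NaN, x) = x, and
min(+0.0, -0.0) = -0.0 while min(-0.0, +0.0) = +0.0. Only the special
commutative min/max variants qualify for this list.
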
diff --git a/test/CodeGen/X86/machine-combiner-int-vec.ll b/test/CodeGen/X86/machine-combiner-int-vec.ll
new file mode 100644
index 00000000000..8316f66c1dc
--- /dev/null
+++ b/test/CodeGen/X86/machine-combiner-int-vec.ll
@@ -0,0 +1,68 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse < %s | FileCheck %s --check-prefix=SSE
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx < %s | FileCheck %s --check-prefix=AVX
+
+; Verify that 128-bit vector logical ops are reassociated.
+
+define <4 x i32> @reassociate_and_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
+; SSE-LABEL: reassociate_and_v4i32:
+; SSE:       # BB#0:
+; SSE-NEXT:    paddd %xmm1, %xmm0
+; SSE-NEXT:    pand %xmm3, %xmm2
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_and_v4i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+
+  %t0 = add <4 x i32> %x0, %x1
+  %t1 = and <4 x i32> %x2, %t0
+  %t2 = and <4 x i32> %x3, %t1
+  ret <4 x i32> %t2
+}
+
+define <4 x i32> @reassociate_or_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
+; SSE-LABEL: reassociate_or_v4i32:
+; SSE:       # BB#0:
+; SSE-NEXT:    paddd %xmm1, %xmm0
+; SSE-NEXT:    por %xmm3, %xmm2
+; SSE-NEXT:    por %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_or_v4i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+
+  %t0 = add <4 x i32> %x0, %x1
+  %t1 = or <4 x i32> %x2, %t0
+  %t2 = or <4 x i32> %x3, %t1
+  ret <4 x i32> %t2
+}
+
+define <4 x i32> @reassociate_xor_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
+; SSE-LABEL: reassociate_xor_v4i32:
+; SSE:       # BB#0:
+; SSE-NEXT:    paddd %xmm1, %xmm0
+; SSE-NEXT:    pxor %xmm3, %xmm2
+; SSE-NEXT:    pxor %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_xor_v4i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+
+  %t0 = add <4 x i32> %x0, %x1
+  %t1 = xor <4 x i32> %x2, %t0
+  %t2 = xor <4 x i32> %x3, %t1
+  ret <4 x i32> %t2
+}
+