// TODO: There are many more machine instruction opcodes to match:
// 1. Other data types (double, integer, vectors)
-// 2. Other math / logic operations (mul, and, or)
+// 2. Other math / logic operations (and, or)
static bool isAssociativeAndCommutative(unsigned Opcode) {
  switch (Opcode) {
-  case X86::VADDSSrr:
  case X86::ADDSSrr:
+  case X86::VADDSSrr:
+  case X86::MULSSrr:
+  case X86::VMULSSrr:
    return true;
  default:
    return false;
  }
}
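// ---------------------------------------------------------------------------
// Illustrative sketch only: this is not part of the patch and not LLVM's
// actual MachineCombiner code. It shows the rewrite that a predicate like
// isAssociativeAndCommutative() enables: ((A op B) op C) may be rewritten as
// ((C op B) op A), so when A is a long-latency value (e.g. the result of a
// divide), C op B no longer has to wait for it. Every name below (Expr,
// OpKind, reassociate) is invented for this example.
// ---------------------------------------------------------------------------
#include <memory>
#include <utility>

enum OpKind { Leaf, FMul, FDiv };

struct Expr {
  OpKind Kind = Leaf;
  std::unique_ptr<Expr> LHS, RHS; // both null when Kind == Leaf
};

// Mirrors the role of isAssociativeAndCommutative() above: fdiv is neither
// associative nor commutative, so only fmul chains may be rebalanced.
static bool isAssocAndComm(OpKind K) { return K == FMul; }

// Rewrite Root = ((A op B) op C) into ((C op B) op A). This is legal only
// because 'op' is associative and commutative. The real MachineCombiner also
// checks fast-math constraints and consults latency/depth data for the trace
// to decide whether the new form actually shortens the critical path.
static void reassociate(Expr &Root) {
  if (!isAssocAndComm(Root.Kind) || !Root.LHS)
    return;
  Expr &Inner = *Root.LHS;
  if (Inner.Kind != Root.Kind || !Inner.LHS)
    return;
  // Swap A (inner LHS) with C (outer RHS): the new inner node (C op B) is
  // independent of A and can issue while A is still being computed.
  std::swap(Inner.LHS, Root.RHS);
}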
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s
; Anything more than one division using a single divisor operand
; should be converted into a reciprocal and multiplication.
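; For example, the function below divides by %z twice. Instead of issuing two
; divss, the compiler computes the reciprocal once (the movss constant,
; presumably 1.0, divided by %z) and multiplies by it in place of each
; division, which is why the CHECK lines expect one divss and three mulss.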
; CHECK: # BB#0:
; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT: divss %xmm2, %xmm3
-; CHECK-NEXT: mulss %xmm3, %xmm0
; CHECK-NEXT: mulss %xmm1, %xmm0
; CHECK-NEXT: mulss %xmm3, %xmm0
+; CHECK-NEXT: mulss %xmm3, %xmm0
; CHECK-NEXT: retq
  %div1 = fdiv arcp float %x, %z
  %mul = fmul arcp float %div1, %y
  %div2 = fdiv arcp float %mul, %z
  ret float %div2
}
+; Verify that SSE and AVX scalar single-precision multiplies are reassociated.
+
+define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
+; SSE-LABEL: reassociate_muls1:
+; SSE: # BB#0:
+; SSE-NEXT: divss %xmm1, %xmm0
+; SSE-NEXT: mulss %xmm3, %xmm2
+; SSE-NEXT: mulss %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: reassociate_muls1:
+; AVX: # BB#0:
+; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmulss %xmm3, %xmm2, %xmm1
+; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %t0 = fdiv float %x0, %x1
+ %t1 = fmul float %x2, %t0
+ %t2 = fmul float %x3, %t1
+ ret float %t2
+}
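; Without reassociation, t2 = x3 * (x2 * (x0 / x1)) is one serial chain in
; which both multiplies wait on the divide. In the reassociated code above,
; the first mulss/vmulss combines the independent operands %xmm3 and %xmm2,
; so it can execute while the long-latency divss/vdivss is still in flight.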
; ESTIMATE: # BB#0:
; ESTIMATE-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
; ESTIMATE-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm2
-; ESTIMATE-NEXT: vmulss %xmm1, %xmm1, %xmm1
-; ESTIMATE-NEXT: vmulss %xmm0, %xmm1, %xmm1
+; ESTIMATE-NEXT: vmulss %xmm0, %xmm1, %xmm3
+; ESTIMATE-NEXT: vmulss %xmm3, %xmm1, %xmm1
; ESTIMATE-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
+; ESTIMATE-NEXT: vmulss %xmm0, %xmm2, %xmm2
; ESTIMATE-NEXT: vmulss %xmm2, %xmm1, %xmm1
-; ESTIMATE-NEXT: vmulss %xmm1, %xmm0, %xmm1
; ESTIMATE-NEXT: vxorps %xmm2, %xmm2, %xmm2
; ESTIMATE-NEXT: vcmpeqss %xmm2, %xmm0, %xmm0
; ESTIMATE-NEXT: vandnps %xmm1, %xmm0, %xmm0
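; The block above is the Newton-Raphson refinement of the vrsqrtss estimate
; (the {{.*}}(%rip) operands hide the refinement constants). Reassociation
; rewrites (e*e)*x as (e*x)*e and hoists the final multiply by %xmm0 onto the
; constant-scaled estimate in %xmm2, so it overlaps the vaddss instead of
; extending the dependent chain after it.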
; ESTIMATE: # BB#0:
; ESTIMATE-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
; ESTIMATE-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm2
-; ESTIMATE-NEXT: vmulss %xmm1, %xmm1, %xmm1
+; ESTIMATE-NEXT: vmulss %xmm0, %xmm1, %xmm0
; ESTIMATE-NEXT: vmulss %xmm0, %xmm1, %xmm0
; ESTIMATE-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; ESTIMATE-NEXT: vmulss %xmm2, %xmm0, %xmm0