ret <2 x double> %t2
}
+; Verify that AVX 256-bit vector single-precision adds are reassociated.
+
+define <8 x float> @reassociate_adds_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; AVX-LABEL: reassociate_adds_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vaddps %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fmul <8 x float> %x0, %x1
+ %t1 = fadd <8 x float> %x2, %t0
+ %t2 = fadd <8 x float> %x3, %t1
+ ret <8 x float> %t2
+}
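+; The reassociated form computes (x0 * x1) + (x2 + x3) instead of
+; ((x0 * x1) + x2) + x3, as the checks above show: the add of x2 and x3
+; can issue in parallel with the multiply, shortening the dependency chain
+; into the final add.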
+
+; Verify that AVX 256-bit vector double-precision adds are reassociated.
+
+define <4 x double> @reassociate_adds_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; AVX-LABEL: reassociate_adds_v4f64:
+; AVX: # BB#0:
+; AVX-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vaddpd %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fmul <4 x double> %x0, %x1
+ %t1 = fadd <4 x double> %x2, %t0
+ %t2 = fadd <4 x double> %x3, %t1
+ ret <4 x double> %t2
+}
+
+; Verify that AVX 256-bit vector single-precision multiplies are reassociated.
+
+define <8 x float> @reassociate_muls_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; AVX-LABEL: reassociate_muls_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmulps %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fadd <8 x float> %x0, %x1
+ %t1 = fmul <8 x float> %x2, %t0
+ %t2 = fmul <8 x float> %x3, %t1
+ ret <8 x float> %t2
+}
+
+; Verify that AVX 256-bit vector double-precision multiplies are reassociated.
+
+define <4 x double> @reassociate_muls_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; AVX-LABEL: reassociate_muls_v4f64:
+; AVX: # BB#0:
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmulpd %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fadd <4 x double> %x0, %x1
+ %t1 = fmul <4 x double> %x2, %t0
+ %t2 = fmul <4 x double> %x3, %t1
+ ret <4 x double> %t2
+}
+
; ESTIMATE-LABEL: reciprocal_square_root_v8f32:
; ESTIMATE: # BB#0:
; ESTIMATE-NEXT: vrsqrtps %ymm0, %ymm1
-; ESTIMATE-NEXT: vmulps %ymm1, %ymm1, %ymm2
-; ESTIMATE-NEXT: vmulps %ymm0, %ymm2, %ymm0
+; ESTIMATE-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; ESTIMATE-NEXT: vmulps %ymm0, %ymm1, %ymm0
; ESTIMATE-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; ESTIMATE-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
; ESTIMATE-NEXT: vmulps %ymm1, %ymm0, %ymm0
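+; Note: reassociation rewrites x * (e * e) in the rsqrt estimate sequence
+; above as (x * e) * e, so squaring the estimate no longer needs a separate
+; temporary register (%ymm2 in the old checks).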