ret <2 x double> %sel2
}
+; Verify that AVX 256-bit vector single-precision minimum ops are reassociated.
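+; In each of the four vector tests below, the IR builds the serial chain
+; min/max(x3, min/max(x2, x0 + x1)); the CHECK lines expect the combiner to
+; rewrite this as min/max(x0 + x1, min/max(x2, x3)), so the inner min/max of
+; x2 and x3 no longer depends on the result of the add.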
+
+define <8 x float> @reassociate_mins_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; AVX-LABEL: reassociate_mins_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vminps %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vminps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fadd <8 x float> %x0, %x1
+ %cmp1 = fcmp olt <8 x float> %x2, %t0
+ %sel1 = select <8 x i1> %cmp1, <8 x float> %x2, <8 x float> %t0
+ %cmp2 = fcmp olt <8 x float> %x3, %sel1
+ %sel2 = select <8 x i1> %cmp2, <8 x float> %x3, <8 x float> %sel1
+ ret <8 x float> %sel2
+}
+
+; Verify that AVX 256-bit vector single-precision maximum ops are reassociated.
+
+define <8 x float> @reassociate_maxs_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; AVX-LABEL: reassociate_maxs_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmaxps %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vmaxps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fadd <8 x float> %x0, %x1
+ %cmp1 = fcmp ogt <8 x float> %x2, %t0
+ %sel1 = select <8 x i1> %cmp1, <8 x float> %x2, <8 x float> %t0
+ %cmp2 = fcmp ogt <8 x float> %x3, %sel1
+ %sel2 = select <8 x i1> %cmp2, <8 x float> %x3, <8 x float> %sel1
+ ret <8 x float> %sel2
+}
+
+; Verify that AVX 256-bit vector double-precision minimum ops are reassociated.
+
+define <4 x double> @reassociate_mins_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; AVX-LABEL: reassociate_mins_v4f64:
+; AVX: # BB#0:
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vminpd %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vminpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fadd <4 x double> %x0, %x1
+ %cmp1 = fcmp olt <4 x double> %x2, %t0
+ %sel1 = select <4 x i1> %cmp1, <4 x double> %x2, <4 x double> %t0
+ %cmp2 = fcmp olt <4 x double> %x3, %sel1
+ %sel2 = select <4 x i1> %cmp2, <4 x double> %x3, <4 x double> %sel1
+ ret <4 x double> %sel2
+}
+
+; Verify that AVX 256-bit vector double-precision maximum ops are reassociated.
+
+define <4 x double> @reassociate_maxs_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; AVX-LABEL: reassociate_maxs_v4f64:
+; AVX: # BB#0:
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmaxpd %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vmaxpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fadd <4 x double> %x0, %x1
+ %cmp1 = fcmp ogt <4 x double> %x2, %t0
+ %sel1 = select <4 x i1> %cmp1, <4 x double> %x2, <4 x double> %t0
+ %cmp2 = fcmp ogt <4 x double> %x3, %sel1
+ %sel2 = select <4 x i1> %cmp2, <4 x double> %x3, <4 x double> %sel1
+ ret <4 x double> %sel2
+}
+
+; PR25016: https://llvm.org/bugs/show_bug.cgi?id=25016
+; Verify that reassociation does not happen needlessly or incorrectly.
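+; Both functions below are expected to lower to the same instruction sequence:
+; the four call results are spilled and then summed, and the CHECK lines are
+; identical whether the IR adds are written left-leaning or already balanced.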
+
+declare double @bar()
+
+define double @reassociate_adds_from_calls() {
+; AVX-LABEL: reassociate_adds_from_calls:
+; AVX: callq bar
+; AVX-NEXT: vmovsd %xmm0, 16(%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, 8(%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, (%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd 8(%rsp), %xmm1
+; AVX: vaddsd 16(%rsp), %xmm1, %xmm1
+; AVX-NEXT: vaddsd (%rsp), %xmm0, %xmm0
+; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+
+ %x0 = call double @bar()
+ %x1 = call double @bar()
+ %x2 = call double @bar()
+ %x3 = call double @bar()
+ %t0 = fadd double %x0, %x1
+ %t1 = fadd double %t0, %x2
+ %t2 = fadd double %t1, %x3
+ ret double %t2
+}
+
+define double @already_reassociated() {
+; AVX-LABEL: already_reassociated:
+; AVX: callq bar
+; AVX-NEXT: vmovsd %xmm0, 16(%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, 8(%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, (%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd 8(%rsp), %xmm1
+; AVX: vaddsd 16(%rsp), %xmm1, %xmm1
+; AVX-NEXT: vaddsd (%rsp), %xmm0, %xmm0
+; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+
+ %x0 = call double @bar()
+ %x1 = call double @bar()
+ %x2 = call double @bar()
+ %x3 = call double @bar()
+ %t0 = fadd double %x0, %x1
+ %t1 = fadd double %x2, %x3
+ %t2 = fadd double %t0, %t1
+ ret double %t2
+}
+