ret <16 x i8> %4
}
-define <2 x i64> @max_lt_v2i64c() {
-; SSE2-LABEL: max_lt_v2i64c:
+define <2 x i64> @min_lt_v2i64c() {
+; SSE2-LABEL: min_lt_v2i64c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551609,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [18446744073709551615,1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_lt_v2i64c:
+; SSE41-LABEL: min_lt_v2i64c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [18446744073709551609,7]
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551615,1]
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_lt_v2i64c:
+; SSE42-LABEL: min_lt_v2i64c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [18446744073709551609,7]
; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551615,1]
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_lt_v2i64c:
+; AVX-LABEL: min_lt_v2i64c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [18446744073709551609,7]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551615,1]
ret <2 x i64> %4
}
-define <4 x i64> @max_lt_v4i64c() {
-; SSE2-LABEL: max_lt_v4i64c:
+define <4 x i64> @min_lt_v4i64c() {
+; SSE2-LABEL: min_lt_v4i64c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [18446744073709551609,18446744073709551615]
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [1,7]
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_lt_v4i64c:
+; SSE41-LABEL: min_lt_v4i64c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [18446744073709551609,18446744073709551615]
; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [1,7]
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_lt_v4i64c:
+; SSE42-LABEL: min_lt_v4i64c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [18446744073709551609,18446744073709551615]
; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [1,7]
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: retq
;
-; AVX1-LABEL: max_lt_v4i64c:
+; AVX1-LABEL: min_lt_v4i64c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551609,7,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551615,18446744073709551609]
; AVX1-NEXT: vblendvpd %ymm1, {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_lt_v4i64c:
+; AVX2-LABEL: min_lt_v4i64c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [18446744073709551609,18446744073709551615,1,7]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [18446744073709551615,18446744073709551609,7,1]
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_lt_v4i64c:
+; AVX512-LABEL: min_lt_v4i64c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [18446744073709551609,18446744073709551615,1,7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [18446744073709551615,18446744073709551609,7,1]
ret <4 x i64> %4
}
-define <4 x i32> @max_lt_v4i32c() {
-; SSE2-LABEL: max_lt_v4i32c:
+define <4 x i32> @min_lt_v4i32c() {
+; SSE2-LABEL: min_lt_v4i32c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [4294967289,4294967295,1,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967289,7,1]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_lt_v4i32c:
+; SSE41-LABEL: min_lt_v4i32c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_lt_v4i32c:
+; SSE42-LABEL: min_lt_v4i32c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; SSE42-NEXT: pminsd {{.*}}(%rip), %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_lt_v4i32c:
+; AVX-LABEL: min_lt_v4i32c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; AVX-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
ret <4 x i32> %4
}
-define <8 x i32> @max_lt_v8i32c() {
-; SSE2-LABEL: max_lt_v8i32c:
+define <8 x i32> @min_lt_v8i32c() {
+; SSE2-LABEL: min_lt_v8i32c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967289,4294967291,4294967293,4294967295]
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,3,5,7]
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_lt_v8i32c:
+; SSE41-LABEL: min_lt_v8i32c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,3,5,7]
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm1
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_lt_v8i32c:
+; SSE42-LABEL: min_lt_v8i32c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [1,3,5,7]
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; SSE42-NEXT: pminsd {{.*}}(%rip), %xmm1
; SSE42-NEXT: retq
;
-; AVX1-LABEL: max_lt_v8i32c:
+; AVX1-LABEL: min_lt_v8i32c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_lt_v8i32c:
+; AVX2-LABEL: min_lt_v8i32c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [4294967289,4294967291,4294967293,4294967295,1,3,5,7]
; AVX2-NEXT: vpminsd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_lt_v8i32c:
+; AVX512-LABEL: min_lt_v8i32c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [4294967289,4294967291,4294967293,4294967295,1,3,5,7]
; AVX512-NEXT: vpminsd {{.*}}(%rip), %ymm0, %ymm0
ret <8 x i32> %4
}
-define <8 x i16> @max_lt_v8i16c() {
-; SSE-LABEL: max_lt_v8i16c:
+define <8 x i16> @min_lt_v8i16c() {
+; SSE-LABEL: min_lt_v8i16c:
; SSE: # BB#0:
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65529,65531,65533,65535,1,3,5,7]
; SSE-NEXT: pminsw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: max_lt_v8i16c:
+; AVX-LABEL: min_lt_v8i16c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [65529,65531,65533,65535,1,3,5,7]
; AVX-NEXT: vpminsw {{.*}}(%rip), %xmm0, %xmm0
ret <8 x i16> %4
}
-define <16 x i16> @max_lt_v16i16c() {
-; SSE-LABEL: max_lt_v16i16c:
+define <16 x i16> @min_lt_v16i16c() {
+; SSE-LABEL: min_lt_v16i16c:
; SSE: # BB#0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,2,3,4,5,6,7,8]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65529,65530,65531,65532,65533,65534,65535,0]
; SSE-NEXT: pminsw {{.*}}(%rip), %xmm1
; SSE-NEXT: retq
;
-; AVX1-LABEL: max_lt_v16i16c:
+; AVX1-LABEL: min_lt_v16i16c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [65529,65530,65531,65532,65533,65534,65535,0]
; AVX1-NEXT: vpminsw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_lt_v16i16c:
+; AVX2-LABEL: min_lt_v16i16c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [65529,65530,65531,65532,65533,65534,65535,0,1,2,3,4,5,6,7,8]
; AVX2-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_lt_v16i16c:
+; AVX512-LABEL: min_lt_v16i16c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [65529,65530,65531,65532,65533,65534,65535,0,1,2,3,4,5,6,7,8]
; AVX512-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
ret <16 x i16> %4
}
-define <16 x i8> @max_lt_v16i8c() {
-; SSE2-LABEL: max_lt_v16i8c:
+define <16 x i8> @min_lt_v16i8c() {
+; SSE2-LABEL: min_lt_v16i8c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,254,253,252,251,250,249,0,7,6,5,4,3,2,1,0]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_lt_v16i8c:
+; SSE41-LABEL: min_lt_v16i8c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; SSE41-NEXT: pminsb {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_lt_v16i8c:
+; SSE42-LABEL: min_lt_v16i8c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; SSE42-NEXT: pminsb {{.*}}(%rip), %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_lt_v16i8c:
+; AVX-LABEL: min_lt_v16i8c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; AVX-NEXT: vpminsb {{.*}}(%rip), %xmm0, %xmm0
ret <16 x i8> %4
}
-define <2 x i64> @max_le_v2i64c() {
-; SSE2-LABEL: max_le_v2i64c:
+define <2 x i64> @min_le_v2i64c() {
+; SSE2-LABEL: min_le_v2i64c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551609,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [18446744073709551615,1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_le_v2i64c:
+; SSE41-LABEL: min_le_v2i64c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [18446744073709551609,7]
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551615,1]
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_le_v2i64c:
+; SSE42-LABEL: min_le_v2i64c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [18446744073709551609,7]
; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551615,1]
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_le_v2i64c:
+; AVX-LABEL: min_le_v2i64c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [18446744073709551609,7]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551615,1]
ret <2 x i64> %4
}
-define <4 x i64> @max_le_v4i64c() {
-; SSE2-LABEL: max_le_v4i64c:
+define <4 x i64> @min_le_v4i64c() {
+; SSE2-LABEL: min_le_v4i64c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [18446744073709551609,18446744073709551615]
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [1,7]
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_le_v4i64c:
+; SSE41-LABEL: min_le_v4i64c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [18446744073709551609,18446744073709551615]
; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [1,7]
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_le_v4i64c:
+; SSE42-LABEL: min_le_v4i64c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [18446744073709551609,18446744073709551615]
; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [1,7]
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: retq
;
-; AVX1-LABEL: max_le_v4i64c:
+; AVX1-LABEL: min_le_v4i64c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551609,7,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,7]
; AVX1-NEXT: vblendvpd %ymm1, {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_le_v4i64c:
+; AVX2-LABEL: min_le_v4i64c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [18446744073709551609,18446744073709551615,1,7]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [18446744073709551615,18446744073709551609,7,1]
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_le_v4i64c:
+; AVX512-LABEL: min_le_v4i64c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [18446744073709551609,18446744073709551615,1,7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [18446744073709551615,18446744073709551609,7,1]
ret <4 x i64> %4
}
-define <4 x i32> @max_le_v4i32c() {
-; SSE2-LABEL: max_le_v4i32c:
+define <4 x i32> @min_le_v4i32c() {
+; SSE2-LABEL: min_le_v4i32c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [4294967289,4294967295,1,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967289,7,1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_le_v4i32c:
+; SSE41-LABEL: min_le_v4i32c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_le_v4i32c:
+; SSE42-LABEL: min_le_v4i32c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; SSE42-NEXT: pminsd {{.*}}(%rip), %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_le_v4i32c:
+; AVX-LABEL: min_le_v4i32c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; AVX-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
ret <4 x i32> %4
}
-define <8 x i32> @max_le_v8i32c() {
-; SSE2-LABEL: max_le_v8i32c:
+define <8 x i32> @min_le_v8i32c() {
+; SSE2-LABEL: min_le_v8i32c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967289,4294967291,4294967293,4294967295]
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,3,5,7]
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_le_v8i32c:
+; SSE41-LABEL: min_le_v8i32c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,3,5,7]
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm1
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_le_v8i32c:
+; SSE42-LABEL: min_le_v8i32c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [1,3,5,7]
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; SSE42-NEXT: pminsd {{.*}}(%rip), %xmm1
; SSE42-NEXT: retq
;
-; AVX1-LABEL: max_le_v8i32c:
+; AVX1-LABEL: min_le_v8i32c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_le_v8i32c:
+; AVX2-LABEL: min_le_v8i32c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [4294967289,4294967291,4294967293,4294967295,1,3,5,7]
; AVX2-NEXT: vpminsd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_le_v8i32c:
+; AVX512-LABEL: min_le_v8i32c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [4294967289,4294967291,4294967293,4294967295,1,3,5,7]
; AVX512-NEXT: vpminsd {{.*}}(%rip), %ymm0, %ymm0
ret <8 x i32> %4
}
-define <8 x i16> @max_le_v8i16c() {
-; SSE-LABEL: max_le_v8i16c:
+define <8 x i16> @min_le_v8i16c() {
+; SSE-LABEL: min_le_v8i16c:
; SSE: # BB#0:
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65529,65531,65533,65535,1,3,5,7]
; SSE-NEXT: pminsw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: max_le_v8i16c:
+; AVX-LABEL: min_le_v8i16c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [65529,65531,65533,65535,1,3,5,7]
; AVX-NEXT: vpminsw {{.*}}(%rip), %xmm0, %xmm0
ret <8 x i16> %4
}
-define <16 x i16> @max_le_v16i16c() {
-; SSE-LABEL: max_le_v16i16c:
+define <16 x i16> @min_le_v16i16c() {
+; SSE-LABEL: min_le_v16i16c:
; SSE: # BB#0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,2,3,4,5,6,7,8]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65529,65530,65531,65532,65533,65534,65535,0]
; SSE-NEXT: pminsw {{.*}}(%rip), %xmm1
; SSE-NEXT: retq
;
-; AVX1-LABEL: max_le_v16i16c:
+; AVX1-LABEL: min_le_v16i16c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [65529,65530,65531,65532,65533,65534,65535,0]
; AVX1-NEXT: vpminsw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_le_v16i16c:
+; AVX2-LABEL: min_le_v16i16c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [65529,65530,65531,65532,65533,65534,65535,0,1,2,3,4,5,6,7,8]
; AVX2-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_le_v16i16c:
+; AVX512-LABEL: min_le_v16i16c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [65529,65530,65531,65532,65533,65534,65535,0,1,2,3,4,5,6,7,8]
; AVX512-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
ret <16 x i16> %4
}
-define <16 x i8> @max_le_v16i8c() {
-; SSE2-LABEL: max_le_v16i8c:
+define <16 x i8> @min_le_v16i8c() {
+; SSE2-LABEL: min_le_v16i8c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,254,253,252,251,250,249,0,7,6,5,4,3,2,1,0]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_le_v16i8c:
+; SSE41-LABEL: min_le_v16i8c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; SSE41-NEXT: pminsb {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_le_v16i8c:
+; SSE42-LABEL: min_le_v16i8c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; SSE42-NEXT: pminsb {{.*}}(%rip), %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_le_v16i8c:
+; AVX-LABEL: min_le_v16i8c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; AVX-NEXT: vpminsb {{.*}}(%rip), %xmm0, %xmm0
ret <16 x i8> %4
}
-define <2 x i64> @max_lt_v2i64c() {
-; SSE2-LABEL: max_lt_v2i64c:
+define <2 x i64> @min_lt_v2i64c() {
+; SSE2-LABEL: min_lt_v2i64c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551609,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [18446744073709551615,1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_lt_v2i64c:
+; SSE41-LABEL: min_lt_v2i64c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [18446744073709551609,7]
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551615,1]
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_lt_v2i64c:
+; SSE42-LABEL: min_lt_v2i64c:
; SSE42: # BB#0:
; SSE42-NEXT: movapd {{.*#+}} xmm1 = [18446744073709551615,1]
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775807,9223372036854775809]
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_lt_v2i64c:
+; AVX-LABEL: min_lt_v2i64c:
; AVX: # BB#0:
; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [18446744073709551615,1]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775807,9223372036854775809]
ret <2 x i64> %4
}
-define <4 x i64> @max_lt_v4i64c() {
-; SSE2-LABEL: max_lt_v4i64c:
+define <4 x i64> @min_lt_v4i64c() {
+; SSE2-LABEL: min_lt_v4i64c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [18446744073709551609,18446744073709551615]
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [1,7]
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_lt_v4i64c:
+; SSE41-LABEL: min_lt_v4i64c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [18446744073709551609,18446744073709551615]
; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [1,7]
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_lt_v4i64c:
+; SSE42-LABEL: min_lt_v4i64c:
; SSE42: # BB#0:
; SSE42-NEXT: movapd {{.*#+}} xmm1 = [7,1]
; SSE42-NEXT: movapd {{.*#+}} xmm2 = [18446744073709551615,18446744073709551609]
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: retq
;
-; AVX1-LABEL: max_lt_v4i64c:
+; AVX1-LABEL: min_lt_v4i64c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551609,7,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775807,9223372036854775801]
; AVX1-NEXT: vblendvpd %ymm1, {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_lt_v4i64c:
+; AVX2-LABEL: min_lt_v4i64c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551609,7,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [9223372036854775807,9223372036854775801,9223372036854775815,9223372036854775809]
; AVX2-NEXT: vblendvpd %ymm1, {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_lt_v4i64c:
+; AVX512-LABEL: min_lt_v4i64c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551609,7,1]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [9223372036854775807,9223372036854775801,9223372036854775815,9223372036854775809]
ret <4 x i64> %4
}
-define <4 x i32> @max_lt_v4i32c() {
-; SSE2-LABEL: max_lt_v4i32c:
+define <4 x i32> @min_lt_v4i32c() {
+; SSE2-LABEL: min_lt_v4i32c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483647,2147483641,2147483655,2147483649]
; SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_lt_v4i32c:
+; SSE41-LABEL: min_lt_v4i32c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; SSE41-NEXT: pminud {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_lt_v4i32c:
+; SSE42-LABEL: min_lt_v4i32c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; SSE42-NEXT: pminud {{.*}}(%rip), %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_lt_v4i32c:
+; AVX-LABEL: min_lt_v4i32c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; AVX-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
ret <4 x i32> %4
}
-define <8 x i32> @max_lt_v8i32c() {
-; SSE2-LABEL: max_lt_v8i32c:
+define <8 x i32> @min_lt_v8i32c() {
+; SSE2-LABEL: min_lt_v8i32c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2147483655,2147483653,2147483651,2147483649]
; SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_lt_v8i32c:
+; SSE41-LABEL: min_lt_v8i32c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,3,5,7]
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; SSE41-NEXT: pminud {{.*}}(%rip), %xmm1
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_lt_v8i32c:
+; SSE42-LABEL: min_lt_v8i32c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [1,3,5,7]
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; SSE42-NEXT: pminud {{.*}}(%rip), %xmm1
; SSE42-NEXT: retq
;
-; AVX1-LABEL: max_lt_v8i32c:
+; AVX1-LABEL: min_lt_v8i32c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_lt_v8i32c:
+; AVX2-LABEL: min_lt_v8i32c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [4294967289,4294967291,4294967293,4294967295,1,3,5,7]
; AVX2-NEXT: vpminud {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_lt_v8i32c:
+; AVX512-LABEL: min_lt_v8i32c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [4294967289,4294967291,4294967293,4294967295,1,3,5,7]
; AVX512-NEXT: vpminud {{.*}}(%rip), %ymm0, %ymm0
ret <8 x i32> %4
}
-define <8 x i16> @max_lt_v8i16c() {
-; SSE2-LABEL: max_lt_v8i16c:
+define <8 x i16> @min_lt_v8i16c() {
+; SSE2-LABEL: min_lt_v8i16c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65529,65531,65533,65535,1,3,5,7]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_lt_v8i16c:
+; SSE41-LABEL: min_lt_v8i16c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [65529,65531,65533,65535,1,3,5,7]
; SSE41-NEXT: pminuw {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_lt_v8i16c:
+; SSE42-LABEL: min_lt_v8i16c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [65529,65531,65533,65535,1,3,5,7]
; SSE42-NEXT: pminuw {{.*}}(%rip), %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_lt_v8i16c:
+; AVX-LABEL: min_lt_v8i16c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [65529,65531,65533,65535,1,3,5,7]
; AVX-NEXT: vpminuw {{.*}}(%rip), %xmm0, %xmm0
ret <8 x i16> %4
}
-define <16 x i16> @max_lt_v16i16c() {
-; SSE2-LABEL: max_lt_v16i16c:
+define <16 x i16> @min_lt_v16i16c() {
+; SSE2-LABEL: min_lt_v16i16c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [32775,32774,32773,32772,32771,32770,32769,32768]
; SSE2-NEXT: pcmpgtw {{.*}}(%rip), %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_lt_v16i16c:
+; SSE41-LABEL: min_lt_v16i16c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,2,3,4,5,6,7,8]
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [65529,65530,65531,65532,65533,65534,65535,0]
; SSE41-NEXT: pminuw {{.*}}(%rip), %xmm1
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_lt_v16i16c:
+; SSE42-LABEL: min_lt_v16i16c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [1,2,3,4,5,6,7,8]
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [65529,65530,65531,65532,65533,65534,65535,0]
; SSE42-NEXT: pminuw {{.*}}(%rip), %xmm1
; SSE42-NEXT: retq
;
-; AVX1-LABEL: max_lt_v16i16c:
+; AVX1-LABEL: min_lt_v16i16c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [65529,65530,65531,65532,65533,65534,65535,0]
; AVX1-NEXT: vpminuw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_lt_v16i16c:
+; AVX2-LABEL: min_lt_v16i16c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [65529,65530,65531,65532,65533,65534,65535,0,1,2,3,4,5,6,7,8]
; AVX2-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_lt_v16i16c:
+; AVX512-LABEL: min_lt_v16i16c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [65529,65530,65531,65532,65533,65534,65535,0,1,2,3,4,5,6,7,8]
; AVX512-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
ret <16 x i16> %4
}
-define <16 x i8> @max_lt_v16i8c() {
-; SSE-LABEL: max_lt_v16i8c:
+define <16 x i8> @min_lt_v16i8c() {
+; SSE-LABEL: min_lt_v16i8c:
; SSE: # BB#0:
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; SSE-NEXT: pminub {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: max_lt_v16i8c:
+; AVX-LABEL: min_lt_v16i8c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; AVX-NEXT: vpminub {{.*}}(%rip), %xmm0, %xmm0
ret <16 x i8> %4
}
-define <2 x i64> @max_le_v2i64c() {
-; SSE2-LABEL: max_le_v2i64c:
+define <2 x i64> @min_le_v2i64c() {
+; SSE2-LABEL: min_le_v2i64c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551609,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [18446744073709551615,1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_le_v2i64c:
+; SSE41-LABEL: min_le_v2i64c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [18446744073709551609,7]
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551615,1]
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_le_v2i64c:
+; SSE42-LABEL: min_le_v2i64c:
; SSE42: # BB#0:
; SSE42-NEXT: movapd {{.*#+}} xmm1 = [18446744073709551615,1]
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775801,9223372036854775815]
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_le_v2i64c:
+; AVX-LABEL: min_le_v2i64c:
; AVX: # BB#0:
; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [18446744073709551615,1]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775801,9223372036854775815]
ret <2 x i64> %4
}
-define <4 x i64> @max_le_v4i64c() {
-; SSE2-LABEL: max_le_v4i64c:
+define <4 x i64> @min_le_v4i64c() {
+; SSE2-LABEL: min_le_v4i64c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [18446744073709551609,18446744073709551615]
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [1,7]
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_le_v4i64c:
+; SSE41-LABEL: min_le_v4i64c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [18446744073709551609,18446744073709551615]
; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [1,7]
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_le_v4i64c:
+; SSE42-LABEL: min_le_v4i64c:
; SSE42: # BB#0:
; SSE42-NEXT: movapd {{.*#+}} xmm1 = [7,1]
; SSE42-NEXT: movapd {{.*#+}} xmm2 = [18446744073709551615,18446744073709551609]
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: retq
;
-; AVX1-LABEL: max_le_v4i64c:
+; AVX1-LABEL: min_le_v4i64c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551609,7,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775809,9223372036854775815]
; AVX1-NEXT: vblendvpd %ymm1, {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_le_v4i64c:
+; AVX2-LABEL: min_le_v4i64c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551609,7,1]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [9223372036854775801,9223372036854775807,9223372036854775809,9223372036854775815]
; AVX2-NEXT: vblendvpd %ymm1, {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_le_v4i64c:
+; AVX512-LABEL: min_le_v4i64c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovapd {{.*#+}} ymm0 = [18446744073709551615,18446744073709551609,7,1]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [9223372036854775801,9223372036854775807,9223372036854775809,9223372036854775815]
ret <4 x i64> %4
}
-define <4 x i32> @max_le_v4i32c() {
-; SSE2-LABEL: max_le_v4i32c:
+define <4 x i32> @min_le_v4i32c() {
+; SSE2-LABEL: min_le_v4i32c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483641,2147483647,2147483649,2147483655]
; SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_le_v4i32c:
+; SSE41-LABEL: min_le_v4i32c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; SSE41-NEXT: pminud {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_le_v4i32c:
+; SSE42-LABEL: min_le_v4i32c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; SSE42-NEXT: pminud {{.*}}(%rip), %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_le_v4i32c:
+; AVX-LABEL: min_le_v4i32c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4294967289,4294967295,1,7]
; AVX-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
ret <4 x i32> %4
}
-define <8 x i32> @max_le_v8i32c() {
-; SSE2-LABEL: max_le_v8i32c:
+define <8 x i32> @min_le_v8i32c() {
+; SSE2-LABEL: min_le_v8i32c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2147483649,2147483651,2147483653,2147483655]
; SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_le_v8i32c:
+; SSE41-LABEL: min_le_v8i32c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,3,5,7]
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; SSE41-NEXT: pminud {{.*}}(%rip), %xmm1
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_le_v8i32c:
+; SSE42-LABEL: min_le_v8i32c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [1,3,5,7]
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; SSE42-NEXT: pminud {{.*}}(%rip), %xmm1
; SSE42-NEXT: retq
;
-; AVX1-LABEL: max_le_v8i32c:
+; AVX1-LABEL: min_le_v8i32c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [4294967289,4294967291,4294967293,4294967295]
; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_le_v8i32c:
+; AVX2-LABEL: min_le_v8i32c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [4294967289,4294967291,4294967293,4294967295,1,3,5,7]
; AVX2-NEXT: vpminud {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_le_v8i32c:
+; AVX512-LABEL: min_le_v8i32c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [4294967289,4294967291,4294967293,4294967295,1,3,5,7]
; AVX512-NEXT: vpminud {{.*}}(%rip), %ymm0, %ymm0
ret <8 x i32> %4
}
-define <8 x i16> @max_le_v8i16c() {
-; SSE2-LABEL: max_le_v8i16c:
+define <8 x i16> @min_le_v8i16c() {
+; SSE2-LABEL: min_le_v8i16c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65529,65531,65533,65535,1,3,5,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65533,65531,65529,7,5,3,1]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_le_v8i16c:
+; SSE41-LABEL: min_le_v8i16c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [65529,65531,65533,65535,1,3,5,7]
; SSE41-NEXT: pminuw {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_le_v8i16c:
+; SSE42-LABEL: min_le_v8i16c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [65529,65531,65533,65535,1,3,5,7]
; SSE42-NEXT: pminuw {{.*}}(%rip), %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: max_le_v8i16c:
+; AVX-LABEL: min_le_v8i16c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [65529,65531,65533,65535,1,3,5,7]
; AVX-NEXT: vpminuw {{.*}}(%rip), %xmm0, %xmm0
ret <8 x i16> %4
}
-define <16 x i16> @max_le_v16i16c() {
-; SSE2-LABEL: max_le_v16i16c:
+define <16 x i16> @min_le_v16i16c() {
+; SSE2-LABEL: min_le_v16i16c:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65529,65530,65531,65532,65533,65534,65535,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,2,3,4,5,6,7,8]
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: retq
;
-; SSE41-LABEL: max_le_v16i16c:
+; SSE41-LABEL: min_le_v16i16c:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,2,3,4,5,6,7,8]
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [65529,65530,65531,65532,65533,65534,65535,0]
; SSE41-NEXT: pminuw {{.*}}(%rip), %xmm1
; SSE41-NEXT: retq
;
-; SSE42-LABEL: max_le_v16i16c:
+; SSE42-LABEL: min_le_v16i16c:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [1,2,3,4,5,6,7,8]
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [65529,65530,65531,65532,65533,65534,65535,0]
; SSE42-NEXT: pminuw {{.*}}(%rip), %xmm1
; SSE42-NEXT: retq
;
-; AVX1-LABEL: max_le_v16i16c:
+; AVX1-LABEL: min_le_v16i16c:
; AVX1: # BB#0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [65529,65530,65531,65532,65533,65534,65535,0]
; AVX1-NEXT: vpminuw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: max_le_v16i16c:
+; AVX2-LABEL: min_le_v16i16c:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [65529,65530,65531,65532,65533,65534,65535,0,1,2,3,4,5,6,7,8]
; AVX2-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: max_le_v16i16c:
+; AVX512-LABEL: min_le_v16i16c:
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [65529,65530,65531,65532,65533,65534,65535,0,1,2,3,4,5,6,7,8]
; AVX512-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
ret <16 x i16> %4
}
-define <16 x i8> @max_le_v16i8c() {
-; SSE-LABEL: max_le_v16i8c:
+define <16 x i8> @min_le_v16i8c() {
+; SSE-LABEL: min_le_v16i8c:
; SSE: # BB#0:
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; SSE-NEXT: pminub {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: max_le_v16i8c:
+; AVX-LABEL: min_le_v16i8c:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [249,250,251,252,253,254,255,0,1,2,3,4,5,6,7,8]
; AVX-NEXT: vpminub {{.*}}(%rip), %xmm0, %xmm0