; Double to Signed Integer
;
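; Illustrative sketch only, not one of the original tests: the functions in
; this section lower straight vector fptosi IR like the following (the diff
; context elides the conversion instructions; the _example name is made up).
; The *_to_2i32 variants additionally shufflevector the result out to the
; returned <4 x i32> %ext.
define <2 x i64> @fptosi_2f64_to_2i64_example(<2 x double> %a) {
  %cvt = fptosi <2 x double> %a to <2 x i64>
  ret <2 x i64> %cvt
}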
-define <2 x i64> @fptosi_2vf64(<2 x double> %a) {
-; SSE2-LABEL: fptosi_2vf64:
+define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
+; SSE2-LABEL: fptosi_2f64_to_2i64:
; SSE2: # BB#0:
; SSE2-NEXT: cvttsd2si %xmm0, %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_2vf64:
+; AVX-LABEL: fptosi_2f64_to_2i64:
; AVX: # BB#0:
; AVX-NEXT: vcvttsd2si %xmm0, %rax
; AVX-NEXT: vmovq %rax, %xmm1
ret <2 x i64> %cvt
}
-define <4 x i32> @fptosi_2vf64_i32(<2 x double> %a) {
-; SSE2-LABEL: fptosi_2vf64_i32:
+define <4 x i32> @fptosi_2f64_to_2i32(<2 x double> %a) {
+; SSE2-LABEL: fptosi_2f64_to_2i32:
; SSE2: # BB#0:
; SSE2-NEXT: cvttsd2si %xmm0, %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_2vf64_i32:
+; AVX-LABEL: fptosi_2f64_to_2i32:
; AVX: # BB#0:
; AVX-NEXT: vcvttsd2si %xmm0, %rax
; AVX-NEXT: vmovq %rax, %xmm1
ret <4 x i32> %ext
}
-define <4 x i64> @fptosi_4vf64(<4 x double> %a) {
-; SSE2-LABEL: fptosi_4vf64:
+define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
+; SSE2-LABEL: fptosi_4f64_to_4i64:
; SSE2: # BB#0:
; SSE2-NEXT: cvttsd2si %xmm0, %rax
; SSE2-NEXT: movd %rax, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_4vf64:
+; AVX-LABEL: fptosi_4f64_to_4i64:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vcvttsd2si %xmm1, %rax
ret <4 x i64> %cvt
}
-define <4 x i32> @fptosi_4vf64_i32(<4 x double> %a) {
-; SSE2-LABEL: fptosi_4vf64_i32:
+define <4 x i32> @fptosi_4f64_to_4i32(<4 x double> %a) {
+; SSE2-LABEL: fptosi_4f64_to_4i32:
; SSE2: # BB#0:
; SSE2-NEXT: cvttsd2si %xmm1, %rax
; SSE2-NEXT: movd %rax, %xmm2
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_4vf64_i32:
+; AVX-LABEL: fptosi_4f64_to_4i32:
; AVX: # BB#0:
; AVX-NEXT: vcvttpd2dqy %ymm0, %xmm0
; AVX-NEXT: vzeroupper
; Double to Unsigned Integer
;
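; Illustrative sketch only (hypothetical _example name): the unsigned variants
; lower fptoui, which has no packed f64-to-u64 instruction before AVX512, hence
; the longer scalar subtract/convert sequences in the checks below.
define <2 x i64> @fptoui_2f64_to_2i64_example(<2 x double> %a) {
  %cvt = fptoui <2 x double> %a to <2 x i64>
  ret <2 x i64> %cvt
}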
-define <2 x i64> @fptoui_2vf64(<2 x double> %a) {
-; SSE2-LABEL: fptoui_2vf64:
+define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
+; SSE2-LABEL: fptoui_2f64_to_2i64:
; SSE2: # BB#0:
; SSE2-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE2-NEXT: movapd %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_2vf64:
+; AVX-LABEL: fptoui_2f64_to_2i64:
; AVX: # BB#0:
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm2
ret <2 x i64> %cvt
}
-define <4 x i32> @fptoui_2vf64_i32(<2 x double> %a) {
-; SSE2-LABEL: fptoui_2vf64_i32:
+define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
+; SSE2-LABEL: fptoui_2f64_to_2i32:
; SSE2: # BB#0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: movapd %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_2vf64_i32:
+; AVX-LABEL: fptoui_2f64_to_2i32:
; AVX: # BB#0:
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm2
ret <4 x i32> %ext
}
-define <4 x i64> @fptoui_4vf64(<4 x double> %a) {
-; SSE2-LABEL: fptoui_4vf64:
+define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
+; SSE2-LABEL: fptoui_4f64_to_4i64:
; SSE2: # BB#0:
; SSE2-NEXT: movapd %xmm0, %xmm2
; SSE2-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_4vf64:
+; AVX-LABEL: fptoui_4f64_to_4i64:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
ret <4 x i64> %cvt
}
-define <4 x i32> @fptoui_4vf64_i32(<4 x double> %a) {
-; SSE2-LABEL: fptoui_4vf64_i32:
+define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
+; SSE2-LABEL: fptoui_4f64_to_4i32:
; SSE2: # BB#0:
; SSE2-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE2-NEXT: movapd %xmm1, %xmm3
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_4vf64_i32:
+; AVX-LABEL: fptoui_4f64_to_4i32:
; AVX: # BB#0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vcvttsd2si %xmm1, %rax
; Float to Signed Integer
;
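; Illustrative sketch only (hypothetical _example name): signed float
; conversions map directly onto cvttps2dq, as the checks below confirm.
define <4 x i32> @fptosi_4f32_to_4i32_example(<4 x float> %a) {
  %cvt = fptosi <4 x float> %a to <4 x i32>
  ret <4 x i32> %cvt
}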
-define <4 x i32> @fptosi_4vf32(<4 x float> %a) {
-; SSE2-LABEL: fptosi_4vf32:
+define <4 x i32> @fptosi_4f32_to_4i32(<4 x float> %a) {
+; SSE2-LABEL: fptosi_4f32_to_4i32:
; SSE2: # BB#0:
; SSE2-NEXT: cvttps2dq %xmm0, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_4vf32:
+; AVX-LABEL: fptosi_4f32_to_4i32:
; AVX: # BB#0:
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: retq
ret <4 x i32> %cvt
}
-define <2 x i64> @fptosi_4vf32_i64(<4 x float> %a) {
-; SSE2-LABEL: fptosi_4vf32_i64:
+define <2 x i64> @fptosi_2f32_to_2i64(<4 x float> %a) {
+; SSE2-LABEL: fptosi_2f32_to_2i64:
; SSE2: # BB#0:
; SSE2-NEXT: cvttss2si %xmm0, %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_4vf32_i64:
+; AVX-LABEL: fptosi_2f32_to_2i64:
; AVX: # BB#0:
; AVX-NEXT: vcvttss2si %xmm0, %rax
; AVX-NEXT: vmovq %rax, %xmm1
ret <2 x i64> %cvt
}
-define <8 x i32> @fptosi_8vf32(<8 x float> %a) {
-; SSE2-LABEL: fptosi_8vf32:
+define <8 x i32> @fptosi_8f32_to_8i32(<8 x float> %a) {
+; SSE2-LABEL: fptosi_8f32_to_8i32:
; SSE2: # BB#0:
; SSE2-NEXT: cvttps2dq %xmm0, %xmm0
; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_8vf32:
+; AVX-LABEL: fptosi_8f32_to_8i32:
; AVX: # BB#0:
; AVX-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX-NEXT: retq
ret <8 x i32> %cvt
}
-define <4 x i64> @fptosi_8vf32_i64(<8 x float> %a) {
-; SSE2-LABEL: fptosi_8vf32_i64:
+define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
+; SSE2-LABEL: fptosi_4f32_to_4i64:
; SSE2: # BB#0:
; SSE2-NEXT: cvttss2si %xmm0, %rax
; SSE2-NEXT: movd %rax, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_8vf32_i64:
+; AVX-LABEL: fptosi_4f32_to_4i64:
; AVX: # BB#0:
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX-NEXT: vcvttss2si %xmm1, %rax
; Float to Unsigned Integer
;
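; Illustrative sketch only (hypothetical _example name): f32 to unsigned i32
; again lacks a packed instruction pre-AVX512, so the lanes are extracted and
; converted through 64-bit cvttss2si, as checked below.
define <4 x i32> @fptoui_4f32_to_4i32_example(<4 x float> %a) {
  %cvt = fptoui <4 x float> %a to <4 x i32>
  ret <4 x i32> %cvt
}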
-define <4 x i32> @fptoui_4vf32(<4 x float> %a) {
-; SSE2-LABEL: fptoui_4vf32:
+define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
+; SSE2-LABEL: fptoui_4f32_to_4i32:
; SSE2: # BB#0:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_4vf32:
+; AVX-LABEL: fptoui_4f32_to_4i32:
; AVX: # BB#0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vcvttss2si %xmm1, %rax
ret <4 x i32> %cvt
}
-define <2 x i64> @fptoui_4vf32_i64(<4 x float> %a) {
-; SSE2-LABEL: fptoui_4vf32_i64:
+define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
+; SSE2-LABEL: fptoui_2f32_to_2i64:
; SSE2: # BB#0:
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_4vf32_i64:
+; AVX-LABEL: fptoui_2f32_to_2i64:
; AVX: # BB#0:
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm2
ret <2 x i64> %cvt
}
-define <8 x i32> @fptoui_8vf32(<8 x float> %a) {
-; SSE2-LABEL: fptoui_8vf32:
+define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
+; SSE2-LABEL: fptoui_8f32_to_8i32:
; SSE2: # BB#0:
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_8vf32:
+; AVX-LABEL: fptoui_8f32_to_8i32:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
ret <8 x i32> %cvt
}
-define <4 x i64> @fptoui_8vf32_i64(<8 x float> %a) {
-; SSE2-LABEL: fptoui_8vf32_i64:
+define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
+; SSE2-LABEL: fptoui_4f32_to_4i64:
; SSE2: # BB#0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_8vf32_i64:
+; AVX-LABEL: fptoui_4f32_to_4i64:
; AVX: # BB#0:
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; Constant Folding
;
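; Illustrative sketch only: these tests run the conversions on constant
; vectors, so everything folds to a constant-pool load. Assumed operands shown
; here; e.g. <1.0, -1.0> matches the [1,18446744073709551615] result checked
; below.
define <2 x i64> @fptosi_2f64_to_2i64_const_example() {
  %cvt = fptosi <2 x double> <double 1.0, double -1.0> to <2 x i64>
  ret <2 x i64> %cvt
}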
-define <2 x i64> @fptosi_2vf64c() {
-; SSE2-LABEL: fptosi_2vf64c:
+define <2 x i64> @fptosi_2f64_to_2i64_const() {
+; SSE2-LABEL: fptosi_2f64_to_2i64_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_2vf64c:
+; AVX-LABEL: fptosi_2f64_to_2i64_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,18446744073709551615]
; AVX-NEXT: retq
ret <2 x i64> %cvt
}
-define <4 x i32> @fptosi_2vf64c_i32() {
-; SSE2-LABEL: fptosi_2vf64c_i32:
+define <4 x i32> @fptosi_2f64_to_2i32_const() {
+; SSE2-LABEL: fptosi_2f64_to_2i32_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = <4294967295,1,u,u>
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_2vf64c_i32:
+; AVX-LABEL: fptosi_2f64_to_2i32_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = <4294967295,1,u,u>
; AVX-NEXT: retq
ret <4 x i32> %ext
}
-define <4 x i64> @fptosi_4vf64c() {
-; SSE2-LABEL: fptosi_4vf64c:
+define <4 x i64> @fptosi_4f64_to_4i64_const() {
+; SSE2-LABEL: fptosi_4f64_to_4i64_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615]
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [2,18446744073709551613]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_4vf64c:
+; AVX-LABEL: fptosi_4f64_to_4i64_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,18446744073709551615,2,18446744073709551613]
; AVX-NEXT: retq
ret <4 x i64> %cvt
}
-define <4 x i32> @fptosi_4vf64c_i32() {
-; SSE2-LABEL: fptosi_4vf64c_i32:
+define <4 x i32> @fptosi_4f64_to_4i32_const() {
+; SSE2-LABEL: fptosi_4f64_to_4i32_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [4294967295,1,4294967294,3]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_4vf64c_i32:
+; AVX-LABEL: fptosi_4f64_to_4i32_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,1,4294967294,3]
; AVX-NEXT: retq
ret <4 x i32> %cvt
}
-define <2 x i64> @fptoui_2vf64c() {
-; SSE2-LABEL: fptoui_2vf64c:
+define <2 x i64> @fptoui_2f64_to_2i64_const() {
+; SSE2-LABEL: fptoui_2f64_to_2i64_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [2,4]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_2vf64c:
+; AVX-LABEL: fptoui_2f64_to_2i64_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [2,4]
; AVX-NEXT: retq
ret <2 x i64> %cvt
}
-define <4 x i32> @fptoui_2vf64c_i32(<2 x double> %a) {
-; SSE2-LABEL: fptoui_2vf64c_i32:
+define <4 x i32> @fptoui_2f64_to_2i32_const(<2 x double> %a) {
+; SSE2-LABEL: fptoui_2f64_to_2i32_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = <2,4,u,u>
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_2vf64c_i32:
+; AVX-LABEL: fptoui_2f64_to_2i32_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = <2,4,u,u>
; AVX-NEXT: retq
ret <4 x i32> %ext
}
-define <4 x i64> @fptoui_4vf64c(<4 x double> %a) {
-; SSE2-LABEL: fptoui_4vf64c:
+define <4 x i64> @fptoui_4f64_to_4i64_const(<4 x double> %a) {
+; SSE2-LABEL: fptoui_4f64_to_4i64_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [2,4]
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [6,8]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_4vf64c:
+; AVX-LABEL: fptoui_4f64_to_4i64_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [2,4,6,8]
; AVX-NEXT: retq
ret <4 x i64> %cvt
}
-define <4 x i32> @fptoui_4vf64c_i32(<4 x double> %a) {
-; SSE2-LABEL: fptoui_4vf64c_i32:
+define <4 x i32> @fptoui_4f64_to_4i32_const(<4 x double> %a) {
+; SSE2-LABEL: fptoui_4f64_to_4i32_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [2,4,6,8]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_4vf64c_i32:
+; AVX-LABEL: fptoui_4f64_to_4i32_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [2,4,6,8]
; AVX-NEXT: retq
ret <4 x i32> %cvt
}
-define <4 x i32> @fptosi_4vf32c() {
-; SSE2-LABEL: fptosi_4vf32c:
+define <4 x i32> @fptosi_4f32_to_4i32_const() {
+; SSE2-LABEL: fptosi_4f32_to_4i32_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,4294967295,2,3]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_4vf32c:
+; AVX-LABEL: fptosi_4f32_to_4i32_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,4294967295,2,3]
; AVX-NEXT: retq
ret <4 x i32> %cvt
}
-define <4 x i64> @fptosi_4vf32c_i64() {
-; SSE2-LABEL: fptosi_4vf32c_i64:
+define <4 x i64> @fptosi_4f32_to_4i64_const() {
+; SSE2-LABEL: fptosi_4f32_to_4i64_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615]
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [2,3]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_4vf32c_i64:
+; AVX-LABEL: fptosi_4f32_to_4i64_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,18446744073709551615,2,3]
; AVX-NEXT: retq
ret <4 x i64> %cvt
}
-define <8 x i32> @fptosi_8vf32c(<8 x float> %a) {
-; SSE2-LABEL: fptosi_8vf32c:
+define <8 x i32> @fptosi_8f32_to_8i32_const(<8 x float> %a) {
+; SSE2-LABEL: fptosi_8f32_to_8i32_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,4294967295,2,3]
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [6,4294967288,2,4294967295]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptosi_8vf32c:
+; AVX-LABEL: fptosi_8f32_to_8i32_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,4294967295,2,3,6,4294967288,2,4294967295]
; AVX-NEXT: retq
ret <8 x i32> %cvt
}
-define <4 x i32> @fptoui_4vf32c(<4 x float> %a) {
-; SSE2-LABEL: fptoui_4vf32c:
+define <4 x i32> @fptoui_4f32_to_4i32_const(<4 x float> %a) {
+; SSE2-LABEL: fptoui_4f32_to_4i32_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,2,4,6]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_4vf32c:
+; AVX-LABEL: fptoui_4f32_to_4i32_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,2,4,6]
; AVX-NEXT: retq
ret <4 x i32> %cvt
}
-define <4 x i64> @fptoui_4vf32c_i64() {
-; SSE2-LABEL: fptoui_4vf32c_i64:
+define <4 x i64> @fptoui_4f32_to_4i64_const() {
+; SSE2-LABEL: fptoui_4f32_to_4i64_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,2]
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [4,8]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_4vf32c_i64:
+; AVX-LABEL: fptoui_4f32_to_4i64_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,2,4,8]
; AVX-NEXT: retq
ret <4 x i64> %cvt
}
-define <8 x i32> @fptoui_8vf32c(<8 x float> %a) {
-; SSE2-LABEL: fptoui_8vf32c:
+define <8 x i32> @fptoui_8f32_to_8i32_const(<8 x float> %a) {
+; SSE2-LABEL: fptoui_8f32_to_8i32_const:
; SSE2: # BB#0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,2,4,6]
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [8,6,4,1]
; SSE2-NEXT: retq
;
-; AVX-LABEL: fptoui_8vf32c:
+; AVX-LABEL: fptoui_8f32_to_8i32_const:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,2,4,6,8,6,4,1]
; AVX-NEXT: retq
; Signed Integer to Double
;
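; Illustrative sketch only (hypothetical _example name): i64 lanes have no
; packed sitofp, so each element goes through cvtsi2sdq; the narrower
; i32/i16/i8 sources are sign-extended and handed to cvtdq2pd instead.
define <2 x double> @sitofp_2i64_to_2f64_example(<2 x i64> %a) {
  %cvt = sitofp <2 x i64> %a to <2 x double>
  ret <2 x double> %cvt
}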
-define <2 x double> @sitofp_2vf64(<2 x i64> %a) {
-; SSE2-LABEL: sitofp_2vf64:
+define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) {
+; SSE2-LABEL: sitofp_2i64_to_2f64:
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm0, %rax
; SSE2-NEXT: cvtsi2sdq %rax, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_2vf64:
+; AVX-LABEL: sitofp_2i64_to_2f64:
; AVX: # BB#0:
; AVX-NEXT: vpextrq $1, %xmm0, %rax
; AVX-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm1
ret <2 x double> %cvt
}
-define <2 x double> @sitofp_2vf64_i32(<4 x i32> %a) {
-; SSE2-LABEL: sitofp_2vf64_i32:
+define <2 x double> @sitofp_2i32_to_2f64(<4 x i32> %a) {
+; SSE2-LABEL: sitofp_2i32_to_2f64:
; SSE2: # BB#0:
; SSE2-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_2vf64_i32:
+; AVX-LABEL: sitofp_2i32_to_2f64:
; AVX: # BB#0:
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
ret <2 x double> %cvt
}
-define <2 x double> @sitofp_2vf64_i16(<8 x i16> %a) {
-; SSE2-LABEL: sitofp_2vf64_i16:
+define <2 x double> @sitofp_2i16_to_2f64(<8 x i16> %a) {
+; SSE2-LABEL: sitofp_2i16_to_2f64:
; SSE2: # BB#0:
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_2vf64_i16:
+; AVX-LABEL: sitofp_2i16_to_2f64:
; AVX: # BB#0:
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
ret <2 x double> %cvt
}
-define <2 x double> @sitofp_2vf64_i8(<16 x i8> %a) {
-; SSE2-LABEL: sitofp_2vf64_i8:
+define <2 x double> @sitofp_2i8_to_2f64(<16 x i8> %a) {
+; SSE2-LABEL: sitofp_2i8_to_2f64:
; SSE2: # BB#0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_2vf64_i8:
+; AVX-LABEL: sitofp_2i8_to_2f64:
; AVX: # BB#0:
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
ret <2 x double> %cvt
}
-define <4 x double> @sitofp_4vf64(<4 x i64> %a) {
-; SSE2-LABEL: sitofp_4vf64:
+define <4 x double> @sitofp_4i64_to_4f64(<4 x i64> %a) {
+; SSE2-LABEL: sitofp_4i64_to_4f64:
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm0, %rax
; SSE2-NEXT: cvtsi2sdq %rax, %xmm2
; SSE2-NEXT: movapd %xmm3, %xmm1
; SSE2-NEXT: retq
;
-; AVX1-LABEL: sitofp_4vf64:
+; AVX1-LABEL: sitofp_4i64_to_4f64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: sitofp_4vf64:
+; AVX2-LABEL: sitofp_4i64_to_4f64:
; AVX2: # BB#0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
ret <4 x double> %cvt
}
-define <4 x double> @sitofp_4vf64_i32(<4 x i32> %a) {
-; SSE2-LABEL: sitofp_4vf64_i32:
+define <4 x double> @sitofp_4i32_to_4f64(<4 x i32> %a) {
+; SSE2-LABEL: sitofp_4i32_to_4f64:
; SSE2: # BB#0:
; SSE2-NEXT: cvtdq2pd %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_4vf64_i32:
+; AVX-LABEL: sitofp_4i32_to_4f64:
; AVX: # BB#0:
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
ret <4 x double> %cvt
}
-define <4 x double> @sitofp_4vf64_i16(<8 x i16> %a) {
-; SSE2-LABEL: sitofp_4vf64_i16:
+define <4 x double> @sitofp_4i16_to_4f64(<8 x i16> %a) {
+; SSE2-LABEL: sitofp_4i16_to_4f64:
; SSE2: # BB#0:
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: cvtdq2pd %xmm1, %xmm1
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_4vf64_i16:
+; AVX-LABEL: sitofp_4i16_to_4f64:
; AVX: # BB#0:
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
ret <4 x double> %cvt
}
-define <4 x double> @sitofp_4vf64_i8(<16 x i8> %a) {
-; SSE2-LABEL: sitofp_4vf64_i8:
+define <4 x double> @sitofp_4i8_to_4f64(<16 x i8> %a) {
+; SSE2-LABEL: sitofp_4i8_to_4f64:
; SSE2: # BB#0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: cvtdq2pd %xmm1, %xmm1
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_4vf64_i8:
+; AVX-LABEL: sitofp_4i8_to_4f64:
; AVX: # BB#0:
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; Unsigned Integer to Double
;
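; Illustrative sketch only (hypothetical _example name): the recurring
; [1127219200,1160773632,0,0] constant in the checks below looks like the
; usual 2^52/2^84 magic-number pair for expanding u64-to-f64 without native
; support.
define <2 x double> @uitofp_2i64_to_2f64_example(<2 x i64> %a) {
  %cvt = uitofp <2 x i64> %a to <2 x double>
  ret <2 x double> %cvt
}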
-define <2 x double> @uitofp_2vf64(<2 x i64> %a) {
-; SSE2-LABEL: uitofp_2vf64:
+define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
+; SSE2-LABEL: uitofp_2i64_to_2f64:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
-; AVX-LABEL: uitofp_2vf64:
+; AVX-LABEL: uitofp_2i64_to_2f64:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
; AVX-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
ret <2 x double> %cvt
}
-define <2 x double> @uitofp_2vf64_i32(<4 x i32> %a) {
-; SSE2-LABEL: uitofp_2vf64_i32:
+define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
+; SSE2-LABEL: uitofp_2i32_to_2f64:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
-; AVX-LABEL: uitofp_2vf64_i32:
+; AVX-LABEL: uitofp_2i32_to_2f64:
; AVX: # BB#0:
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
ret <2 x double> %cvt
}
-define <2 x double> @uitofp_2vf64_i16(<8 x i16> %a) {
-; SSE2-LABEL: uitofp_2vf64_i16:
+define <2 x double> @uitofp_2i16_to_2f64(<8 x i16> %a) {
+; SSE2-LABEL: uitofp_2i16_to_2f64:
; SSE2: # BB#0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
; SSE2-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: uitofp_2vf64_i16:
+; AVX-LABEL: uitofp_2i16_to_2f64:
; AVX: # BB#0:
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: vpand .LCPI10_0(%rip), %xmm0, %xmm0
ret <2 x double> %cvt
}
-define <2 x double> @uitofp_2vf64_i8(<16 x i8> %a) {
-; SSE2-LABEL: uitofp_2vf64_i8:
+define <2 x double> @uitofp_2i8_to_2f64(<16 x i8> %a) {
+; SSE2-LABEL: uitofp_2i8_to_2f64:
; SSE2: # BB#0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: uitofp_2vf64_i8:
+; AVX-LABEL: uitofp_2i8_to_2f64:
; AVX: # BB#0:
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpand .LCPI11_0(%rip), %xmm0, %xmm0
ret <2 x double> %cvt
}
-define <4 x double> @uitofp_4vf64(<4 x i64> %a) {
-; SSE2-LABEL: uitofp_4vf64:
+define <4 x double> @uitofp_4i64_to_4f64(<4 x i64> %a) {
+; SSE2-LABEL: uitofp_4i64_to_4f64:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: retq
;
-; AVX1-LABEL: uitofp_4vf64:
+; AVX1-LABEL: uitofp_4i64_to_4f64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: uitofp_4vf64:
+; AVX2-LABEL: uitofp_4i64_to_4f64:
; AVX2: # BB#0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
ret <4 x double> %cvt
}
-define <4 x double> @uitofp_4vf64_i32(<4 x i32> %a) {
-; SSE2-LABEL: uitofp_4vf64_i32:
+define <4 x double> @uitofp_4i32_to_4f64(<4 x i32> %a) {
+; SSE2-LABEL: uitofp_4i32_to_4f64:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: retq
;
-; AVX1-LABEL: uitofp_4vf64_i32:
+; AVX1-LABEL: uitofp_4i32_to_4f64:
; AVX1: # BB#0:
; AVX1-NEXT: vpand .LCPI13_0(%rip), %xmm0, %xmm1
; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: uitofp_4vf64_i32:
+; AVX2-LABEL: uitofp_4i32_to_4f64:
; AVX2: # BB#0:
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vcvtdq2pd %xmm1, %ymm1
ret <4 x double> %cvt
}
-define <4 x double> @uitofp_4vf64_i16(<8 x i16> %a) {
-; SSE2-LABEL: uitofp_4vf64_i16:
+define <4 x double> @uitofp_4i16_to_4f64(<8 x i16> %a) {
+; SSE2-LABEL: uitofp_4i16_to_4f64:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: uitofp_4vf64_i16:
+; AVX-LABEL: uitofp_4i16_to_4f64:
; AVX: # BB#0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
ret <4 x double> %cvt
}
-define <4 x double> @uitofp_4vf64_i8(<16 x i8> %a) {
-; SSE2-LABEL: uitofp_4vf64_i8:
+define <4 x double> @uitofp_4i8_to_4f64(<16 x i8> %a) {
+; SSE2-LABEL: uitofp_4i8_to_4f64:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: uitofp_4vf64_i8:
+; AVX-LABEL: uitofp_4i8_to_4f64:
; AVX: # BB#0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; Signed Integer to Float
;
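; Illustrative sketch only (hypothetical _example name): i32 sources map
; straight onto cvtdq2ps; narrower sources are sign-extended first, and i64
; sources are scalarized through cvtsi2ssq, as the checks below show.
define <4 x float> @sitofp_4i32_to_4f32_example(<4 x i32> %a) {
  %cvt = sitofp <4 x i32> %a to <4 x float>
  ret <4 x float> %cvt
}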
-define <4 x float> @sitofp_4vf32(<4 x i32> %a) {
-; SSE2-LABEL: sitofp_4vf32:
+define <4 x float> @sitofp_4i32_to_4f32(<4 x i32> %a) {
+; SSE2-LABEL: sitofp_4i32_to_4f32:
; SSE2: # BB#0:
; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_4vf32:
+; AVX-LABEL: sitofp_4i32_to_4f32:
; AVX: # BB#0:
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
ret <4 x float> %cvt
}
-define <4 x float> @sitofp_4vf32_i64(<2 x i64> %a) {
-; SSE2-LABEL: sitofp_4vf32_i64:
+define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) {
+; SSE2-LABEL: sitofp_2i64_to_4f32:
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm0, %rax
; SSE2-NEXT: cvtsi2ssq %rax, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_4vf32_i64:
+; AVX-LABEL: sitofp_2i64_to_4f32:
; AVX: # BB#0:
; AVX-NEXT: vpextrq $1, %xmm0, %rax
; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1
ret <4 x float> %ext
}
-define <4 x float> @sitofp_4vf32_i16(<8 x i16> %a) {
-; SSE2-LABEL: sitofp_4vf32_i16:
+define <4 x float> @sitofp_4i16_to_4f32(<8 x i16> %a) {
+; SSE2-LABEL: sitofp_4i16_to_4f32:
; SSE2: # BB#0:
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_4vf32_i16:
+; AVX-LABEL: sitofp_4i16_to_4f32:
; AVX: # BB#0:
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
ret <4 x float> %cvt
}
-define <4 x float> @sitofp_4vf32_i8(<16 x i8> %a) {
-; SSE2-LABEL: sitofp_4vf32_i8:
+define <4 x float> @sitofp_4i8_to_4f32(<16 x i8> %a) {
+; SSE2-LABEL: sitofp_4i8_to_4f32:
; SSE2: # BB#0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_4vf32_i8:
+; AVX-LABEL: sitofp_4i8_to_4f32:
; AVX: # BB#0:
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
ret <4 x float> %cvt
}
-define <8 x float> @sitofp_8vf32(<8 x i32> %a) {
-; SSE2-LABEL: sitofp_8vf32:
+define <8 x float> @sitofp_8i32_to_8f32(<8 x i32> %a) {
+; SSE2-LABEL: sitofp_8i32_to_8f32:
; SSE2: # BB#0:
; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
; SSE2-NEXT: retq
;
-; AVX-LABEL: sitofp_8vf32:
+; AVX-LABEL: sitofp_8i32_to_8f32:
; AVX: # BB#0:
; AVX-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX-NEXT: retq
ret <8 x float> %cvt
}
-define <4 x float> @sitofp_4vf32_4i64(<4 x i64> %a) {
-; SSE2-LABEL: sitofp_4vf32_4i64:
+define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
+; SSE2-LABEL: sitofp_4i64_to_4f32:
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm1, %rax
; SSE2-NEXT: cvtsi2ssq %rax, %xmm3
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
-; AVX1-LABEL: sitofp_4vf32_4i64:
+; AVX1-LABEL: sitofp_4i64_to_4f32:
; AVX1: # BB#0:
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
-; AVX2-LABEL: sitofp_4vf32_4i64:
+; AVX2-LABEL: sitofp_4i64_to_4f32:
; AVX2: # BB#0:
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1
ret <4 x float> %cvt
}
-define <8 x float> @sitofp_8vf32_i16(<8 x i16> %a) {
-; SSE2-LABEL: sitofp_8vf32_i16:
+define <8 x float> @sitofp_8i16_to_8f32(<8 x i16> %a) {
+; SSE2-LABEL: sitofp_8i16_to_8f32:
; SSE2: # BB#0:
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
-; AVX1-LABEL: sitofp_8vf32_i16:
+; AVX1-LABEL: sitofp_8i16_to_8f32:
; AVX1: # BB#0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: sitofp_8vf32_i16:
+; AVX2-LABEL: sitofp_8i16_to_8f32:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
ret <8 x float> %cvt
}
-define <8 x float> @sitofp_8vf32_i8(<16 x i8> %a) {
-; SSE2-LABEL: sitofp_8vf32_i8:
+define <8 x float> @sitofp_8i8_to_8f32(<16 x i8> %a) {
+; SSE2-LABEL: sitofp_8i8_to_8f32:
; SSE2: # BB#0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
-; AVX1-LABEL: sitofp_8vf32_i8:
+; AVX1-LABEL: sitofp_8i8_to_8f32:
; AVX1: # BB#0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: sitofp_8vf32_i8:
+; AVX2-LABEL: sitofp_8i8_to_8f32:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxbd %xmm0, %ymm0
; AVX2-NEXT: vpslld $24, %ymm0, %ymm0
; Unsigned Integer to Float
;
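; Illustrative sketch only (hypothetical _example name): u32 sources have no
; direct conversion, so the lowering splits each lane into 16-bit halves (the
; 65535 mask in the SSE2 checks), converts both, and recombines with addps.
define <4 x float> @uitofp_4i32_to_4f32_example(<4 x i32> %a) {
  %cvt = uitofp <4 x i32> %a to <4 x float>
  ret <4 x float> %cvt
}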
-define <4 x float> @uitofp_4vf32(<4 x i32> %a) {
-; SSE2-LABEL: uitofp_4vf32:
+define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
+; SSE2-LABEL: uitofp_4i32_to_4f32:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: retq
;
-; AVX1-LABEL: uitofp_4vf32:
+; AVX1-LABEL: uitofp_4i32_to_4f32:
; AVX1: # BB#0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: uitofp_4vf32:
+; AVX2-LABEL: uitofp_4i32_to_4f32:
; AVX2: # BB#0:
; AVX2-NEXT: vpbroadcastd .LCPI24_0(%rip), %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
ret <4 x float> %cvt
}
-define <4 x float> @uitofp_4vf32_i64(<2 x i64> %a) {
-; SSE2-LABEL: uitofp_4vf32_i64:
+define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
+; SSE2-LABEL: uitofp_2i64_to_4f32:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %rax
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
-; AVX-LABEL: uitofp_4vf32_i64:
+; AVX-LABEL: uitofp_2i64_to_4f32:
; AVX: # BB#0:
; AVX-NEXT: vpextrq $1, %xmm0, %rax
; AVX-NEXT: movl %eax, %ecx
ret <4 x float> %ext
}
-define <4 x float> @uitofp_4vf32_i16(<8 x i16> %a) {
-; SSE2-LABEL: uitofp_4vf32_i16:
+define <4 x float> @uitofp_4i16_to_4f32(<8 x i16> %a) {
+; SSE2-LABEL: uitofp_4i16_to_4f32:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: uitofp_4vf32_i16:
+; AVX-LABEL: uitofp_4i16_to_4f32:
; AVX: # BB#0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
ret <4 x float> %cvt
}
-define <4 x float> @uitofp_4vf32_i8(<16 x i8> %a) {
-; SSE2-LABEL: uitofp_4vf32_i8:
+define <4 x float> @uitofp_4i8_to_4f32(<16 x i8> %a) {
+; SSE2-LABEL: uitofp_4i8_to_4f32:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE2-NEXT: retq
;
-; AVX-LABEL: uitofp_4vf32_i8:
+; AVX-LABEL: uitofp_4i8_to_4f32:
; AVX: # BB#0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
ret <4 x float> %cvt
}
-define <8 x float> @uitofp_8vf32(<8 x i32> %a) {
-; SSE2-LABEL: uitofp_8vf32:
+define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
+; SSE2-LABEL: uitofp_8i32_to_8f32:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: addps %xmm2, %xmm1
; SSE2-NEXT: retq
;
-; AVX1-LABEL: uitofp_8vf32:
+; AVX1-LABEL: uitofp_8i32_to_8f32:
; AVX1: # BB#0:
; AVX1-NEXT: vandps .LCPI28_0(%rip), %ymm0, %ymm1
; AVX1-NEXT: vcvtdq2ps %ymm1, %ymm1
; AVX1-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: uitofp_8vf32:
+; AVX2-LABEL: uitofp_8i32_to_8f32:
; AVX2: # BB#0:
; AVX2-NEXT: vpbroadcastd .LCPI28_0(%rip), %ymm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
ret <8 x float> %cvt
}
-define <4 x float> @uitofp_4vf32_4i64(<4 x i64> %a) {
-; SSE2-LABEL: uitofp_4vf32_4i64:
+define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
+; SSE2-LABEL: uitofp_4i64_to_4f32:
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm1, %rax
; SSE2-NEXT: movl %eax, %ecx
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
-; AVX1-LABEL: uitofp_4vf32_4i64:
+; AVX1-LABEL: uitofp_4i64_to_4f32:
; AVX1: # BB#0:
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
-; AVX2-LABEL: uitofp_4vf32_4i64:
+; AVX2-LABEL: uitofp_4i64_to_4f32:
; AVX2: # BB#0:
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: movl %eax, %ecx
ret <4 x float> %cvt
}
-define <8 x float> @uitofp_8vf32_i16(<8 x i16> %a) {
-; SSE2-LABEL: uitofp_8vf32_i16:
+define <8 x float> @uitofp_8i16_to_8f32(<8 x i16> %a) {
+; SSE2-LABEL: uitofp_8i16_to_8f32:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
-; AVX1-LABEL: uitofp_8vf32_i16:
+; AVX1-LABEL: uitofp_8i16_to_8f32:
; AVX1: # BB#0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: uitofp_8vf32_i16:
+; AVX2-LABEL: uitofp_8i16_to_8f32:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
ret <8 x float> %cvt
}
-define <8 x float> @uitofp_8vf32_i8(<16 x i8> %a) {
-; SSE2-LABEL: uitofp_8vf32_i8:
+define <8 x float> @uitofp_8i8_to_8f32(<16 x i8> %a) {
+; SSE2-LABEL: uitofp_8i8_to_8f32:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
-; AVX1-LABEL: uitofp_8vf32_i8:
+; AVX1-LABEL: uitofp_8i8_to_8f32:
; AVX1: # BB#0:
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: uitofp_8vf32_i8:
+; AVX2-LABEL: uitofp_8i8_to_8f32:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vpbroadcastd .LCPI31_0(%rip), %ymm1
;
%Arguments = type <{ <8 x i8>, <8 x i16>, <8 x float>* }>
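; Illustrative sketch only: assumed body for the aggregate test, inferred from
; the checked 8(%rdi)/24(%rdi) accesses on the packed %Arguments layout; the
; _example name and GEP indices are not from the original file.
define void @aggregate_sitofp_8i16_to_8f32_example(%Arguments* nocapture readonly %a0) {
  %pval = getelementptr inbounds %Arguments, %Arguments* %a0, i64 0, i32 1
  %val = load <8 x i16>, <8 x i16>* %pval, align 1
  %pdst = getelementptr inbounds %Arguments, %Arguments* %a0, i64 0, i32 2
  %dst = load <8 x float>*, <8 x float>** %pdst, align 1
  %cvt = sitofp <8 x i16> %val to <8 x float>
  store <8 x float> %cvt, <8 x float>* %dst, align 32
  ret void
}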
-define void @aggregate_sitofp_8f32_i16(%Arguments* nocapture readonly %a0) {
-; SSE2-LABEL: aggregate_sitofp_8f32_i16:
+define void @aggregate_sitofp_8i16_to_8f32(%Arguments* nocapture readonly %a0) {
+; SSE2-LABEL: aggregate_sitofp_8i16_to_8f32:
; SSE2: # BB#0:
; SSE2-NEXT: movq 24(%rdi), %rax
; SSE2-NEXT: movdqu 8(%rdi), %xmm0
; SSE2-NEXT: movaps %xmm1, 16(%rax)
; SSE2-NEXT: retq
;
-; AVX1-LABEL: aggregate_sitofp_8f32_i16:
+; AVX1-LABEL: aggregate_sitofp_8i16_to_8f32:
; AVX1: # BB#0:
; AVX1-NEXT: movq 24(%rdi), %rax
; AVX1-NEXT: vmovdqu 8(%rdi), %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
-; AVX2-LABEL: aggregate_sitofp_8f32_i16:
+; AVX2-LABEL: aggregate_sitofp_8i16_to_8f32:
; AVX2: # BB#0:
; AVX2-NEXT: movq 24(%rdi), %rax
; AVX2-NEXT: vpmovsxwd 8(%rdi), %ymm0