1 ; RUN: llc < %s -march=x86-64 -mcpu=penryn | FileCheck -check-prefix=SSE41 %s
2 ; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck -check-prefix=AVX1 %s
3 ; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck -check-prefix=AVX2 %s
6 ; These tests inject a store into the chain to test the inreg versions of pmovsx
; test1: sext <2 x i8> -> <2 x i64>. The zeroinitializer store to undef is the
; injected chain store (see header comment): it keeps the load on a chain so
; codegen must select the inreg (reg-reg) pmovsx form rather than folding the load.
8 define void @test1(<2 x i8>* %in, <2 x i64>* %out) nounwind {
9 %wide.load35 = load <2 x i8>* %in, align 1
10 %sext = sext <2 x i8> %wide.load35 to <2 x i64>
11 store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8
12 store <2 x i64> %sext, <2 x i64>* %out, align 8
; test2: sext <4 x i8> -> <4 x i64> (256-bit result), with the injected
; chain store forcing the inreg pmovsx pattern.
25 define void @test2(<4 x i8>* %in, <4 x i64>* %out) nounwind {
26 %wide.load35 = load <4 x i8>* %in, align 1
27 %sext = sext <4 x i8> %wide.load35 to <4 x i64>
28 store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8
29 store <4 x i64> %sext, <4 x i64>* %out, align 8
; test3: sext <4 x i8> -> <4 x i32>, with the injected chain store
; forcing the inreg pmovsx pattern.
36 define void @test3(<4 x i8>* %in, <4 x i32>* %out) nounwind {
37 %wide.load35 = load <4 x i8>* %in, align 1
38 %sext = sext <4 x i8> %wide.load35 to <4 x i32>
39 store <4 x i32> zeroinitializer, <4 x i32>* undef, align 8
40 store <4 x i32> %sext, <4 x i32>* %out, align 8
; test4: sext <8 x i8> -> <8 x i32> (256-bit result), with the injected
; chain store forcing the inreg pmovsx pattern.
53 define void @test4(<8 x i8>* %in, <8 x i32>* %out) nounwind {
54 %wide.load35 = load <8 x i8>* %in, align 1
55 %sext = sext <8 x i8> %wide.load35 to <8 x i32>
56 store <8 x i32> zeroinitializer, <8 x i32>* undef, align 8
57 store <8 x i32> %sext, <8 x i32>* %out, align 8
; test5: sext <8 x i8> -> <8 x i16>, with the injected chain store
; forcing the inreg pmovsx pattern.
64 define void @test5(<8 x i8>* %in, <8 x i16>* %out) nounwind {
65 %wide.load35 = load <8 x i8>* %in, align 1
66 %sext = sext <8 x i8> %wide.load35 to <8 x i16>
67 store <8 x i16> zeroinitializer, <8 x i16>* undef, align 8
68 store <8 x i16> %sext, <8 x i16>* %out, align 8
; test6: sext <16 x i8> -> <16 x i16> (256-bit result), with the injected
; chain store. Per the FIXME below, this case is currently scalarized.
81 define void @test6(<16 x i8>* %in, <16 x i16>* %out) nounwind {
82 %wide.load35 = load <16 x i8>* %in, align 1
83 %sext = sext <16 x i8> %wide.load35 to <16 x i16>
84 store <16 x i16> zeroinitializer, <16 x i16>* undef, align 8
85 store <16 x i16> %sext, <16 x i16>* %out, align 8
89 ; FIXME: v16i8 -> v16i16 is scalarized.
; test7: sext <2 x i16> -> <2 x i64>, with the injected chain store
; forcing the inreg pmovsx pattern.
93 define void @test7(<2 x i16>* %in, <2 x i64>* %out) nounwind {
94 %wide.load35 = load <2 x i16>* %in, align 1
95 %sext = sext <2 x i16> %wide.load35 to <2 x i64>
96 store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8
97 store <2 x i64> %sext, <2 x i64>* %out, align 8
101 ; SSE41-LABEL: test7:
; test8: sext <4 x i16> -> <4 x i64> (256-bit result), with the injected
; chain store forcing the inreg pmovsx pattern.
111 define void @test8(<4 x i16>* %in, <4 x i64>* %out) nounwind {
112 %wide.load35 = load <4 x i16>* %in, align 1
113 %sext = sext <4 x i16> %wide.load35 to <4 x i64>
114 store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8
115 store <4 x i64> %sext, <4 x i64>* %out, align 8
; test9: sext <4 x i16> -> <4 x i32>, with the injected chain store
; forcing the inreg pmovsx pattern.
122 define void @test9(<4 x i16>* %in, <4 x i32>* %out) nounwind {
123 %wide.load35 = load <4 x i16>* %in, align 1
124 %sext = sext <4 x i16> %wide.load35 to <4 x i32>
125 store <4 x i32> zeroinitializer, <4 x i32>* undef, align 8
126 store <4 x i32> %sext, <4 x i32>* %out, align 8
129 ; SSE41-LABEL: test9:
; test10: sext <8 x i16> -> <8 x i32> (256-bit result), with the injected
; chain store forcing the inreg pmovsx pattern.
139 define void @test10(<8 x i16>* %in, <8 x i32>* %out) nounwind {
140 %wide.load35 = load <8 x i16>* %in, align 1
141 %sext = sext <8 x i16> %wide.load35 to <8 x i32>
142 store <8 x i32> zeroinitializer, <8 x i32>* undef, align 8
143 store <8 x i32> %sext, <8 x i32>* %out, align 8
146 ; AVX2-LABEL: test10:
; test11: sext <2 x i32> -> <2 x i64>, with the injected chain store
; forcing the inreg pmovsx pattern. Checked for all three prefixes below.
150 define void @test11(<2 x i32>* %in, <2 x i64>* %out) nounwind {
151 %wide.load35 = load <2 x i32>* %in, align 1
152 %sext = sext <2 x i32> %wide.load35 to <2 x i64>
153 store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8
154 store <2 x i64> %sext, <2 x i64>* %out, align 8
157 ; SSE41-LABEL: test11:
160 ; AVX1-LABEL: test11:
163 ; AVX2-LABEL: test11:
; test12: sext <4 x i32> -> <4 x i64> (256-bit result), with the injected
; chain store forcing the inreg pmovsx pattern.
167 define void @test12(<4 x i32>* %in, <4 x i64>* %out) nounwind {
168 %wide.load35 = load <4 x i32>* %in, align 1
169 %sext = sext <4 x i32> %wide.load35 to <4 x i64>
170 store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8
171 store <4 x i64> %sext, <4 x i64>* %out, align 8
174 ; AVX2-LABEL: test12: