define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind uwtable ssp {
entry:
%0 = bitcast double* %d to <4 x double>*
- %tmp1.i = load <4 x double>* %0, align 32
+ %tmp1.i = load <4 x double>, <4 x double>* %0, align 32
%1 = bitcast float* %f to <8 x float>*
- %tmp1.i17 = load <8 x float>* %1, align 32
- %tmp1.i16 = load <4 x i64>* %i, align 32
+ %tmp1.i17 = load <8 x float>, <8 x float>* %1, align 32
+ %tmp1.i16 = load <4 x i64>, <4 x i64>* %i, align 32
tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
store <4 x double> %tmp1.i, <4 x double>* %0, align 32
store <8 x float> %tmp1.i17, <8 x float>* %1, align 32
store <4 x i64> %tmp1.i16, <4 x i64>* %i, align 32
ret void
}
;;
;; The two tests below check that we must fold load + scalar_to_vector
-;; + ins_subvec+ zext into only a single vmovss or vmovsd
+;; + ins_subvec + zext into only a single vmovss, vmovsd, or vinsertps from memory
-; CHECK: vmovss (%
+; CHECK: mov00
define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
- %val = load float* %ptr
+ %val = load float, float* %ptr
+; CHECK: vmovss (%
%i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
ret <8 x float> %i0
+; CHECK: ret
}
-; CHECK: vmovsd (%
+; CHECK: mov01
define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
- %val = load double* %ptr
+ %val = load double, double* %ptr
+; CHECK: vmovsd (%
%i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
ret <4 x double> %i0
+; CHECK: ret
}
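+;;
+;; The store tests below check that an aligned 32-byte store is emitted as a
+;; single vmovaps %ymm, while an under-aligned 32-byte store is split with
+;; vextractf128 into two 16-byte vmovups %xmm stores.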
+; CHECK: storev16i16
; CHECK: vmovaps %ymm
define void @storev16i16(<16 x i16> %a) nounwind {
store <16 x i16> %a, <16 x i16>* undef, align 32
unreachable
}
-; CHECK: vmovups %ymm
+; CHECK: storev16i16_01
+; CHECK: vextractf128
+; CHECK: vmovups %xmm
define void @storev16i16_01(<16 x i16> %a) nounwind {
store <16 x i16> %a, <16 x i16>* undef, align 4
unreachable
}
+; CHECK: storev32i8
; CHECK: vmovaps %ymm
define void @storev32i8(<32 x i8> %a) nounwind {
store <32 x i8> %a, <32 x i8>* undef, align 32
unreachable
}
-; CHECK: vmovups %ymm
+; CHECK: storev32i8_01
+; CHECK: vextractf128
+; CHECK: vmovups %xmm
define void @storev32i8_01(<32 x i8> %a) nounwind {
store <32 x i8> %a, <32 x i8>* undef, align 4
unreachable
}
-declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x float>) nounwind
+declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwind
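+; Note: the mask operand of the AVX maskstore intrinsics is an integer
+; vector; the sign bit of each element selects whether that lane is stored.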
; CHECK-O0: _f_f
; CHECK-O0: vmovss LCPI
define void @f_f() nounwind {
cif_mask_mixed:
br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check
cif_mixed_test_all: ; preds = %cif_mask_mixed
- call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x float> <float 0xFFFFFFFFE0000000, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, <8 x float> undef) nounwind
+ call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x i32> <i32 -1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <8 x float> undef) nounwind
unreachable
cif_mixed_test_any_check: ; preds = %cif_mask_mixed
unreachable
}
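+
+;; The tests below check 32-byte loads and stores at different alignments:
+;; align 1 uses two unaligned 16-byte vmovups ops (with no vinsertf128 or
+;; vextractf128 to recombine them), align 64 allows a single 32-byte vmovaps,
+;; and align 16 is split into two aligned 16-byte vmovaps ops.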
+; CHECK: add8i32
+; CHECK: vmovups
+; CHECK: vmovups
+; CHECK-NOT: vinsertf128
+; CHECK-NOT: vextractf128
+; CHECK: vmovups
+; CHECK: vmovups
+define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
+ %b = load <8 x i32>, <8 x i32>* %bp, align 1
+ %x = add <8 x i32> zeroinitializer, %b
+ store <8 x i32> %x, <8 x i32>* %ret, align 1
+ ret void
+}
+
+; CHECK: add4i64a64
+; CHECK: vmovaps ({{.*}}), %ymm{{.*}}
+; CHECK: vmovaps %ymm{{.*}}, ({{.*}})
+define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
+ %b = load <4 x i64>, <4 x i64>* %bp, align 64
+ %x = add <4 x i64> zeroinitializer, %b
+ store <4 x i64> %x, <4 x i64>* %ret, align 64
+ ret void
+}
+
+; CHECK: add4i64a16
+; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
+; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
+; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
+; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
+define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
+ %b = load <4 x i64>, <4 x i64>* %bp, align 16
+ %x = add <4 x i64> zeroinitializer, %b
+ store <4 x i64> %x, <4 x i64>* %ret, align 16
+ ret void
+}