{ X86::VMOVDQA64Zrr, X86::VMOVDQA64Zmr, TB_FOLDED_STORE | TB_ALIGN_64 },
{ X86::VMOVUPDZrr, X86::VMOVUPDZmr, TB_FOLDED_STORE },
{ X86::VMOVUPSZrr, X86::VMOVUPSZmr, TB_FOLDED_STORE },
+ { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zmr, TB_FOLDED_STORE },
+ { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zmr, TB_FOLDED_STORE },
{ X86::VMOVDQU32Zrr, X86::VMOVDQU32Zmr, TB_FOLDED_STORE },
- { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zmr, TB_FOLDED_STORE }
+ { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zmr, TB_FOLDED_STORE },
+ // AVX-512 foldable instructions (256-bit versions)
+ { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
+ { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
+ { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
+ { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
+ { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256mr, TB_FOLDED_STORE },
+ { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256mr, TB_FOLDED_STORE },
+ { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256mr, TB_FOLDED_STORE },
+ { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256mr, TB_FOLDED_STORE },
+ { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256mr, TB_FOLDED_STORE },
+ { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256mr, TB_FOLDED_STORE },
+ // AVX-512 foldable instructions (128-bit versions)
+ { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
+ { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
+ { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
+ { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
+ { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128mr, TB_FOLDED_STORE },
+ { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128mr, TB_FOLDED_STORE },
+ { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128mr, TB_FOLDED_STORE },
+ { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128mr, TB_FOLDED_STORE },
+ { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128mr, TB_FOLDED_STORE },
+ { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128mr, TB_FOLDED_STORE }
};
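// How these rows are consumed (a sketch for context; the helper name
// AddTableEntry matches this era of X86InstrInfo.cpp but may differ in
// other revisions): the loop below registers each { RegOp, MemOp, Flags }
// row in the folding maps. TB_FOLDED_STORE restricts an entry to
// store-folding (reg->mem, e.g. when spilling), and TB_ALIGN_N additionally
// requires N-byte alignment on the memory operand; hence only the aligned
// vmovdqa/vmovaps/vmovapd rows carry TB_ALIGN flags.
//
//   unsigned RegOp = OpTbl0[i].RegOp;
//   unsigned MemOp = OpTbl0[i].MemOp;
//   unsigned Flags = OpTbl0[i].Flags;
//   AddTableEntry(RegOp2MemOpTable0, MemOp2RegOpTable,
//                 RegOp, MemOp, TB_INDEX_0 | Flags);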
for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
{ X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
{ X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
{ X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
+ { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
+ { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
{ X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
{ X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
{ X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
{ X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
{ X86::VPABSDZrr, X86::VPABSDZrm, 0 },
{ X86::VPABSQZrr, X86::VPABSQZrm, 0 },
+ // AVX-512 foldable instructions (256-bit versions)
+ { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
+ { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
+ { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
+ { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
+ { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
+ { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
+ { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
+ { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
+ { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
+ { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
+ // AVX-512 foldable instructions (128-bit versions)
+ { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
+ { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
+ { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
+ { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
+ { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
+ { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
+ { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
+ { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
+ { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
+ { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
// AES foldable instructions
{ X86::AESIMCrr, X86::AESIMCrm, TB_ALIGN_16 },
--- /dev/null
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw -mattr=+avx512vl --show-mc-encoding | FileCheck %s
+
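+; Note: every EVEX-encoded instruction begins with the 0x62 prefix byte,
+; so pinning the first encoding byte to 0x62 in the checks below verifies
+; that llc selects the EVEX (AVX-512BW/VL) form of each move rather than
+; the VEX-encoded AVX form.
+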
+; CHECK-LABEL: test_256_1
+; CHECK: vmovdqu8 {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <32 x i8> @test_256_1(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <32 x i8>*
+ %res = load <32 x i8>* %vaddr, align 1
+ ret <32 x i8>%res
+}
+
+; CHECK-LABEL: test_256_2
+; CHECK: vmovdqu8{{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_2(i8 * %addr, <32 x i8> %data) {
+ %vaddr = bitcast i8* %addr to <32 x i8>*
+ store <32 x i8>%data, <32 x i8>* %vaddr, align 1
+ ret void
+}
+
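+; The masked variants below build a predicate by comparing %mask1 against
+; zero, then select between the loaded value and %old (merge-masking,
+; printed as {%k}) or zeroinitializer (zero-masking, printed as {%k} {z});
+; the expectation is that this folds into a single masked move.
+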
+; CHECK-LABEL: test_256_3
+; CHECK: vmovdqu8{{.*{%k[1-7]} }}## encoding: [0x62
+; CHECK: ret
+define <32 x i8> @test_256_3(i8 * %addr, <32 x i8> %old, <32 x i8> %mask1) {
+ %mask = icmp ne <32 x i8> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <32 x i8>*
+ %r = load <32 x i8>* %vaddr, align 1
+ %res = select <32 x i1> %mask, <32 x i8> %r, <32 x i8> %old
+ ret <32 x i8>%res
+}
+
+; CHECK-LABEL: test_256_4
+; CHECK: vmovdqu8{{.*{%k[1-7]} {z} }}## encoding: [0x62
+; CHECK: ret
+define <32 x i8> @test_256_4(i8 * %addr, <32 x i8> %mask1) {
+ %mask = icmp ne <32 x i8> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <32 x i8>*
+ %r = load <32 x i8>* %vaddr, align 1
+ %res = select <32 x i1> %mask, <32 x i8> %r, <32 x i8> zeroinitializer
+ ret <32 x i8>%res
+}
+
+; CHECK-LABEL: test_256_5
+; CHECK: vmovdqu16{{.*}} ## encoding: [0x62
+; CHECK: ret
+define <16 x i16> @test_256_5(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <16 x i16>*
+ %res = load <16 x i16>* %vaddr, align 1
+ ret <16 x i16>%res
+}
+
+; CHECK-LABEL: test_256_6
+; CHECK: vmovdqu16{{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_6(i8 * %addr, <16 x i16> %data) {
+ %vaddr = bitcast i8* %addr to <16 x i16>*
+ store <16 x i16>%data, <16 x i16>* %vaddr, align 1
+ ret void
+}
+
+; CHECK-LABEL: test_256_7
+; CHECK: vmovdqu16{{.*{%k[1-7]} }}## encoding: [0x62
+; CHECK: ret
+define <16 x i16> @test_256_7(i8 * %addr, <16 x i16> %old, <16 x i16> %mask1) {
+ %mask = icmp ne <16 x i16> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <16 x i16>*
+ %r = load <16 x i16>* %vaddr, align 1
+ %res = select <16 x i1> %mask, <16 x i16> %r, <16 x i16> %old
+ ret <16 x i16>%res
+}
+
+; CHECK-LABEL: test_256_8
+; CHECK: vmovdqu16{{.*{%k[1-7]} {z} }}## encoding: [0x62
+; CHECK: ret
+define <16 x i16> @test_256_8(i8 * %addr, <16 x i16> %mask1) {
+ %mask = icmp ne <16 x i16> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <16 x i16>*
+ %r = load <16 x i16>* %vaddr, align 1
+ %res = select <16 x i1> %mask, <16 x i16> %r, <16 x i16> zeroinitializer
+ ret <16 x i16>%res
+}
+
+; CHECK-LABEL: test_128_1
+; CHECK: vmovdqu8 {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <16 x i8> @test_128_1(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <16 x i8>*
+ %res = load <16 x i8>* %vaddr, align 1
+ ret <16 x i8>%res
+}
+
+; CHECK-LABEL: test_128_2
+; CHECK: vmovdqu8{{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_2(i8 * %addr, <16 x i8> %data) {
+ %vaddr = bitcast i8* %addr to <16 x i8>*
+ store <16 x i8>%data, <16 x i8>* %vaddr, align 1
+ ret void
+}
+
+; CHECK-LABEL: test_128_3
+; CHECK: vmovdqu8{{.*{%k[1-7]} }}## encoding: [0x62
+; CHECK: ret
+define <16 x i8> @test_128_3(i8 * %addr, <16 x i8> %old, <16 x i8> %mask1) {
+ %mask = icmp ne <16 x i8> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <16 x i8>*
+ %r = load <16 x i8>* %vaddr, align 1
+ %res = select <16 x i1> %mask, <16 x i8> %r, <16 x i8> %old
+ ret <16 x i8>%res
+}
+
+; CHECK-LABEL: test_128_4
+; CHECK: vmovdqu8{{.*{%k[1-7]} {z} }}## encoding: [0x62
+; CHECK: ret
+define <16 x i8> @test_128_4(i8 * %addr, <16 x i8> %mask1) {
+ %mask = icmp ne <16 x i8> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <16 x i8>*
+ %r = load <16 x i8>* %vaddr, align 1
+ %res = select <16 x i1> %mask, <16 x i8> %r, <16 x i8> zeroinitializer
+ ret <16 x i8>%res
+}
+
+; CHECK-LABEL: test_128_5
+; CHECK: vmovdqu16{{.*}} ## encoding: [0x62
+; CHECK: ret
+define <8 x i16> @test_128_5(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <8 x i16>*
+ %res = load <8 x i16>* %vaddr, align 1
+ ret <8 x i16>%res
+}
+
+; CHECK-LABEL: test_128_6
+; CHECK: vmovdqu16{{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_6(i8 * %addr, <8 x i16> %data) {
+ %vaddr = bitcast i8* %addr to <8 x i16>*
+ store <8 x i16>%data, <8 x i16>* %vaddr, align 1
+ ret void
+}
+
+; CHECK-LABEL: test_128_7
+; CHECK: vmovdqu16{{.*{%k[1-7]} }}## encoding: [0x62
+; CHECK: ret
+define <8 x i16> @test_128_7(i8 * %addr, <8 x i16> %old, <8 x i16> %mask1) {
+ %mask = icmp ne <8 x i16> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <8 x i16>*
+ %r = load <8 x i16>* %vaddr, align 1
+ %res = select <8 x i1> %mask, <8 x i16> %r, <8 x i16> %old
+ ret <8 x i16>%res
+}
+
+; CHECK-LABEL: test_128_8
+; CHECK: vmovdqu16{{.*{%k[1-7]} {z} }}## encoding: [0x62
+; CHECK: ret
+define <8 x i16> @test_128_8(i8 * %addr, <8 x i16> %mask1) {
+ %mask = icmp ne <8 x i16> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <8 x i16>*
+ %r = load <8 x i16>* %vaddr, align 1
+ %res = select <8 x i1> %mask, <8 x i16> %r, <8 x i16> zeroinitializer
+ ret <8 x i16>%res
+}
+
--- /dev/null
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl --show-mc-encoding | FileCheck %s
+
+; CHECK-LABEL: test_256_1
+; CHECK: vmovdqu32
+; CHECK: ret
+define <8 x i32> @test_256_1(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <8 x i32>*
+ %res = load <8 x i32>* %vaddr, align 1
+ ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_2
+; CHECK: vmovdqa32
+; CHECK: ret
+define <8 x i32> @test_256_2(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <8 x i32>*
+ %res = load <8 x i32>* %vaddr, align 32
+ ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_3
+; CHECK: vmovdqa64
+; CHECK: ret
+define void @test_256_3(i8 * %addr, <4 x i64> %data) {
+ %vaddr = bitcast i8* %addr to <4 x i64>*
+ store <4 x i64>%data, <4 x i64>* %vaddr, align 32
+ ret void
+}
+
+; CHECK-LABEL: test_256_4
+; CHECK: vmovdqu32
+; CHECK: ret
+define void @test_256_4(i8 * %addr, <8 x i32> %data) {
+ %vaddr = bitcast i8* %addr to <8 x i32>*
+ store <8 x i32>%data, <8 x i32>* %vaddr, align 1
+ ret void
+}
+
+; CHECK-LABEL: test_256_5
+; CHECK: vmovdqa32
+; CHECK: ret
+define void @test_256_5(i8 * %addr, <8 x i32> %data) {
+ %vaddr = bitcast i8* %addr to <8 x i32>*
+ store <8 x i32>%data, <8 x i32>* %vaddr, align 32
+ ret void
+}
+
+; CHECK-LABEL: test_256_6
+; CHECK: vmovdqa64
+; CHECK: ret
+define <4 x i64> @test_256_6(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <4 x i64>*
+ %res = load <4 x i64>* %vaddr, align 32
+ ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_7
+; CHECK: vmovdqu64
+; CHECK: ret
+define void @test_256_7(i8 * %addr, <4 x i64> %data) {
+ %vaddr = bitcast i8* %addr to <4 x i64>*
+ store <4 x i64>%data, <4 x i64>* %vaddr, align 1
+ ret void
+}
+
+; CHECK-LABEL: test_256_8
+; CHECK: vmovdqu64
+; CHECK: ret
+define <4 x i64> @test_256_8(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <4 x i64>*
+ %res = load <4 x i64>* %vaddr, align 1
+ ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_9
+; CHECK: vmovapd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_9(i8 * %addr, <4 x double> %data) {
+ %vaddr = bitcast i8* %addr to <4 x double>*
+ store <4 x double>%data, <4 x double>* %vaddr, align 32
+ ret void
+}
+
+; CHECK-LABEL: test_256_10
+; CHECK: vmovapd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <4 x double> @test_256_10(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <4 x double>*
+ %res = load <4 x double>* %vaddr, align 32
+ ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_256_11
+; CHECK: vmovaps {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_11(i8 * %addr, <8 x float> %data) {
+ %vaddr = bitcast i8* %addr to <8 x float>*
+ store <8 x float>%data, <8 x float>* %vaddr, align 32
+ ret void
+}
+
+; CHECK-LABEL: test_256_12
+; CHECK: vmovaps {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <8 x float> @test_256_12(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <8 x float>*
+ %res = load <8 x float>* %vaddr, align 32
+ ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_13
+; CHECK: vmovupd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_13(i8 * %addr, <4 x double> %data) {
+ %vaddr = bitcast i8* %addr to <4 x double>*
+ store <4 x double>%data, <4 x double>* %vaddr, align 1
+ ret void
+}
+
+; CHECK-LABEL: test_256_14
+; CHECK: vmovupd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <4 x double> @test_256_14(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <4 x double>*
+ %res = load <4 x double>* %vaddr, align 1
+ ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_256_15
+; CHECK: vmovups {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_15(i8 * %addr, <8 x float> %data) {
+ %vaddr = bitcast i8* %addr to <8 x float>*
+ store <8 x float>%data, <8 x float>* %vaddr, align 1
+ ret void
+}
+
+; CHECK-LABEL: test_256_16
+; CHECK: vmovups {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <8 x float> @test_256_16(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <8 x float>*
+ %res = load <8 x float>* %vaddr, align 1
+ ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_17
+; CHECK: vmovdqa32{{.*{%k[1-7]} }}
+; CHECK: ret
+define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <8 x i32>*
+ %r = load <8 x i32>* %vaddr, align 32
+ %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> %old
+ ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_18
+; CHECK: vmovdqu32{{.*{%k[1-7]} }}
+; CHECK: ret
+define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <8 x i32>*
+ %r = load <8 x i32>* %vaddr, align 1
+ %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> %old
+ ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_19
+; CHECK: vmovdqa32{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <8 x i32> @test_256_19(i8 * %addr, <8 x i32> %mask1) {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <8 x i32>*
+ %r = load <8 x i32>* %vaddr, align 32
+ %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> zeroinitializer
+ ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_20
+; CHECK: vmovdqu32{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <8 x i32> @test_256_20(i8 * %addr, <8 x i32> %mask1) {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <8 x i32>*
+ %r = load <8 x i32>* %vaddr, align 1
+ %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> zeroinitializer
+ ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_21
+; CHECK: vmovdqa64{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x i64>*
+ %r = load <4 x i64>* %vaddr, align 32
+ %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> %old
+ ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_22
+; CHECK: vmovdqu64{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x i64>*
+ %r = load <4 x i64>* %vaddr, align 1
+ %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> %old
+ ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_23
+; CHECK: vmovdqa64{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x i64> @test_256_23(i8 * %addr, <4 x i64> %mask1) {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x i64>*
+ %r = load <4 x i64>* %vaddr, align 32
+ %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> zeroinitializer
+ ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_24
+; CHECK: vmovdqu64{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x i64> @test_256_24(i8 * %addr, <4 x i64> %mask1) {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x i64>*
+ %r = load <4 x i64>* %vaddr, align 1
+ %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> zeroinitializer
+ ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_25
+; CHECK: vmovaps{{.*{%k[1-7]} }}
+; CHECK: ret
+define <8 x float> @test_256_25(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
+ %mask = fcmp one <8 x float> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <8 x float>*
+ %r = load <8 x float>* %vaddr, align 32
+ %res = select <8 x i1> %mask, <8 x float> %r, <8 x float> %old
+ ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_26
+; CHECK: vmovups{{.*{%k[1-7]} }}
+; CHECK: ret
+define <8 x float> @test_256_26(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
+ %mask = fcmp one <8 x float> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <8 x float>*
+ %r = load <8 x float>* %vaddr, align 1
+ %res = select <8 x i1> %mask, <8 x float> %r, <8 x float> %old
+ ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_27
+; CHECK: vmovaps{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <8 x float> @test_256_27(i8 * %addr, <8 x float> %mask1) {
+ %mask = fcmp one <8 x float> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <8 x float>*
+ %r = load <8 x float>* %vaddr, align 32
+ %res = select <8 x i1> %mask, <8 x float> %r, <8 x float> zeroinitializer
+ ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_28
+; CHECK: vmovups{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <8 x float> @test_256_28(i8 * %addr, <8 x float> %mask1) {
+ %mask = fcmp one <8 x float> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <8 x float>*
+ %r = load <8 x float>* %vaddr, align 1
+ %res = select <8 x i1> %mask, <8 x float> %r, <8 x float> zeroinitializer
+ ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_29
+; CHECK: vmovapd{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x double> @test_256_29(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x double>*
+ %r = load <4 x double>* %vaddr, align 32
+ %res = select <4 x i1> %mask, <4 x double> %r, <4 x double> %old
+ ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_256_30
+; CHECK: vmovupd{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x double> @test_256_30(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x double>*
+ %r = load <4 x double>* %vaddr, align 1
+ %res = select <4 x i1> %mask, <4 x double> %r, <4 x double> %old
+ ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_256_31
+; CHECK: vmovapd{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x double> @test_256_31(i8 * %addr, <4 x i64> %mask1) {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x double>*
+ %r = load <4 x double>* %vaddr, align 32
+ %res = select <4 x i1> %mask, <4 x double> %r, <4 x double> zeroinitializer
+ ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_256_32
+; CHECK: vmovupd{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x double> @test_256_32(i8 * %addr, <4 x i64> %mask1) {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x double>*
+ %r = load <4 x double>* %vaddr, align 1
+ %res = select <4 x i1> %mask, <4 x double> %r, <4 x double> zeroinitializer
+ ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_128_1
+; CHECK: vmovdqu32
+; CHECK: ret
+define <4 x i32> @test_128_1(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <4 x i32>*
+ %res = load <4 x i32>* %vaddr, align 1
+ ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_2
+; CHECK: vmovdqa32
+; CHECK: ret
+define <4 x i32> @test_128_2(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <4 x i32>*
+ %res = load <4 x i32>* %vaddr, align 16
+ ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_3
+; CHECK: vmovdqa64
+; CHECK: ret
+define void @test_128_3(i8 * %addr, <2 x i64> %data) {
+ %vaddr = bitcast i8* %addr to <2 x i64>*
+ store <2 x i64>%data, <2 x i64>* %vaddr, align 16
+ ret void
+}
+
+; CHECK-LABEL: test_128_4
+; CHECK: vmovdqu32
+; CHECK: ret
+define void @test_128_4(i8 * %addr, <4 x i32> %data) {
+ %vaddr = bitcast i8* %addr to <4 x i32>*
+ store <4 x i32>%data, <4 x i32>* %vaddr, align 1
+ ret void
+}
+
+; CHECK-LABEL: test_128_5
+; CHECK: vmovdqa32
+; CHECK: ret
+define void @test_128_5(i8 * %addr, <4 x i32> %data) {
+ %vaddr = bitcast i8* %addr to <4 x i32>*
+ store <4 x i32>%data, <4 x i32>* %vaddr, align 16
+ ret void
+}
+
+; CHECK-LABEL: test_128_6
+; CHECK: vmovdqa64
+; CHECK: ret
+define <2 x i64> @test_128_6(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <2 x i64>*
+ %res = load <2 x i64>* %vaddr, align 16
+ ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_7
+; CHECK: vmovdqu64
+; CHECK: ret
+define void @test_128_7(i8 * %addr, <2 x i64> %data) {
+ %vaddr = bitcast i8* %addr to <2 x i64>*
+ store <2 x i64>%data, <2 x i64>* %vaddr, align 1
+ ret void
+}
+
+; CHECK-LABEL: test_128_8
+; CHECK: vmovdqu64
+; CHECK: ret
+define <2 x i64> @test_128_8(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <2 x i64>*
+ %res = load <2 x i64>* %vaddr, align 1
+ ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_9
+; CHECK: vmovapd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_9(i8 * %addr, <2 x double> %data) {
+ %vaddr = bitcast i8* %addr to <2 x double>*
+ store <2 x double>%data, <2 x double>* %vaddr, align 16
+ ret void
+}
+
+; CHECK-LABEL: test_128_10
+; CHECK: vmovapd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <2 x double> @test_128_10(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <2 x double>*
+ %res = load <2 x double>* %vaddr, align 16
+ ret <2 x double>%res
+}
+
+; CHECK-LABEL: test_128_11
+; CHECK: vmovaps {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_11(i8 * %addr, <4 x float> %data) {
+ %vaddr = bitcast i8* %addr to <4 x float>*
+ store <4 x float>%data, <4 x float>* %vaddr, align 16
+ ret void
+}
+
+; CHECK-LABEL: test_128_12
+; CHECK: vmovaps {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <4 x float> @test_128_12(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <4 x float>*
+ %res = load <4 x float>* %vaddr, align 16
+ ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_13
+; CHECK: vmovupd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_13(i8 * %addr, <2 x double> %data) {
+ %vaddr = bitcast i8* %addr to <2 x double>*
+ store <2 x double>%data, <2 x double>* %vaddr, align 1
+ ret void
+}
+
+; CHECK-LABEL: test_128_14
+; CHECK: vmovupd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <2 x double> @test_128_14(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <2 x double>*
+ %res = load <2 x double>* %vaddr, align 1
+ ret <2 x double>%res
+}
+
+; CHECK-LABEL: test_128_15
+; CHECK: vmovups {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_15(i8 * %addr, <4 x float> %data) {
+ %vaddr = bitcast i8* %addr to <4 x float>*
+ store <4 x float>%data, <4 x float>* %vaddr, align 1
+ ret void
+}
+
+; CHECK-LABEL: test_128_16
+; CHECK: vmovups {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <4 x float> @test_128_16(i8 * %addr) {
+ %vaddr = bitcast i8* %addr to <4 x float>*
+ %res = load <4 x float>* %vaddr, align 1
+ ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_17
+; CHECK: vmovdqa32{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x i32> @test_128_17(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x i32>*
+ %r = load <4 x i32>* %vaddr, align 16
+ %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> %old
+ ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_18
+; CHECK: vmovdqu32{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x i32> @test_128_18(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x i32>*
+ %r = load <4 x i32>* %vaddr, align 1
+ %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> %old
+ ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_19
+; CHECK: vmovdqa32{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x i32> @test_128_19(i8 * %addr, <4 x i32> %mask1) {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x i32>*
+ %r = load <4 x i32>* %vaddr, align 16
+ %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> zeroinitializer
+ ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_20
+; CHECK: vmovdqu32{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x i32> @test_128_20(i8 * %addr, <4 x i32> %mask1) {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x i32>*
+ %r = load <4 x i32>* %vaddr, align 1
+ %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> zeroinitializer
+ ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_21
+; CHECK: vmovdqa64{{.*{%k[1-7]} }}
+; CHECK: ret
+define <2 x i64> @test_128_21(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <2 x i64>*
+ %r = load <2 x i64>* %vaddr, align 16
+ %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> %old
+ ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_22
+; CHECK: vmovdqu64{{.*{%k[1-7]} }}
+; CHECK: ret
+define <2 x i64> @test_128_22(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <2 x i64>*
+ %r = load <2 x i64>* %vaddr, align 1
+ %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> %old
+ ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_23
+; CHECK: vmovdqa64{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <2 x i64> @test_128_23(i8 * %addr, <2 x i64> %mask1) {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <2 x i64>*
+ %r = load <2 x i64>* %vaddr, align 16
+ %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> zeroinitializer
+ ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_24
+; CHECK: vmovdqu64{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <2 x i64> @test_128_24(i8 * %addr, <2 x i64> %mask1) {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <2 x i64>*
+ %r = load <2 x i64>* %vaddr, align 1
+ %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> zeroinitializer
+ ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_25
+; CHECK: vmovaps{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x float> @test_128_25(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x float>*
+ %r = load <4 x float>* %vaddr, align 16
+ %res = select <4 x i1> %mask, <4 x float> %r, <4 x float> %old
+ ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_26
+; CHECK: vmovups{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x float> @test_128_26(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x float>*
+ %r = load <4 x float>* %vaddr, align 1
+ %res = select <4 x i1> %mask, <4 x float> %r, <4 x float> %old
+ ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_27
+; CHECK: vmovaps{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x float> @test_128_27(i8 * %addr, <4 x i32> %mask1) {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x float>*
+ %r = load <4 x float>* %vaddr, align 16
+ %res = select <4 x i1> %mask, <4 x float> %r, <4 x float> zeroinitializer
+ ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_28
+; CHECK: vmovups{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x float> @test_128_28(i8 * %addr, <4 x i32> %mask1) {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <4 x float>*
+ %r = load <4 x float>* %vaddr, align 1
+ %res = select <4 x i1> %mask, <4 x float> %r, <4 x float> zeroinitializer
+ ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_29
+; CHECK: vmovapd{{.*{%k[1-7]} }}
+; CHECK: ret
+define <2 x double> @test_128_29(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <2 x double>*
+ %r = load <2 x double>* %vaddr, align 16
+ %res = select <2 x i1> %mask, <2 x double> %r, <2 x double> %old
+ ret <2 x double>%res
+}
+
+; CHECK-LABEL: test_128_30
+; CHECK: vmovupd{{.*{%k[1-7]} }}
+; CHECK: ret
+define <2 x double> @test_128_30(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <2 x double>*
+ %r = load <2 x double>* %vaddr, align 1
+ %res = select <2 x i1> %mask, <2 x double> %r, <2 x double> %old
+ ret <2 x double>%res
+}
+
+; CHECK-LABEL: test_128_31
+; CHECK: vmovapd{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <2 x double> @test_128_31(i8 * %addr, <2 x i64> %mask1) {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <2 x double>*
+ %r = load <2 x double>* %vaddr, align 16
+ %res = select <2 x i1> %mask, <2 x double> %r, <2 x double> zeroinitializer
+ ret <2 x double>%res
+}
+
+; CHECK-LABEL: test_128_32
+; CHECK: vmovupd{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <2 x double> @test_128_32(i8 * %addr, <2 x i64> %mask1) {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %vaddr = bitcast i8* %addr to <2 x double>*
+ %r = load <2 x double>* %vaddr, align 1
+ %res = select <2 x i1> %mask, <2 x double> %r, <2 x double> zeroinitializer
+ ret <2 x double>%res
+}
+