setOperationAction(ISD::MUL, MVT::v16i16, Legal);
setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
+
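+ // AVX2 provides per-element variable shift instructions
+ // (vpsllvd/q, vpsrlvd/q, vpsravd), so the generic vector shift
+ // nodes are Legal for these 128-bit types and need no expansion.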
+ setOperationAction(ISD::SHL, MVT::v4i32, Legal);
+ setOperationAction(ISD::SHL, MVT::v2i64, Legal);
+ setOperationAction(ISD::SRL, MVT::v4i32, Legal);
+ setOperationAction(ISD::SRL, MVT::v2i64, Legal);
+ setOperationAction(ISD::SRA, MVT::v4i32, Legal);
+
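+ // The corresponding 256-bit forms. AVX2 has no variable arithmetic
+ // shift for 64-bit elements (no vpsravq), so SRA of v2i64/v4i64 is
+ // intentionally left out.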
+ setOperationAction(ISD::SHL, MVT::v8i32, Legal);
+ setOperationAction(ISD::SHL, MVT::v4i64, Legal);
+ setOperationAction(ISD::SRL, MVT::v8i32, Legal);
+ setOperationAction(ISD::SRL, MVT::v4i64, Legal);
+ setOperationAction(ISD::SRA, MVT::v8i32, Legal);
// Don't lower v32i8 because there is no 128-bit byte mul
} else {
setOperationAction(ISD::ADD, MVT::v4i64, Custom);
defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", memopv2i64, memopv4i64,
                              int_x86_avx2_psrlv_q, int_x86_avx2_psrlv_q_256>,
                              VEX_W;
defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", memopv4i32, memopv8i32,
int_x86_avx2_psrav_d, int_x86_avx2_psrav_d_256>;
+
+
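+// Select generic vector shifts by a variable (per-element) amount
+// straight to the AVX2 variable-shift instructions, so plain IR
+// shl/lshr/ashr on these vector types needs no intrinsics.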
+let Predicates = [HasAVX2] in {
+ def : Pat<(v4i32 (shl (v4i32 VR128:$src1), (v4i32 VR128:$src2))),
+ (VPSLLVDrr VR128:$src1, VR128:$src2)>;
+ def : Pat<(v2i64 (shl (v2i64 VR128:$src1), (v2i64 VR128:$src2))),
+ (VPSLLVQrr VR128:$src1, VR128:$src2)>;
+ def : Pat<(v4i32 (srl (v4i32 VR128:$src1), (v4i32 VR128:$src2))),
+ (VPSRLVDrr VR128:$src1, VR128:$src2)>;
+ def : Pat<(v2i64 (srl (v2i64 VR128:$src1), (v2i64 VR128:$src2))),
+ (VPSRLVQrr VR128:$src1, VR128:$src2)>;
+ def : Pat<(v4i32 (sra (v4i32 VR128:$src1), (v4i32 VR128:$src2))),
+ (VPSRAVDrr VR128:$src1, VR128:$src2)>;
+
+ def : Pat<(v8i32 (shl (v8i32 VR256:$src1), (v8i32 VR256:$src2))),
+ (VPSLLVDYrr VR256:$src1, VR256:$src2)>;
+ def : Pat<(v4i64 (shl (v4i64 VR256:$src1), (v4i64 VR256:$src2))),
+ (VPSLLVQYrr VR256:$src1, VR256:$src2)>;
+ def : Pat<(v8i32 (srl (v8i32 VR256:$src1), (v8i32 VR256:$src2))),
+ (VPSRLVDYrr VR256:$src1, VR256:$src2)>;
+ def : Pat<(v4i64 (srl (v4i64 VR256:$src1), (v4i64 VR256:$src2))),
+ (VPSRLVQYrr VR256:$src1, VR256:$src2)>;
+ def : Pat<(v8i32 (sra (v8i32 VR256:$src1), (v8i32 VR256:$src2))),
+ (VPSRAVDYrr VR256:$src1, VR256:$src2)>;
+}
+
ret <4 x i64> %x
}
-
-
; CHECK: vpblendvb
; CHECK: vpblendvb %ymm
; CHECK: ret
define <32 x i8> @vpblendvb(<32 x i1> %min_is_x, <32 x i8> %x, <32 x i8> %y) {
  %min = select <32 x i1> %min_is_x, <32 x i8> %x, <32 x i8> %y
ret <32 x i8> %min
}
+
+
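+; Each lane of %x is shifted by the corresponding lane of %y; every
+; test below should select a single AVX2 variable-shift instruction.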
+; CHECK: variable_shl0
+; CHECK: psllvd
+; CHECK: ret
+define <4 x i32> @variable_shl0(<4 x i32> %x, <4 x i32> %y) {
+ %k = shl <4 x i32> %x, %y
+ ret <4 x i32> %k
+}
+; CHECK: variable_shl1
+; CHECK: psllvd
+; CHECK: ret
+define <8 x i32> @variable_shl1(<8 x i32> %x, <8 x i32> %y) {
+ %k = shl <8 x i32> %x, %y
+ ret <8 x i32> %k
+}
+; CHECK: variable_shl2
+; CHECK: psllvq
+; CHECK: ret
+define <2 x i64> @variable_shl2(<2 x i64> %x, <2 x i64> %y) {
+ %k = shl <2 x i64> %x, %y
+ ret <2 x i64> %k
+}
+; CHECK: variable_shl3
+; CHECK: psllvq
+; CHECK: ret
+define <4 x i64> @variable_shl3(<4 x i64> %x, <4 x i64> %y) {
+ %k = shl <4 x i64> %x, %y
+ ret <4 x i64> %k
+}
+; CHECK: variable_srl0
+; CHECK: psrlvd
+; CHECK: ret
+define <4 x i32> @variable_srl0(<4 x i32> %x, <4 x i32> %y) {
+ %k = lshr <4 x i32> %x, %y
+ ret <4 x i32> %k
+}
+; CHECK: variable_srl1
+; CHECK: psrlvd
+; CHECK: ret
+define <8 x i32> @variable_srl1(<8 x i32> %x, <8 x i32> %y) {
+ %k = lshr <8 x i32> %x, %y
+ ret <8 x i32> %k
+}
+; CHECK: variable_srl2
+; CHECK: psrlvq
+; CHECK: ret
+define <2 x i64> @variable_srl2(<2 x i64> %x, <2 x i64> %y) {
+ %k = lshr <2 x i64> %x, %y
+ ret <2 x i64> %k
+}
+; CHECK: variable_srl3
+; CHECK: psrlvq
+; CHECK: ret
+define <4 x i64> @variable_srl3(<4 x i64> %x, <4 x i64> %y) {
+ %k = lshr <4 x i64> %x, %y
+ ret <4 x i64> %k
+}
+
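+; Variable arithmetic shifts exist only for i32 elements
+; (AVX2 has no vpsravq), so only the i32 cases are tested.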
+; CHECK: variable_sra0
+; CHECK: psravd
+; CHECK: ret
+define <4 x i32> @variable_sra0(<4 x i32> %x, <4 x i32> %y) {
+ %k = ashr <4 x i32> %x, %y
+ ret <4 x i32> %k
+}
+; CHECK: variable_sra1
+; CHECK: psravd
+; CHECK: ret
+define <8 x i32> @variable_sra1(<8 x i32> %x, <8 x i32> %y) {
+ %k = ashr <8 x i32> %x, %y
+ ret <8 x i32> %k
+}