EVT VT = N0.getValueType();
unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
+ // fold vector ops
+ if (VT.isVector()) {
+ SDValue FoldedVOp = SimplifyVBinOp(N);
+ if (FoldedVOp.getNode()) return FoldedVOp;
+ }
+
// fold (shl c1, c2) -> c1<<c2
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::SHL, VT, N0C, N1C);
EVT VT = N0.getValueType();
unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
+ // fold vector ops
+ if (VT.isVector()) {
+ SDValue FoldedVOp = SimplifyVBinOp(N);
+ if (FoldedVOp.getNode()) return FoldedVOp;
+ }
+
  // fold (sra c1, c2) -> c1 >>s c2
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::SRA, VT, N0C, N1C);
EVT VT = N0.getValueType();
unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
+ // fold vector ops
+ if (VT.isVector()) {
+ SDValue FoldedVOp = SimplifyVBinOp(N);
+ if (FoldedVOp.getNode()) return FoldedVOp;
+ }
+
// fold (srl c1, c2) -> c1 >>u c2
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::SRL, VT, N0C, N1C);
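
For reference, the intent of routing constant vector shifts through SimplifyVBinOp is that each lane gets folded independently, just like the scalar FoldConstantArithmetic calls above. The following standalone C++ sketch is not LLVM API; foldLane and foldVectorShift are hypothetical helpers, and the lane width is fixed at 32 bits to match the v4i32 tests below. It only illustrates the per-lane arithmetic the new tests rely on.

#include <cassert>
#include <cstdint>
#include <vector>

enum class ShiftOp { Shl, Sra, Srl };

// Fold one 32-bit lane. Sra relies on '>>' of a negative int being an
// arithmetic shift, which holds on the usual implementations.
static int32_t foldLane(ShiftOp Op, int32_t Val, uint32_t Amt) {
  assert(Amt < 32 && "shift amount out of range");
  switch (Op) {
  case ShiftOp::Shl: return (int32_t)((uint32_t)Val << Amt);
  case ShiftOp::Sra: return Val >> Amt;
  case ShiftOp::Srl: return (int32_t)((uint32_t)Val >> Amt);
  }
  return 0;
}

// Fold a whole constant-vector shift lane by lane.
static std::vector<int32_t> foldVectorShift(ShiftOp Op,
                                            const std::vector<int32_t> &Val,
                                            const std::vector<uint32_t> &Amt) {
  std::vector<int32_t> Out;
  for (std::size_t I = 0, E = Val.size(); I != E; ++I)
    Out.push_back(foldLane(Op, Val[I], Amt[I]));
  return Out;
}

int main() {
  // Mirrors the first ashr_v4i32 case below: <1,2,4,8> ashr <0,1,2,3> folds
  // to the splat <1,1,1,1>, which MSA can materialize with a single ldi.w.
  auto Splat = foldVectorShift(ShiftOp::Sra, {1, 2, 4, 8}, {0, 1, 2, 3});
  assert(Splat == (std::vector<int32_t>{1, 1, 1, 1}));

  // Mirrors the second lshr_v4i32 case: the folded vector is not a splat.
  auto NonSplat = foldVectorShift(ShiftOp::Srl, {-2, -4, -8, -16}, {0, 1, 2, 3});
  assert(NonSplat[0] == -2 && NonSplat[1] == 0x7ffffffe);
  return 0;
}
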
--- /dev/null
+; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck %s
+
+define void @ashr_v4i32(<4 x i32>* %c) nounwind {
+ ; CHECK-LABEL: ashr_v4i32:
+
+ %1 = ashr <4 x i32> <i32 1, i32 2, i32 4, i32 8>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sra
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 1
+ ; CHECK-NOT: sra
+ store volatile <4 x i32> %1, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ %2 = ashr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sra
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], -2
+ ; CHECK-NOT: sra
+ store volatile <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK-LABEL: .size ashr_v4i32
+}
+
+define void @lshr_v4i32(<4 x i32>* %c) nounwind {
+ ; CHECK-LABEL: lshr_v4i32:
+
+ %1 = lshr <4 x i32> <i32 1, i32 2, i32 4, i32 8>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: srl
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 1
+ ; CHECK-NOT: srl
+ store volatile <4 x i32> %1, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ %2 = lshr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: srl
+ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], %lo
+ ; CHECK-NOT: srl
+ store volatile <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK-LABEL: .size lshr_v4i32
+}
+
+define void @shl_v4i32(<4 x i32>* %c) nounwind {
+ ; CHECK-LABEL: shl_v4i32:
+
+ %1 = shl <4 x i32> <i32 8, i32 4, i32 2, i32 1>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sll
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 8
+ ; CHECK-NOT: sll
+ store volatile <4 x i32> %1, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ %2 = shl <4 x i32> <i32 -8, i32 -4, i32 -2, i32 -1>,
+ <i32 0, i32 1, i32 2, i32 3>
+ ; CHECK-NOT: sll
+ ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], -8
+ ; CHECK-NOT: sll
+ store volatile <4 x i32> %2, <4 x i32>* %c
+ ; CHECK-DAG: st.w [[R1]], 0($4)
+
+ ret void
+ ; CHECK-LABEL: .size shl_v4i32
+}
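
A note on the expected MSA code in these tests (an observation about the checks above, not part of the patch): for the shift amounts <0,1,2,3>, every case except the second lshr one folds to a splat, which the backend materializes with a single ldi.w (1, -2, 8, or -8 here). The second lshr case folds to <0xfffffffe, 0x7ffffffe, 0x3ffffffe, 0x1ffffffe>, which is not a splat, so that test instead expects a constant-pool load (ld.w ... %lo), matching the Srl case in the sketch after the DAGCombiner hunks.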