string OpcodeStr, string Dt, ValueType TyQ, ValueType TyD,
SDNode OpNode, SDNode ExtOp, bit Commutable>
: N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins QPR:$src1, DPR:$src2), N3RegFrm, IIC_VSUBiD,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (OpNode (TyQ QPR:$src1),
- (TyQ (ExtOp (TyD DPR:$src2)))))]> {
+ (outs QPR:$Qd), (ins QPR:$Qn, DPR:$Dm), N3RegFrm, IIC_VSUBiD,
+ OpcodeStr, Dt, "$Qd, $Qn, $Dm", "",
+ [(set QPR:$Qd, (OpNode (TyQ QPR:$Qn),
+ (TyQ (ExtOp (TyD DPR:$Dm)))))]> {
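+  // Widening form: the D-register operand is extended (via ExtOp) to the
+  // Q-register element type before being combined by OpNode.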
let isCommutable = Commutable;
+
+ // Instruction operands.
+ bits<4> Qd;
+ bits<4> Qn;
+ bits<5> Dm;
+
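+  // A Q register is encoded as its first (even-numbered) D register, so the
+  // low bit of the Vd/Vn fields (Inst{12}, Inst{16}) is always zero and the
+  // top bit of each register number lands in the D (Inst{22}), N (Inst{7})
+  // or M (Inst{5}) bit.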
+ let Inst{15-13} = Qd{2-0};
+ let Inst{22} = Qd{3};
+ let Inst{12} = 0;
+ let Inst{19-17} = Qn{2-0};
+ let Inst{7} = Qn{3};
+ let Inst{16} = 0;
+ let Inst{3-0} = Dm{3-0};
+ let Inst{5} = Dm{4};
}
// Pairwise long 2-register intrinsics, both double- and quad-register.
%tmp5 = add <2 x i64> %tmp3, %tmp4
ret <2 x i64> %tmp5
}
+
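+; Signed widening adds: the narrow operand is sign-extended, selecting vaddw.s*.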
+; CHECK: vaddws_8xi8
+define <8 x i16> @vaddws_8xi8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
+; CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf2]
+ %tmp4 = add <8 x i16> %tmp1, %tmp3
+ ret <8 x i16> %tmp4
+}
+
+; CHECK: vaddws_4xi16
+define <4 x i32> @vaddws_4xi16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
+; CHECK: vaddw.s16 q8, q8, d18 @ encoding: [0xa2,0x01,0xd0,0xf2]
+ %tmp4 = add <4 x i32> %tmp1, %tmp3
+ ret <4 x i32> %tmp4
+}
+
+; CHECK: vaddws_2xi32
+define <2 x i64> @vaddws_2xi32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
+; CHECK: vaddw.s32 q8, q8, d18 @ encoding: [0xa2,0x01,0xe0,0xf2]
+ %tmp4 = add <2 x i64> %tmp1, %tmp3
+ ret <2 x i64> %tmp4
+}
+
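+; Unsigned widening adds: the narrow operand is zero-extended, selecting vaddw.u*.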
+; CHECK: vaddwu_8xi8
+define <8 x i16> @vaddwu_8xi8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
+; CHECK: vaddw.u8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf3]
+ %tmp4 = add <8 x i16> %tmp1, %tmp3
+ ret <8 x i16> %tmp4
+}
+
+; CHECK: vaddwu_4xi16
+define <4 x i32> @vaddwu_4xi16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
+; CHECK: vaddw.u16 q8, q8, d18 @ encoding: [0xa2,0x01,0xd0,0xf3]
+ %tmp4 = add <4 x i32> %tmp1, %tmp3
+ ret <4 x i32> %tmp4
+}
+
+; CHECK: vaddwu_2xi32
+define <2 x i64> @vaddwu_2xi32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
+; CHECK: vaddw.u32 q8, q8, d18 @ encoding: [0xa2,0x01,0xe0,0xf3]
+ %tmp4 = add <2 x i64> %tmp1, %tmp3
+ ret <2 x i64> %tmp4
+}