case HexagonISD::VCMPWEQ: return "HexagonISD::VCMPWEQ";
case HexagonISD::VCMPWGT: return "HexagonISD::VCMPWGT";
case HexagonISD::VCMPWGTU: return "HexagonISD::VCMPWGTU";
+ case HexagonISD::VCOMBINE: return "HexagonISD::VCOMBINE";
case HexagonISD::VSHLH: return "HexagonISD::VSHLH";
case HexagonISD::VSHLW: return "HexagonISD::VSHLW";
case HexagonISD::VSPLATB: return "HexagonISD::VSPLATB";
HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
+ bool UseHVX = Subtarget.useHVXOps();
EVT VT = Op.getValueType();
unsigned NElts = Op.getNumOperands();
SDValue Vec = Op.getOperand(0);
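+ // With HVX enabled, a two-operand CONCAT_VECTORS of native-length vectors
+ // lowers to a single VCOMBINE node; operand 1 provides the high half of the
+ // result and operand 0 the low half.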
+ if (UseHVX) {
+ SDValue Vec0 = Op.getOperand(1);
+ EVT VecVT = Vec.getValueType();
+ uint64_t VS = VecVT.getSizeInBits();
+ assert((VS == 64*8 && Subtarget.useHVXSglOps()) ||
+ (VS == 128*8 && Subtarget.useHVXDblOps()));
+ SDValue Combined = DAG.getNode(HexagonISD::VCOMBINE, dl, VT, Vec0, Vec);
+ return Combined;
+ }
for (unsigned i = 0, e = NElts; i != e; ++i) {
unsigned OpIdx = NElts - i - 1;
SDValue Operand = Op.getOperand(OpIdx);
defm V6_vcombine :
T_HVX_alu_WV <"$dst = vcombine($src1,$src2)">, V6_vcombine_enc;
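+// ISel node for vcombine: SDTCisSameAs<1, 2> forces both operands to have the
+// same vector type, and SDTCisSubVecOfVec<1, 0> makes that type a sub-vector
+// of the (twice as wide) result type.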
+def SDTHexagonVCOMBINE: SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>,
+ SDTCisSubVecOfVec<1, 0>]>;
+
+def HexagonVCOMBINE: SDNode<"HexagonISD::VCOMBINE", SDTHexagonVCOMBINE>;
+
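+// Select VCOMBINE to the vcombine instruction: V6_vcombine in single-vector
+// HVX mode and V6_vcombine_128B in double-vector (128-byte) mode.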
+def: Pat<(v32i32 (HexagonVCOMBINE (v16i32 VectorRegs:$Vs),
+ (v16i32 VectorRegs:$Vt))),
+ (V6_vcombine VectorRegs:$Vs, VectorRegs:$Vt)>,
+ Requires<[UseHVXSgl]>;
+def: Pat<(v64i32 (HexagonVCOMBINE (v32i32 VecDblRegs:$Vs),
+ (v32i32 VecDblRegs:$Vt))),
+ (V6_vcombine_128B VecDblRegs:$Vs, VecDblRegs:$Vt)>,
+ Requires<[UseHVXDbl]>;
+
let Itinerary = CVI_VINLANESAT, Type = TypeCVI_VINLANESAT in {
defm V6_vsathub :
T_HVX_alu_VV <"$dst.ub = vsat($src1.h,$src2.h)">, V6_vsathub_enc;
V6_vasrhbrndsat_enc;
}
-// Assemlber mapped -- alias?
+// Assembler mapped -- alias?
//defm V6_vtran2x2vdd : T_HVX_shift_VV <"">, V6_vtran2x2vdd_enc;
let Itinerary = CVI_VP_VS_LONG, Type = TypeCVI_VP_VS in {
defm V6_vshuffvdd :