let Inst{1-0} = dst;
}
// NOTE(review): '-'/'+' prefixed lines here and below are residue of an
// upstream patch removing "isCodeGenOnly = 0"; retained verbatim.
// Compare register with immediate, writing a predicate register:
// cmp.eq/cmp.gt take a signed s10 extendable immediate, cmp.gtu an
// unsigned u9 extendable immediate.
-let isCodeGenOnly = 0 in {
def C2_cmpeqi : T_CMP <"cmp.eq", 0b00, 0, s10Ext>;
def C2_cmpgti : T_CMP <"cmp.gt", 0b01, 0, s10Ext>;
def C2_cmpgtui : T_CMP <"cmp.gtu", 0b10, 0, u9Ext>;
-}
class T_CMP_pat <InstHexagon MI, PatFrag OpNode, PatLeaf ImmPred>
: Pat<(i1 (OpNode (i32 IntRegs:$src1), ImmPred:$src2)),
let AsmString = "$Rd = combine($Rs"#Op1#", $Rt"#Op2#")";
}
// Combine halfwords of two source registers into a 32-bit destination;
// suffix encodes which half (.h/.l) is taken from each operand.
-let isCodeGenOnly = 0 in {
def A2_combine_hh : T_ALU32_combineh<".h", ".h", 0b011, 0b100, 1>;
def A2_combine_hl : T_ALU32_combineh<".h", ".l", 0b011, 0b101, 1>;
def A2_combine_lh : T_ALU32_combineh<".l", ".h", 0b011, 0b110, 1>;
def A2_combine_ll : T_ALU32_combineh<".l", ".l", 0b011, 0b111, 1>;
-}
class T_ALU32_3op_sfx<string mnemonic, string suffix, bits<3> MajOp,
bits<3> MinOp, bit OpsRev, bit IsComm>
let AsmString = "$Rd = "#mnemonic#"($Rs, $Rt)"#suffix;
}
// ALU32 vector halfword add/subtract within a single 32-bit register.
-let isCodeGenOnly = 0 in {
def A2_svaddh : T_ALU32_3op<"vaddh", 0b110, 0b000, 0, 1>;
def A2_svsubh : T_ALU32_3op<"vsubh", 0b110, 0b100, 1, 0>;
-}
// Saturating (":sat") variants can set the sticky overflow bit, hence
// Defs = [USR_OVF].
-let Defs = [USR_OVF], Itinerary = ALU32_3op_tc_2_SLOT0123,
- isCodeGenOnly = 0 in {
+let Defs = [USR_OVF], Itinerary = ALU32_3op_tc_2_SLOT0123 in {
def A2_svaddhs : T_ALU32_3op_sfx<"vaddh", ":sat", 0b110, 0b001, 0, 1>;
def A2_addsat : T_ALU32_3op_sfx<"add", ":sat", 0b110, 0b010, 0, 1>;
def A2_svadduhs : T_ALU32_3op_sfx<"vadduh", ":sat", 0b110, 0b011, 0, 1>;
def A2_svsubuhs : T_ALU32_3op_sfx<"vsubuh", ":sat", 0b110, 0b111, 1, 0>;
}
// Rounding (":rnd") vector-average halfwords.
-let Itinerary = ALU32_3op_tc_2_SLOT0123, isCodeGenOnly = 0 in
+let Itinerary = ALU32_3op_tc_2_SLOT0123 in
def A2_svavghs : T_ALU32_3op_sfx<"vavgh", ":rnd", 0b111, 0b001, 0, 1>;
// Plain and negative vector-average halfwords.
-let isCodeGenOnly = 0 in {
def A2_svavgh : T_ALU32_3op<"vavgh", 0b111, 0b000, 0, 1>;
def A2_svnavgh : T_ALU32_3op<"vnavgh", 0b111, 0b011, 1, 0>;
-}
multiclass T_ALU32_3op_p<string mnemonic, bits<3> MajOp, bits<3> MinOp,
bit OpsRev> {
defm A2_p#NAME : T_ALU32_3op_p<mnemonic, MajOp, MinOp, OpsRev>;
}
// Basic 32-bit three-operand arithmetic/logical families (plain +
// predicated forms expanded by the T_ALU32_3op_A2 multiclass).
-let isCodeGenOnly = 0 in {
defm add : T_ALU32_3op_A2<"add", 0b011, 0b000, 0, 1>;
defm and : T_ALU32_3op_A2<"and", 0b001, 0b000, 0, 1>;
defm or : T_ALU32_3op_A2<"or", 0b001, 0b001, 0, 1>;
defm sub : T_ALU32_3op_A2<"sub", 0b011, 0b001, 1, 0>;
defm xor : T_ALU32_3op_A2<"xor", 0b001, 0b011, 0, 1>;
-}
// Pats for instruction selection.
class BinOp32_pat<SDNode Op, InstHexagon MI, ValueType ResT>
def: BinOp32_pat<xor, A2_xor, i32>;
// A few special cases producing register pairs:
-let OutOperandList = (outs DoubleRegs:$Rd), hasNewValue = 0,
- isCodeGenOnly = 0 in {
+let OutOperandList = (outs DoubleRegs:$Rd), hasNewValue = 0 in {
def S2_packhl : T_ALU32_3op <"packhl", 0b101, 0b100, 0, 0>;
let isPredicable = 1 in
let Inst{1-0} = Pd;
}
-let Itinerary = ALU32_3op_tc_2early_SLOT0123, isCodeGenOnly = 0 in {
+let Itinerary = ALU32_3op_tc_2early_SLOT0123 in {
def C2_cmpeq : T_ALU32_3op_cmp< "cmp.eq", 0b00, 0, 1>;
def C2_cmpgt : T_ALU32_3op_cmp< "cmp.gt", 0b10, 0, 0>;
def C2_cmpgtu : T_ALU32_3op_cmp< "cmp.gtu", 0b11, 0, 0>;
def: T_cmp32_rr_pat<C2_cmpgt, RevCmp<setlt>, i1>;
def: T_cmp32_rr_pat<C2_cmpgtu, RevCmp<setult>, i1>;
-let CextOpcode = "MUX", InputType = "reg", hasNewValue = 1,
- isCodeGenOnly = 0 in
+let CextOpcode = "MUX", InputType = "reg", hasNewValue = 1 in
def C2_mux: ALU32_rr<(outs IntRegs:$Rd),
(ins PredRegs:$Pu, IntRegs:$Rs, IntRegs:$Rt),
"$Rd = mux($Pu, $Rs, $Rt)", [], "", ALU32_3op_tc_1_SLOT0123>, ImmRegRel {
let isReMaterializable = 1, isMoveImm = 1, isAsCheapAsAMove = 1,
isExtentSigned = 1, isExtendable = 1, opExtentBits = 8, opExtendable = 1,
- AddedComplexity = 75, isCodeGenOnly = 0 in
+ AddedComplexity = 75 in
def A2_combineii: ALU32Inst <(outs DoubleRegs:$Rdd), (ins s8Ext:$s8, s8Imm:$S8),
"$Rdd = combine(#$s8, #$S8)",
[(set (i64 DoubleRegs:$Rdd),
}
}
-let isCodeGenOnly = 0 in
defm ADD_ri : Addri_base<"add", add>, ImmRegRel, PredNewRel;
//===----------------------------------------------------------------------===//
let Inst{4-0} = Rd;
}
-let isCodeGenOnly = 0 in {
def OR_ri : T_ALU32ri_logical<"or", or, 0b10>, ImmRegRel;
def AND_ri : T_ALU32ri_logical<"and", and, 0b00>, ImmRegRel;
-}
// Subtract register from immediate
// Rd32=sub(#s10,Rs32)
let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 10,
-CextOpcode = "sub", InputType = "imm", hasNewValue = 1, isCodeGenOnly = 0 in
+CextOpcode = "sub", InputType = "imm", hasNewValue = 1 in
def SUB_ri: ALU32_ri <(outs IntRegs:$Rd), (ins s10Ext:$s10, IntRegs:$Rs),
"$Rd = sub(#$s10, $Rs)" ,
[(set IntRegs:$Rd, (sub s10ExtPred:$s10, IntRegs:$Rs))] > ,
}
// Nop.
-let hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasSideEffects = 0 in
def A2_nop: ALU32Inst <(outs), (ins), "nop" > {
let IClass = 0b0111;
let Inst{27-24} = 0b1111;
let Inst{13-0} = u16{13-0};
}
// Transfer a 16-bit immediate into the low (tfril) or high (tfrih)
// halfword of the destination register.
-let isCodeGenOnly = 0 in {
def A2_tfril: T_tfr16<0>;
def A2_tfrih: T_tfr16<1>;
-}
// Conditional transfer is an alias to conditional "Rd = add(Rs, #0)".
let isPredicated = 1, hasNewValue = 1, opNewValue = 0 in
let Inst{4-0} = Rd;
}
// Conditional move-immediate: true/false predicate sense, with
// predicate-new (".new") variants.
-let isCodeGenOnly = 0 in {
def C2_cmoveit : T_TFRI_Pred<0, 0>;
def C2_cmoveif : T_TFRI_Pred<1, 0>;
def C2_cmovenewit : T_TFRI_Pred<0, 1>;
def C2_cmovenewif : T_TFRI_Pred<1, 1>;
-}
// Transfer-immediate: Rd = #s16 (signed 16-bit, extendable to 32 bits).
// Marked as a cheap, rematerializable move-immediate so the register
// allocator can freely recompute it.
let InputType = "imm", isExtendable = 1, isExtentSigned = 1,
CextOpcode = "TFR", BaseOpcode = "TFRI", hasNewValue = 1, opNewValue = 0,
isAsCheapAsAMove = 1 , opExtendable = 1, opExtentBits = 16, isMoveImm = 1,
- isPredicated = 0, isPredicable = 1, isReMaterializable = 1,
- isCodeGenOnly = 0 in
+ isPredicated = 0, isPredicable = 1, isReMaterializable = 1 in
def A2_tfrsi : ALU32Inst<(outs IntRegs:$Rd), (ins s16Ext:$s16), "$Rd = #$s16",
[(set (i32 IntRegs:$Rd), s16ExtPred:$s16)], "", ALU32_2op_tc_1_SLOT0123>,
ImmRegRel, PredRel {
let Inst{4-0} = Rd;
}
// Register-to-register transfer; the 64-bit pair form (A2_tfrp) is an
// assembler-only mapping (isAsmParserOnly = 1).
-let isCodeGenOnly = 0 in
defm A2_tfr : tfr_base<"TFR">, ImmRegRel, PredNewRel;
let isAsmParserOnly = 1 in
defm A2_tfrp : TFR64_base<"TFR64">, PredNewRel;
let Inst{4-0} = Rd;
}
// Scalar mux with one immediate operand; opExtendable marks which
// operand index carries the extendable s8 immediate.
-let opExtendable = 2, isCodeGenOnly = 0 in
+let opExtendable = 2 in
def C2_muxri : T_MUX1<0b1, (ins PredRegs:$Pu, s8Ext:$s8, IntRegs:$Rs),
"$Rd = mux($Pu, #$s8, $Rs)">;
-let opExtendable = 3, isCodeGenOnly = 0 in
+let opExtendable = 3 in
def C2_muxir : T_MUX1<0b0, (ins PredRegs:$Pu, IntRegs:$Rs, s8Ext:$s8),
"$Rd = mux($Pu, $Rs, #$s8)">;
// C2_muxii: Scalar mux immediates.
let isExtentSigned = 1, hasNewValue = 1, isExtendable = 1,
- opExtentBits = 8, opExtendable = 2, isCodeGenOnly = 0 in
+ opExtentBits = 8, opExtendable = 2 in
def C2_muxii: ALU32Inst <(outs IntRegs:$Rd),
(ins PredRegs:$Pu, s8Ext:$s8, s8Imm:$S8),
"$Rd = mux($Pu, #$s8, #$S8)" ,
}
}
// Two-operand ALU32 shift/extend families: arithmetic shift left/right
// by halfword, sign-extend byte/halfword, zero-extend halfword.
-let isCodeGenOnly = 0 in {
defm aslh : ALU32_2op_base<"aslh", 0b000>, PredNewRel;
defm asrh : ALU32_2op_base<"asrh", 0b001>, PredNewRel;
defm sxtb : ALU32_2op_base<"sxtb", 0b101>, PredNewRel;
defm sxth : ALU32_2op_base<"sxth", 0b111>, PredNewRel;
defm zxth : ALU32_2op_base<"zxth", 0b110>, PredNewRel;
-}
// Rd=zxtb(Rs): assembler mapped to Rd=and(Rs,#255).
// Compiler would want to generate 'zxtb' instead of 'and' becuase 'zxtb' has
// ALU64 - Vector add
// Rdd=vadd[u][bhw](Rss,Rtt)
// ALU64 vector arithmetic on 64-bit register pairs (Rdd = op(Rss, Rtt)).
// Saturating groups below carry Defs = [USR_OVF] because they can set
// the sticky overflow bit.
-let Itinerary = ALU64_tc_1_SLOT23, isCodeGenOnly = 0 in {
+let Itinerary = ALU64_tc_1_SLOT23 in {
def A2_vaddub : T_VectALU_64 < "vaddub", 0b000, 0b000, 0, 0, 0, 0>;
def A2_vaddh : T_VectALU_64 < "vaddh", 0b000, 0b010, 0, 0, 0, 0>;
def A2_vaddw : T_VectALU_64 < "vaddw", 0b000, 0b101, 0, 0, 0, 0>;
}
// Rdd=vadd[u][bhw](Rss,Rtt):sat
-let Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Defs = [USR_OVF] in {
def A2_vaddubs : T_VectALU_64 < "vaddub", 0b000, 0b001, 1, 0, 0, 0>;
def A2_vaddhs : T_VectALU_64 < "vaddh", 0b000, 0b011, 1, 0, 0, 0>;
def A2_vadduhs : T_VectALU_64 < "vadduh", 0b000, 0b100, 1, 0, 0, 0>;
// ALU64 - Vector average
// Rdd=vavg[u][bhw](Rss,Rtt)
-let Itinerary = ALU64_tc_1_SLOT23, isCodeGenOnly = 0 in {
+let Itinerary = ALU64_tc_1_SLOT23 in {
def A2_vavgub : T_VectALU_64 < "vavgub", 0b010, 0b000, 0, 0, 0, 0>;
def A2_vavgh : T_VectALU_64 < "vavgh", 0b010, 0b010, 0, 0, 0, 0>;
def A2_vavguh : T_VectALU_64 < "vavguh", 0b010, 0b101, 0, 0, 0, 0>;
}
// Rdd=vavg[u][bhw](Rss,Rtt)[:rnd|:crnd]
-let isCodeGenOnly = 0 in {
def A2_vavgubr : T_VectALU_64 < "vavgub", 0b010, 0b001, 0, 1, 0, 0>;
def A2_vavghr : T_VectALU_64 < "vavgh", 0b010, 0b011, 0, 1, 0, 0>;
def A2_vavghcr : T_VectALU_64 < "vavgh", 0b010, 0b100, 0, 0, 1, 0>;
def A2_vavguhr : T_VectALU_64 < "vavguh", 0b010, 0b110, 0, 1, 0, 0>;
-}
-let isCodeGenOnly = 0 in {
def A2_vavgwr : T_VectALU_64 < "vavgw", 0b011, 0b001, 0, 1, 0, 0>;
def A2_vavgwcr : T_VectALU_64 < "vavgw", 0b011, 0b010, 0, 0, 1, 0>;
def A2_vavguwr : T_VectALU_64 < "vavguw", 0b011, 0b100, 0, 1, 0, 0>;
-}
// Rdd=vnavg[bh](Rss,Rtt)
-let Itinerary = ALU64_tc_1_SLOT23, isCodeGenOnly = 0 in {
+let Itinerary = ALU64_tc_1_SLOT23 in {
def A2_vnavgh : T_VectALU_64 < "vnavgh", 0b100, 0b000, 0, 0, 0, 1>;
def A2_vnavgw : T_VectALU_64 < "vnavgw", 0b100, 0b011, 0, 0, 0, 1>;
}
// Rdd=vnavg[bh](Rss,Rtt)[:rnd|:crnd]:sat
-let Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Defs = [USR_OVF] in {
def A2_vnavghr : T_VectALU_64 < "vnavgh", 0b100, 0b001, 1, 1, 0, 1>;
def A2_vnavghcr : T_VectALU_64 < "vnavgh", 0b100, 0b010, 1, 0, 1, 1>;
def A2_vnavgwr : T_VectALU_64 < "vnavgw", 0b100, 0b100, 1, 1, 0, 1>;
}
// Rdd=vsub[u][bh](Rss,Rtt)
-let Itinerary = ALU64_tc_1_SLOT23, isCodeGenOnly = 0 in {
+let Itinerary = ALU64_tc_1_SLOT23 in {
def A2_vsubub : T_VectALU_64 < "vsubub", 0b001, 0b000, 0, 0, 0, 1>;
def A2_vsubh : T_VectALU_64 < "vsubh", 0b001, 0b010, 0, 0, 0, 1>;
def A2_vsubw : T_VectALU_64 < "vsubw", 0b001, 0b101, 0, 0, 0, 1>;
}
// Rdd=vsub[u][bh](Rss,Rtt):sat
-let Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Defs = [USR_OVF] in {
def A2_vsububs : T_VectALU_64 < "vsubub", 0b001, 0b001, 1, 0, 0, 1>;
def A2_vsubhs : T_VectALU_64 < "vsubh", 0b001, 0b011, 1, 0, 0, 1>;
def A2_vsubuhs : T_VectALU_64 < "vsubuh", 0b001, 0b100, 1, 0, 0, 1>;
}
// Rdd=vmax[u][bhw](Rss,Rtt)
-let isCodeGenOnly = 0 in {
def A2_vmaxb : T_VectALU_64 < "vmaxb", 0b110, 0b110, 0, 0, 0, 1>;
def A2_vmaxub : T_VectALU_64 < "vmaxub", 0b110, 0b000, 0, 0, 0, 1>;
def A2_vmaxh : T_VectALU_64 < "vmaxh", 0b110, 0b001, 0, 0, 0, 1>;
def A2_vmaxuh : T_VectALU_64 < "vmaxuh", 0b110, 0b010, 0, 0, 0, 1>;
def A2_vmaxw : T_VectALU_64 < "vmaxw", 0b110, 0b011, 0, 0, 0, 1>;
def A2_vmaxuw : T_VectALU_64 < "vmaxuw", 0b101, 0b101, 0, 0, 0, 1>;
-}
// Rdd=vmin[u][bhw](Rss,Rtt)
-let isCodeGenOnly = 0 in {
def A2_vminb : T_VectALU_64 < "vminb", 0b110, 0b111, 0, 0, 0, 1>;
def A2_vminub : T_VectALU_64 < "vminub", 0b101, 0b000, 0, 0, 0, 1>;
def A2_vminh : T_VectALU_64 < "vminh", 0b101, 0b001, 0, 0, 0, 1>;
def A2_vminuh : T_VectALU_64 < "vminuh", 0b101, 0b010, 0, 0, 0, 1>;
def A2_vminw : T_VectALU_64 < "vminw", 0b101, 0b011, 0, 0, 0, 1>;
def A2_vminuw : T_VectALU_64 < "vminuw", 0b101, 0b100, 0, 0, 0, 1>;
-}
//===----------------------------------------------------------------------===//
// Template class for vector compare
(i1 (MI DoubleRegs:$Rss, DoubleRegs:$Rtt))>;
// Vector compare bytes
// Vector compare bytes: element-wise compare of 64-bit pairs producing
// a predicate register.
-let isCodeGenOnly = 0 in {
def A2_vcmpbeq : T_vcmp <"vcmpb.eq", 0b0110>;
def A2_vcmpbgtu : T_vcmp <"vcmpb.gtu", 0b0111>;
-}
// Vector compare halfwords
-let isCodeGenOnly = 0 in {
def A2_vcmpheq : T_vcmp <"vcmph.eq", 0b0011>;
def A2_vcmphgt : T_vcmp <"vcmph.gt", 0b0100>;
def A2_vcmphgtu : T_vcmp <"vcmph.gtu", 0b0101>;
-}
// Vector compare words
-let isCodeGenOnly = 0 in {
def A2_vcmpweq : T_vcmp <"vcmpw.eq", 0b0000>;
def A2_vcmpwgt : T_vcmp <"vcmpw.gt", 0b0001>;
def A2_vcmpwgtu : T_vcmp <"vcmpw.gtu", 0b0010>;
-}
def: T_vcmp_pat<A2_vcmpbeq, seteq, v8i8>;
def: T_vcmp_pat<A2_vcmpbgtu, setugt, v8i8>;
}
//Rd=sub(Rt.L,Rs.[LH])
// Halfword add/sub selecting low/high halves of the operands
// (T_XTYPE_ADD_SUB<half-select, sat, <<16, is-sub>).
-let isCodeGenOnly = 0 in {
def A2_subh_l16_ll : T_XTYPE_ADD_SUB <0b00, 0, 0, 1>;
def A2_subh_l16_hl : T_XTYPE_ADD_SUB <0b10, 0, 0, 1>;
-}
-let isCodeGenOnly = 0 in {
//Rd=add(Rt.L,Rs.[LH])
def A2_addh_l16_ll : T_XTYPE_ADD_SUB <0b00, 0, 0, 0>;
def A2_addh_l16_hl : T_XTYPE_ADD_SUB <0b10, 0, 0, 0>;
-}
// Saturating forms clobber the sticky overflow bit (Defs = [USR_OVF]).
-let Itinerary = ALU64_tc_2_SLOT23, Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Itinerary = ALU64_tc_2_SLOT23, Defs = [USR_OVF] in {
//Rd=sub(Rt.L,Rs.[LH]):sat
def A2_subh_l16_sat_ll : T_XTYPE_ADD_SUB <0b00, 1, 0, 1>;
def A2_subh_l16_sat_hl : T_XTYPE_ADD_SUB <0b10, 1, 0, 1>;
}
//Rd=sub(Rt.[LH],Rs.[LH]):<<16
-let isCodeGenOnly = 0 in {
def A2_subh_h16_ll : T_XTYPE_ADD_SUB <0b00, 0, 1, 1>;
def A2_subh_h16_lh : T_XTYPE_ADD_SUB <0b01, 0, 1, 1>;
def A2_subh_h16_hl : T_XTYPE_ADD_SUB <0b10, 0, 1, 1>;
def A2_subh_h16_hh : T_XTYPE_ADD_SUB <0b11, 0, 1, 1>;
-}
//Rd=add(Rt.[LH],Rs.[LH]):<<16
-let isCodeGenOnly = 0 in {
def A2_addh_h16_ll : T_XTYPE_ADD_SUB <0b00, 0, 1, 0>;
def A2_addh_h16_lh : T_XTYPE_ADD_SUB <0b01, 0, 1, 0>;
def A2_addh_h16_hl : T_XTYPE_ADD_SUB <0b10, 0, 1, 0>;
def A2_addh_h16_hh : T_XTYPE_ADD_SUB <0b11, 0, 1, 0>;
-}
-let Itinerary = ALU64_tc_2_SLOT23, Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Itinerary = ALU64_tc_2_SLOT23, Defs = [USR_OVF] in {
//Rd=sub(Rt.[LH],Rs.[LH]):sat:<<16
def A2_subh_h16_sat_ll : T_XTYPE_ADD_SUB <0b00, 1, 1, 1>;
def A2_subh_h16_sat_lh : T_XTYPE_ADD_SUB <0b01, 1, 1, 1>;
def: Pat<(shl (sub I32:$src1, I32:$src2), (i32 16)),
(A2_subh_h16_ll I32:$src1, I32:$src2)>;
-let hasSideEffects = 0, hasNewValue = 1, isCodeGenOnly = 0 in
+let hasSideEffects = 0, hasNewValue = 1 in
def S2_parityp: ALU64Inst<(outs IntRegs:$Rd),
(ins DoubleRegs:$Rs, DoubleRegs:$Rt),
"$Rd = parity($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> {
let Inst{20-16} = !if(isMax, Rt, Rs);
}
// Signed/unsigned min and max on 32-bit registers
// (T_XTYPE_MIN_MAX<is-max, is-unsigned>).
-let isCodeGenOnly = 0 in {
def A2_min : T_XTYPE_MIN_MAX < 0, 0 >;
def A2_minu : T_XTYPE_MIN_MAX < 0, 1 >;
def A2_max : T_XTYPE_MIN_MAX < 1, 0 >;
def A2_maxu : T_XTYPE_MIN_MAX < 1, 1 >;
-}
// Here, depending on the operand being selected, we'll either generate a
// min or max instruction.
let Inst{1-0} = Pd;
}
// 64-bit register-pair compares producing a predicate register.
-let isCodeGenOnly = 0 in {
def C2_cmpeqp : T_cmp64_rr<"cmp.eq", 0b000, 1>;
def C2_cmpgtp : T_cmp64_rr<"cmp.gt", 0b010, 0>;
def C2_cmpgtup : T_cmp64_rr<"cmp.gtu", 0b100, 0>;
-}
class T_cmp64_rr_pat<InstHexagon MI, PatFrag CmpOp>
: Pat<(i1 (CmpOp (i64 DoubleRegs:$Rs), (i64 DoubleRegs:$Rt))),
def: T_cmp64_rr_pat<C2_cmpgtp, RevCmp<setlt>>;
def: T_cmp64_rr_pat<C2_cmpgtup, RevCmp<setult>>;
-let isCodeGenOnly = 0 in
def C2_vmux : ALU64_rr<(outs DoubleRegs:$Rd),
(ins PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt),
"$Rd = vmux($Pu, $Rs, $Rt)", [], "", ALU64_tc_1_SLOT23> {
: T_ALU64_rr<mnemonic, !if(IsSat,":sat",""), 0b0011, MajOp, MinOp, OpsRev,
IsComm, "">;
// 64-bit add/sub on register pairs, with selection patterns mapping the
// generic add/sub SDNodes onto them.
-let isCodeGenOnly = 0 in {
def A2_addp : T_ALU64_arith<"add", 0b000, 0b111, 0, 0, 1>;
def A2_subp : T_ALU64_arith<"sub", 0b001, 0b111, 0, 1, 0>;
-}
def: Pat<(i64 (add I64:$Rs, I64:$Rt)), (A2_addp I64:$Rs, I64:$Rt)>;
def: Pat<(i64 (sub I64:$Rs, I64:$Rt)), (A2_subp I64:$Rs, I64:$Rt)>;
: T_ALU64_rr<mnemonic, "", 0b0011, 0b111, MinOp, OpsRev, IsComm,
!if(IsNeg,"~","")>;
// 64-bit bitwise logical ops on register pairs.
-let isCodeGenOnly = 0 in {
def A2_andp : T_ALU64_logical<"and", 0b000, 0, 1, 0>;
def A2_orp : T_ALU64_logical<"or", 0b010, 0, 1, 0>;
def A2_xorp : T_ALU64_logical<"xor", 0b100, 0, 1, 0>;
-}
def: Pat<(i64 (and I64:$Rs, I64:$Rt)), (A2_andp I64:$Rs, I64:$Rt)>;
def: Pat<(i64 (or I64:$Rs, I64:$Rt)), (A2_orp I64:$Rs, I64:$Rt)>;
let Inst{1-0} = Pd;
}
// Single-operand predicate-register logical ops.
-let isCodeGenOnly = 0 in {
def C2_any8 : T_LOGICAL_1OP<"any8", 0b00>;
def C2_all8 : T_LOGICAL_1OP<"all8", 0b01>;
def C2_not : T_LOGICAL_1OP<"not", 0b10>;
-}
def: Pat<(i1 (not (i1 PredRegs:$Ps))),
(C2_not PredRegs:$Ps)>;
let Inst{1-0} = Pd;
}
// Two-operand predicate-register logical ops; the "n" variants negate
// the second operand (and-not / or-not), matched by the patterns below.
-let isCodeGenOnly = 0 in {
def C2_and : T_LOGICAL_2OP<"and", 0b000, 0, 1>;
def C2_or : T_LOGICAL_2OP<"or", 0b001, 0, 1>;
def C2_xor : T_LOGICAL_2OP<"xor", 0b010, 0, 0>;
def C2_andn : T_LOGICAL_2OP<"and", 0b011, 1, 1>;
def C2_orn : T_LOGICAL_2OP<"or", 0b111, 1, 1>;
-}
def: Pat<(i1 (and I1:$Ps, I1:$Pt)), (C2_and I1:$Ps, I1:$Pt)>;
def: Pat<(i1 (or I1:$Ps, I1:$Pt)), (C2_or I1:$Ps, I1:$Pt)>;
def: Pat<(i1 (and I1:$Ps, (not I1:$Pt))), (C2_andn I1:$Ps, I1:$Pt)>;
def: Pat<(i1 (or I1:$Ps, (not I1:$Pt))), (C2_orn I1:$Ps, I1:$Pt)>;
// Rd = vitpack(Ps, Pt): pack two predicate registers into an integer
// register (no side effects, produces a new value).
-let hasSideEffects = 0, hasNewValue = 1, isCodeGenOnly = 0 in
+let hasSideEffects = 0, hasNewValue = 1 in
def C2_vitpack : SInst<(outs IntRegs:$Rd), (ins PredRegs:$Ps, PredRegs:$Pt),
"$Rd = vitpack($Ps, $Pt)", [], "", S_2op_tc_1_SLOT23> {
bits<5> Rd;
let Inst{4-0} = Rd;
}
// Rd = mask(Pt): expand a predicate register into a 64-bit mask.
-let hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasSideEffects = 0 in
def C2_mask : SInst<(outs DoubleRegs:$Rd), (ins PredRegs:$Pt),
"$Rd = mask($Pt)", [], "", S_2op_tc_1_SLOT23> {
bits<5> Rd;
}
// Predicated register-indirect calls; they clobber the call-clobbered
// set (Defs = VolatileV3.Regs).
-let Defs = VolatileV3.Regs, isCodeGenOnly = 0 in {
+let Defs = VolatileV3.Regs in {
def J2_callrt : JUMPR_MISC_CALLR<1, 0, (ins PredRegs:$Pu, IntRegs:$Rs)>;
def J2_callrf : JUMPR_MISC_CALLR<1, 1, (ins PredRegs:$Pu, IntRegs:$Rs)>;
}
// Direct jumps (terminators, no side effects).
-let isTerminator = 1, hasSideEffects = 0, isCodeGenOnly = 0 in {
+let isTerminator = 1, hasSideEffects = 0 in {
defm J2_jump : JMP_base<"JMP", "">, PredNewRel;
// Deal with explicit assembly
}
}
// Base+offset (indexed) loads; accessSize and opExtentAlign track the
// element width and the offset alignment of each variant.
-let accessSize = ByteAccess, isCodeGenOnly = 0 in {
+let accessSize = ByteAccess in {
defm loadrb: LD_Idxd <"memb", "LDrib", IntRegs, s11_0Ext, u6_0Ext, 0b1000>;
defm loadrub: LD_Idxd <"memub", "LDriub", IntRegs, s11_0Ext, u6_0Ext, 0b1001>;
}
-let accessSize = HalfWordAccess, opExtentAlign = 1, isCodeGenOnly = 0 in {
+let accessSize = HalfWordAccess, opExtentAlign = 1 in {
defm loadrh: LD_Idxd <"memh", "LDrih", IntRegs, s11_1Ext, u6_1Ext, 0b1010>;
defm loadruh: LD_Idxd <"memuh", "LDriuh", IntRegs, s11_1Ext, u6_1Ext, 0b1011>;
}
-let accessSize = WordAccess, opExtentAlign = 2, isCodeGenOnly = 0 in
+let accessSize = WordAccess, opExtentAlign = 2 in
defm loadri: LD_Idxd <"memw", "LDriw", IntRegs, s11_2Ext, u6_2Ext, 0b1100>;
-let accessSize = DoubleWordAccess, opExtentAlign = 3, isCodeGenOnly = 0 in
+let accessSize = DoubleWordAccess, opExtentAlign = 3 in
defm loadrd: LD_Idxd <"memd", "LDrid", DoubleRegs, s11_3Ext, u6_3Ext, 0b1110>;
// Byte-pair loads (membh/memubh) into words or register pairs.
-let accessSize = HalfWordAccess, opExtentAlign = 1, isCodeGenOnly = 0 in {
+let accessSize = HalfWordAccess, opExtentAlign = 1 in {
def L2_loadbsw2_io: T_load_io<"membh", IntRegs, 0b0001, s11_1Ext>;
def L2_loadbzw2_io: T_load_io<"memubh", IntRegs, 0b0011, s11_1Ext>;
}
-let accessSize = WordAccess, opExtentAlign = 2, isCodeGenOnly = 0 in {
+let accessSize = WordAccess, opExtentAlign = 2 in {
def L2_loadbzw4_io: T_load_io<"memubh", DoubleRegs, 0b0101, s11_2Ext>;
def L2_loadbsw4_io: T_load_io<"membh", DoubleRegs, 0b0111, s11_2Ext>;
}
}
// Post-increment loads: the base register is advanced by an immediate
// whose scale matches the access size (s4_0/s4_1/s4_2/s4_3).
// post increment byte loads with immediate offset
-let accessSize = ByteAccess, isCodeGenOnly = 0 in {
+let accessSize = ByteAccess in {
defm loadrb : LD_PostInc <"memb", "LDrib", IntRegs, s4_0Imm, 0b1000>;
defm loadrub : LD_PostInc <"memub", "LDriub", IntRegs, s4_0Imm, 0b1001>;
}
// post increment halfword loads with immediate offset
-let accessSize = HalfWordAccess, opExtentAlign = 1, isCodeGenOnly = 0 in {
+let accessSize = HalfWordAccess, opExtentAlign = 1 in {
defm loadrh : LD_PostInc <"memh", "LDrih", IntRegs, s4_1Imm, 0b1010>;
defm loadruh : LD_PostInc <"memuh", "LDriuh", IntRegs, s4_1Imm, 0b1011>;
}
// post increment word loads with immediate offset
-let accessSize = WordAccess, opExtentAlign = 2, isCodeGenOnly = 0 in
+let accessSize = WordAccess, opExtentAlign = 2 in
defm loadri : LD_PostInc <"memw", "LDriw", IntRegs, s4_2Imm, 0b1100>;
// post increment doubleword loads with immediate offset
-let accessSize = DoubleWordAccess, opExtentAlign = 3, isCodeGenOnly = 0 in
+let accessSize = DoubleWordAccess, opExtentAlign = 3 in
defm loadrd : LD_PostInc <"memd", "LDrid", DoubleRegs, s4_3Imm, 0b1110>;
// Rd=memb[u]h(Rx++#s4:1)
// Rdd=memb[u]h(Rx++#s4:2)
-let accessSize = HalfWordAccess, opExtentAlign = 1, isCodeGenOnly = 0 in {
+let accessSize = HalfWordAccess, opExtentAlign = 1 in {
def L2_loadbsw2_pi : T_load_pi <"membh", IntRegs, s4_1Imm, 0b0001>;
def L2_loadbzw2_pi : T_load_pi <"memubh", IntRegs, s4_1Imm, 0b0011>;
}
-let accessSize = WordAccess, opExtentAlign = 2, hasNewValue = 0,
- isCodeGenOnly = 0 in {
+let accessSize = WordAccess, opExtentAlign = 2, hasNewValue = 0 in {
def L2_loadbsw4_pi : T_load_pi <"membh", DoubleRegs, s4_2Imm, 0b0111>;
def L2_loadbzw4_pi : T_load_pi <"memubh", DoubleRegs, s4_2Imm, 0b0101>;
}
let Inst{4-0} = dst;
}
// Register post-increment loads (base advanced by a register);
// word-register destinations can feed new-value consumers.
-let hasNewValue = 1, isCodeGenOnly = 0 in {
+let hasNewValue = 1 in {
def L2_loadrb_pr : T_load_pr <"memb", IntRegs, 0b1000, ByteAccess>;
def L2_loadrub_pr : T_load_pr <"memub", IntRegs, 0b1001, ByteAccess>;
def L2_loadrh_pr : T_load_pr <"memh", IntRegs, 0b1010, HalfWordAccess>;
def L2_loadbzw2_pr : T_load_pr <"memubh", IntRegs, 0b0011, HalfWordAccess>;
}
// Register-pair destination variants.
-let isCodeGenOnly = 0 in {
def L2_loadrd_pr : T_load_pr <"memd", DoubleRegs, 0b1110, DoubleWordAccess>;
def L2_loadbzw4_pr : T_load_pr <"memubh", DoubleRegs, 0b0101, WordAccess>;
-}
// Load predicate.
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13,
"Error; should not emit",
[]>;
// deallocframe: restores the stack/frame/link registers, hence
// Defs = [R29, R30, R31] and Uses = [R30].
-let Defs = [R29, R30, R31], Uses = [R30], hasSideEffects = 0, isCodeGenOnly = 0 in
+let Defs = [R29, R30, R31], Uses = [R30], hasSideEffects = 0 in
def L2_deallocframe : LDInst<(outs), (ins),
"deallocframe",
[]> {
let Inst{4-0} = dst;
}
// Circular-addressing loads, register-increment form (T_load_pcr),
// grouped by element access size.
-let accessSize = ByteAccess, isCodeGenOnly = 0 in {
+let accessSize = ByteAccess in {
def L2_loadrb_pcr : T_load_pcr <"memb", IntRegs, 0b1000>;
def L2_loadrub_pcr : T_load_pcr <"memub", IntRegs, 0b1001>;
}
-let accessSize = HalfWordAccess, isCodeGenOnly = 0 in {
+let accessSize = HalfWordAccess in {
def L2_loadrh_pcr : T_load_pcr <"memh", IntRegs, 0b1010>;
def L2_loadruh_pcr : T_load_pcr <"memuh", IntRegs, 0b1011>;
def L2_loadbsw2_pcr : T_load_pcr <"membh", IntRegs, 0b0001>;
def L2_loadbzw2_pcr : T_load_pcr <"memubh", IntRegs, 0b0011>;
}
-let accessSize = WordAccess, isCodeGenOnly = 0 in {
+let accessSize = WordAccess in {
def L2_loadri_pcr : T_load_pcr <"memw", IntRegs, 0b1100>;
let hasNewValue = 0 in {
def L2_loadbzw4_pcr : T_load_pcr <"memubh", DoubleRegs, 0b0101>;
}
}
-let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in
+let accessSize = DoubleWordAccess in
def L2_loadrd_pcr : T_load_pcr <"memd", DoubleRegs, 0b1110>;
//===----------------------------------------------------------------------===//
}
// Circular-addressing loads, immediate-increment form (T_load_pci).
// Byte variants of circ load
-let accessSize = ByteAccess, isCodeGenOnly = 0 in {
+let accessSize = ByteAccess in {
def L2_loadrb_pci : T_load_pci <"memb", IntRegs, s4_0Imm, 0b1000>;
def L2_loadrub_pci : T_load_pci <"memub", IntRegs, s4_0Imm, 0b1001>;
}
// Half word variants of circ load
-let accessSize = HalfWordAccess, isCodeGenOnly = 0 in {
+let accessSize = HalfWordAccess in {
def L2_loadrh_pci : T_load_pci <"memh", IntRegs, s4_1Imm, 0b1010>;
def L2_loadruh_pci : T_load_pci <"memuh", IntRegs, s4_1Imm, 0b1011>;
def L2_loadbzw2_pci : T_load_pci <"memubh", IntRegs, s4_1Imm, 0b0011>;
}
// Word variants of circ load
-let accessSize = WordAccess, isCodeGenOnly = 0 in
+let accessSize = WordAccess in
def L2_loadri_pci : T_load_pci <"memw", IntRegs, s4_2Imm, 0b1100>;
-let accessSize = WordAccess, hasNewValue = 0, isCodeGenOnly = 0 in {
+let accessSize = WordAccess, hasNewValue = 0 in {
def L2_loadbzw4_pci : T_load_pci <"memubh", DoubleRegs, s4_2Imm, 0b0101>;
def L2_loadbsw4_pci : T_load_pci <"membh", DoubleRegs, s4_2Imm, 0b0111>;
}
-let accessSize = DoubleWordAccess, hasNewValue = 0, isCodeGenOnly = 0 in
+let accessSize = DoubleWordAccess, hasNewValue = 0 in
def L2_loadrd_pci : T_load_pci <"memd", DoubleRegs, s4_3Imm, 0b1110>;
// L[24]_load[wd]_locked: Load word/double with lock.
let Inst{13-12} = !if (!eq(mnemonic, "memd_locked"), 0b01, 0b00);
let Inst{4-0} = dst;
}
// Load-locked: word/doubleword loads with a memory reservation.
-let hasNewValue = 1, accessSize = WordAccess, opNewValue = 0, isCodeGenOnly = 0 in
+let hasNewValue = 1, accessSize = WordAccess, opNewValue = 0 in
def L2_loadw_locked : T_load_locked <"memw_locked", IntRegs>;
-let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in
+let accessSize = DoubleWordAccess in
def L4_loadd_locked : T_load_locked <"memd_locked", DoubleRegs>;
// S[24]_store[wd]_locked: Store word/double conditionally.
let Inst{1-0} = Pd;
}
// Store-conditional counterparts; success is reported in a predicate.
-let accessSize = WordAccess, isCodeGenOnly = 0 in
+let accessSize = WordAccess in
def S2_storew_locked : T_store_locked <"memw_locked", IntRegs>;
-let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in
+let accessSize = DoubleWordAccess in
def S4_stored_locked : T_store_locked <"memd_locked", DoubleRegs>;
//===----------------------------------------------------------------------===//
let Inst{4-0} = dst;
}
// Bit-reversed addressing loads (T_load_pbr); word-register
// destinations produce a new value.
-let hasNewValue =1, opNewValue = 0, isCodeGenOnly = 0 in {
+let hasNewValue =1, opNewValue = 0 in {
def L2_loadrb_pbr : T_load_pbr <"memb", IntRegs, ByteAccess, 0b1000>;
def L2_loadrub_pbr : T_load_pbr <"memub", IntRegs, ByteAccess, 0b1001>;
def L2_loadrh_pbr : T_load_pbr <"memh", IntRegs, HalfWordAccess, 0b1010>;
def L2_loadri_pbr : T_load_pbr <"memw", IntRegs, WordAccess, 0b1100>;
}
// Register-pair destination variants.
-let isCodeGenOnly = 0 in {
def L2_loadbzw4_pbr : T_load_pbr <"memubh", DoubleRegs, WordAccess, 0b0101>;
def L2_loadbsw4_pbr : T_load_pbr <"membh", DoubleRegs, WordAccess, 0b0111>;
def L2_loadrd_pbr : T_load_pbr <"memd", DoubleRegs, DoubleWordAccess, 0b1110>;
-}
//===----------------------------------------------------------------------===//
// LD -
}
// Halfword multiplies: operand suffixes select high/low halves
// (encoded in the first template argument), with optional <<1 shift.
//Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1]
-let isCodeGenOnly = 0 in {
def M2_mpy_ll_s1: T_M2_mpy<0b00, 0, 0, 1, 0>;
def M2_mpy_ll_s0: T_M2_mpy<0b00, 0, 0, 0, 0>;
def M2_mpy_lh_s1: T_M2_mpy<0b01, 0, 0, 1, 0>;
def M2_mpy_hl_s0: T_M2_mpy<0b10, 0, 0, 0, 0>;
def M2_mpy_hh_s1: T_M2_mpy<0b11, 0, 0, 1, 0>;
def M2_mpy_hh_s0: T_M2_mpy<0b11, 0, 0, 0, 0>;
-}
//Rd=mpyu(Rs.[H|L],Rt.[H|L])[:<<1]
-let isCodeGenOnly = 0 in {
def M2_mpyu_ll_s1: T_M2_mpy<0b00, 0, 0, 1, 1>;
def M2_mpyu_ll_s0: T_M2_mpy<0b00, 0, 0, 0, 1>;
def M2_mpyu_lh_s1: T_M2_mpy<0b01, 0, 0, 1, 1>;
def M2_mpyu_hl_s0: T_M2_mpy<0b10, 0, 0, 0, 1>;
def M2_mpyu_hh_s1: T_M2_mpy<0b11, 0, 0, 1, 1>;
def M2_mpyu_hh_s0: T_M2_mpy<0b11, 0, 0, 0, 1>;
-}
//Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1]:rnd
-let isCodeGenOnly = 0 in {
def M2_mpy_rnd_ll_s1: T_M2_mpy <0b00, 0, 1, 1, 0>;
def M2_mpy_rnd_ll_s0: T_M2_mpy <0b00, 0, 1, 0, 0>;
def M2_mpy_rnd_lh_s1: T_M2_mpy <0b01, 0, 1, 1, 0>;
def M2_mpy_rnd_hl_s0: T_M2_mpy <0b10, 0, 1, 0, 0>;
def M2_mpy_rnd_hh_s1: T_M2_mpy <0b11, 0, 1, 1, 0>;
def M2_mpy_rnd_hh_s0: T_M2_mpy <0b11, 0, 1, 0, 0>;
-}
//Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1][:sat]
//Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1][:rnd][:sat]
// Saturating halfword multiplies clobber the sticky overflow bit.
-let Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Defs = [USR_OVF] in {
def M2_mpy_sat_ll_s1: T_M2_mpy <0b00, 1, 0, 1, 0>;
def M2_mpy_sat_ll_s0: T_M2_mpy <0b00, 1, 0, 0, 0>;
def M2_mpy_sat_lh_s1: T_M2_mpy <0b01, 1, 0, 1, 0>;
}
// Accumulating (+=) and negative-accumulating (-=) halfword multiply
// families, signed and unsigned, with optional saturation.
//Rx += mpy(Rs.[H|L],Rt.[H|L])[:<<1]
-let isCodeGenOnly = 0 in {
def M2_mpy_acc_ll_s1: T_M2_mpy_acc <0b00, 0, 0, 1, 0>;
def M2_mpy_acc_ll_s0: T_M2_mpy_acc <0b00, 0, 0, 0, 0>;
def M2_mpy_acc_lh_s1: T_M2_mpy_acc <0b01, 0, 0, 1, 0>;
def M2_mpy_acc_hl_s0: T_M2_mpy_acc <0b10, 0, 0, 0, 0>;
def M2_mpy_acc_hh_s1: T_M2_mpy_acc <0b11, 0, 0, 1, 0>;
def M2_mpy_acc_hh_s0: T_M2_mpy_acc <0b11, 0, 0, 0, 0>;
-}
//Rx += mpyu(Rs.[H|L],Rt.[H|L])[:<<1]
-let isCodeGenOnly = 0 in {
def M2_mpyu_acc_ll_s1: T_M2_mpy_acc <0b00, 0, 0, 1, 1>;
def M2_mpyu_acc_ll_s0: T_M2_mpy_acc <0b00, 0, 0, 0, 1>;
def M2_mpyu_acc_lh_s1: T_M2_mpy_acc <0b01, 0, 0, 1, 1>;
def M2_mpyu_acc_hl_s0: T_M2_mpy_acc <0b10, 0, 0, 0, 1>;
def M2_mpyu_acc_hh_s1: T_M2_mpy_acc <0b11, 0, 0, 1, 1>;
def M2_mpyu_acc_hh_s0: T_M2_mpy_acc <0b11, 0, 0, 0, 1>;
-}
//Rx -= mpy(Rs.[H|L],Rt.[H|L])[:<<1]
-let isCodeGenOnly = 0 in {
def M2_mpy_nac_ll_s1: T_M2_mpy_acc <0b00, 0, 1, 1, 0>;
def M2_mpy_nac_ll_s0: T_M2_mpy_acc <0b00, 0, 1, 0, 0>;
def M2_mpy_nac_lh_s1: T_M2_mpy_acc <0b01, 0, 1, 1, 0>;
def M2_mpy_nac_hl_s0: T_M2_mpy_acc <0b10, 0, 1, 0, 0>;
def M2_mpy_nac_hh_s1: T_M2_mpy_acc <0b11, 0, 1, 1, 0>;
def M2_mpy_nac_hh_s0: T_M2_mpy_acc <0b11, 0, 1, 0, 0>;
-}
//Rx -= mpyu(Rs.[H|L],Rt.[H|L])[:<<1]
-let isCodeGenOnly = 0 in {
def M2_mpyu_nac_ll_s1: T_M2_mpy_acc <0b00, 0, 1, 1, 1>;
def M2_mpyu_nac_ll_s0: T_M2_mpy_acc <0b00, 0, 1, 0, 1>;
def M2_mpyu_nac_lh_s1: T_M2_mpy_acc <0b01, 0, 1, 1, 1>;
def M2_mpyu_nac_hl_s0: T_M2_mpy_acc <0b10, 0, 1, 0, 1>;
def M2_mpyu_nac_hh_s1: T_M2_mpy_acc <0b11, 0, 1, 1, 1>;
def M2_mpyu_nac_hh_s0: T_M2_mpy_acc <0b11, 0, 1, 0, 1>;
-}
//Rx += mpy(Rs.[H|L],Rt.[H|L])[:<<1]:sat
-let isCodeGenOnly = 0 in {
def M2_mpy_acc_sat_ll_s1: T_M2_mpy_acc <0b00, 1, 0, 1, 0>;
def M2_mpy_acc_sat_ll_s0: T_M2_mpy_acc <0b00, 1, 0, 0, 0>;
def M2_mpy_acc_sat_lh_s1: T_M2_mpy_acc <0b01, 1, 0, 1, 0>;
def M2_mpy_acc_sat_hl_s0: T_M2_mpy_acc <0b10, 1, 0, 0, 0>;
def M2_mpy_acc_sat_hh_s1: T_M2_mpy_acc <0b11, 1, 0, 1, 0>;
def M2_mpy_acc_sat_hh_s0: T_M2_mpy_acc <0b11, 1, 0, 0, 0>;
-}
//Rx -= mpy(Rs.[H|L],Rt.[H|L])[:<<1]:sat
-let isCodeGenOnly = 0 in {
def M2_mpy_nac_sat_ll_s1: T_M2_mpy_acc <0b00, 1, 1, 1, 0>;
def M2_mpy_nac_sat_ll_s0: T_M2_mpy_acc <0b00, 1, 1, 0, 0>;
def M2_mpy_nac_sat_lh_s1: T_M2_mpy_acc <0b01, 1, 1, 1, 0>;
def M2_mpy_nac_sat_hl_s0: T_M2_mpy_acc <0b10, 1, 1, 0, 0>;
def M2_mpy_nac_sat_hh_s1: T_M2_mpy_acc <0b11, 1, 1, 1, 0>;
def M2_mpy_nac_sat_hh_s0: T_M2_mpy_acc <0b11, 1, 1, 0, 0>;
-}
//===----------------------------------------------------------------------===//
// Template Class
let Inst{12-8} = Rt;
}
// 64-bit-result halfword multiply with accumulate (acc) and
// negative-accumulate (nac), signed (mpyd) and unsigned (mpyud) forms.
-let isCodeGenOnly = 0 in {
def M2_mpyd_acc_hh_s0: T_M2_mpyd_acc <0b11, 0, 0, 0>;
def M2_mpyd_acc_hl_s0: T_M2_mpyd_acc <0b10, 0, 0, 0>;
def M2_mpyd_acc_lh_s0: T_M2_mpyd_acc <0b01, 0, 0, 0>;
def M2_mpyud_nac_hl_s1: T_M2_mpyd_acc <0b10, 1, 1, 1>;
def M2_mpyud_nac_lh_s1: T_M2_mpyd_acc <0b01, 1, 1, 1>;
def M2_mpyud_nac_ll_s1: T_M2_mpyd_acc <0b00, 1, 1, 1>;
-}
//===----------------------------------------------------------------------===//
// Template Class -- Vector Multipy
}
// Vector/complex multiplies on 64-bit pairs; the USR_OVF group below
// clobbers the sticky overflow bit for saturating complex multiplies.
// Vector complex multiply imaginary: Rdd=vcmpyi(Rss,Rtt)[:<<1]:sat
-let Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Defs = [USR_OVF] in {
def M2_vcmpy_s1_sat_i: T_M2_vmpy <"vcmpyi", 0b110, 0b110, 1, 0, 1>;
def M2_vcmpy_s0_sat_i: T_M2_vmpy <"vcmpyi", 0b010, 0b110, 0, 0, 1>;
}
// Vector complex multiply real: Rdd=vcmpyr(Rss,Rtt)[:<<1]:sat
-let isCodeGenOnly = 0 in {
def M2_vcmpy_s1_sat_r: T_M2_vmpy <"vcmpyr", 0b101, 0b110, 1, 0, 1>;
def M2_vcmpy_s0_sat_r: T_M2_vmpy <"vcmpyr", 0b001, 0b110, 0, 0, 1>;
-}
// Vector dual multiply: Rdd=vdmpy(Rss,Rtt)[:<<1]:sat
-let isCodeGenOnly = 0 in {
def M2_vdmpys_s1: T_M2_vmpy <"vdmpy", 0b100, 0b100, 1, 0, 1>;
def M2_vdmpys_s0: T_M2_vmpy <"vdmpy", 0b000, 0b100, 0, 0, 1>;
-}
// Vector multiply even halfwords: Rdd=vmpyeh(Rss,Rtt)[:<<1]:sat
-let isCodeGenOnly = 0 in {
def M2_vmpy2es_s1: T_M2_vmpy <"vmpyeh", 0b100, 0b110, 1, 0, 1>;
def M2_vmpy2es_s0: T_M2_vmpy <"vmpyeh", 0b000, 0b110, 0, 0, 1>;
-}
//Rdd=vmpywoh(Rss,Rtt)[:<<1][:rnd]:sat
-let isCodeGenOnly = 0 in {
def M2_mmpyh_s0: T_M2_vmpy <"vmpywoh", 0b000, 0b111, 0, 0, 1>;
def M2_mmpyh_s1: T_M2_vmpy <"vmpywoh", 0b100, 0b111, 1, 0, 1>;
def M2_mmpyh_rs0: T_M2_vmpy <"vmpywoh", 0b001, 0b111, 0, 1, 1>;
def M2_mmpyh_rs1: T_M2_vmpy <"vmpywoh", 0b101, 0b111, 1, 1, 1>;
-}
//Rdd=vmpyweh(Rss,Rtt)[:<<1][:rnd]:sat
-let isCodeGenOnly = 0 in {
def M2_mmpyl_s0: T_M2_vmpy <"vmpyweh", 0b000, 0b101, 0, 0, 1>;
def M2_mmpyl_s1: T_M2_vmpy <"vmpyweh", 0b100, 0b101, 1, 0, 1>;
def M2_mmpyl_rs0: T_M2_vmpy <"vmpyweh", 0b001, 0b101, 0, 1, 1>;
def M2_mmpyl_rs1: T_M2_vmpy <"vmpyweh", 0b101, 0b101, 1, 1, 1>;
-}
//Rdd=vmpywouh(Rss,Rtt)[:<<1][:rnd]:sat
-let isCodeGenOnly = 0 in {
def M2_mmpyuh_s0: T_M2_vmpy <"vmpywouh", 0b010, 0b111, 0, 0, 1>;
def M2_mmpyuh_s1: T_M2_vmpy <"vmpywouh", 0b110, 0b111, 1, 0, 1>;
def M2_mmpyuh_rs0: T_M2_vmpy <"vmpywouh", 0b011, 0b111, 0, 1, 1>;
def M2_mmpyuh_rs1: T_M2_vmpy <"vmpywouh", 0b111, 0b111, 1, 1, 1>;
-}
//Rdd=vmpyweuh(Rss,Rtt)[:<<1][:rnd]:sat
-let isCodeGenOnly = 0 in {
def M2_mmpyul_s0: T_M2_vmpy <"vmpyweuh", 0b010, 0b101, 0, 0, 1>;
def M2_mmpyul_s1: T_M2_vmpy <"vmpyweuh", 0b110, 0b101, 1, 0, 1>;
def M2_mmpyul_rs0: T_M2_vmpy <"vmpyweuh", 0b011, 0b101, 0, 1, 1>;
def M2_mmpyul_rs1: T_M2_vmpy <"vmpyweuh", 0b111, 0b101, 1, 1, 1>;
-}
let hasNewValue = 1, opNewValue = 0 in
class T_MType_mpy <string mnemonic, bits<4> RegTyBits, RegisterClass RC,
bit isSat = 0, bit isRnd = 0, string op2str = "" >
: T_MType_mpy<mnemonic, 0b1101, IntRegs, MajOp, MinOp, isSat, isRnd, op2str>;
// M-type multiplies on register pairs and 32x32 multiplies returning
// the upper word (mpy_up / mpyu_up) or the low word (mpyi).
-let isCodeGenOnly = 0 in {
def M2_vradduh : T_MType_dd <"vradduh", 0b000, 0b001, 0, 0>;
def M2_vdmpyrs_s0 : T_MType_dd <"vdmpy", 0b000, 0b000, 1, 1>;
def M2_vdmpyrs_s1 : T_MType_dd <"vdmpy", 0b100, 0b000, 1, 1>;
-}
-let CextOpcode = "mpyi", InputType = "reg", isCodeGenOnly = 0 in
+let CextOpcode = "mpyi", InputType = "reg" in
def M2_mpyi : T_MType_rr1 <"mpyi", 0b000, 0b000>, ImmRegRel;
-let isCodeGenOnly = 0 in {
def M2_mpy_up : T_MType_rr1 <"mpy", 0b000, 0b001>;
def M2_mpyu_up : T_MType_rr1 <"mpyu", 0b010, 0b001>;
-}
-let isCodeGenOnly = 0 in
def M2_dpmpyss_rnd_s0 : T_MType_rr1 <"mpy", 0b001, 0b001, 0, 1>;
-let isCodeGenOnly = 0 in {
def M2_vmpy2s_s0pack : T_MType_rr1 <"vmpyh", 0b001, 0b111, 1, 1>;
def M2_vmpy2s_s1pack : T_MType_rr1 <"vmpyh", 0b101, 0b111, 1, 1>;
-}
-let isCodeGenOnly = 0 in {
def M2_hmmpyh_rs1 : T_MType_rr2 <"mpy", 0b101, 0b100, 1, 1, ".h">;
def M2_hmmpyl_rs1 : T_MType_rr2 <"mpy", 0b111, 0b100, 1, 1, ".l">;
-}
// Complex multiplies; the "*" suffix marks the conjugate forms.
-let isCodeGenOnly = 0 in {
def M2_cmpyrs_s0 : T_MType_rr2 <"cmpy", 0b001, 0b110, 1, 1>;
def M2_cmpyrs_s1 : T_MType_rr2 <"cmpy", 0b101, 0b110, 1, 1>;
def M2_cmpyrsc_s0 : T_MType_rr2 <"cmpy", 0b011, 0b110, 1, 1, "*">;
def M2_cmpyrsc_s1 : T_MType_rr2 <"cmpy", 0b111, 0b110, 1, 1, "*">;
-}
// V4 Instructions
-let isCodeGenOnly = 0 in {
def M2_vraddh : T_MType_dd <"vraddh", 0b001, 0b111, 0>;
def M2_mpysu_up : T_MType_rr1 <"mpysu", 0b011, 0b001, 0>;
def M2_mpy_up_s1 : T_MType_rr1 <"mpy", 0b101, 0b010, 0>;
def M2_hmmpyh_s1 : T_MType_rr2 <"mpy", 0b101, 0b000, 1, 0, ".h">;
def M2_hmmpyl_s1 : T_MType_rr2 <"mpy", 0b101, 0b001, 1, 0, ".l">;
-}
def: Pat<(i32 (mul I32:$src1, I32:$src2)), (M2_mpyi I32:$src1, I32:$src2)>;
def: Pat<(i32 (mulhs I32:$src1, I32:$src2)), (M2_mpy_up I32:$src1, I32:$src2)>;
let Inst{12-5} = u8;
}
-let isExtendable = 1, opExtentBits = 8, opExtendable = 2, isCodeGenOnly = 0 in
+let isExtendable = 1, opExtentBits = 8, opExtendable = 2 in
def M2_mpysip : T_MType_mpy_ri <0, u8Ext,
[(set (i32 IntRegs:$Rd), (mul IntRegs:$Rs, u8ExtPred:$u8))]>;
-let isCodeGenOnly = 0 in
def M2_mpysin : T_MType_mpy_ri <1, u8Imm,
[(set (i32 IntRegs:$Rd), (ineg (mul IntRegs:$Rs,
u8ImmPred:$u8)))]>;
let Inst{4-0} = dst;
}
-let CextOpcode = "MPYI_acc", Itinerary = M_tc_3x_SLOT23, isCodeGenOnly = 0 in {
+let CextOpcode = "MPYI_acc", Itinerary = M_tc_3x_SLOT23 in {
def M2_macsip : T_MType_acc_ri <"+= mpyi", 0b010, u8Ext,
[(set (i32 IntRegs:$dst),
(add (mul IntRegs:$src2, u8ExtPred:$src3),
IntRegs:$src1))]>, ImmRegRel;
}
-let CextOpcode = "ADD_acc", isCodeGenOnly = 0 in {
+let CextOpcode = "ADD_acc" in {
let isExtentSigned = 1 in
def M2_accii : T_MType_acc_ri <"+= add", 0b100, s8Ext,
[(set (i32 IntRegs:$dst),
(i32 IntRegs:$src1)))]>, ImmRegRel;
}
-let CextOpcode = "SUB_acc", isCodeGenOnly = 0 in {
+let CextOpcode = "SUB_acc" in {
let isExtentSigned = 1 in
def M2_naccii : T_MType_acc_ri <"-= add", 0b101, s8Ext>, ImmRegRel;
def M2_nacci : T_MType_acc_rr <"-= add", 0b100, 0b001, 0>, ImmRegRel;
}
-let Itinerary = M_tc_3x_SLOT23, isCodeGenOnly = 0 in
+let Itinerary = M_tc_3x_SLOT23 in
def M2_macsin : T_MType_acc_ri <"-= mpyi", 0b011, u8Ext>;
-let isCodeGenOnly = 0 in {
def M2_xor_xacc : T_MType_acc_rr < "^= xor", 0b100, 0b011, 0>;
def M2_subacc : T_MType_acc_rr <"+= sub", 0b000, 0b011, 1>;
-}
class T_MType_acc_pat1 <InstHexagon MI, SDNode firstOp, SDNode secOp,
PatLeaf ImmPred>
}
// Vector reduce add unsigned bytes: Rdd=vraddub(Rss,Rtt)
-let isCodeGenOnly = 0 in {
def A2_vraddub: T_XTYPE_Vect <"vraddub", 0b010, 0b001, 0>;
def A2_vraddub_acc: T_XTYPE_Vect_acc <"vraddub", 0b010, 0b001, 0>;
-}
// Vector sum of absolute differences unsigned bytes: Rdd=vrsadub(Rss,Rtt)
-let isCodeGenOnly = 0 in {
def A2_vrsadub: T_XTYPE_Vect <"vrsadub", 0b010, 0b010, 0>;
def A2_vrsadub_acc: T_XTYPE_Vect_acc <"vrsadub", 0b010, 0b010, 0>;
-}
// Vector absolute difference words: Rdd=vabsdiffw(Rtt,Rss)
-let isCodeGenOnly = 0 in
def M2_vabsdiffw: T_XTYPE_Vect_diff<0b001, "vabsdiffw">;
// Vector absolute difference: Rdd=vabsdiffh(Rtt,Rss)
-let isCodeGenOnly = 0 in
def M2_vabsdiffh: T_XTYPE_Vect_diff<0b011, "vabsdiffh">;
// Vector reduce complex multiply real or imaginary:
// Rdd[+]=vrcmpy[ir](Rss,Rtt[*])
-let isCodeGenOnly = 0 in {
def M2_vrcmpyi_s0: T_XTYPE_Vect <"vrcmpyi", 0b000, 0b000, 0>;
def M2_vrcmpyi_s0c: T_XTYPE_Vect <"vrcmpyi", 0b010, 0b000, 1>;
def M2_vrcmaci_s0: T_XTYPE_Vect_acc <"vrcmpyi", 0b000, 0b000, 0>;
def M2_vrcmaci_s0c: T_XTYPE_Vect_acc <"vrcmpyi", 0b010, 0b000, 1>;
-}
-let isCodeGenOnly = 0 in {
def M2_vrcmpyr_s0: T_XTYPE_Vect <"vrcmpyr", 0b000, 0b001, 0>;
def M2_vrcmpyr_s0c: T_XTYPE_Vect <"vrcmpyr", 0b011, 0b001, 1>;
def M2_vrcmacr_s0: T_XTYPE_Vect_acc <"vrcmpyr", 0b000, 0b001, 0>;
def M2_vrcmacr_s0c: T_XTYPE_Vect_acc <"vrcmpyr", 0b011, 0b001, 1>;
-}
+
// Vector reduce halfwords:
// Rdd[+]=vrmpyh(Rss,Rtt)
-let isCodeGenOnly = 0 in {
def M2_vrmpy_s0: T_XTYPE_Vect <"vrmpyh", 0b000, 0b010, 0>;
def M2_vrmac_s0: T_XTYPE_Vect_acc <"vrmpyh", 0b000, 0b010, 0>;
-}
//===----------------------------------------------------------------------===//
// Template Class -- Vector Multiply with accumulation.
// Vector multiply word by signed half with accumulation
// Rxx+=vmpyw[eo]h(Rss,Rtt)[:<<1][:rnd]:sat
-let isCodeGenOnly = 0 in {
def M2_mmacls_s1: T_M2_vmpy_acc_sat <"vmpyweh", 0b100, 0b101, 1, 0>;
def M2_mmacls_s0: T_M2_vmpy_acc_sat <"vmpyweh", 0b000, 0b101, 0, 0>;
def M2_mmacls_rs1: T_M2_vmpy_acc_sat <"vmpyweh", 0b101, 0b101, 1, 1>;
def M2_mmacls_rs0: T_M2_vmpy_acc_sat <"vmpyweh", 0b001, 0b101, 0, 1>;
-}
-let isCodeGenOnly = 0 in {
def M2_mmachs_s1: T_M2_vmpy_acc_sat <"vmpywoh", 0b100, 0b111, 1, 0>;
def M2_mmachs_s0: T_M2_vmpy_acc_sat <"vmpywoh", 0b000, 0b111, 0, 0>;
def M2_mmachs_rs1: T_M2_vmpy_acc_sat <"vmpywoh", 0b101, 0b111, 1, 1>;
def M2_mmachs_rs0: T_M2_vmpy_acc_sat <"vmpywoh", 0b001, 0b111, 0, 1>;
-}
// Vector multiply word by unsigned half with accumulation
// Rxx+=vmpyw[eo]uh(Rss,Rtt)[:<<1][:rnd]:sat
-let isCodeGenOnly = 0 in {
def M2_mmaculs_s1: T_M2_vmpy_acc_sat <"vmpyweuh", 0b110, 0b101, 1, 0>;
def M2_mmaculs_s0: T_M2_vmpy_acc_sat <"vmpyweuh", 0b010, 0b101, 0, 0>;
def M2_mmaculs_rs1: T_M2_vmpy_acc_sat <"vmpyweuh", 0b111, 0b101, 1, 1>;
def M2_mmaculs_rs0: T_M2_vmpy_acc_sat <"vmpyweuh", 0b011, 0b101, 0, 1>;
-}
-let isCodeGenOnly = 0 in {
def M2_mmacuhs_s1: T_M2_vmpy_acc_sat <"vmpywouh", 0b110, 0b111, 1, 0>;
def M2_mmacuhs_s0: T_M2_vmpy_acc_sat <"vmpywouh", 0b010, 0b111, 0, 0>;
def M2_mmacuhs_rs1: T_M2_vmpy_acc_sat <"vmpywouh", 0b111, 0b111, 1, 1>;
def M2_mmacuhs_rs0: T_M2_vmpy_acc_sat <"vmpywouh", 0b011, 0b111, 0, 1>;
-}
// Vector multiply even halfwords with accumulation
// Rxx+=vmpyeh(Rss,Rtt)[:<<1][:sat]
-let isCodeGenOnly = 0 in {
def M2_vmac2es: T_M2_vmpy_acc <"vmpyeh", 0b001, 0b010, 0, 0>;
def M2_vmac2es_s1: T_M2_vmpy_acc_sat <"vmpyeh", 0b100, 0b110, 1, 0>;
def M2_vmac2es_s0: T_M2_vmpy_acc_sat <"vmpyeh", 0b000, 0b110, 0, 0>;
-}
// Vector dual multiply with accumulation
// Rxx+=vdmpy(Rss,Rtt)[:sat]
-let isCodeGenOnly = 0 in {
def M2_vdmacs_s1: T_M2_vmpy_acc_sat <"vdmpy", 0b100, 0b100, 1, 0>;
def M2_vdmacs_s0: T_M2_vmpy_acc_sat <"vdmpy", 0b000, 0b100, 0, 0>;
-}
// Vector complex multiply real or imaginary with accumulation
// Rxx+=vcmpy[ir](Rss,Rtt):sat
-let isCodeGenOnly = 0 in {
def M2_vcmac_s0_sat_r: T_M2_vmpy_acc_sat <"vcmpyr", 0b001, 0b100, 0, 0>;
def M2_vcmac_s0_sat_i: T_M2_vmpy_acc_sat <"vcmpyi", 0b010, 0b100, 0, 0>;
-}
//===----------------------------------------------------------------------===//
// Template Class -- Multiply signed/unsigned halfwords with and without
let Inst{12-8} = Rt;
}
-let isCodeGenOnly = 0 in {
def M2_mpyd_hh_s0: T_M2_mpyd<0b11, 0, 0, 0>;
def M2_mpyd_hl_s0: T_M2_mpyd<0b10, 0, 0, 0>;
def M2_mpyd_lh_s0: T_M2_mpyd<0b01, 0, 0, 0>;
def M2_mpyud_hl_s1: T_M2_mpyd<0b10, 0, 1, 1>;
def M2_mpyud_lh_s1: T_M2_mpyd<0b01, 0, 1, 1>;
def M2_mpyud_ll_s1: T_M2_mpyd<0b00, 0, 1, 1>;
-}
+
//===----------------------------------------------------------------------===//
// Template Class for xtype mpy:
// Vector multiply
// MPY - Multiply and use full result
// Rdd = mpy[u](Rs,Rt)
-let isCodeGenOnly = 0 in {
def M2_dpmpyss_s0 : T_XTYPE_mpy64 < "mpy", 0b000, 0b000, 0, 0, 0>;
def M2_dpmpyuu_s0 : T_XTYPE_mpy64 < "mpyu", 0b010, 0b000, 0, 0, 0>;
def M2_dpmpyss_nac_s0 : T_XTYPE_mpy64_acc < "mpy", "-", 0b001, 0b000, 0, 0, 0>;
def M2_dpmpyuu_acc_s0 : T_XTYPE_mpy64_acc < "mpyu", "+", 0b010, 0b000, 0, 0, 0>;
def M2_dpmpyuu_nac_s0 : T_XTYPE_mpy64_acc < "mpyu", "-", 0b011, 0b000, 0, 0, 0>;
-}
+
// Complex multiply real or imaginary
// Rxx=cmpy[ir](Rs,Rt)
-let isCodeGenOnly = 0 in {
def M2_cmpyi_s0 : T_XTYPE_mpy64 < "cmpyi", 0b000, 0b001, 0, 0, 0>;
def M2_cmpyr_s0 : T_XTYPE_mpy64 < "cmpyr", 0b000, 0b010, 0, 0, 0>;
-}
// Rxx+=cmpy[ir](Rs,Rt)
-let isCodeGenOnly = 0 in {
def M2_cmaci_s0 : T_XTYPE_mpy64_acc < "cmpyi", "+", 0b000, 0b001, 0, 0, 0>;
def M2_cmacr_s0 : T_XTYPE_mpy64_acc < "cmpyr", "+", 0b000, 0b010, 0, 0, 0>;
-}
// Complex multiply
// Rdd=cmpy(Rs,Rt)[:<<]:sat
-let isCodeGenOnly = 0 in {
def M2_cmpys_s0 : T_XTYPE_mpy64 < "cmpy", 0b000, 0b110, 1, 0, 0>;
def M2_cmpys_s1 : T_XTYPE_mpy64 < "cmpy", 0b100, 0b110, 1, 1, 0>;
-}
// Rdd=cmpy(Rs,Rt*)[:<<]:sat
-let isCodeGenOnly = 0 in {
def M2_cmpysc_s0 : T_XTYPE_mpy64 < "cmpy", 0b010, 0b110, 1, 0, 1>;
def M2_cmpysc_s1 : T_XTYPE_mpy64 < "cmpy", 0b110, 0b110, 1, 1, 1>;
-}
// Rxx[-+]=cmpy(Rs,Rt)[:<<1]:sat
-let isCodeGenOnly = 0 in {
def M2_cmacs_s0 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b000, 0b110, 1, 0, 0>;
def M2_cnacs_s0 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b000, 0b111, 1, 0, 0>;
def M2_cmacs_s1 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b100, 0b110, 1, 1, 0>;
def M2_cnacs_s1 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b100, 0b111, 1, 1, 0>;
-}
// Rxx[-+]=cmpy(Rs,Rt*)[:<<1]:sat
-let isCodeGenOnly = 0 in {
def M2_cmacsc_s0 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b010, 0b110, 1, 0, 1>;
def M2_cnacsc_s0 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b010, 0b111, 1, 0, 1>;
def M2_cmacsc_s1 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b110, 0b110, 1, 1, 1>;
def M2_cnacsc_s1 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b110, 0b111, 1, 1, 1>;
-}
+
// Vector multiply halfwords
// Rdd=vmpyh(Rs,Rt)[:<<]:sat
//let Defs = [USR_OVF] in {
-let isCodeGenOnly = 0 in {
def M2_vmpy2s_s1 : T_XTYPE_mpy64 < "vmpyh", 0b100, 0b101, 1, 1, 0>;
def M2_vmpy2s_s0 : T_XTYPE_mpy64 < "vmpyh", 0b000, 0b101, 1, 0, 0>;
- }
//}
// Rxx+=vmpyh(Rs,Rt)[:<<1][:sat]
-let isCodeGenOnly = 0 in {
def M2_vmac2 : T_XTYPE_mpy64_acc < "vmpyh", "+", 0b001, 0b001, 0, 0, 0>;
def M2_vmac2s_s1 : T_XTYPE_mpy64_acc < "vmpyh", "+", 0b100, 0b101, 1, 1, 0>;
def M2_vmac2s_s0 : T_XTYPE_mpy64_acc < "vmpyh", "+", 0b000, 0b101, 1, 0, 0>;
-}
def: Pat<(i64 (mul (i64 (anyext (i32 IntRegs:$src1))),
(i64 (anyext (i32 IntRegs:$src2))))),
}
}
-let accessSize = ByteAccess, isCodeGenOnly = 0 in
+let accessSize = ByteAccess in
defm storerb: ST_PostInc <"memb", "STrib", IntRegs, s4_0Imm, 0b1000>;
-let accessSize = HalfWordAccess, isCodeGenOnly = 0 in
+let accessSize = HalfWordAccess in
defm storerh: ST_PostInc <"memh", "STrih", IntRegs, s4_1Imm, 0b1010>;
-let accessSize = WordAccess, isCodeGenOnly = 0 in
+let accessSize = WordAccess in
defm storeri: ST_PostInc <"memw", "STriw", IntRegs, s4_2Imm, 0b1100>;
-let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in
+let accessSize = DoubleWordAccess in
defm storerd: ST_PostInc <"memd", "STrid", DoubleRegs, s4_3Imm, 0b1110>;
-let accessSize = HalfWordAccess, isNVStorable = 0, isCodeGenOnly = 0 in
+let accessSize = HalfWordAccess, isNVStorable = 0 in
defm storerf: ST_PostInc <"memh", "STrih_H", IntRegs, s4_1Imm, 0b1011, 1>;
// Patterns for generating stores, where the address takes different forms:
let Inst{7} = 0b0;
}
-let isCodeGenOnly = 0 in {
def S2_storerb_pr : T_store_pr<"memb", IntRegs, 0b000, ByteAccess>;
def S2_storerh_pr : T_store_pr<"memh", IntRegs, 0b010, HalfWordAccess>;
def S2_storeri_pr : T_store_pr<"memw", IntRegs, 0b100, WordAccess>;
def S2_storerd_pr : T_store_pr<"memd", DoubleRegs, 0b110, DoubleWordAccess>;
def S2_storerf_pr : T_store_pr<"memh", IntRegs, 0b011, HalfWordAccess, 1>;
-}
+
let opExtendable = 1, isExtentSigned = 1, isPredicable = 1 in
class T_store_io <string mnemonic, RegisterClass RC, Operand ImmOp,
bits<3>MajOp, bit isH = 0>
}
}
-let addrMode = BaseImmOffset, InputType = "imm", isCodeGenOnly = 0 in {
+let addrMode = BaseImmOffset, InputType = "imm" in {
let accessSize = ByteAccess in
defm storerb: ST_Idxd < "memb", "STrib", IntRegs, s11_0Ext, u6_0Ext, 0b000>;
// S2_allocframe: Allocate stack frame.
let Defs = [R29, R30], Uses = [R29, R31, R30],
- hasSideEffects = 0, accessSize = DoubleWordAccess, isCodeGenOnly = 0 in
+ hasSideEffects = 0, accessSize = DoubleWordAccess in
def S2_allocframe: ST0Inst <
(outs), (ins u11_3Imm:$u11_3),
"allocframe(#$u11_3)" > {
let Inst{1} = 0b0;
}
-let isCodeGenOnly = 0 in {
def S2_storerb_pci : T_store_pci<"memb", IntRegs, s4_0Imm, 0b1000,
ByteAccess>;
def S2_storerh_pci : T_store_pci<"memh", IntRegs, s4_1Imm, 0b1010,
                                 HalfWordAccess>;
def S2_storerd_pci : T_store_pci<"memd", DoubleRegs, s4_3Imm, 0b1110,
DoubleWordAccess>;
-}
let Uses = [CS], isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 4 in
class T_storenew_pci <string mnemonic, Operand Imm,
/* ByteAccess */ offset{3-0}));
let Inst{1} = 0b0;
}
-let isCodeGenOnly = 0 in {
+
def S2_storerbnew_pci : T_storenew_pci <"memb", s4_0Imm, 0b00, ByteAccess>;
def S2_storerhnew_pci : T_storenew_pci <"memh", s4_1Imm, 0b01, HalfWordAccess>;
def S2_storerinew_pci : T_storenew_pci <"memw", s4_2Imm, 0b10, WordAccess>;
-}
//===----------------------------------------------------------------------===//
// Circular stores with auto-increment register
//===----------------------------------------------------------------------===//
-let Uses = [CS], isNVStorable = 1, isCodeGenOnly = 0 in
+let Uses = [CS], isNVStorable = 1 in
class T_store_pcr <string mnemonic, RegisterClass RC, bits<4>MajOp,
MemAccessSize AlignSize, string RegSrc = "Rt">
: STInst <(outs IntRegs:$_dst_),
let Inst{1} = 0b1;
}
-let isCodeGenOnly = 0 in {
def S2_storerb_pcr : T_store_pcr<"memb", IntRegs, 0b1000, ByteAccess>;
def S2_storerh_pcr : T_store_pcr<"memh", IntRegs, 0b1010, HalfWordAccess>;
def S2_storeri_pcr : T_store_pcr<"memw", IntRegs, 0b1100, WordAccess>;
def S2_storerd_pcr : T_store_pcr<"memd", DoubleRegs, 0b1110, DoubleWordAccess>;
def S2_storerf_pcr : T_store_pcr<"memh", IntRegs, 0b1011,
HalfWordAccess, "Rt.h">;
-}
//===----------------------------------------------------------------------===//
// Circular .new stores with auto-increment register
let Inst{1} = 0b1;
}
-let isCodeGenOnly = 0 in {
def S2_storerbnew_pcr : T_storenew_pcr <"memb", 0b00, ByteAccess>;
def S2_storerhnew_pcr : T_storenew_pcr <"memh", 0b01, HalfWordAccess>;
def S2_storerinew_pcr : T_storenew_pcr <"memw", 0b10, WordAccess>;
-}
//===----------------------------------------------------------------------===//
// Bit-reversed stores with auto-increment register
let Inst{12-8} = src;
}
-let isNVStorable = 1, isCodeGenOnly = 0 in {
+let isNVStorable = 1 in {
let BaseOpcode = "S2_storerb_pbr" in
def S2_storerb_pbr : T_store_pbr<"memb", IntRegs, ByteAccess,
0b000>, NewValueRel;
def S2_storeri_pbr : T_store_pbr<"memw", IntRegs, WordAccess,
0b100>, NewValueRel;
}
-let isCodeGenOnly = 0 in {
+
def S2_storerf_pbr : T_store_pbr<"memh", IntRegs, HalfWordAccess, 0b011, 1>;
def S2_storerd_pbr : T_store_pbr<"memd", DoubleRegs, DoubleWordAccess, 0b110>;
-}
//===----------------------------------------------------------------------===//
// Bit-reversed .new stores with auto-increment register
let Inst{10-8} = Nt;
}
-let BaseOpcode = "S2_storerb_pbr", isCodeGenOnly = 0 in
+let BaseOpcode = "S2_storerb_pbr" in
def S2_storerbnew_pbr : T_storenew_pbr<"memb", ByteAccess, 0b00>;
-let BaseOpcode = "S2_storerh_pbr", isCodeGenOnly = 0 in
+let BaseOpcode = "S2_storerh_pbr" in
def S2_storerhnew_pbr : T_storenew_pbr<"memh", HalfWordAccess, 0b01>;
-let BaseOpcode = "S2_storeri_pbr", isCodeGenOnly = 0 in
+let BaseOpcode = "S2_storeri_pbr" in
def S2_storerinew_pbr : T_storenew_pbr<"memw", WordAccess, 0b10>;
//===----------------------------------------------------------------------===//
: T_S2op_1 <mnemonic, 0b1100, IntRegs, IntRegs, MajOp, MinOp, isSat>;
// Vector sign/zero extend
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 0 in {
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def S2_vsxtbh : T_S2op_1_di <"vsxtbh", 0b00, 0b000>;
def S2_vsxthw : T_S2op_1_di <"vsxthw", 0b00, 0b100>;
def S2_vzxtbh : T_S2op_1_di <"vzxtbh", 0b00, 0b010>;
}
// Vector splat bytes/halfwords
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 0 in {
+let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def S2_vsplatrb : T_S2op_1_ii <"vsplatb", 0b01, 0b111>;
def S2_vsplatrh : T_S2op_1_di <"vsplath", 0b01, 0b010>;
}
// Sign extend word to doubleword
-let isCodeGenOnly = 0 in
def A2_sxtw : T_S2op_1_di <"sxtw", 0b01, 0b000>;
def: Pat <(i64 (sext I32:$src)), (A2_sxtw I32:$src)>;
// Vector saturate and pack
-let Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Defs = [USR_OVF] in {
def S2_svsathb : T_S2op_1_ii <"vsathb", 0b10, 0b000>;
def S2_svsathub : T_S2op_1_ii <"vsathub", 0b10, 0b010>;
def S2_vsathb : T_S2op_1_id <"vsathb", 0b00, 0b110>;
}
// Vector truncate
-let isCodeGenOnly = 0 in {
def S2_vtrunohb : T_S2op_1_id <"vtrunohb", 0b10, 0b000>;
def S2_vtrunehb : T_S2op_1_id <"vtrunehb", 0b10, 0b010>;
-}
// Swizzle the bytes of a word
-let isCodeGenOnly = 0 in
def A2_swiz : T_S2op_1_ii <"swiz", 0b10, 0b111>;
// Saturate
-let Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Defs = [USR_OVF] in {
def A2_sat : T_S2op_1_id <"sat", 0b11, 0b000>;
def A2_satb : T_S2op_1_ii <"satb", 0b11, 0b111>;
def A2_satub : T_S2op_1_ii <"satub", 0b11, 0b110>;
def A2_roundsat : T_S2op_1_id <"round", 0b11, 0b001, 0b1>;
}
-let Itinerary = S_2op_tc_2_SLOT23, isCodeGenOnly = 0 in {
+let Itinerary = S_2op_tc_2_SLOT23 in {
// Vector round and pack
def S2_vrndpackwh : T_S2op_1_id <"vrndwh", 0b10, 0b100>;
(u5ImmPred:$u5)))]>;
// Vector arithmetic shift right by immediate with truncate and pack
-let isCodeGenOnly = 0 in
def S2_asr_i_svw_trun : T_S2op_2_id <"vasrw", 0b110, 0b010>;
// Arithmetic/logical shift right/left by immediate
-let Itinerary = S_2op_tc_1_SLOT23, isCodeGenOnly = 0 in {
+let Itinerary = S_2op_tc_1_SLOT23 in {
def S2_asr_i_r : T_S2op_shift <"asr", 0b000, 0b000, sra>;
def S2_lsr_i_r : T_S2op_shift <"lsr", 0b000, 0b001, srl>;
def S2_asl_i_r : T_S2op_shift <"asl", 0b000, 0b010, shl>;
}
// Shift left by immediate with saturation
-let Defs = [USR_OVF], isCodeGenOnly = 0 in
+let Defs = [USR_OVF] in
def S2_asl_i_r_sat : T_S2op_2_ii <"asl", 0b010, 0b010, 1>;
// Shift right with round
-let isCodeGenOnly = 0 in
def S2_asr_i_r_rnd : T_S2op_2_ii <"asr", 0b010, 0b000, 0, 1>;
let isAsmParserOnly = 1 in
let Inst{4-0} = Rdd;
}
-let isCodeGenOnly = 0 in {
def A2_absp : T_S2op_3 <"abs", 0b10, 0b110>;
def A2_negp : T_S2op_3 <"neg", 0b10, 0b101>;
def A2_notp : T_S2op_3 <"not", 0b10, 0b100>;
-}
// Interleave/deinterleave
-let isCodeGenOnly = 0 in {
def S2_interleave : T_S2op_3 <"interleave", 0b11, 0b101>;
def S2_deinterleave : T_S2op_3 <"deinterleave", 0b11, 0b100>;
-}
// Vector Complex conjugate
-let isCodeGenOnly = 0 in
def A2_vconj : T_S2op_3 <"vconj", 0b10, 0b111, 1>;
// Vector saturate without pack
-let isCodeGenOnly = 0 in {
def S2_vsathb_nopack : T_S2op_3 <"vsathb", 0b00, 0b111>;
def S2_vsathub_nopack : T_S2op_3 <"vsathub", 0b00, 0b100>;
def S2_vsatwh_nopack : T_S2op_3 <"vsatwh", 0b00, 0b110>;
def S2_vsatwuh_nopack : T_S2op_3 <"vsatwuh", 0b00, 0b101>;
-}
// Vector absolute value halfwords with and without saturation
// Rdd64=vabsh(Rss64)[:sat]
-let isCodeGenOnly = 0 in {
def A2_vabsh : T_S2op_3 <"vabsh", 0b01, 0b100>;
def A2_vabshsat : T_S2op_3 <"vabsh", 0b01, 0b101, 1>;
-}
// Vector absolute value words with and without saturation
-let isCodeGenOnly = 0 in {
def A2_vabsw : T_S2op_3 <"vabsw", 0b01, 0b110>;
def A2_vabswsat : T_S2op_3 <"vabsw", 0b01, 0b111, 1>;
-}
//===----------------------------------------------------------------------===//
// STYPE/BIT +
: T_COUNT_LEADING<MnOp, MajOp, MinOp, 0b0,
(outs IntRegs:$Rd), (ins DoubleRegs:$Rs)>;
-let isCodeGenOnly = 0 in {
def S2_cl0 : T_COUNT_LEADING_32<"cl0", 0b000, 0b101>;
def S2_cl1 : T_COUNT_LEADING_32<"cl1", 0b000, 0b110>;
def S2_ct0 : T_COUNT_LEADING_32<"ct0", 0b010, 0b100>;
def S2_clb : T_COUNT_LEADING_32<"clb", 0b000, 0b100>;
def S2_clbp : T_COUNT_LEADING_64<"clb", 0b010, 0b000>;
def S2_clbnorm : T_COUNT_LEADING_32<"normamt", 0b000, 0b111>;
-}
def: Pat<(i32 (ctlz I32:$Rs)), (S2_cl0 I32:$Rs)>;
def: Pat<(i32 (ctlz (not I32:$Rs))), (S2_cl1 I32:$Rs)>;
let Inst{4-0} = Rd;
}
-let isCodeGenOnly = 0 in {
def S2_clrbit_i : T_SCT_BIT_IMM<"clrbit", 0b001>;
def S2_setbit_i : T_SCT_BIT_IMM<"setbit", 0b000>;
def S2_togglebit_i : T_SCT_BIT_IMM<"togglebit", 0b010>;
def S2_clrbit_r : T_SCT_BIT_REG<"clrbit", 0b01>;
def S2_setbit_r : T_SCT_BIT_REG<"setbit", 0b00>;
def S2_togglebit_r : T_SCT_BIT_REG<"togglebit", 0b10>;
-}
def: Pat<(i32 (and (i32 IntRegs:$Rs), (not (shl 1, u5ImmPred:$u5)))),
(S2_clrbit_i IntRegs:$Rs, u5ImmPred:$u5)>;
let Inst{1-0} = Pd;
}
-let isCodeGenOnly = 0 in {
def S2_tstbit_i : T_TEST_BIT_IMM<"tstbit", 0b000>;
def S2_tstbit_r : T_TEST_BIT_REG<"tstbit", 0>;
-}
let AddedComplexity = 20 in { // Complexity greater than cmp reg-imm.
def: Pat<(i1 (setne (and (shl 1, u5ImmPred:$u5), (i32 IntRegs:$Rs)), 0)),
let Inst{1-0} = Pd;
}
-let isCodeGenOnly = 0 in {
def C2_bitsclri : T_TEST_BITS_IMM<"bitsclr", 0b10, 0>;
def C2_bitsclr : T_TEST_BITS_REG<"bitsclr", 0b10, 0>;
def C2_bitsset : T_TEST_BITS_REG<"bitsset", 0b01, 0>;
-}
let AddedComplexity = 20 in { // Complexity greater than compare reg-imm.
def: Pat<(i1 (seteq (and (i32 IntRegs:$Rs), u6ImmPred:$u6), 0)),
//===----------------------------------------------------------------------===//
// Predicate transfer.
-let hasSideEffects = 0, hasNewValue = 1, isCodeGenOnly = 0 in
+let hasSideEffects = 0, hasNewValue = 1 in
def C2_tfrpr : SInst<(outs IntRegs:$Rd), (ins PredRegs:$Ps),
"$Rd = $Ps", [], "", S_2op_tc_1_SLOT23> {
bits<5> Rd;
}
// Transfer general register to predicate.
-let hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasSideEffects = 0 in
def C2_tfrrp: SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs),
"$Pd = $Rs", [], "", S_2op_tc_2early_SLOT23> {
bits<2> Pd;
}
// Shift by immediate.
-let isCodeGenOnly = 0 in {
def S2_asr_i_p : S_2OpInstImmI6<"asr", sra, 0b000>;
def S2_asl_i_p : S_2OpInstImmI6<"asl", shl, 0b010>;
def S2_lsr_i_p : S_2OpInstImmI6<"lsr", srl, 0b001>;
-}
// Shift left by small amount and add.
-let AddedComplexity = 100, hasNewValue = 1, hasSideEffects = 0,
- isCodeGenOnly = 0 in
+let AddedComplexity = 100, hasNewValue = 1, hasSideEffects = 0 in
def S2_addasl_rrri: SInst <(outs IntRegs:$Rd),
(ins IntRegs:$Rt, IntRegs:$Rs, u3Imm:$u3),
"$Rd = addasl($Rt, $Rs, #$u3)" ,
//===----------------------------------------------------------------------===//
def HexagonBARRIER: SDNode<"HexagonISD::BARRIER", SDTNone, [SDNPHasChain]>;
-let hasSideEffects = 1, isSoloAX = 1, isCodeGenOnly = 0 in
+let hasSideEffects = 1, isSoloAX = 1 in
def BARRIER : SYSInst<(outs), (ins),
"barrier",
[(HexagonBARRIER)],"",ST_tc_st_SLOT0> {
}
-let Defs = [SA0, LC0, USR], isCodeGenOnly = 0 in
+let Defs = [SA0, LC0, USR] in
defm J2_loop0 : LOOP_ri<"loop0">;
// Interestingly only loop0's appear to set usr.lpcfg
-let Defs = [SA1, LC1], isCodeGenOnly = 0 in
+let Defs = [SA1, LC1] in
defm J2_loop1 : LOOP_ri<"loop1">;
let isBranch = 1, isTerminator = 1, hasSideEffects = 0,
def r : SPLOOP_rBase<mnemonic, op>;
}
-let isCodeGenOnly = 0 in {
defm J2_ploop1s : SPLOOP_ri<"1", 0b01>;
defm J2_ploop2s : SPLOOP_ri<"2", 0b10>;
defm J2_ploop3s : SPLOOP_ri<"3", 0b11>;
-}
-
// if (Rs[!>=<]=#0) jump:[t/nt]
let Defs = [PC], isPredicated = 1, isBranch = 1, hasSideEffects = 0,
def NAME : J2_jump_0_Base<compare, 0, op>;
def NAME#pt : J2_jump_0_Base<compare, 1, op>;
}
-let isCodeGenOnly = 0 in {
+
defm J2_jumprz : J2_jump_compare_0<"!=", 0b00>;
defm J2_jumprgtez : J2_jump_compare_0<">=", 0b01>;
defm J2_jumprnz : J2_jump_compare_0<"==", 0b10>;
defm J2_jumprltez : J2_jump_compare_0<"<=", 0b11>;
-}
// Transfer to/from Control/GPR Guest/GPR
let hasSideEffects = 0 in
let Inst{20-16} = src;
let Inst{4-0} = dst;
}
-let isCodeGenOnly = 0 in
+
def A2_tfrrcr : TFR_CR_RS_base<CtrRegs, IntRegs, 0b0>;
def : InstAlias<"m0 = $Rs", (A2_tfrrcr C6, IntRegs:$Rs)>;
def : InstAlias<"m1 = $Rs", (A2_tfrrcr C7, IntRegs:$Rs)>;
let Inst{4-0} = dst;
}
-let hasNewValue = 1, opNewValue = 0, isCodeGenOnly = 0 in
+let hasNewValue = 1, opNewValue = 0 in
def A2_tfrcrr : TFR_RD_CR_base<IntRegs, CtrRegs, 1>;
def : InstAlias<"$Rd = m0", (A2_tfrcrr IntRegs:$Rd, C6)>;
def : InstAlias<"$Rd = m1", (A2_tfrcrr IntRegs:$Rd, C7)>;
// Y4_trace: Send value to etm trace.
-let isSoloAX = 1, hasSideEffects = 0, isCodeGenOnly = 0 in
+let isSoloAX = 1, hasSideEffects = 0 in
def Y4_trace: CRInst <(outs), (ins IntRegs:$Rs),
"trace($Rs)"> {
bits<5> Rs;
}
// Call subroutine indirectly.
-let Defs = VolatileV3.Regs, isCodeGenOnly = 0 in
+let Defs = VolatileV3.Regs in
def J2_callr : JUMPR_MISC_CALLR<0, 1>;
// Indirect tail-call.
defm _xacc : xtype_imm_base< opc1, "^= ", OpNode, xor, 0b100, minOp>;
}
-let isCodeGenOnly = 0 in {
defm S2_asr : xtype_imm_acc<"asr", sra, 0b00>;
defm S2_lsr : xtype_imm_acc<"lsr", srl, 0b01>,
defm S2_asl : xtype_imm_acc<"asl", shl, 0b10>,
xtype_xor_imm_acc<"asl", shl, 0b10>;
-}
multiclass xtype_reg_acc_r<string opc1, SDNode OpNode, bits<2>minOp> {
let AddedComplexity = 100 in
defm _r_p : xtype_reg_acc_p <OpcStr, OpNode, minOp>;
}
-let isCodeGenOnly = 0 in {
defm S2_asl : xtype_reg_acc<"asl", shl, 0b10>;
defm S2_asr : xtype_reg_acc<"asr", sra, 0b00>;
defm S2_lsr : xtype_reg_acc<"lsr", srl, 0b01>;
defm S2_lsl : xtype_reg_acc<"lsl", shl, 0b11>;
-}
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in
: T_S3op_1 <mnemonic, DoubleRegs, MajOp, MinOp, SwapOps,
isSat, isRnd, hasShift>;
-let Itinerary = S_3op_tc_1_SLOT23, isCodeGenOnly = 0 in {
+let Itinerary = S_3op_tc_1_SLOT23 in {
def S2_shuffeb : T_S3op_64 < "shuffeb", 0b00, 0b010, 0>;
def S2_shuffeh : T_S3op_64 < "shuffeh", 0b00, 0b110, 0>;
def S2_shuffob : T_S3op_64 < "shuffob", 0b00, 0b100, 1>;
def S2_vtrunowh : T_S3op_64 < "vtrunowh", 0b10, 0b100, 0>;
}
-let isCodeGenOnly = 0 in
def S2_lfsp : T_S3op_64 < "lfs", 0b10, 0b110, 0>;
let hasSideEffects = 0 in
let Inst{4-0} = Rdd;
}
-let isCodeGenOnly = 0 in {
def S2_valignrb : T_S3op_2 < "valignb", 0b000, 1>;
def S2_vsplicerb : T_S3op_2 < "vspliceb", 0b100, 0>;
-}
//===----------------------------------------------------------------------===//
// Template class used by vector shift, vector rotate, vector neg,
// Shift by register
// Rdd=[asr|lsr|asl|lsl](Rss,Rt)
-let isCodeGenOnly = 0 in {
def S2_asr_r_p : T_S3op_shift64 < "asr", sra, 0b00>;
def S2_lsr_r_p : T_S3op_shift64 < "lsr", srl, 0b01>;
def S2_asl_r_p : T_S3op_shift64 < "asl", shl, 0b10>;
def S2_lsl_r_p : T_S3op_shift64 < "lsl", shl, 0b11>;
-}
// Rd=[asr|lsr|asl|lsl](Rs,Rt)
-let isCodeGenOnly = 0 in {
def S2_asr_r_r : T_S3op_shift32<"asr", sra, 0b00>;
def S2_lsr_r_r : T_S3op_shift32<"lsr", srl, 0b01>;
def S2_asl_r_r : T_S3op_shift32<"asl", shl, 0b10>;
def S2_lsl_r_r : T_S3op_shift32<"lsl", shl, 0b11>;
-}
// Shift by register with saturation
// Rd=asr(Rs,Rt):sat
// Rd=asl(Rs,Rt):sat
-let Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Defs = [USR_OVF] in {
def S2_asr_r_r_sat : T_S3op_shift32_Sat<"asr", 0b00>;
def S2_asl_r_r_sat : T_S3op_shift32_Sat<"asl", 0b10>;
}
let Inst{4-0} = Rd;
}
-let isCodeGenOnly = 0 in
def S2_asr_r_svw_trun : T_S3op_8<"vasrw", 0b010, 0, 0, 0>;
-let Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23, isCodeGenOnly = 0 in
+let Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23 in
def S2_vcrotate : T_S3op_shiftVect < "vcrotate", 0b11, 0b00>;
let hasSideEffects = 0 in
let Inst{4-0} = Rdd;
}
-let isCodeGenOnly = 0 in {
def S2_valignib : T_S3op_7 < "valignb", 0>;
def S2_vspliceib : T_S3op_7 < "vspliceb", 1>;
-}
//===----------------------------------------------------------------------===//
// Template class for 'insert bitfield' instructions
// Rx=insert(Rs,Rtt)
// Rx=insert(Rs,#u5,#U5)
-let hasNewValue = 1, isCodeGenOnly = 0 in {
+let hasNewValue = 1 in {
def S2_insert_rp : T_S3op_insert <"insert", IntRegs>;
def S2_insert : T_S2op_insert <0b1111, IntRegs, u5Imm>;
}
// Rxx=insert(Rss,Rtt)
// Rxx=insert(Rss,#u6,#U6)
-let isCodeGenOnly = 0 in {
def S2_insertp_rp : T_S3op_insert<"insert", DoubleRegs>;
def S2_insertp : T_S2op_insert <0b0011, DoubleRegs, u6Imm>;
-}
//===----------------------------------------------------------------------===//
// Template class for 'extract bitfield' instructions
// Rdd=extractu(Rss,Rtt)
// Rdd=extractu(Rss,#u6,#U6)
-let isCodeGenOnly = 0 in {
def S2_extractup_rp : T_S3op_64 < "extractu", 0b00, 0b000, 0>;
def S2_extractup : T_S2op_extract <"extractu", 0b0001, DoubleRegs, u6Imm>;
-}
// Rd=extractu(Rs,Rtt)
// Rd=extractu(Rs,#u5,#U5)
-let hasNewValue = 1, isCodeGenOnly = 0 in {
+let hasNewValue = 1 in {
def S2_extractu_rp : T_S3op_extract<"extractu", 0b00>;
def S2_extractu : T_S2op_extract <"extractu", 0b1101, IntRegs, u5Imm>;
}
let Inst{4-0} = Rx;
}
-let isCodeGenOnly = 0 in {
def S2_tableidxb : tableidxRaw<"tableidxb", 0b00>;
def S2_tableidxh : tableidxRaw<"tableidxh", 0b01>;
def S2_tableidxw : tableidxRaw<"tableidxw", 0b10>;
def S2_tableidxd : tableidxRaw<"tableidxd", 0b11>;
-}
// Change the sign of the immediate for Rd=-mpyi(Rs,#u8)
def : Pat <(mul (i32 IntRegs:$src1), (ineg n8ImmPred:$src2)),
let AsmString = "$Rd = "#mnemonic#"($Rs, ~$Rt)";
}
-let BaseOpcode = "andn_rr", CextOpcode = "andn", isCodeGenOnly = 0 in
+let BaseOpcode = "andn_rr", CextOpcode = "andn" in
def A4_andn : T_ALU32_3op_not<"and", 0b001, 0b100, 1>;
-let BaseOpcode = "orn_rr", CextOpcode = "orn", isCodeGenOnly = 0 in
+let BaseOpcode = "orn_rr", CextOpcode = "orn" in
def A4_orn : T_ALU32_3op_not<"or", 0b001, 0b101, 1>;
-let CextOpcode = "rcmp.eq", isCodeGenOnly = 0 in
+let CextOpcode = "rcmp.eq" in
def A4_rcmpeq : T_ALU32_3op<"cmp.eq", 0b011, 0b010, 0, 1>;
-let CextOpcode = "!rcmp.eq", isCodeGenOnly = 0 in
+let CextOpcode = "!rcmp.eq" in
def A4_rcmpneq : T_ALU32_3op<"!cmp.eq", 0b011, 0b011, 0, 1>;
-let isCodeGenOnly = 0 in {
def C4_cmpneq : T_ALU32_3op_cmp<"!cmp.eq", 0b00, 1, 1>;
def C4_cmplte : T_ALU32_3op_cmp<"!cmp.gt", 0b10, 1, 0>;
def C4_cmplteu : T_ALU32_3op_cmp<"!cmp.gtu", 0b11, 1, 0>;
-}
// Pats for instruction selection.
let Inst{1-0} = Pd;
}
-let isCodeGenOnly = 0 in {
def A4_cmpbeq : T_CMP_rrbh<"cmpb.eq", 0b110, 1>;
def A4_cmpbgt : T_CMP_rrbh<"cmpb.gt", 0b010, 0>;
def A4_cmpbgtu : T_CMP_rrbh<"cmpb.gtu", 0b111, 0>;
def A4_cmpheq : T_CMP_rrbh<"cmph.eq", 0b011, 1>;
def A4_cmphgt : T_CMP_rrbh<"cmph.gt", 0b100, 0>;
def A4_cmphgtu : T_CMP_rrbh<"cmph.gtu", 0b101, 0>;
-}
class T_CMP_ribh<string mnemonic, bits<2> MajOp, bit IsHalf, bit IsComm,
Operand ImmType, bit IsImmExt, bit IsImmSigned, int ImmBits>
let Inst{1-0} = Pd;
}
-let isCodeGenOnly = 0 in {
def A4_cmpbeqi : T_CMP_ribh<"cmpb.eq", 0b00, 0, 1, u8Imm, 0, 0, 8>;
def A4_cmpbgti : T_CMP_ribh<"cmpb.gt", 0b01, 0, 0, s8Imm, 0, 1, 8>;
def A4_cmpbgtui : T_CMP_ribh<"cmpb.gtu", 0b10, 0, 0, u7Ext, 1, 0, 7>;
def A4_cmpheqi : T_CMP_ribh<"cmph.eq", 0b00, 1, 1, s8Ext, 1, 1, 8>;
def A4_cmphgti : T_CMP_ribh<"cmph.gt", 0b01, 1, 0, s8Ext, 1, 1, 8>;
def A4_cmphgtui : T_CMP_ribh<"cmph.gtu", 0b10, 1, 0, u7Ext, 1, 0, 7>;
-}
+
class T_RCMP_EQ_ri<string mnemonic, bit IsNeg>
: ALU32_ri<(outs IntRegs:$Rd), (ins IntRegs:$Rs, s8Ext:$s8),
"$Rd = "#mnemonic#"($Rs, #$s8)", [], "", ALU32_2op_tc_1_SLOT0123>,
let Inst{4-0} = Rd;
}
-let isCodeGenOnly = 0 in {
def A4_rcmpeqi : T_RCMP_EQ_ri<"cmp.eq", 0>;
def A4_rcmpneqi : T_RCMP_EQ_ri<"!cmp.eq", 1>;
-}
def: Pat<(i32 (zext (i1 (seteq (i32 IntRegs:$Rs), s8ExtPred:$s8)))),
(A4_rcmpeqi IntRegs:$Rs, s8ExtPred:$s8)>;
let Inst{4-0} = Rdd;
}
-let opExtendable = 2, isCodeGenOnly = 0 in
+let opExtendable = 2 in
def A4_combineri : T_Combine1<0b00, (ins IntRegs:$Rs, s8Ext:$s8),
"$Rdd = combine($Rs, #$s8)">;
-let opExtendable = 1, isCodeGenOnly = 0 in
+let opExtendable = 1 in
def A4_combineir : T_Combine1<0b01, (ins s8Ext:$s8, IntRegs:$Rs),
"$Rdd = combine(#$s8, $Rs)">;
let Inst{6-5} = addr{1-0};
}
-let accessSize = ByteAccess, hasNewValue = 1, isCodeGenOnly = 0 in {
+let accessSize = ByteAccess, hasNewValue = 1 in {
def L4_loadrb_ap : T_LD_abs_set <"memb", IntRegs, 0b1000>;
def L4_loadrub_ap : T_LD_abs_set <"memub", IntRegs, 0b1001>;
}
-let accessSize = HalfWordAccess, hasNewValue = 1, isCodeGenOnly = 0 in {
+let accessSize = HalfWordAccess, hasNewValue = 1 in {
def L4_loadrh_ap : T_LD_abs_set <"memh", IntRegs, 0b1010>;
def L4_loadruh_ap : T_LD_abs_set <"memuh", IntRegs, 0b1011>;
}
-let accessSize = WordAccess, hasNewValue = 1, isCodeGenOnly = 0 in
+let accessSize = WordAccess, hasNewValue = 1 in
def L4_loadri_ap : T_LD_abs_set <"memw", IntRegs, 0b1100>;
-let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in
+let accessSize = DoubleWordAccess in
def L4_loadrd_ap : T_LD_abs_set <"memd", DoubleRegs, 0b1110>;
// Load - Indirect with long offset
let InputType = "imm", addrMode = BaseLongOffset, isExtended = 1,
let Inst{4-0} = dst;
}
-let accessSize = ByteAccess, isCodeGenOnly = 0 in {
+let accessSize = ByteAccess in {
def L4_loadrb_ur : T_LoadAbsReg<"memb", "LDrib", IntRegs, 0b1000>;
def L4_loadrub_ur : T_LoadAbsReg<"memub", "LDriub", IntRegs, 0b1001>;
def L4_loadalignb_ur : T_LoadAbsReg<"memb_fifo", "LDrib_fifo",
DoubleRegs, 0b0100>;
}
-let accessSize = HalfWordAccess, isCodeGenOnly = 0 in {
+let accessSize = HalfWordAccess in {
def L4_loadrh_ur : T_LoadAbsReg<"memh", "LDrih", IntRegs, 0b1010>;
def L4_loadruh_ur : T_LoadAbsReg<"memuh", "LDriuh", IntRegs, 0b1011>;
def L4_loadbsw2_ur : T_LoadAbsReg<"membh", "LDribh2", IntRegs, 0b0001>;
DoubleRegs, 0b0010>;
}
-let accessSize = WordAccess, isCodeGenOnly = 0 in {
+let accessSize = WordAccess in {
def L4_loadri_ur : T_LoadAbsReg<"memw", "LDriw", IntRegs, 0b1100>;
def L4_loadbsw4_ur : T_LoadAbsReg<"membh", "LDribh4", DoubleRegs, 0b0111>;
def L4_loadbzw4_ur : T_LoadAbsReg<"memubh", "LDriubh4", DoubleRegs, 0b0101>;
}
-let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in
+let accessSize = DoubleWordAccess in
def L4_loadrd_ur : T_LoadAbsReg<"memd", "LDrid", DoubleRegs, 0b1110>;
}
}
-let hasNewValue = 1, accessSize = ByteAccess, isCodeGenOnly = 0 in {
+let hasNewValue = 1, accessSize = ByteAccess in {
defm loadrb : ld_idxd_shl<"memb", "LDrib", IntRegs, 0b000>;
defm loadrub : ld_idxd_shl<"memub", "LDriub", IntRegs, 0b001>;
}
-let hasNewValue = 1, accessSize = HalfWordAccess, isCodeGenOnly = 0 in {
+let hasNewValue = 1, accessSize = HalfWordAccess in {
defm loadrh : ld_idxd_shl<"memh", "LDrih", IntRegs, 0b010>;
defm loadruh : ld_idxd_shl<"memuh", "LDriuh", IntRegs, 0b011>;
}
-let hasNewValue = 1, accessSize = WordAccess, isCodeGenOnly = 0 in
+let hasNewValue = 1, accessSize = WordAccess in
defm loadri : ld_idxd_shl<"memw", "LDriw", IntRegs, 0b100>;
-let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in
+let accessSize = DoubleWordAccess in
defm loadrd : ld_idxd_shl<"memd", "LDrid", DoubleRegs, 0b110>;
// 'def pats' for load instructions with base + register offset and non-zero
let Inst{5-0} = addr;
}
-let mayStore = 1, addrMode = AbsoluteSet, isCodeGenOnly = 0 in {
+let mayStore = 1, addrMode = AbsoluteSet in {
def S4_storerbnew_ap : T_ST_absset_nv <"memb", "STrib", 0b00, ByteAccess>;
def S4_storerhnew_ap : T_ST_absset_nv <"memh", "STrih", 0b01, HalfWordAccess>;
def S4_storerinew_ap : T_ST_absset_nv <"memw", "STriw", 0b10, WordAccess>;
let Inst{5-0} = src3;
}
-let isCodeGenOnly = 0 in {
def S4_storerb_ur : T_StoreAbsReg <"memb", "STrib", IntRegs, 0b000, ByteAccess>;
def S4_storerh_ur : T_StoreAbsReg <"memh", "STrih", IntRegs, 0b010,
HalfWordAccess>;
def S4_storeri_ur : T_StoreAbsReg <"memw", "STriw", IntRegs, 0b100, WordAccess>;
def S4_storerd_ur : T_StoreAbsReg <"memd", "STrid", DoubleRegs, 0b110,
DoubleWordAccess>;
-}
let AddedComplexity = 40 in
multiclass T_StoreAbsReg_Pats <InstHexagon MI, RegisterClass RC, ValueType VT,
let Inst{5-0} = src3;
}
-let isCodeGenOnly = 0 in {
def S4_storerbnew_ur : T_StoreAbsRegNV <"memb", "STrib", 0b00, ByteAccess>;
def S4_storerhnew_ur : T_StoreAbsRegNV <"memh", "STrih", 0b01, HalfWordAccess>;
def S4_storerinew_ur : T_StoreAbsRegNV <"memw", "STriw", 0b10, WordAccess>;
-}
//===----------------------------------------------------------------------===//
// Template classes for the non-predicated store instructions with
}
}
-let addrMode = BaseRegOffset, InputType = "reg", hasSideEffects = 0,
- isCodeGenOnly = 0 in {
+let addrMode = BaseRegOffset, InputType = "reg", hasSideEffects = 0 in {
let accessSize = ByteAccess in
defm storerb: ST_Idxd_shl<"memb", "STrib", IntRegs, 0b000>,
ST_Idxd_shl_nv<"memb", "STrib", IntRegs, 0b00>;
}
let hasSideEffects = 0, validSubTargets = HasV4SubT, addrMode = BaseImmOffset,
- InputType = "imm", isCodeGenOnly = 0 in {
+ InputType = "imm" in {
let accessSize = ByteAccess in
defm S4_storeirb : ST_Imm<"memb", "STrib", u6_0Imm, 0b00>;
}
}
-let addrMode = BaseImmOffset, InputType = "imm", isCodeGenOnly = 0 in {
+let addrMode = BaseImmOffset, InputType = "imm" in {
let accessSize = ByteAccess in
defm storerb: ST_Idxd_nv<"memb", "STrib", IntRegs, s11_0Ext,
u6_0Ext, 0b00>, AddrModeRel;
// Post increment loads with register offset.
//===----------------------------------------------------------------------===//
-let hasNewValue = 1, isCodeGenOnly = 0 in
+let hasNewValue = 1 in
def L2_loadbsw2_pr : T_load_pr <"membh", IntRegs, 0b0001, HalfWordAccess>;
-let isCodeGenOnly = 0 in
def L2_loadbsw4_pr : T_load_pr <"membh", DoubleRegs, 0b0111, WordAccess>;
//===----------------------------------------------------------------------===//
}
}
-let accessSize = ByteAccess, isCodeGenOnly = 0 in
+let accessSize = ByteAccess in
defm storerbnew: ST_PostInc_nv <"memb", "STrib", s4_0Imm, 0b00>;
-let accessSize = HalfWordAccess, isCodeGenOnly = 0 in
+let accessSize = HalfWordAccess in
defm storerhnew: ST_PostInc_nv <"memh", "STrih", s4_1Imm, 0b01>;
-let accessSize = WordAccess, isCodeGenOnly = 0 in
+let accessSize = WordAccess in
defm storerinew: ST_PostInc_nv <"memw", "STriw", s4_2Imm, 0b10>;
//===----------------------------------------------------------------------===//
let Inst{7} = 0b0;
}
-let isCodeGenOnly = 0 in {
def S2_storerbnew_pr : T_StorePI_RegNV<"memb", 0b00, ByteAccess>;
def S2_storerhnew_pr : T_StorePI_RegNV<"memh", 0b01, HalfWordAccess>;
def S2_storerinew_pr : T_StorePI_RegNV<"memw", 0b10, WordAccess>;
-}
// memb(Rx++#s4:0:circ(Mu))=Nt.new
// memb(Rx++I:circ(Mu))=Nt.new
// if ([!]cmp.gtu(Rt,Ns.new)) jump:[n]t #r9:2
let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator = 1,
- Defs = [PC], hasSideEffects = 0, validSubTargets = HasV4SubT,
- isCodeGenOnly = 0 in {
+ Defs = [PC], hasSideEffects = 0, validSubTargets = HasV4SubT in {
defm CMPEQrr : NVJrr_base<"cmp.eq", "CMPEQ", 0b000, 0>, PredRel;
defm CMPGTrr : NVJrr_base<"cmp.gt", "CMPGT", 0b001, 0>, PredRel;
defm CMPGTUrr : NVJrr_base<"cmp.gtu", "CMPGTU", 0b010, 0>, PredRel;
// if ([!]cmp.gtu(Ns.new,#U5)) jump:[n]t #r9:2
let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator = 1,
- Defs = [PC], hasSideEffects = 0, validSubTargets = HasV4SubT,
- isCodeGenOnly = 0 in {
+ Defs = [PC], hasSideEffects = 0, validSubTargets = HasV4SubT in {
defm CMPEQri : NVJri_base<"cmp.eq", "CMPEQ", 0b000>, PredRel;
defm CMPGTri : NVJri_base<"cmp.gt", "CMPGT", 0b001>, PredRel;
defm CMPGTUri : NVJri_base<"cmp.gtu", "CMPGTU", 0b010>, PredRel;
// if ([!]cmp.gt(Ns.new,#-1)) jump:[n]t #r9:2
let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator=1,
- Defs = [PC], hasSideEffects = 0, isCodeGenOnly = 0 in {
+ Defs = [PC], hasSideEffects = 0 in {
defm TSTBIT0 : NVJ_ConstImm_base<"tstbit", "TSTBIT", 0b011, "0">, PredRel;
defm CMPEQn1 : NVJ_ConstImm_base<"cmp.eq", "CMPEQ", 0b100, "-1">, PredRel;
defm CMPGTn1 : NVJ_ConstImm_base<"cmp.gt", "CMPGT", 0b101, "-1">, PredRel;
}
// J4_hintjumpr: Hint indirect conditional jump.
-let isBranch = 1, isIndirectBranch = 1, hasSideEffects = 0, isCodeGenOnly = 0 in
+let isBranch = 1, isIndirectBranch = 1, hasSideEffects = 0 in
def J4_hintjumpr: JRInst <
(outs),
(ins IntRegs:$Rs),
// PC-relative add
let hasNewValue = 1, isExtendable = 1, opExtendable = 1,
isExtentSigned = 0, opExtentBits = 6, hasSideEffects = 0,
- Uses = [PC], validSubTargets = HasV4SubT, isCodeGenOnly = 0 in
+ Uses = [PC], validSubTargets = HasV4SubT in
def C4_addipc : CRInst <(outs IntRegs:$Rd), (ins u6Ext:$u6),
"$Rd = add(pc, #$u6)", [], "", CR_tc_2_SLOT3 > {
bits<5> Rd;
let Inst{1-0} = Pd;
}
-let isCodeGenOnly = 0 in {
def C4_and_and : T_LOGICAL_3OP<"and", "and", 0b00, 0>;
def C4_and_or : T_LOGICAL_3OP<"and", "or", 0b01, 0>;
def C4_or_and : T_LOGICAL_3OP<"or", "and", 0b10, 0>;
def C4_and_orn : T_LOGICAL_3OP<"and", "or", 0b01, 1>;
def C4_or_andn : T_LOGICAL_3OP<"or", "and", 0b10, 1>;
def C4_or_orn : T_LOGICAL_3OP<"or", "or", 0b11, 1>;
-}
//===----------------------------------------------------------------------===//
// CR -
//===----------------------------------------------------------------------===//
// Logical with-not instructions.
-let validSubTargets = HasV4SubT, isCodeGenOnly = 0 in {
+let validSubTargets = HasV4SubT in {
def A4_andnp : T_ALU64_logical<"and", 0b001, 1, 0, 1>;
def A4_ornp : T_ALU64_logical<"or", 0b011, 1, 0, 1>;
}
-let hasNewValue = 1, hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasNewValue = 1, hasSideEffects = 0 in
def S4_parity: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt),
"$Rd = parity($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> {
bits<5> Rd;
// Add and accumulate.
// Rd=add(Rs,add(Ru,#s6))
let isExtentSigned = 1, hasNewValue = 1, isExtendable = 1, opExtentBits = 6,
- opExtendable = 3, isCodeGenOnly = 0 in
+ opExtendable = 3 in
def S4_addaddi : ALU64Inst <(outs IntRegs:$Rd),
(ins IntRegs:$Rs, IntRegs:$Ru, s6Ext:$s6),
"$Rd = add($Rs, add($Ru, #$s6))" ,
}
let isExtentSigned = 1, hasSideEffects = 0, hasNewValue = 1, isExtendable = 1,
- opExtentBits = 6, opExtendable = 2, isCodeGenOnly = 0 in
+ opExtentBits = 6, opExtendable = 2 in
def S4_subaddi: ALU64Inst <(outs IntRegs:$Rd),
(ins IntRegs:$Rs, s6Ext:$s6, IntRegs:$Ru),
"$Rd = add($Rs, sub(#$s6, $Ru))",
// Rd=extract(Rs,Rtt)
// Rd=extract(Rs,#u5,#U5)
-let isCodeGenOnly = 0 in {
def S4_extractp_rp : T_S3op_64 < "extract", 0b11, 0b100, 0>;
def S4_extractp : T_S2op_extract <"extract", 0b1010, DoubleRegs, u6Imm>;
-}
-let hasNewValue = 1, isCodeGenOnly = 0 in {
+let hasNewValue = 1 in {
def S4_extract_rp : T_S3op_extract<"extract", 0b01>;
def S4_extract : T_S2op_extract <"extract", 0b1101, IntRegs, u5Imm>;
}
// Complex add/sub halfwords/words
-let Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Defs = [USR_OVF] in {
def S4_vxaddsubh : T_S3op_64 < "vxaddsubh", 0b01, 0b100, 0, 1>;
def S4_vxaddsubw : T_S3op_64 < "vxaddsubw", 0b01, 0b000, 0, 1>;
def S4_vxsubaddh : T_S3op_64 < "vxsubaddh", 0b01, 0b110, 0, 1>;
def S4_vxsubaddw : T_S3op_64 < "vxsubaddw", 0b01, 0b010, 0, 1>;
}
-let Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Defs = [USR_OVF] in {
def S4_vxaddsubhr : T_S3op_64 < "vxaddsubh", 0b11, 0b000, 0, 1, 1, 1>;
def S4_vxsubaddhr : T_S3op_64 < "vxsubaddh", 0b11, 0b010, 0, 1, 1, 1>;
}
-let Itinerary = M_tc_3x_SLOT23, Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Itinerary = M_tc_3x_SLOT23, Defs = [USR_OVF] in {
def M4_mac_up_s1_sat: T_MType_acc_rr<"+= mpy", 0b011, 0b000, 0, [], 0, 1, 1>;
def M4_nac_up_s1_sat: T_MType_acc_rr<"-= mpy", 0b011, 0b001, 0, [], 0, 1, 1>;
}
// Logical xor with xor accumulation.
// Rxx^=xor(Rss,Rtt)
-let hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasSideEffects = 0 in
def M4_xor_xacc
: SInst <(outs DoubleRegs:$Rxx),
(ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt),
// Rotate and reduce bytes
// Rdd=vrcrotate(Rss,Rt,#u2)
-let hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasSideEffects = 0 in
def S4_vrcrotate
: SInst <(outs DoubleRegs:$Rdd),
(ins DoubleRegs:$Rss, IntRegs:$Rt, u2Imm:$u2),
// Rotate and reduce bytes with accumulation
// Rxx+=vrcrotate(Rss,Rt,#u2)
-let hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasSideEffects = 0 in
def S4_vrcrotate_acc
: SInst <(outs DoubleRegs:$Rxx),
(ins DoubleRegs:$dst2, DoubleRegs:$Rss, IntRegs:$Rt, u2Imm:$u2),
// Vector reduce conditional negate halfwords
-let hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasSideEffects = 0 in
def S2_vrcnegh
: SInst <(outs DoubleRegs:$Rxx),
(ins DoubleRegs:$dst2, DoubleRegs:$Rss, IntRegs:$Rt),
}
// Split bitfield
-let isCodeGenOnly = 0 in
def A4_bitspliti : T_S2op_2_di <"bitsplit", 0b110, 0b100>;
// Arithmetic/Convergent round
-let isCodeGenOnly = 0 in
def A4_cround_ri : T_S2op_2_ii <"cround", 0b111, 0b000>;
-let isCodeGenOnly = 0 in
def A4_round_ri : T_S2op_2_ii <"round", 0b111, 0b100>;
-let Defs = [USR_OVF], isCodeGenOnly = 0 in
+let Defs = [USR_OVF] in
def A4_round_ri_sat : T_S2op_2_ii <"round", 0b111, 0b110, 1>;
// Logical-logical words.
// Compound or-and -- Rx=or(Ru,and(Rx,#s10))
let isExtentSigned = 1, hasNewValue = 1, isExtendable = 1, opExtentBits = 10,
- opExtendable = 3, isCodeGenOnly = 0 in
+ opExtendable = 3 in
def S4_or_andix:
ALU64Inst<(outs IntRegs:$Rx),
(ins IntRegs:$Ru, IntRegs:$_src_, s10Ext:$s10),
// Miscellaneous ALU64 instructions.
//
-let hasNewValue = 1, hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasNewValue = 1, hasSideEffects = 0 in
def A4_modwrapu: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt),
"$Rd = modwrap($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> {
bits<5> Rd;
let Inst{4-0} = Rd;
}
-let hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasSideEffects = 0 in
def A4_bitsplit: ALU64Inst<(outs DoubleRegs:$Rd),
(ins IntRegs:$Rs, IntRegs:$Rt),
"$Rd = bitsplit($Rs, $Rt)", [], "", ALU64_tc_1_SLOT23> {
let Inst{4-0} = Rd;
}
-let isCodeGenOnly = 0 in {
// Rx[&|]=xor(Rs,Rt)
def M4_or_xor : T_MType_acc_rr < "|= xor", 0b110, 0b001, 0>;
def M4_and_xor : T_MType_acc_rr < "&= xor", 0b010, 0b010, 0>;
def M4_xor_andn : T_MType_acc_rr < "^= and", 0b001, 0b010, 0, [], 1>;
def M4_or_andn : T_MType_acc_rr < "|= and", 0b001, 0b000, 0, [], 1>;
def M4_and_andn : T_MType_acc_rr < "&= and", 0b001, 0b001, 0, [], 1>;
-}
// Compound or-or and or-and
let isExtentSigned = 1, InputType = "imm", hasNewValue = 1, isExtendable = 1,
let Inst{4-0} = Rx;
}
-let CextOpcode = "ORr_ANDr", isCodeGenOnly = 0 in
+let CextOpcode = "ORr_ANDr" in
def S4_or_andi : T_CompOR <"and", 0b00, and>;
-let CextOpcode = "ORr_ORr", isCodeGenOnly = 0 in
+let CextOpcode = "ORr_ORr" in
def S4_or_ori : T_CompOR <"or", 0b10, or>;
// Modulo wrap
//===----------------------------------------------------------------------===//
// Bit reverse
-let isCodeGenOnly = 0 in
def S2_brevp : T_S2op_3 <"brev", 0b11, 0b110>;
// Bit count
-let isCodeGenOnly = 0 in {
def S2_ct0p : T_COUNT_LEADING_64<"ct0", 0b111, 0b010>;
def S2_ct1p : T_COUNT_LEADING_64<"ct1", 0b111, 0b100>;
def S4_clbpnorm : T_COUNT_LEADING_64<"normamt", 0b011, 0b000>;
-}
def: Pat<(i32 (trunc (cttz (i64 DoubleRegs:$Rss)))),
(S2_ct0p (i64 DoubleRegs:$Rss))>;
def: Pat<(i32 (trunc (cttz (not (i64 DoubleRegs:$Rss))))),
(S2_ct1p (i64 DoubleRegs:$Rss))>;
-let hasSideEffects = 0, hasNewValue = 1, isCodeGenOnly = 0 in
+let hasSideEffects = 0, hasNewValue = 1 in
def S4_clbaddi : SInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, s6Imm:$s6),
"$Rd = add(clb($Rs), #$s6)", [], "", S_2op_tc_2_SLOT23> {
bits<5> Rs;
let Inst{4-0} = Rd;
}
-let hasSideEffects = 0, hasNewValue = 1, isCodeGenOnly = 0 in
+let hasSideEffects = 0, hasNewValue = 1 in
def S4_clbpaddi : SInst<(outs IntRegs:$Rd), (ins DoubleRegs:$Rs, s6Imm:$s6),
"$Rd = add(clb($Rs), #$s6)", [], "", S_2op_tc_2_SLOT23> {
bits<5> Rs;
// Bit test/set/clear
-let isCodeGenOnly = 0 in {
def S4_ntstbit_i : T_TEST_BIT_IMM<"!tstbit", 0b001>;
def S4_ntstbit_r : T_TEST_BIT_REG<"!tstbit", 1>;
-}
let AddedComplexity = 20 in { // Complexity greater than cmp reg-imm.
def: Pat<(i1 (seteq (and (shl 1, u5ImmPred:$u5), (i32 IntRegs:$Rs)), 0)),
def: Pat<(i1 (seteq (and (i32 IntRegs:$Rs), (i32 Set5ImmPred:$u5)), (i32 0))),
(S4_ntstbit_i (i32 IntRegs:$Rs), (BITPOS32 Set5ImmPred:$u5))>;
-let isCodeGenOnly = 0 in {
def C4_nbitsset : T_TEST_BITS_REG<"!bitsset", 0b01, 1>;
def C4_nbitsclr : T_TEST_BITS_REG<"!bitsclr", 0b10, 1>;
def C4_nbitsclri : T_TEST_BITS_IMM<"!bitsclr", 0b10, 1>;
-}
// Do not increase complexity of these patterns. In the DAG, "cmp i8" may be
// represented as a compare against "value & 0xFF", which is an exact match
// Rd=add(#u6,mpyi(Rs,#U6)) -- Multiply by immed and add immed.
-let hasNewValue = 1, isExtendable = 1, opExtentBits = 6, opExtendable = 1,
- isCodeGenOnly = 0 in
+let hasNewValue = 1, isExtendable = 1, opExtentBits = 6, opExtendable = 1 in
def M4_mpyri_addi : MInst<(outs IntRegs:$Rd),
(ins u6Ext:$u6, IntRegs:$Rs, u6Imm:$U6),
"$Rd = add(#$u6, mpyi($Rs, #$U6))" ,
// Rd=add(#u6,mpyi(Rs,Rt))
let CextOpcode = "ADD_MPY", InputType = "imm", hasNewValue = 1,
- isExtendable = 1, opExtentBits = 6, opExtendable = 1, isCodeGenOnly = 0 in
+ isExtendable = 1, opExtentBits = 6, opExtendable = 1 in
def M4_mpyrr_addi : MInst <(outs IntRegs:$Rd),
(ins u6Ext:$u6, IntRegs:$Rs, IntRegs:$Rt),
"$Rd = add(#$u6, mpyi($Rs, $Rt))" ,
let Inst{4-0} = src1;
}
-let isCodeGenOnly = 0 in
def M4_mpyri_addr_u2 : T_AddMpy<0b0, u6_2ImmPred,
(ins IntRegs:$src1, u6_2Imm:$src2, IntRegs:$src3)>;
let isExtendable = 1, opExtentBits = 6, opExtendable = 3,
- CextOpcode = "ADD_MPY", InputType = "imm", isCodeGenOnly = 0 in
+ CextOpcode = "ADD_MPY", InputType = "imm" in
def M4_mpyri_addr : T_AddMpy<0b1, u6ExtPred,
(ins IntRegs:$src1, IntRegs:$src3, u6Ext:$src2)>, ImmRegRel;
// Rx=add(Ru,mpyi(Rx,Rs))
let validSubTargets = HasV4SubT, CextOpcode = "ADD_MPY", InputType = "reg",
- hasNewValue = 1, isCodeGenOnly = 0 in
+ hasNewValue = 1 in
def M4_mpyrr_addr: MInst_acc <(outs IntRegs:$Rx),
(ins IntRegs:$Ru, IntRegs:$_src_, IntRegs:$Rs),
"$Rx = add($Ru, mpyi($_src_, $Rs))",
// Vector reduce multiply word by signed half (32x16)
//Rdd=vrmpyweh(Rss,Rtt)[:<<1]
-let isCodeGenOnly = 0 in {
def M4_vrmpyeh_s0 : T_M2_vmpy<"vrmpyweh", 0b010, 0b100, 0, 0, 0>;
def M4_vrmpyeh_s1 : T_M2_vmpy<"vrmpyweh", 0b110, 0b100, 1, 0, 0>;
-}
//Rdd=vrmpywoh(Rss,Rtt)[:<<1]
-let isCodeGenOnly = 0 in {
def M4_vrmpyoh_s0 : T_M2_vmpy<"vrmpywoh", 0b001, 0b010, 0, 0, 0>;
def M4_vrmpyoh_s1 : T_M2_vmpy<"vrmpywoh", 0b101, 0b010, 1, 0, 0>;
-}
+
//Rdd+=vrmpyweh(Rss,Rtt)[:<<1]
-let isCodeGenOnly = 0 in {
def M4_vrmpyeh_acc_s0: T_M2_vmpy_acc<"vrmpyweh", 0b001, 0b110, 0, 0>;
def M4_vrmpyeh_acc_s1: T_M2_vmpy_acc<"vrmpyweh", 0b101, 0b110, 1, 0>;
-}
//Rdd=vrmpywoh(Rss,Rtt)[:<<1]
-let isCodeGenOnly = 0 in {
def M4_vrmpyoh_acc_s0: T_M2_vmpy_acc<"vrmpywoh", 0b011, 0b110, 0, 0>;
def M4_vrmpyoh_acc_s1: T_M2_vmpy_acc<"vrmpywoh", 0b111, 0b110, 1, 0>;
-}
// Vector multiply halfwords, signed by unsigned
// Rdd=vmpyhsu(Rs,Rt)[:<<]:sat
-let isCodeGenOnly = 0 in {
def M2_vmpy2su_s0 : T_XTYPE_mpy64 < "vmpyhsu", 0b000, 0b111, 1, 0, 0>;
def M2_vmpy2su_s1 : T_XTYPE_mpy64 < "vmpyhsu", 0b100, 0b111, 1, 1, 0>;
-}
// Rxx+=vmpyhsu(Rs,Rt)[:<<1]:sat
-let isCodeGenOnly = 0 in {
def M2_vmac2su_s0 : T_XTYPE_mpy64_acc < "vmpyhsu", "+", 0b011, 0b101, 1, 0, 0>;
def M2_vmac2su_s1 : T_XTYPE_mpy64_acc < "vmpyhsu", "+", 0b111, 0b101, 1, 1, 0>;
-}
// Vector polynomial multiply halfwords
// Rdd=vpmpyh(Rs,Rt)
-let isCodeGenOnly = 0 in
def M4_vpmpyh : T_XTYPE_mpy64 < "vpmpyh", 0b110, 0b111, 0, 0, 0>;
// Rxx^=vpmpyh(Rs,Rt)
-let isCodeGenOnly = 0 in
def M4_vpmpyh_acc : T_XTYPE_mpy64_acc < "vpmpyh", "^", 0b101, 0b111, 0, 0, 0>;
// Polynomial multiply words
// Rdd=pmpyw(Rs,Rt)
-let isCodeGenOnly = 0 in
def M4_pmpyw : T_XTYPE_mpy64 < "pmpyw", 0b010, 0b111, 0, 0, 0>;
// Rxx^=pmpyw(Rs,Rt)
-let isCodeGenOnly = 0 in
def M4_pmpyw_acc : T_XTYPE_mpy64_acc < "pmpyw", "^", 0b001, 0b111, 0, 0, 0>;
//===----------------------------------------------------------------------===//
}
// Vector compare bytes
-let isCodeGenOnly = 0 in
def A4_vcmpbgt : T_vcmp <"vcmpb.gt", 0b1010>;
def: T_vcmp_pat<A4_vcmpbgt, setgt, v8i8>;
let AsmString = "$Pd = any8(vcmpb.eq($Rss, $Rtt))" in
-let isCodeGenOnly = 0 in
def A4_vcmpbeq_any : T_vcmp <"any8(vcmpb.gt", 0b1000>;
-let isCodeGenOnly = 0 in {
def A4_vcmpbeqi : T_vcmpImm <"vcmpb.eq", 0b00, 0b00, u8Imm>;
def A4_vcmpbgti : T_vcmpImm <"vcmpb.gt", 0b01, 0b00, s8Imm>;
def A4_vcmpbgtui : T_vcmpImm <"vcmpb.gtu", 0b10, 0b00, u7Imm>;
-}
// Vector compare halfwords
-let isCodeGenOnly = 0 in {
def A4_vcmpheqi : T_vcmpImm <"vcmph.eq", 0b00, 0b01, s8Imm>;
def A4_vcmphgti : T_vcmpImm <"vcmph.gt", 0b01, 0b01, s8Imm>;
def A4_vcmphgtui : T_vcmpImm <"vcmph.gtu", 0b10, 0b01, u7Imm>;
-}
// Vector compare words
-let isCodeGenOnly = 0 in {
def A4_vcmpweqi : T_vcmpImm <"vcmpw.eq", 0b00, 0b10, s8Imm>;
def A4_vcmpwgti : T_vcmpImm <"vcmpw.gt", 0b01, 0b10, s8Imm>;
def A4_vcmpwgtui : T_vcmpImm <"vcmpw.gtu", 0b10, 0b10, u7Imm>;
-}
//===----------------------------------------------------------------------===//
// XTYPE/SHIFT +
def _lsr_ri : T_S4_ShiftOperate<mnemonic, "lsr", Op, srl, 1, MajOp, Itin>;
}
-let AddedComplexity = 200, isCodeGenOnly = 0 in {
+let AddedComplexity = 200 in {
defm S4_addi : T_ShiftOperate<"add", add, 0b10, ALU64_tc_2_SLOT23>;
defm S4_andi : T_ShiftOperate<"and", and, 0b00, ALU64_tc_2_SLOT23>;
}
-let AddedComplexity = 30, isCodeGenOnly = 0 in
+let AddedComplexity = 30 in
defm S4_ori : T_ShiftOperate<"or", or, 0b01, ALU64_tc_1_SLOT23>;
-let isCodeGenOnly = 0 in
defm S4_subi : T_ShiftOperate<"sub", sub, 0b11, ALU64_tc_1_SLOT23>;
// Vector conditional negate
// Rdd=vcnegh(Rss,Rt)
-let Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23, isCodeGenOnly = 0 in
+let Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23 in
def S2_vcnegh : T_S3op_shiftVect < "vcnegh", 0b11, 0b01>;
// Rd=[cround|round](Rs,Rt)
-let hasNewValue = 1, Itinerary = S_3op_tc_2_SLOT23, isCodeGenOnly = 0 in {
+let hasNewValue = 1, Itinerary = S_3op_tc_2_SLOT23 in {
def A4_cround_rr : T_S3op_3 < "cround", IntRegs, 0b11, 0b00>;
def A4_round_rr : T_S3op_3 < "round", IntRegs, 0b11, 0b10>;
}
// Rd=round(Rs,Rt):sat
-let hasNewValue = 1, Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23,
- isCodeGenOnly = 0 in
+let hasNewValue = 1, Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23 in
def A4_round_rr_sat : T_S3op_3 < "round", IntRegs, 0b11, 0b11, 1>;
// Rd=[cmpyiwh|cmpyrwh](Rss,Rt):<<1:rnd:sat
-let Defs = [USR_OVF], Itinerary = S_3op_tc_3x_SLOT23, isCodeGenOnly = 0 in {
+let Defs = [USR_OVF], Itinerary = S_3op_tc_3x_SLOT23 in {
def M4_cmpyi_wh : T_S3op_8<"cmpyiwh", 0b100, 1, 1, 1>;
def M4_cmpyr_wh : T_S3op_8<"cmpyrwh", 0b110, 1, 1, 1>;
}
let Inst{4-0} = Rdd;
}
-let isCodeGenOnly = 0 in {
def A4_addp_c : T_S3op_carry < "add", 0b110 >;
def A4_subp_c : T_S3op_carry < "sub", 0b111 >;
-}
let Itinerary = S_3op_tc_3_SLOT23, hasSideEffects = 0 in
class T_S3op_6 <string mnemonic, bits<3> MinOp, bit isUnsigned>
// Vector reduce maximum halfwords
// Rxx=vrmax[u]h(Rss,Ru)
-let isCodeGenOnly = 0 in {
def A4_vrmaxh : T_S3op_6 < "vrmaxh", 0b001, 0>;
def A4_vrmaxuh : T_S3op_6 < "vrmaxuh", 0b001, 1>;
-}
+
// Vector reduce maximum words
// Rxx=vrmax[u]w(Rss,Ru)
-let isCodeGenOnly = 0 in {
def A4_vrmaxw : T_S3op_6 < "vrmaxw", 0b010, 0>;
def A4_vrmaxuw : T_S3op_6 < "vrmaxuw", 0b010, 1>;
-}
+
// Vector reduce minimum halfwords
// Rxx=vrmin[u]h(Rss,Ru)
-let isCodeGenOnly = 0 in {
def A4_vrminh : T_S3op_6 < "vrminh", 0b101, 0>;
def A4_vrminuh : T_S3op_6 < "vrminuh", 0b101, 1>;
-}
// Vector reduce minimum words
// Rxx=vrmin[u]w(Rss,Ru)
-let isCodeGenOnly = 0 in {
def A4_vrminw : T_S3op_6 < "vrminw", 0b110, 0>;
def A4_vrminuw : T_S3op_6 < "vrminuw", 0b110, 1>;
-}
// Shift an immediate left by register amount.
-let hasNewValue = 1, hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasNewValue = 1, hasSideEffects = 0 in
def S4_lsli: SInst <(outs IntRegs:$Rd), (ins s6Imm:$s6, IntRegs:$Rt),
"$Rd = lsl(#$s6, $Rt)" ,
[(set (i32 IntRegs:$Rd), (shl s6ImmPred:$s6,
// Define MemOp instructions.
let isExtendable = 1, opExtendable = 1, isExtentSigned = 0,
validSubTargets =HasV4SubT in {
- let opExtentBits = 6, accessSize = ByteAccess, isCodeGenOnly = 0 in
+ let opExtentBits = 6, accessSize = ByteAccess in
defm memopb_io : MemOp_base <"memb", 0b00, u6_0Ext>;
- let opExtentBits = 7, accessSize = HalfWordAccess, isCodeGenOnly = 0 in
+ let opExtentBits = 7, accessSize = HalfWordAccess in
defm memoph_io : MemOp_base <"memh", 0b01, u6_1Ext>;
- let opExtentBits = 8, accessSize = WordAccess, isCodeGenOnly = 0 in
+ let opExtentBits = 8, accessSize = WordAccess in
defm memopw_io : MemOp_base <"memw", 0b10, u6_2Ext>;
}
// Pd=cmpb.eq(Rs,#u8)
// p=!cmp.eq(r1,#s10)
-let isCodeGenOnly = 0 in {
def C4_cmpneqi : T_CMP <"cmp.eq", 0b00, 1, s10Ext>;
def C4_cmpltei : T_CMP <"cmp.gt", 0b01, 1, s10Ext>;
def C4_cmplteui : T_CMP <"cmp.gtu", 0b10, 1, u9Ext>;
-}
def : T_CMP_pat <C4_cmpneqi, setne, s10ExtPred>;
def : T_CMP_pat <C4_cmpltei, setle, s10ExtPred>;
let isReturn = 1, isTerminator = 1,
Defs = [R29, R30, R31, PC], Uses = [R30], hasSideEffects = 0,
- validSubTargets = HasV4SubT, isCodeGenOnly = 0 in
+ validSubTargets = HasV4SubT in
defm L4_return: LD_MISC_L4_RETURN <"dealloc_return">, PredNewRel;
// Restore registers and dealloc return function call.
//===----------------------------------------------------------------------===//
// Stores with absolute addressing
//===----------------------------------------------------------------------===//
-let accessSize = ByteAccess, isCodeGenOnly = 0 in
+let accessSize = ByteAccess in
defm storerb : ST_Abs <"memb", "STrib", IntRegs, u16_0Imm, 0b00>,
ST_Abs_NV <"memb", "STrib", u16_0Imm, 0b00>;
-let accessSize = HalfWordAccess, isCodeGenOnly = 0 in
+let accessSize = HalfWordAccess in
defm storerh : ST_Abs <"memh", "STrih", IntRegs, u16_1Imm, 0b01>,
ST_Abs_NV <"memh", "STrih", u16_1Imm, 0b01>;
-let accessSize = WordAccess, isCodeGenOnly = 0 in
+let accessSize = WordAccess in
defm storeri : ST_Abs <"memw", "STriw", IntRegs, u16_2Imm, 0b10>,
ST_Abs_NV <"memw", "STriw", u16_2Imm, 0b10>;
-let isNVStorable = 0, accessSize = DoubleWordAccess, isCodeGenOnly = 0 in
+let isNVStorable = 0, accessSize = DoubleWordAccess in
defm storerd : ST_Abs <"memd", "STrid", DoubleRegs, u16_3Imm, 0b11>;
-let isNVStorable = 0, accessSize = HalfWordAccess, isCodeGenOnly = 0 in
+let isNVStorable = 0, accessSize = HalfWordAccess in
defm storerf : ST_Abs <"memh", "STrif", IntRegs, u16_1Imm, 0b01, 1>;
//===----------------------------------------------------------------------===//
}
}
-let accessSize = ByteAccess, hasNewValue = 1, isCodeGenOnly = 0 in {
+let accessSize = ByteAccess, hasNewValue = 1 in {
defm loadrb : LD_Abs<"memb", "LDrib", IntRegs, u16_0Imm, 0b000>;
defm loadrub : LD_Abs<"memub", "LDriub", IntRegs, u16_0Imm, 0b001>;
}
-let accessSize = HalfWordAccess, hasNewValue = 1, isCodeGenOnly = 0 in {
+let accessSize = HalfWordAccess, hasNewValue = 1 in {
defm loadrh : LD_Abs<"memh", "LDrih", IntRegs, u16_1Imm, 0b010>;
defm loadruh : LD_Abs<"memuh", "LDriuh", IntRegs, u16_1Imm, 0b011>;
}
-let accessSize = WordAccess, hasNewValue = 1, isCodeGenOnly = 0 in
+let accessSize = WordAccess, hasNewValue = 1 in
defm loadri : LD_Abs<"memw", "LDriw", IntRegs, u16_2Imm, 0b100>;
-let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in
+let accessSize = DoubleWordAccess in
defm loadrd : LD_Abs<"memd", "LDrid", DoubleRegs, u16_3Imm, 0b110>;
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// A4_boundscheck_lo: Detect if a register is within bounds.
-let hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasSideEffects = 0 in
def A4_boundscheck_lo: ALU64Inst <
(outs PredRegs:$Pd),
(ins DoubleRegs:$Rss, DoubleRegs:$Rtt),
}
// A4_boundscheck_hi: Detect if a register is within bounds.
-let hasSideEffects = 0, isCodeGenOnly = 0 in
+let hasSideEffects = 0 in
def A4_boundscheck_hi: ALU64Inst <
(outs PredRegs:$Pd),
(ins DoubleRegs:$Rss, DoubleRegs:$Rtt),
"$Pd=boundscheck($Rs,$Rtt)">;
// A4_tlbmatch: Detect if a VA/ASID matches a TLB entry.
-let isPredicateLate = 1, hasSideEffects = 0, isCodeGenOnly = 0 in
+let isPredicateLate = 1, hasSideEffects = 0 in
def A4_tlbmatch : ALU64Inst<(outs PredRegs:$Pd),
(ins DoubleRegs:$Rs, IntRegs:$Rt),
"$Pd = tlbmatch($Rs, $Rt)",
// Use LD0Inst for dcfetch, but set "mayLoad" to 0 because this doesn't
// really do a load.
-let hasSideEffects = 1, mayLoad = 0, isCodeGenOnly = 0 in
+let hasSideEffects = 1, mayLoad = 0 in
def Y2_dcfetchbo : LD0Inst<(outs), (ins IntRegs:$Rs, u11_3Imm:$u11_3),
"dcfetch($Rs + #$u11_3)",
[(HexagonDCFETCH IntRegs:$Rs, u11_3ImmPred:$u11_3)],
let Inst{7-1} = r9_2{8-2};
}
-let Defs = [PC, P0], Uses = [P0], isCodeGenOnly = 0 in {
+let Defs = [PC, P0], Uses = [P0] in {
def J4_tstbit0_tp0_jump_nt : CJInst_tstbit_R0<"p0", 0, "nt">;
def J4_tstbit0_tp0_jump_t : CJInst_tstbit_R0<"p0", 0, "t">;
def J4_tstbit0_fp0_jump_nt : CJInst_tstbit_R0<"p0", 1, "nt">;
def J4_tstbit0_fp0_jump_t : CJInst_tstbit_R0<"p0", 1, "t">;
}
-let Defs = [PC, P1], Uses = [P1], isCodeGenOnly = 0 in {
+let Defs = [PC, P1], Uses = [P1] in {
def J4_tstbit0_tp1_jump_nt : CJInst_tstbit_R0<"p1", 0, "nt">;
def J4_tstbit0_tp1_jump_t : CJInst_tstbit_R0<"p1", 0, "t">;
def J4_tstbit0_fp1_jump_nt : CJInst_tstbit_R0<"p1", 1, "nt">;
defm J4_cmp#NAME#_f : T_tnt_CJInst_RR<op, 1>;
}
// TypeCJ Instructions compare RR and jump
-let isCodeGenOnly = 0 in {
defm eq : T_pnp_CJInst_RR<"eq">;
defm gt : T_pnp_CJInst_RR<"gt">;
defm gtu : T_pnp_CJInst_RR<"gtu">;
-}
let isBranch = 1, hasSideEffects = 0, isExtentSigned = 1,
isPredicated = 1, isPredicatedNew = 1, isExtendable = 1, opExtentBits = 11,
defm J4_cmp#NAME#i_f : T_tnt_CJInst_RU5<op, 1>;
}
// TypeCJ Instructions compare RI and jump
-let isCodeGenOnly = 0 in {
defm eq : T_pnp_CJInst_RU5<"eq">;
defm gt : T_pnp_CJInst_RU5<"gt">;
defm gtu : T_pnp_CJInst_RU5<"gtu">;
-}
let isBranch = 1, hasSideEffects = 0, isExtentSigned = 1,
isPredicated = 1, isPredicatedFalse = 1, isPredicatedNew = 1,
defm J4_cmp#NAME#n1_f : T_tnt_CJInst_Rn1<op, 1>;
}
// TypeCJ Instructions compare -1 and jump
-let isCodeGenOnly = 0 in {
defm eq : T_pnp_CJInst_Rn1<"eq">;
defm gt : T_pnp_CJInst_Rn1<"gt">;
-}
// J4_jumpseti: Direct unconditional jump and set register to immediate.
let Defs = [PC], isBranch = 1, hasSideEffects = 0, hasNewValue = 1,
isExtentSigned = 1, opNewValue = 0, isExtendable = 1, opExtentBits = 11,
- opExtentAlign = 2, opExtendable = 2, validSubTargets = HasV4SubT,
- isCodeGenOnly = 0 in
+ opExtentAlign = 2, opExtendable = 2, validSubTargets = HasV4SubT in
def J4_jumpseti: CJInst <
(outs IntRegs:$Rd),
(ins u6Imm:$U6, brtarget:$r9_2),
// J4_jumpsetr: Direct unconditional jump and transfer register.
let Defs = [PC], isBranch = 1, hasSideEffects = 0, hasNewValue = 1,
isExtentSigned = 1, opNewValue = 0, isExtendable = 1, opExtentBits = 11,
- opExtentAlign = 2, opExtendable = 2, validSubTargets = HasV4SubT,
- isCodeGenOnly = 0 in
+ opExtentAlign = 2, opExtendable = 2, validSubTargets = HasV4SubT in
def J4_jumpsetr: CJInst <
(outs IntRegs:$Rd),
(ins IntRegs:$Rs, brtarget:$r9_2),