// microMIPS instruction definitions. Everything inside this `let` region is
// isCodeGenOnly: each def shares its assembly string with a standard MIPS32
// instruction and exists only to carry the microMIPS encoding. The MMRel
// trait presumably ties each *_MM def to its MIPS32 counterpart for the
// MIPS32-to-microMIPS instruction mapping — TODO(review) confirm against the
// MMRel/InstrMapping declaration elsewhere in the target.
//
// NOTE(review): this chunk looks truncated by extraction — every `def` below
// that ends with a trailing ',' is missing its encoding format-class
// continuation line (the internal line numbering skips 3, 5, 7, ...), and
// each line carries a stray leading line-number token. Restore from the
// upstream file before attempting to build; do not guess the opcodes.
1 let isCodeGenOnly = 1 in {
2 /// Arithmetic Instructions (ALU Immediate)
// Sign-extended 16-bit immediate forms (simm16); no DAG pattern is given
// here for addiu/addi, so selection presumably happens via the MIPS32 defs.
3 def ADDiu_MM : MMRel, ArithLogicI<"addiu", simm16, CPURegsOpnd>,
5 def ADDi_MM : MMRel, ArithLogicI<"addi", simm16, CPURegsOpnd>,
// Set-on-less-than, immediate: signed (setlt) and unsigned (setult) compares.
7 def SLTi_MM : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, CPURegs>,
9 def SLTiu_MM : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, CPURegs>,
// Logical immediates use a zero-extended 16-bit immediate (uimm16/immZExt16)
// and carry their SelectionDAG node (and/or/xor) directly.
11 def ANDi_MM : MMRel, ArithLogicI<"andi", uimm16, CPURegsOpnd, immZExt16, and>,
13 def ORi_MM : MMRel, ArithLogicI<"ori", uimm16, CPURegsOpnd, immZExt16, or>,
15 def XORi_MM : MMRel, ArithLogicI<"xori", uimm16, CPURegsOpnd, immZExt16, xor>,
// lui: load 16-bit immediate into the upper half-word. This def is complete
// (LUI_FM_MM supplies the encoding with no parameters).
17 def LUi_MM : MMRel, LoadUpper<"lui", CPURegs, uimm16>, LUI_FM_MM;
19 /// Arithmetic Instructions (3-Operand, R-Type)
// ADD_FM_MM<op, funct>: the second template argument is the microMIPS
// function/encoding field for each R-type ALU operation.
20 def ADDu_MM : MMRel, ArithLogicR<"addu", CPURegsOpnd>, ADD_FM_MM<0, 0x150>;
21 def SUBu_MM : MMRel, ArithLogicR<"subu", CPURegsOpnd>, ADD_FM_MM<0, 0x1d0>;
22 def MUL_MM : MMRel, ArithLogicR<"mul", CPURegsOpnd>, ADD_FM_MM<0, 0x210>;
// add/sub (without the 'u' suffix) are the overflow-trapping variants of
// addu/subu — same operand shape, distinct funct fields.
23 def ADD_MM : MMRel, ArithLogicR<"add", CPURegsOpnd>, ADD_FM_MM<0, 0x110>;
24 def SUB_MM : MMRel, ArithLogicR<"sub", CPURegsOpnd>, ADD_FM_MM<0, 0x190>;
// Register-register set-on-less-than: signed (slt) and unsigned (sltu).
25 def SLT_MM : MMRel, SetCC_R<"slt", setlt, CPURegs>, ADD_FM_MM<0, 0x350>;
// NOTE(review): SLTu_MM and the defs below ending in ',' are missing their
// ADD_FM_MM<...>/MULT_FM_MM<...> continuation lines (extraction truncation);
// restore the encodings from the upstream file.
26 def SLTu_MM : MMRel, SetCC_R<"sltu", setult, CPURegs>,
// Commutative logical ops: the extra args (1, IIAlu, and/or/xor) supply the
// commutativity flag, itinerary, and SelectionDAG node to ArithLogicR.
28 def AND_MM : MMRel, ArithLogicR<"and", CPURegsOpnd, 1, IIAlu, and>,
30 def OR_MM : MMRel, ArithLogicR<"or", CPURegsOpnd, 1, IIAlu, or>,
32 def XOR_MM : MMRel, ArithLogicR<"xor", CPURegsOpnd, 1, IIAlu, xor>,
34 def NOR_MM : MMRel, LogicNOR<"nor", CPURegsOpnd>, ADD_FM_MM<0, 0x2d0>;
// mult/multu write the 64-bit product into the HI/LO register pair
// (declared here as implicit defs via [HI, LO]).
35 def MULT_MM : MMRel, Mult<"mult", IIImul, CPURegsOpnd, [HI, LO]>,
37 def MULTu_MM : MMRel, Mult<"multu", IIImul, CPURegsOpnd, [HI, LO]>,
40 /// Shift Instructions
// Immediate-shamt forms (shift_rotate_imm) and register-amount forms
// (shift_rotate_reg) for logical/arithmetic shifts and rotate-right.
// NOTE(review): every def in this section ends with a trailing ',' — the
// encoding format-class continuation lines (presumably SRA_FM_MM<...> /
// SRLV_FM_MM<...> instances) were lost in extraction and must be restored
// from the upstream file; do not invent the opcode values.
41 def SLL_MM : MMRel, shift_rotate_imm<"sll", shamt, CPURegsOpnd>,
43 def SRL_MM : MMRel, shift_rotate_imm<"srl", shamt, CPURegsOpnd>,
45 def SRA_MM : MMRel, shift_rotate_imm<"sra", shamt, CPURegsOpnd>,
// *V variants take the shift amount from a register instead of an
// immediate shamt field.
47 def SLLV_MM : MMRel, shift_rotate_reg<"sllv", CPURegsOpnd>,
49 def SRLV_MM : MMRel, shift_rotate_reg<"srlv", CPURegsOpnd>,
51 def SRAV_MM : MMRel, shift_rotate_reg<"srav", CPURegsOpnd>,
// Rotate right: immediate and variable (register) amount forms.
53 def ROTR_MM : MMRel, shift_rotate_imm<"rotr", shamt, CPURegsOpnd>,
55 def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", CPURegsOpnd>,
58 /// Load and Store Instructions - aligned
// LoadM/StoreM are multiclasses (hence `defm`), presumably expanding to the
// 32- and 64-bit-pointer variants — TODO(review) confirm against their
// declaration. The third argument, where present, is the SelectionDAG
// load/store fragment (sign-/zero-extending loads, truncating stores);
// lw/sw omit it and use the multiclass default. LW_FM_MM<op> carries the
// microMIPS major opcode for each memory instruction.
59 defm LB_MM : LoadM<"lb", CPURegs, sextloadi8>, MMRel, LW_FM_MM<0x7>;
60 defm LBu_MM : LoadM<"lbu", CPURegs, zextloadi8>, MMRel, LW_FM_MM<0x5>;
61 defm LH_MM : LoadM<"lh", CPURegs, sextloadi16>, MMRel, LW_FM_MM<0xf>;
62 defm LHu_MM : LoadM<"lhu", CPURegs, zextloadi16>, MMRel, LW_FM_MM<0xd>;
63 defm LW_MM : LoadM<"lw", CPURegs>, MMRel, LW_FM_MM<0x3f>;
64 defm SB_MM : StoreM<"sb", CPURegs, truncstorei8>, MMRel, LW_FM_MM<0x6>;
65 defm SH_MM : StoreM<"sh", CPURegs, truncstorei16>, MMRel, LW_FM_MM<0xe>;
66 defm SW_MM : StoreM<"sw", CPURegs>, MMRel, LW_FM_MM<0x3e>;
// NOTE(review): the enclosing `let isCodeGenOnly = 1 in {` opened at the top
// of this chunk is not closed within view; the matching '}' presumably
// follows later in the file.