1 //===- MipsInstrInfo.td - Target Description for Mips Target -*- tablegen -*-=//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the Mips implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // Instruction format superclass
16 //===----------------------------------------------------------------------===//
18 include "MipsInstrFormats.td"
20 //===----------------------------------------------------------------------===//
21 // Mips profiles and nodes
22 //===----------------------------------------------------------------------===//
24 def SDT_MipsRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
25 def SDT_MipsJmpLink : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
26 def SDT_MipsCMov : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
30 def SDT_MipsCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
31 def SDT_MipsCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
32 def SDT_MipsMAddMSub : SDTypeProfile<0, 4,
33 [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
36 def SDT_MipsDivRem : SDTypeProfile<0, 2,
40 def SDT_MipsThreadPointer : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;
42 def SDT_MipsDynAlloc : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>,
44 def SDT_Sync : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
46 def SDT_Ext : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
47 SDTCisVT<2, i32>, SDTCisSameAs<2, 3>]>;
48 def SDT_Ins : SDTypeProfile<1, 4, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
49 SDTCisVT<2, i32>, SDTCisSameAs<2, 3>,
53 def MipsJmpLink : SDNode<"MipsISD::JmpLink",SDT_MipsJmpLink,
54 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
57 // Hi and Lo nodes are used to handle global addresses. They are used in
58 // MipsISelLowering to lower symbols such as GlobalAddress and ExternalSymbol
59 // in the static model. (Nothing to do with the Mips HI and LO registers.)
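// For example (illustrative register choice), a static-model reference to a
// global 'g' is materialized with a MipsHi/MipsLo pair that is selected to:
//   lui   $2, %hi(g)
//   addiu $2, $2, %lo(g)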
60 def MipsHi : SDNode<"MipsISD::Hi", SDTIntUnaryOp>;
61 def MipsLo : SDNode<"MipsISD::Lo", SDTIntUnaryOp>;
62 def MipsGPRel : SDNode<"MipsISD::GPRel", SDTIntUnaryOp>;
64 // TlsGd node is used to handle General Dynamic TLS
65 def MipsTlsGd : SDNode<"MipsISD::TlsGd", SDTIntUnaryOp>;
67 // TprelHi and TprelLo nodes are used to handle Local Exec TLS
68 def MipsTprelHi : SDNode<"MipsISD::TprelHi", SDTIntUnaryOp>;
69 def MipsTprelLo : SDNode<"MipsISD::TprelLo", SDTIntUnaryOp>;
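// For example (illustrative), a local-exec access to a thread-local variable
// 'x' computes the offset from the thread pointer with a %tprel_hi/%tprel_lo
// pair:
//   lui   $2, %tprel_hi(x)
//   addiu $2, $2, %tprel_lo(x)
// and adds the result to the thread pointer (see MipsThreadPointer and RDHWR).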
72 def MipsThreadPointer: SDNode<"MipsISD::ThreadPointer", SDT_MipsThreadPointer>;
75 def MipsRet : SDNode<"MipsISD::Ret", SDT_MipsRet, [SDNPHasChain,
78 // These are target-independent nodes, but have target-specific formats.
79 def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MipsCallSeqStart,
80 [SDNPHasChain, SDNPOutGlue]>;
81 def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MipsCallSeqEnd,
82 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
85 def MipsMAdd : SDNode<"MipsISD::MAdd", SDT_MipsMAddMSub,
86 [SDNPOptInGlue, SDNPOutGlue]>;
87 def MipsMAddu : SDNode<"MipsISD::MAddu", SDT_MipsMAddMSub,
88 [SDNPOptInGlue, SDNPOutGlue]>;
89 def MipsMSub : SDNode<"MipsISD::MSub", SDT_MipsMAddMSub,
90 [SDNPOptInGlue, SDNPOutGlue]>;
91 def MipsMSubu : SDNode<"MipsISD::MSubu", SDT_MipsMAddMSub,
92 [SDNPOptInGlue, SDNPOutGlue]>;
95 def MipsDivRem : SDNode<"MipsISD::DivRem", SDT_MipsDivRem,
97 def MipsDivRemU : SDNode<"MipsISD::DivRemU", SDT_MipsDivRem,
100 // Target constant nodes that are not part of any isel patterns and remain
101 // unchanged can cause instructions with illegal operands to be emitted.
102 // Wrapper node patterns give the instruction selector a chance to replace
103 // target constant nodes that would otherwise remain unchanged with ADDiu
104 // nodes. Without these wrapper node patterns, the following conditional move
105 // instruction is emitted when function cmov2 in test/CodeGen/Mips/cmov.ll is
106 // compiled:
107 // movn %got(d)($gp), %got(c)($gp), $4
108 // This instruction is illegal since movn can take only register operands.
110 def MipsWrapper : SDNode<"MipsISD::Wrapper", SDTIntBinOp>;
112 // Pointer to dynamically allocated stack area.
113 def MipsDynAlloc : SDNode<"MipsISD::DynAlloc", SDT_MipsDynAlloc,
114 [SDNPHasChain, SDNPInGlue]>;
116 def MipsSync : SDNode<"MipsISD::Sync", SDT_Sync, [SDNPHasChain]>;
118 def MipsExt : SDNode<"MipsISD::Ext", SDT_Ext>;
119 def MipsIns : SDNode<"MipsISD::Ins", SDT_Ins>;
121 //===----------------------------------------------------------------------===//
122 // Mips Instruction Predicate Definitions.
123 //===----------------------------------------------------------------------===//
124 def HasSEInReg : Predicate<"Subtarget.hasSEInReg()">,
125 AssemblerPredicate<"FeatureSEInReg">;
126 def HasBitCount : Predicate<"Subtarget.hasBitCount()">,
127 AssemblerPredicate<"FeatureBitCount">;
128 def HasSwap : Predicate<"Subtarget.hasSwap()">,
129 AssemblerPredicate<"FeatureSwap">;
130 def HasCondMov : Predicate<"Subtarget.hasCondMov()">,
131 AssemblerPredicate<"FeatureCondMov">;
132 def HasMips32 : Predicate<"Subtarget.hasMips32()">,
133 AssemblerPredicate<"FeatureMips32">;
134 def HasMips32r2 : Predicate<"Subtarget.hasMips32r2()">,
135 AssemblerPredicate<"FeatureMips32r2">;
136 def HasMips64 : Predicate<"Subtarget.hasMips64()">,
137 AssemblerPredicate<"FeatureMips64">;
138 def HasMips32r2Or64 : Predicate<"Subtarget.hasMips32r2Or64()">,
139 AssemblerPredicate<"FeatureMips32r2,FeatureMips64">;
140 def NotMips64 : Predicate<"!Subtarget.hasMips64()">,
141 AssemblerPredicate<"!FeatureMips64">;
142 def HasMips64r2 : Predicate<"Subtarget.hasMips64r2()">,
143 AssemblerPredicate<"FeatureMips64r2">;
144 def IsN64 : Predicate<"Subtarget.isABI_N64()">,
145 AssemblerPredicate<"FeatureN64">;
146 def NotN64 : Predicate<"!Subtarget.isABI_N64()">,
147 AssemblerPredicate<"!FeatureN64">;
148 def RelocStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">,
149 AssemblerPredicate<"FeatureMips32">;
150 def RelocPIC : Predicate<"TM.getRelocationModel() == Reloc::PIC_">,
151 AssemblerPredicate<"FeatureMips32">;
152 def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">,
153 AssemblerPredicate<"FeatureMips32">;
155 //===----------------------------------------------------------------------===//
156 // Mips Operand, Complex Patterns and Transformations Definitions.
157 //===----------------------------------------------------------------------===//
159 // Instruction operand types
160 def jmptarget : Operand<OtherVT> {
161 let EncoderMethod = "getJumpTargetOpValue";
163 def brtarget : Operand<OtherVT> {
164 let EncoderMethod = "getBranchTargetOpValue";
165 let OperandType = "OPERAND_PCREL";
166 let DecoderMethod = "DecodeBranchTarget";
168 def calltarget : Operand<iPTR> {
169 let EncoderMethod = "getJumpTargetOpValue";
171 def calltarget64: Operand<i64>;
172 def simm16 : Operand<i32> {
173 let DecoderMethod= "DecodeSimm16";
175 def simm16_64 : Operand<i64>;
176 def shamt : Operand<i32>;
179 def uimm16 : Operand<i32> {
180 let PrintMethod = "printUnsignedImm";
184 def mem : Operand<i32> {
185 let PrintMethod = "printMemOperand";
186 let MIOperandInfo = (ops CPURegs, simm16);
187 let EncoderMethod = "getMemEncoding";
190 def mem64 : Operand<i64> {
191 let PrintMethod = "printMemOperand";
192 let MIOperandInfo = (ops CPU64Regs, simm16_64);
195 def mem_ea : Operand<i32> {
196 let PrintMethod = "printMemOperandEA";
197 let MIOperandInfo = (ops CPURegs, simm16);
198 let EncoderMethod = "getMemEncoding";
201 def mem_ea_64 : Operand<i64> {
202 let PrintMethod = "printMemOperandEA";
203 let MIOperandInfo = (ops CPU64Regs, simm16_64);
204 let EncoderMethod = "getMemEncoding";
207 // size operand of ext instruction
208 def size_ext : Operand<i32> {
209 let EncoderMethod = "getSizeExtEncoding";
210 let DecoderMethod = "DecodeExtSize";
213 // size operand of ins instruction
214 def size_ins : Operand<i32> {
215 let EncoderMethod = "getSizeInsEncoding";
216 let DecoderMethod = "DecodeInsSize";
219 // Transformation Function - get the lower 16 bits.
220 def LO16 : SDNodeXForm<imm, [{
221 return getImm(N, N->getZExtValue() & 0xFFFF);
224 // Transformation Function - get the higher 16 bits.
225 def HI16 : SDNodeXForm<imm, [{
226 return getImm(N, (N->getZExtValue() >> 16) & 0xFFFF);
229 // Node immediate fits as a 16-bit sign-extended target immediate.
231 def immSExt16 : PatLeaf<(imm), [{ return isInt<16>(N->getSExtValue()); }]>;
233 // Node immediate fits as a 16-bit zero-extended target immediate;
234 // only the lower 16 bits of the node immediate may be nonzero.
237 def immZExt16 : PatLeaf<(imm), [{
238 if (N->getValueType(0) == MVT::i32)
239 return (uint32_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
241 return (uint64_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
244 // Immediate can be loaded with LUi (32-bit int with the lower 16 bits cleared).
245 def immLow16Zero : PatLeaf<(imm), [{
246 int64_t Val = N->getSExtValue();
247 return isInt<32>(Val) && !(Val & 0xffff);
250 // shamt field must fit in 5 bits.
251 def immZExt5 : ImmLeaf<i32, [{return Imm == (Imm & 0x1f);}]>;
253 // Mips Address Mode. SDNode frameindex can possibly be a match,
254 // since load and store instructions that access the stack use it.
255 def addr : ComplexPattern<iPTR, 2, "SelectAddr", [frameindex], [SDNPWantParent]>;
257 //===----------------------------------------------------------------------===//
258 // Pattern fragment for load/store
259 //===----------------------------------------------------------------------===//
260 class UnalignedLoad<PatFrag Node> :
261 PatFrag<(ops node:$ptr), (Node node:$ptr), [{
262 LoadSDNode *LD = cast<LoadSDNode>(N);
263 return LD->getMemoryVT().getSizeInBits()/8 > LD->getAlignment();
266 class AlignedLoad<PatFrag Node> :
267 PatFrag<(ops node:$ptr), (Node node:$ptr), [{
268 LoadSDNode *LD = cast<LoadSDNode>(N);
269 return LD->getMemoryVT().getSizeInBits()/8 <= LD->getAlignment();
272 class UnalignedStore<PatFrag Node> :
273 PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{
274 StoreSDNode *SD = cast<StoreSDNode>(N);
275 return SD->getMemoryVT().getSizeInBits()/8 > SD->getAlignment();
278 class AlignedStore<PatFrag Node> :
279 PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{
280 StoreSDNode *SD = cast<StoreSDNode>(N);
281 return SD->getMemoryVT().getSizeInBits()/8 <= SD->getAlignment();
284 // Load/Store PatFrags.
285 def sextloadi16_a : AlignedLoad<sextloadi16>;
286 def zextloadi16_a : AlignedLoad<zextloadi16>;
287 def extloadi16_a : AlignedLoad<extloadi16>;
288 def load_a : AlignedLoad<load>;
289 def sextloadi32_a : AlignedLoad<sextloadi32>;
290 def zextloadi32_a : AlignedLoad<zextloadi32>;
291 def extloadi32_a : AlignedLoad<extloadi32>;
292 def truncstorei16_a : AlignedStore<truncstorei16>;
293 def store_a : AlignedStore<store>;
294 def truncstorei32_a : AlignedStore<truncstorei32>;
295 def sextloadi16_u : UnalignedLoad<sextloadi16>;
296 def zextloadi16_u : UnalignedLoad<zextloadi16>;
297 def extloadi16_u : UnalignedLoad<extloadi16>;
298 def load_u : UnalignedLoad<load>;
299 def sextloadi32_u : UnalignedLoad<sextloadi32>;
300 def zextloadi32_u : UnalignedLoad<zextloadi32>;
301 def extloadi32_u : UnalignedLoad<extloadi32>;
302 def truncstorei16_u : UnalignedStore<truncstorei16>;
303 def store_u : UnalignedStore<store>;
304 def truncstorei32_u : UnalignedStore<truncstorei32>;
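// For example (illustrative), an i32 load whose known alignment is less than
// 4 bytes matches load_u rather than load_a, so it is selected to the
// unaligned load ULW defined below instead of LW.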
306 //===----------------------------------------------------------------------===//
307 // Instructions specific format
308 //===----------------------------------------------------------------------===//
310 // Arithmetic and logical instructions with 3 register operands.
311 class ArithLogicR<bits<6> op, bits<6> func, string instr_asm, SDNode OpNode,
312 InstrItinClass itin, RegisterClass RC, bit isComm = 0>:
313 FR<op, func, (outs RC:$rd), (ins RC:$rs, RC:$rt),
314 !strconcat(instr_asm, "\t$rd, $rs, $rt"),
315 [(set RC:$rd, (OpNode RC:$rs, RC:$rt))], itin> {
317 let isCommutable = isComm;
318 let isReMaterializable = 1;
321 class ArithOverflowR<bits<6> op, bits<6> func, string instr_asm,
322 InstrItinClass itin, RegisterClass RC, bit isComm = 0>:
323 FR<op, func, (outs RC:$rd), (ins RC:$rs, RC:$rt),
324 !strconcat(instr_asm, "\t$rd, $rs, $rt"), [], itin> {
326 let isCommutable = isComm;
329 // Arithmetic and logical instructions with a register and a 16-bit immediate operand.
330 class ArithLogicI<bits<6> op, string instr_asm, SDNode OpNode,
331 Operand Od, PatLeaf imm_type, RegisterClass RC> :
332 FI<op, (outs RC:$rt), (ins RC:$rs, Od:$imm16),
333 !strconcat(instr_asm, "\t$rt, $rs, $imm16"),
334 [(set RC:$rt, (OpNode RC:$rs, imm_type:$imm16))], IIAlu> {
335 let isReMaterializable = 1;
338 class ArithOverflowI<bits<6> op, string instr_asm, SDNode OpNode,
339 Operand Od, PatLeaf imm_type, RegisterClass RC> :
340 FI<op, (outs RC:$rt), (ins RC:$rs, Od:$imm16),
341 !strconcat(instr_asm, "\t$rt, $rs, $imm16"), [], IIAlu>;
343 // Arithmetic Multiply ADD/SUB
344 let rd = 0, shamt = 0, Defs = [HI, LO], Uses = [HI, LO] in
345 class MArithR<bits<6> func, string instr_asm, SDNode op, bit isComm = 0> :
346 FR<0x1c, func, (outs), (ins CPURegs:$rs, CPURegs:$rt),
347 !strconcat(instr_asm, "\t$rs, $rt"),
348 [(op CPURegs:$rs, CPURegs:$rt, LO, HI)], IIImul> {
351 let isCommutable = isComm;
355 class LogicNOR<bits<6> op, bits<6> func, string instr_asm, RegisterClass RC>:
356 FR<op, func, (outs RC:$rd), (ins RC:$rs, RC:$rt),
357 !strconcat(instr_asm, "\t$rd, $rs, $rt"),
358 [(set RC:$rd, (not (or RC:$rs, RC:$rt)))], IIAlu> {
360 let isCommutable = 1;
364 class shift_rotate_imm<bits<6> func, bits<5> isRotate, string instr_asm,
365 SDNode OpNode, PatFrag PF, Operand ImmOpnd,
367 FR<0x00, func, (outs RC:$rd), (ins RC:$rt, ImmOpnd:$shamt),
368 !strconcat(instr_asm, "\t$rd, $rt, $shamt"),
369 [(set RC:$rd, (OpNode RC:$rt, PF:$shamt))], IIAlu> {
373 // 32-bit shift instructions.
374 class shift_rotate_imm32<bits<6> func, bits<5> isRotate, string instr_asm,
376 shift_rotate_imm<func, isRotate, instr_asm, OpNode, immZExt5, shamt, CPURegs>;
378 class shift_rotate_reg<bits<6> func, bits<5> isRotate, string instr_asm,
379 SDNode OpNode, RegisterClass RC>:
380 FR<0x00, func, (outs RC:$rd), (ins CPURegs:$rs, RC:$rt),
381 !strconcat(instr_asm, "\t$rd, $rt, $rs"),
382 [(set RC:$rd, (OpNode RC:$rt, CPURegs:$rs))], IIAlu> {
383 let shamt = isRotate;
386 // Load Upper Immediate
387 class LoadUpper<bits<6> op, string instr_asm, RegisterClass RC, Operand Imm>:
388 FI<op, (outs RC:$rt), (ins Imm:$imm16),
389 !strconcat(instr_asm, "\t$rt, $imm16"), [], IIAlu> {
391 let neverHasSideEffects = 1;
392 let isReMaterializable = 1;
395 class FMem<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
396 InstrItinClass itin>: FFI<op, outs, ins, asmstr, pattern> {
398 let Inst{25-21} = addr{20-16};
399 let Inst{15-0} = addr{15-0};
400 let DecoderMethod = "DecodeMem";
404 let canFoldAsLoad = 1 in
405 class LoadM<bits<6> op, string instr_asm, PatFrag OpNode, RegisterClass RC,
406 Operand MemOpnd, bit Pseudo>:
407 FMem<op, (outs RC:$rt), (ins MemOpnd:$addr),
408 !strconcat(instr_asm, "\t$rt, $addr"),
409 [(set RC:$rt, (OpNode addr:$addr))], IILoad> {
410 let isPseudo = Pseudo;
413 class StoreM<bits<6> op, string instr_asm, PatFrag OpNode, RegisterClass RC,
414 Operand MemOpnd, bit Pseudo>:
415 FMem<op, (outs), (ins RC:$rt, MemOpnd:$addr),
416 !strconcat(instr_asm, "\t$rt, $addr"),
417 [(OpNode RC:$rt, addr:$addr)], IIStore> {
418 let isPseudo = Pseudo;
421 // Unaligned Memory Load/Store
422 let canFoldAsLoad = 1 in
423 class LoadUnAlign<bits<6> op, RegisterClass RC, Operand MemOpnd>:
424 FMem<op, (outs RC:$rt), (ins MemOpnd:$addr), "", [], IILoad> {}
426 class StoreUnAlign<bits<6> op, RegisterClass RC, Operand MemOpnd>:
427 FMem<op, (outs), (ins RC:$rt, MemOpnd:$addr), "", [], IIStore> {}
430 multiclass LoadM32<bits<6> op, string instr_asm, PatFrag OpNode,
432 def #NAME# : LoadM<op, instr_asm, OpNode, CPURegs, mem, Pseudo>,
434 def _P8 : LoadM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>,
436 let DecoderNamespace = "Mips64";
437 let isCodeGenOnly = 1;
442 multiclass LoadM64<bits<6> op, string instr_asm, PatFrag OpNode,
444 def #NAME# : LoadM<op, instr_asm, OpNode, CPU64Regs, mem, Pseudo>,
446 def _P8 : LoadM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>,
448 let DecoderNamespace = "Mips64";
449 let isCodeGenOnly = 1;
454 multiclass LoadUnAlign32<bits<6> op> {
455 def #NAME# : LoadUnAlign<op, CPURegs, mem>,
457 def _P8 : LoadUnAlign<op, CPURegs, mem64>,
459 let DecoderNamespace = "Mips64";
460 let isCodeGenOnly = 1;
464 multiclass StoreM32<bits<6> op, string instr_asm, PatFrag OpNode,
466 def #NAME# : StoreM<op, instr_asm, OpNode, CPURegs, mem, Pseudo>,
468 def _P8 : StoreM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>,
470 let DecoderNamespace = "Mips64";
471 let isCodeGenOnly = 1;
476 multiclass StoreM64<bits<6> op, string instr_asm, PatFrag OpNode,
478 def #NAME# : StoreM<op, instr_asm, OpNode, CPU64Regs, mem, Pseudo>,
480 def _P8 : StoreM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>,
482 let DecoderNamespace = "Mips64";
483 let isCodeGenOnly = 1;
488 multiclass StoreUnAlign32<bits<6> op> {
489 def #NAME# : StoreUnAlign<op, CPURegs, mem>,
491 def _P8 : StoreUnAlign<op, CPURegs, mem64>,
493 let DecoderNamespace = "Mips64";
494 let isCodeGenOnly = 1;
498 // Conditional Branch
499 class CBranch<bits<6> op, string instr_asm, PatFrag cond_op, RegisterClass RC>:
500 BranchBase<op, (outs), (ins RC:$rs, RC:$rt, brtarget:$imm16),
501 !strconcat(instr_asm, "\t$rs, $rt, $imm16"),
502 [(brcond (i32 (cond_op RC:$rs, RC:$rt)), bb:$imm16)], IIBranch> {
504 let isTerminator = 1;
505 let hasDelaySlot = 1;
508 class CBranchZero<bits<6> op, bits<5> _rt, string instr_asm, PatFrag cond_op,
510 BranchBase<op, (outs), (ins RC:$rs, brtarget:$imm16),
511 !strconcat(instr_asm, "\t$rs, $imm16"),
512 [(brcond (i32 (cond_op RC:$rs, 0)), bb:$imm16)], IIBranch> {
515 let isTerminator = 1;
516 let hasDelaySlot = 1;
520 class SetCC_R<bits<6> op, bits<6> func, string instr_asm, PatFrag cond_op,
522 FR<op, func, (outs CPURegs:$rd), (ins RC:$rs, RC:$rt),
523 !strconcat(instr_asm, "\t$rd, $rs, $rt"),
524 [(set CPURegs:$rd, (cond_op RC:$rs, RC:$rt))],
529 class SetCC_I<bits<6> op, string instr_asm, PatFrag cond_op, Operand Od,
530 PatLeaf imm_type, RegisterClass RC>:
531 FI<op, (outs CPURegs:$rt), (ins RC:$rs, Od:$imm16),
532 !strconcat(instr_asm, "\t$rt, $rs, $imm16"),
533 [(set CPURegs:$rt, (cond_op RC:$rs, imm_type:$imm16))],
537 class JumpFJ<bits<6> op, string instr_asm>:
538 FJ<op, (outs), (ins jmptarget:$target),
539 !strconcat(instr_asm, "\t$target"), [(br bb:$target)], IIBranch> {
543 let hasDelaySlot = 1;
544 let Predicates = [RelocStatic];
545 let DecoderMethod = "DecodeJumpTarget";
548 // Unconditional branch
549 class UncondBranch<bits<6> op, string instr_asm>:
550 BranchBase<op, (outs), (ins brtarget:$imm16),
551 !strconcat(instr_asm, "\t$imm16"), [(br bb:$imm16)], IIBranch> {
555 let isTerminator = 1;
557 let hasDelaySlot = 1;
558 let Predicates = [RelocPIC];
561 let isBranch=1, isTerminator=1, isBarrier=1, rd=0, hasDelaySlot = 1,
562 isIndirectBranch = 1 in
563 class JumpFR<bits<6> op, bits<6> func, string instr_asm, RegisterClass RC>:
564 FR<op, func, (outs), (ins RC:$rs),
565 !strconcat(instr_asm, "\t$rs"), [(brind RC:$rs)], IIBranch> {
571 // Jump and Link (Call)
572 let isCall=1, hasDelaySlot=1 in {
573 class JumpLink<bits<6> op, string instr_asm>:
574 FJ<op, (outs), (ins calltarget:$target, variable_ops),
575 !strconcat(instr_asm, "\t$target"), [(MipsJmpLink imm:$target)],
577 let DecoderMethod = "DecodeJumpTarget";
580 class JumpLinkReg<bits<6> op, bits<6> func, string instr_asm,
582 FR<op, func, (outs), (ins RC:$rs, variable_ops),
583 !strconcat(instr_asm, "\t$rs"), [(MipsJmpLink RC:$rs)], IIBranch> {
589 class BranchLink<string instr_asm, bits<5> _rt, RegisterClass RC>:
590 FI<0x1, (outs), (ins RC:$rs, brtarget:$imm16, variable_ops),
591 !strconcat(instr_asm, "\t$rs, $imm16"), [], IIBranch> {
597 class Mult<bits<6> func, string instr_asm, InstrItinClass itin,
598 RegisterClass RC, list<Register> DefRegs>:
599 FR<0x00, func, (outs), (ins RC:$rs, RC:$rt),
600 !strconcat(instr_asm, "\t$rs, $rt"), [], itin> {
603 let isCommutable = 1;
605 let neverHasSideEffects = 1;
608 class Mult32<bits<6> func, string instr_asm, InstrItinClass itin>:
609 Mult<func, instr_asm, itin, CPURegs, [HI, LO]>;
611 class Div<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin,
612 RegisterClass RC, list<Register> DefRegs>:
613 FR<0x00, func, (outs), (ins RC:$rs, RC:$rt),
614 !strconcat(instr_asm, "\t$$zero, $rs, $rt"),
615 [(op RC:$rs, RC:$rt)], itin> {
621 class Div32<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>:
622 Div<op, func, instr_asm, itin, CPURegs, [HI, LO]>;
625 class MoveFromLOHI<bits<6> func, string instr_asm, RegisterClass RC,
626 list<Register> UseRegs>:
627 FR<0x00, func, (outs RC:$rd), (ins),
628 !strconcat(instr_asm, "\t$rd"), [], IIHiLo> {
633 let neverHasSideEffects = 1;
636 class MoveToLOHI<bits<6> func, string instr_asm, RegisterClass RC,
637 list<Register> DefRegs>:
638 FR<0x00, func, (outs), (ins RC:$rs),
639 !strconcat(instr_asm, "\t$rs"), [], IIHiLo> {
644 let neverHasSideEffects = 1;
647 class EffectiveAddress<string instr_asm, RegisterClass RC, Operand Mem> :
648 FMem<0x09, (outs RC:$rt), (ins Mem:$addr),
649 instr_asm, [(set RC:$rt, addr:$addr)], IIAlu>;
651 // Count Leading Ones/Zeros in Word
652 class CountLeading0<bits<6> func, string instr_asm, RegisterClass RC>:
653 FR<0x1c, func, (outs RC:$rd), (ins RC:$rs),
654 !strconcat(instr_asm, "\t$rd, $rs"),
655 [(set RC:$rd, (ctlz RC:$rs))], IIAlu>,
656 Requires<[HasBitCount]> {
661 class CountLeading1<bits<6> func, string instr_asm, RegisterClass RC>:
662 FR<0x1c, func, (outs RC:$rd), (ins RC:$rs),
663 !strconcat(instr_asm, "\t$rd, $rs"),
664 [(set RC:$rd, (ctlz (not RC:$rs)))], IIAlu>,
665 Requires<[HasBitCount]> {
670 // Sign Extend in Register.
671 class SignExtInReg<bits<5> sa, string instr_asm, ValueType vt,
673 FR<0x1f, 0x20, (outs RC:$rd), (ins RC:$rt),
674 !strconcat(instr_asm, "\t$rd, $rt"),
675 [(set RC:$rd, (sext_inreg RC:$rt, vt))], NoItinerary> {
678 let Predicates = [HasSEInReg];
682 class SubwordSwap<bits<6> func, bits<5> sa, string instr_asm, RegisterClass RC>:
683 FR<0x1f, func, (outs RC:$rd), (ins RC:$rt),
684 !strconcat(instr_asm, "\t$rd, $rt"), [], NoItinerary> {
687 let Predicates = [HasSwap];
688 let neverHasSideEffects = 1;
692 class ReadHardware<RegisterClass CPURegClass, RegisterClass HWRegClass>
693 : FR<0x1f, 0x3b, (outs CPURegClass:$rt), (ins HWRegClass:$rd),
694 "rdhwr\t$rt, $rd", [], IIAlu> {
700 class ExtBase<bits<6> _funct, string instr_asm, RegisterClass RC>:
701 FR<0x1f, _funct, (outs RC:$rt), (ins RC:$rs, uimm16:$pos, size_ext:$sz),
702 !strconcat(instr_asm, " $rt, $rs, $pos, $sz"),
703 [(set RC:$rt, (MipsExt RC:$rs, imm:$pos, imm:$sz))], NoItinerary> {
708 let Predicates = [HasMips32r2];
711 class InsBase<bits<6> _funct, string instr_asm, RegisterClass RC>:
712 FR<0x1f, _funct, (outs RC:$rt),
713 (ins RC:$rs, uimm16:$pos, size_ins:$sz, RC:$src),
714 !strconcat(instr_asm, " $rt, $rs, $pos, $sz"),
715 [(set RC:$rt, (MipsIns RC:$rs, imm:$pos, imm:$sz, RC:$src))],
721 let Predicates = [HasMips32r2];
722 let Constraints = "$src = $rt";
725 // Atomic instructions with 2 source operands (ATOMIC_SWAP & ATOMIC_LOAD_*).
726 class Atomic2Ops<PatFrag Op, string Opstr, RegisterClass DRC,
728 MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$incr),
729 !strconcat("atomic_", Opstr, "\t$dst, $ptr, $incr"),
730 [(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>;
732 multiclass Atomic2Ops32<PatFrag Op, string Opstr> {
733 def #NAME# : Atomic2Ops<Op, Opstr, CPURegs, CPURegs>, Requires<[NotN64]>;
734 def _P8 : Atomic2Ops<Op, Opstr, CPURegs, CPU64Regs>, Requires<[IsN64]> {
735 let DecoderNamespace = "Mips64";
739 // Atomic Compare & Swap.
740 class AtomicCmpSwap<PatFrag Op, string Width, RegisterClass DRC,
742 MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$cmp, DRC:$swap),
743 !strconcat("atomic_cmp_swap_", Width, "\t$dst, $ptr, $cmp, $swap"),
744 [(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>;
746 multiclass AtomicCmpSwap32<PatFrag Op, string Width> {
747 def #NAME# : AtomicCmpSwap<Op, Width, CPURegs, CPURegs>, Requires<[NotN64]>;
748 def _P8 : AtomicCmpSwap<Op, Width, CPURegs, CPU64Regs>, Requires<[IsN64]> {
749 let DecoderNamespace = "Mips64";
753 class LLBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> :
754 FMem<Opc, (outs RC:$rt), (ins Mem:$addr),
755 !strconcat(opstring, "\t$rt, $addr"), [], IILoad> {
759 class SCBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> :
760 FMem<Opc, (outs RC:$dst), (ins RC:$rt, Mem:$addr),
761 !strconcat(opstring, "\t$rt, $addr"), [], IIStore> {
763 let Constraints = "$rt = $dst";
766 //===----------------------------------------------------------------------===//
767 // Pseudo instructions
768 //===----------------------------------------------------------------------===//
770 // As stack alignment is always done with addiu, we need a 16-bit immediate
771 let Defs = [SP], Uses = [SP] in {
772 def ADJCALLSTACKDOWN : MipsPseudo<(outs), (ins uimm16:$amt),
773 "!ADJCALLSTACKDOWN $amt",
774 [(callseq_start timm:$amt)]>;
775 def ADJCALLSTACKUP : MipsPseudo<(outs), (ins uimm16:$amt1, uimm16:$amt2),
776 "!ADJCALLSTACKUP $amt1",
777 [(callseq_end timm:$amt1, timm:$amt2)]>;
780 // When handling PIC code the assembler needs .cpload and .cprestore
781 // directives. If the real instructions corresponding to these directives
782 // are used, we get the same behavior, but also a bunch of warnings
783 // from the assembler.
784 let neverHasSideEffects = 1 in
785 def CPRESTORE : MipsPseudo<(outs), (ins i32imm:$loc, CPURegs:$gp),
786 ".cprestore\t$loc", []>;
788 // For O32 ABI & PIC & non-fixed global base register, the following instruction
789 // sequence is emitted to set the global base register:
791 // 0. lui $2, %hi(_gp_disp)
792 // 1. addiu $2, $2, %lo(_gp_disp)
793 // 2. addu $globalbasereg, $2, $t9
795 // SETGP01 is emitted during Prologue/Epilogue insertion and then converted to
796 // instructions 0 and 1 in the sequence above during MC lowering.
797 // SETGP2 is emitted just before register allocation and converted to
798 // instruction 2 just prior to post-RA scheduling.
800 // These pseudo instructions are needed to ensure no instructions are inserted
801 // before or between instructions 0 and 1, which is a limitation imposed by
802 // the GNU linker.
804 let isTerminator = 1, isBarrier = 1 in
805 def SETGP01 : MipsPseudo<(outs CPURegs:$dst), (ins), "", []>;
807 let neverHasSideEffects = 1 in
808 def SETGP2 : MipsPseudo<(outs CPURegs:$globalreg), (ins CPURegs:$picreg), "",
811 let usesCustomInserter = 1 in {
812 defm ATOMIC_LOAD_ADD_I8 : Atomic2Ops32<atomic_load_add_8, "load_add_8">;
813 defm ATOMIC_LOAD_ADD_I16 : Atomic2Ops32<atomic_load_add_16, "load_add_16">;
814 defm ATOMIC_LOAD_ADD_I32 : Atomic2Ops32<atomic_load_add_32, "load_add_32">;
815 defm ATOMIC_LOAD_SUB_I8 : Atomic2Ops32<atomic_load_sub_8, "load_sub_8">;
816 defm ATOMIC_LOAD_SUB_I16 : Atomic2Ops32<atomic_load_sub_16, "load_sub_16">;
817 defm ATOMIC_LOAD_SUB_I32 : Atomic2Ops32<atomic_load_sub_32, "load_sub_32">;
818 defm ATOMIC_LOAD_AND_I8 : Atomic2Ops32<atomic_load_and_8, "load_and_8">;
819 defm ATOMIC_LOAD_AND_I16 : Atomic2Ops32<atomic_load_and_16, "load_and_16">;
820 defm ATOMIC_LOAD_AND_I32 : Atomic2Ops32<atomic_load_and_32, "load_and_32">;
821 defm ATOMIC_LOAD_OR_I8 : Atomic2Ops32<atomic_load_or_8, "load_or_8">;
822 defm ATOMIC_LOAD_OR_I16 : Atomic2Ops32<atomic_load_or_16, "load_or_16">;
823 defm ATOMIC_LOAD_OR_I32 : Atomic2Ops32<atomic_load_or_32, "load_or_32">;
824 defm ATOMIC_LOAD_XOR_I8 : Atomic2Ops32<atomic_load_xor_8, "load_xor_8">;
825 defm ATOMIC_LOAD_XOR_I16 : Atomic2Ops32<atomic_load_xor_16, "load_xor_16">;
826 defm ATOMIC_LOAD_XOR_I32 : Atomic2Ops32<atomic_load_xor_32, "load_xor_32">;
827 defm ATOMIC_LOAD_NAND_I8 : Atomic2Ops32<atomic_load_nand_8, "load_nand_8">;
828 defm ATOMIC_LOAD_NAND_I16 : Atomic2Ops32<atomic_load_nand_16, "load_nand_16">;
829 defm ATOMIC_LOAD_NAND_I32 : Atomic2Ops32<atomic_load_nand_32, "load_nand_32">;
831 defm ATOMIC_SWAP_I8 : Atomic2Ops32<atomic_swap_8, "swap_8">;
832 defm ATOMIC_SWAP_I16 : Atomic2Ops32<atomic_swap_16, "swap_16">;
833 defm ATOMIC_SWAP_I32 : Atomic2Ops32<atomic_swap_32, "swap_32">;
835 defm ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap32<atomic_cmp_swap_8, "8">;
836 defm ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap32<atomic_cmp_swap_16, "16">;
837 defm ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap32<atomic_cmp_swap_32, "32">;
840 //===----------------------------------------------------------------------===//
841 // Instruction definition
842 //===----------------------------------------------------------------------===//
844 //===----------------------------------------------------------------------===//
845 // MipsI Instructions
846 //===----------------------------------------------------------------------===//
848 /// Arithmetic Instructions (ALU Immediate)
849 def ADDiu : ArithLogicI<0x09, "addiu", add, simm16, immSExt16, CPURegs>;
850 def ADDi : ArithOverflowI<0x08, "addi", add, simm16, immSExt16, CPURegs>;
851 def SLTi : SetCC_I<0x0a, "slti", setlt, simm16, immSExt16, CPURegs>;
852 def SLTiu : SetCC_I<0x0b, "sltiu", setult, simm16, immSExt16, CPURegs>;
853 def ANDi : ArithLogicI<0x0c, "andi", and, uimm16, immZExt16, CPURegs>;
854 def ORi : ArithLogicI<0x0d, "ori", or, uimm16, immZExt16, CPURegs>;
855 def XORi : ArithLogicI<0x0e, "xori", xor, uimm16, immZExt16, CPURegs>;
856 def LUi : LoadUpper<0x0f, "lui", CPURegs, uimm16>;
858 /// Arithmetic Instructions (3-Operand, R-Type)
859 def ADDu : ArithLogicR<0x00, 0x21, "addu", add, IIAlu, CPURegs, 1>;
860 def SUBu : ArithLogicR<0x00, 0x23, "subu", sub, IIAlu, CPURegs>;
861 def ADD : ArithOverflowR<0x00, 0x20, "add", IIAlu, CPURegs, 1>;
862 def SUB : ArithOverflowR<0x00, 0x22, "sub", IIAlu, CPURegs>;
863 def SLT : SetCC_R<0x00, 0x2a, "slt", setlt, CPURegs>;
864 def SLTu : SetCC_R<0x00, 0x2b, "sltu", setult, CPURegs>;
865 def AND : ArithLogicR<0x00, 0x24, "and", and, IIAlu, CPURegs, 1>;
866 def OR : ArithLogicR<0x00, 0x25, "or", or, IIAlu, CPURegs, 1>;
867 def XOR : ArithLogicR<0x00, 0x26, "xor", xor, IIAlu, CPURegs, 1>;
868 def NOR : LogicNOR<0x00, 0x27, "nor", CPURegs>;
870 /// Shift Instructions
871 def SLL : shift_rotate_imm32<0x00, 0x00, "sll", shl>;
872 def SRL : shift_rotate_imm32<0x02, 0x00, "srl", srl>;
873 def SRA : shift_rotate_imm32<0x03, 0x00, "sra", sra>;
874 def SLLV : shift_rotate_reg<0x04, 0x00, "sllv", shl, CPURegs>;
875 def SRLV : shift_rotate_reg<0x06, 0x00, "srlv", srl, CPURegs>;
876 def SRAV : shift_rotate_reg<0x07, 0x00, "srav", sra, CPURegs>;
878 // Rotate Instructions
879 let Predicates = [HasMips32r2] in {
880 def ROTR : shift_rotate_imm32<0x02, 0x01, "rotr", rotr>;
881 def ROTRV : shift_rotate_reg<0x06, 0x01, "rotrv", rotr, CPURegs>;
884 /// Load and Store Instructions
886 defm LB : LoadM32<0x20, "lb", sextloadi8>;
887 defm LBu : LoadM32<0x24, "lbu", zextloadi8>;
888 defm LH : LoadM32<0x21, "lh", sextloadi16_a>;
889 defm LHu : LoadM32<0x25, "lhu", zextloadi16_a>;
890 defm LW : LoadM32<0x23, "lw", load_a>;
891 defm SB : StoreM32<0x28, "sb", truncstorei8>;
892 defm SH : StoreM32<0x29, "sh", truncstorei16_a>;
893 defm SW : StoreM32<0x2b, "sw", store_a>;
896 defm ULH : LoadM32<0x21, "ulh", sextloadi16_u, 1>;
897 defm ULHu : LoadM32<0x25, "ulhu", zextloadi16_u, 1>;
898 defm ULW : LoadM32<0x23, "ulw", load_u, 1>;
899 defm USH : StoreM32<0x29, "ush", truncstorei16_u, 1>;
900 defm USW : StoreM32<0x2b, "usw", store_u, 1>;
902 /// Primitives for unaligned memory access (LWL/LWR/SWL/SWR)
903 defm LWL : LoadUnAlign32<0x22>;
904 defm LWR : LoadUnAlign32<0x26>;
905 defm SWL : StoreUnAlign32<0x2A>;
906 defm SWR : StoreUnAlign32<0x2E>;
908 let hasSideEffects = 1 in
909 def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype",
910 [(MipsSync imm:$stype)], NoItinerary, FrmOther>
915 let Inst{10-6} = stype;
919 /// Load-linked, Store-conditional
920 def LL : LLBase<0x30, "ll", CPURegs, mem>, Requires<[NotN64]>;
921 def LL_P8 : LLBase<0x30, "ll", CPURegs, mem64>, Requires<[IsN64]> {
922 let DecoderNamespace = "Mips64";
925 def SC : SCBase<0x38, "sc", CPURegs, mem>, Requires<[NotN64]>;
926 def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>, Requires<[IsN64]> {
927 let DecoderNamespace = "Mips64";
930 /// Jump and Branch Instructions
931 def J : JumpFJ<0x02, "j">;
932 def JR : JumpFR<0x00, 0x08, "jr", CPURegs>;
933 def B : UncondBranch<0x04, "b">;
934 def BEQ : CBranch<0x04, "beq", seteq, CPURegs>;
935 def BNE : CBranch<0x05, "bne", setne, CPURegs>;
936 def BGEZ : CBranchZero<0x01, 1, "bgez", setge, CPURegs>;
937 def BGTZ : CBranchZero<0x07, 0, "bgtz", setgt, CPURegs>;
938 def BLEZ : CBranchZero<0x06, 0, "blez", setle, CPURegs>;
939 def BLTZ : CBranchZero<0x01, 0, "bltz", setlt, CPURegs>;
941 def JAL : JumpLink<0x03, "jal">;
942 def JALR : JumpLinkReg<0x00, 0x09, "jalr", CPURegs>;
943 def BGEZAL : BranchLink<"bgezal", 0x11, CPURegs>;
944 def BLTZAL : BranchLink<"bltzal", 0x10, CPURegs>;
946 let isReturn=1, isTerminator=1, hasDelaySlot=1, isCodeGenOnly=1,
947 isBarrier=1, hasCtrlDep=1, rd=0, rt=0, shamt=0 in
948 def RET : FR <0x00, 0x08, (outs), (ins CPURegs:$target),
949 "jr\t$target", [(MipsRet CPURegs:$target)], IIBranch>;
951 /// Multiply and Divide Instructions.
952 def MULT : Mult32<0x18, "mult", IIImul>;
953 def MULTu : Mult32<0x19, "multu", IIImul>;
954 def SDIV : Div32<MipsDivRem, 0x1a, "div", IIIdiv>;
955 def UDIV : Div32<MipsDivRemU, 0x1b, "divu", IIIdiv>;
957 def MTHI : MoveToLOHI<0x11, "mthi", CPURegs, [HI]>;
958 def MTLO : MoveToLOHI<0x13, "mtlo", CPURegs, [LO]>;
959 def MFHI : MoveFromLOHI<0x10, "mfhi", CPURegs, [HI]>;
960 def MFLO : MoveFromLOHI<0x12, "mflo", CPURegs, [LO]>;
962 /// Sign Ext In Register Instructions.
963 def SEB : SignExtInReg<0x10, "seb", i8, CPURegs>;
964 def SEH : SignExtInReg<0x18, "seh", i16, CPURegs>;
967 def CLZ : CountLeading0<0x20, "clz", CPURegs>;
968 def CLO : CountLeading1<0x21, "clo", CPURegs>;
970 /// Word Swap Bytes Within Halfwords
971 def WSBH : SubwordSwap<0x20, 0x2, "wsbh", CPURegs>;
975 def NOP : FJ<0, (outs), (ins), "nop", [], IIAlu>;
977 // FrameIndexes are legalized when they are operands of load/store
978 // instructions. The same does not happen for stack address copies, so an
979 // add op with a mem ComplexPattern is used so that the stack address copy
980 // can be matched. It's similar to Sparc's LEA_ADDRi.
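// For example (illustrative operands), copying the address of a stack slot
// matches LEA_ADDiu and is emitted as:
//   addiu $2, $sp, 16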
981 def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea> {
982 let isCodeGenOnly = 1;
985 // DynAlloc node points to dynamically allocated stack space.
986 // $sp is added to the list of implicitly used registers to prevent dead code
987 // elimination from removing instructions that modify $sp.
989 def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea> {
990 let isCodeGenOnly = 1;
994 def MADD : MArithR<0, "madd", MipsMAdd, 1>;
995 def MADDU : MArithR<1, "maddu", MipsMAddu, 1>;
996 def MSUB : MArithR<4, "msub", MipsMSub>;
997 def MSUBU : MArithR<5, "msubu", MipsMSubu>;
999 // MUL is an assembly macro in the currently used ISAs. In recent ISAs
1000 // it is a real instruction.
1001 def MUL : ArithLogicR<0x1c, 0x02, "mul", mul, IIImul, CPURegs, 1>,
1002 Requires<[HasMips32]>;
1004 def RDHWR : ReadHardware<CPURegs, HWRegs>;
1006 def EXT : ExtBase<0, "ext", CPURegs>;
1007 def INS : InsBase<4, "ins", CPURegs>;
1009 //===----------------------------------------------------------------------===//
1010 // Arbitrary patterns that map to one or more instructions
1011 //===----------------------------------------------------------------------===//
1014 def : Pat<(i32 immSExt16:$in),
1015 (ADDiu ZERO, imm:$in)>;
1016 def : Pat<(i32 immZExt16:$in),
1017 (ORi ZERO, imm:$in)>;
1018 def : Pat<(i32 immLow16Zero:$in),
1019 (LUi (HI16 imm:$in))>;
1021 // Arbitrary immediates
1022 def : Pat<(i32 imm:$imm),
1023 (ORi (LUi (HI16 imm:$imm)), (LO16 imm:$imm))>;
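// For example (illustrative register choice), materializing 0x12345678 uses
// HI16/LO16 above and is emitted as:
//   lui $2, 0x1234
//   ori $2, $2, 0x5678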
1026 def : Pat<(subc CPURegs:$lhs, CPURegs:$rhs),
1027 (SUBu CPURegs:$lhs, CPURegs:$rhs)>;
1028 def : Pat<(addc CPURegs:$lhs, CPURegs:$rhs),
1029 (ADDu CPURegs:$lhs, CPURegs:$rhs)>;
1030 def : Pat<(addc CPURegs:$src, immSExt16:$imm),
1031 (ADDiu CPURegs:$src, imm:$imm)>;
1034 def : Pat<(MipsJmpLink (i32 tglobaladdr:$dst)),
1035 (JAL tglobaladdr:$dst)>;
1036 def : Pat<(MipsJmpLink (i32 texternalsym:$dst)),
1037 (JAL texternalsym:$dst)>;
1038 //def : Pat<(MipsJmpLink CPURegs:$dst),
1039 // (JALR CPURegs:$dst)>;
1042 def : Pat<(MipsHi tglobaladdr:$in), (LUi tglobaladdr:$in)>;
1043 def : Pat<(MipsHi tblockaddress:$in), (LUi tblockaddress:$in)>;
1044 def : Pat<(MipsHi tjumptable:$in), (LUi tjumptable:$in)>;
1045 def : Pat<(MipsHi tconstpool:$in), (LUi tconstpool:$in)>;
1046 def : Pat<(MipsHi tglobaltlsaddr:$in), (LUi tglobaltlsaddr:$in)>;
1048 def : Pat<(MipsLo tglobaladdr:$in), (ADDiu ZERO, tglobaladdr:$in)>;
1049 def : Pat<(MipsLo tblockaddress:$in), (ADDiu ZERO, tblockaddress:$in)>;
1050 def : Pat<(MipsLo tjumptable:$in), (ADDiu ZERO, tjumptable:$in)>;
1051 def : Pat<(MipsLo tconstpool:$in), (ADDiu ZERO, tconstpool:$in)>;
1052 def : Pat<(MipsLo tglobaltlsaddr:$in), (ADDiu ZERO, tglobaltlsaddr:$in)>;
1054 def : Pat<(add CPURegs:$hi, (MipsLo tglobaladdr:$lo)),
1055 (ADDiu CPURegs:$hi, tglobaladdr:$lo)>;
1056 def : Pat<(add CPURegs:$hi, (MipsLo tblockaddress:$lo)),
1057 (ADDiu CPURegs:$hi, tblockaddress:$lo)>;
1058 def : Pat<(add CPURegs:$hi, (MipsLo tjumptable:$lo)),
1059 (ADDiu CPURegs:$hi, tjumptable:$lo)>;
1060 def : Pat<(add CPURegs:$hi, (MipsLo tconstpool:$lo)),
1061 (ADDiu CPURegs:$hi, tconstpool:$lo)>;
1062 def : Pat<(add CPURegs:$hi, (MipsLo tglobaltlsaddr:$lo)),
1063 (ADDiu CPURegs:$hi, tglobaltlsaddr:$lo)>;
1066 def : Pat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)),
1067 (ADDiu CPURegs:$gp, tglobaladdr:$in)>;
1068 def : Pat<(add CPURegs:$gp, (MipsGPRel tconstpool:$in)),
1069 (ADDiu CPURegs:$gp, tconstpool:$in)>;
1072 class WrapperPat<SDNode node, Instruction ADDiuOp, RegisterClass RC>:
1073 Pat<(MipsWrapper RC:$gp, node:$in),
1074 (ADDiuOp RC:$gp, node:$in)>;
1076 def : WrapperPat<tglobaladdr, ADDiu, CPURegs>;
1077 def : WrapperPat<tconstpool, ADDiu, CPURegs>;
1078 def : WrapperPat<texternalsym, ADDiu, CPURegs>;
1079 def : WrapperPat<tblockaddress, ADDiu, CPURegs>;
1080 def : WrapperPat<tjumptable, ADDiu, CPURegs>;
1081 def : WrapperPat<tglobaltlsaddr, ADDiu, CPURegs>;
1083 // Mips does not have a "not" instruction, so we expand it using nor.
1084 def : Pat<(not CPURegs:$in),
1085 (NOR CPURegs:$in, ZERO)>;
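// For example (illustrative register choice), (not $4) is emitted as:
//   nor $2, $4, $zero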
1088 let Predicates = [NotN64] in {
1089 def : Pat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
1090 def : Pat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
1091 def : Pat<(i32 (extloadi16_a addr:$src)), (LHu addr:$src)>;
1092 def : Pat<(i32 (extloadi16_u addr:$src)), (ULHu addr:$src)>;
1094 let Predicates = [IsN64] in {
1095 def : Pat<(i32 (extloadi1 addr:$src)), (LBu_P8 addr:$src)>;
1096 def : Pat<(i32 (extloadi8 addr:$src)), (LBu_P8 addr:$src)>;
1097 def : Pat<(i32 (extloadi16_a addr:$src)), (LHu_P8 addr:$src)>;
1098 def : Pat<(i32 (extloadi16_u addr:$src)), (ULHu_P8 addr:$src)>;
1102 let Predicates = [NotN64] in {
1103 def : Pat<(store_a (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
1104 def : Pat<(store_u (i32 0), addr:$dst), (USW ZERO, addr:$dst)>;
1106 let Predicates = [IsN64] in {
1107 def : Pat<(store_a (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>;
1108 def : Pat<(store_u (i32 0), addr:$dst), (USW_P8 ZERO, addr:$dst)>;
1112 multiclass BrcondPats<RegisterClass RC, Instruction BEQOp, Instruction BNEOp,
1113 Instruction SLTOp, Instruction SLTuOp, Instruction SLTiOp,
1114 Instruction SLTiuOp, Register ZEROReg> {
1115 def : Pat<(brcond (i32 (setne RC:$lhs, 0)), bb:$dst),
1116 (BNEOp RC:$lhs, ZEROReg, bb:$dst)>;
1117 def : Pat<(brcond (i32 (seteq RC:$lhs, 0)), bb:$dst),
1118 (BEQOp RC:$lhs, ZEROReg, bb:$dst)>;
1120 def : Pat<(brcond (i32 (setge RC:$lhs, RC:$rhs)), bb:$dst),
1121 (BEQ (SLTOp RC:$lhs, RC:$rhs), ZERO, bb:$dst)>;
1122 def : Pat<(brcond (i32 (setuge RC:$lhs, RC:$rhs)), bb:$dst),
1123 (BEQ (SLTuOp RC:$lhs, RC:$rhs), ZERO, bb:$dst)>;
1124 def : Pat<(brcond (i32 (setge RC:$lhs, immSExt16:$rhs)), bb:$dst),
1125 (BEQ (SLTiOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
1126 def : Pat<(brcond (i32 (setuge RC:$lhs, immSExt16:$rhs)), bb:$dst),
1127 (BEQ (SLTiuOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
1129 def : Pat<(brcond (i32 (setle RC:$lhs, RC:$rhs)), bb:$dst),
1130 (BEQ (SLTOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>;
1131 def : Pat<(brcond (i32 (setule RC:$lhs, RC:$rhs)), bb:$dst),
1132 (BEQ (SLTuOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>;
1134 def : Pat<(brcond RC:$cond, bb:$dst),
1135 (BNEOp RC:$cond, ZEROReg, bb:$dst)>;
1138 defm : BrcondPats<CPURegs, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>;
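// For example (illustrative registers), a branch on (setge $4, $5) is selected
// through the patterns above as:
//   slt $1, $4, $5
//   beq $1, $zero, <target>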
1141 multiclass SeteqPats<RegisterClass RC, Instruction SLTiuOp, Instruction XOROp,
1142 Instruction SLTuOp, Register ZEROReg> {
1143 def : Pat<(seteq RC:$lhs, RC:$rhs),
1144 (SLTiuOp (XOROp RC:$lhs, RC:$rhs), 1)>;
1145 def : Pat<(setne RC:$lhs, RC:$rhs),
1146 (SLTuOp ZEROReg, (XOROp RC:$lhs, RC:$rhs))>;
1149 multiclass SetlePats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> {
1150 def : Pat<(setle RC:$lhs, RC:$rhs),
1151 (XORi (SLTOp RC:$rhs, RC:$lhs), 1)>;
1152 def : Pat<(setule RC:$lhs, RC:$rhs),
1153 (XORi (SLTuOp RC:$rhs, RC:$lhs), 1)>;
1156 multiclass SetgtPats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> {
1157 def : Pat<(setgt RC:$lhs, RC:$rhs),
1158 (SLTOp RC:$rhs, RC:$lhs)>;
1159 def : Pat<(setugt RC:$lhs, RC:$rhs),
1160 (SLTuOp RC:$rhs, RC:$lhs)>;
1163 multiclass SetgePats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> {
1164 def : Pat<(setge RC:$lhs, RC:$rhs),
1165 (XORi (SLTOp RC:$lhs, RC:$rhs), 1)>;
1166 def : Pat<(setuge RC:$lhs, RC:$rhs),
1167 (XORi (SLTuOp RC:$lhs, RC:$rhs), 1)>;
1170 multiclass SetgeImmPats<RegisterClass RC, Instruction SLTiOp,
1171 Instruction SLTiuOp> {
1172 def : Pat<(setge RC:$lhs, immSExt16:$rhs),
1173 (XORi (SLTiOp RC:$lhs, immSExt16:$rhs), 1)>;
1174 def : Pat<(setuge RC:$lhs, immSExt16:$rhs),
1175 (XORi (SLTiuOp RC:$lhs, immSExt16:$rhs), 1)>;
1178 defm : SeteqPats<CPURegs, SLTiu, XOR, SLTu, ZERO>;
1179 defm : SetlePats<CPURegs, SLT, SLTu>;
1180 defm : SetgtPats<CPURegs, SLT, SLTu>;
1181 defm : SetgePats<CPURegs, SLT, SLTu>;
1182 defm : SetgeImmPats<CPURegs, SLTi, SLTiu>;
1184 // select MipsDynAlloc
1185 def : Pat<(MipsDynAlloc addr:$f), (DynAlloc addr:$f)>;
1188 def : Pat<(bswap CPURegs:$rt), (ROTR (WSBH CPURegs:$rt), 16)>;
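// For example, bswap of 0x11223344 is computed as:
//   wsbh:    0x11223344 -> 0x22114433  (swap bytes within each halfword)
//   rotr 16: 0x22114433 -> 0x44332211  (swap the two halfwords)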
1190 //===----------------------------------------------------------------------===//
1191 // Floating Point Support
1192 //===----------------------------------------------------------------------===//
1194 include "MipsInstrFPU.td"
1195 include "Mips64InstrInfo.td"
1196 include "MipsCondMov.td"