1 //===- MipsInstrInfo.td - Target Description for Mips Target -*- tablegen -*-=//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the Mips implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // Instruction format superclass
16 //===----------------------------------------------------------------------===//
18 include "MipsInstrFormats.td"
20 //===----------------------------------------------------------------------===//
21 // Mips profiles and nodes
22 //===----------------------------------------------------------------------===//
24 def SDT_MipsRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
25 def SDT_MipsJmpLink : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
26 def SDT_MipsCMov : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
30 def SDT_MipsCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
31 def SDT_MipsCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
32 def SDT_MipsMAddMSub : SDTypeProfile<0, 4,
33 [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
36 def SDT_MipsDivRem : SDTypeProfile<0, 2,
40 def SDT_MipsThreadPointer : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;
42 def SDT_MipsDynAlloc : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>,
44 def SDT_Sync : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
46 def SDT_Ext : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
47 SDTCisVT<2, i32>, SDTCisSameAs<2, 3>]>;
48 def SDT_Ins : SDTypeProfile<1, 4, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
49 SDTCisVT<2, i32>, SDTCisSameAs<2, 3>,
53 def MipsJmpLink : SDNode<"MipsISD::JmpLink",SDT_MipsJmpLink,
54 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
57 // Hi and Lo nodes are used to handle global addresses. Used on
58 // MipsISelLowering to lower stuff like GlobalAddress, ExternalSymbol
59 // static model. (nothing to do with Mips Registers Hi and Lo)
60 def MipsHi : SDNode<"MipsISD::Hi", SDTIntUnaryOp>;
61 def MipsLo : SDNode<"MipsISD::Lo", SDTIntUnaryOp>;
62 def MipsGPRel : SDNode<"MipsISD::GPRel", SDTIntUnaryOp>;
64 // TlsGd node is used to handle General Dynamic TLS
65 def MipsTlsGd : SDNode<"MipsISD::TlsGd", SDTIntUnaryOp>;
67 // TprelHi and TprelLo nodes are used to handle Local Exec TLS
68 def MipsTprelHi : SDNode<"MipsISD::TprelHi", SDTIntUnaryOp>;
69 def MipsTprelLo : SDNode<"MipsISD::TprelLo", SDTIntUnaryOp>;
72 def MipsThreadPointer: SDNode<"MipsISD::ThreadPointer", SDT_MipsThreadPointer>;
75 def MipsRet : SDNode<"MipsISD::Ret", SDT_MipsRet, [SDNPHasChain,
78 // These are target-independent nodes, but have target-specific formats.
79 def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MipsCallSeqStart,
80 [SDNPHasChain, SDNPOutGlue]>;
81 def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MipsCallSeqEnd,
82 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
85 def MipsMAdd : SDNode<"MipsISD::MAdd", SDT_MipsMAddMSub,
86 [SDNPOptInGlue, SDNPOutGlue]>;
87 def MipsMAddu : SDNode<"MipsISD::MAddu", SDT_MipsMAddMSub,
88 [SDNPOptInGlue, SDNPOutGlue]>;
89 def MipsMSub : SDNode<"MipsISD::MSub", SDT_MipsMAddMSub,
90 [SDNPOptInGlue, SDNPOutGlue]>;
91 def MipsMSubu : SDNode<"MipsISD::MSubu", SDT_MipsMAddMSub,
92 [SDNPOptInGlue, SDNPOutGlue]>;
95 def MipsDivRem : SDNode<"MipsISD::DivRem", SDT_MipsDivRem,
97 def MipsDivRemU : SDNode<"MipsISD::DivRemU", SDT_MipsDivRem,
100 // Target constant nodes that are not part of any isel patterns and remain
101 // unchanged can cause instructions with illegal operands to be emitted.
102 // Wrapper node patterns give the instruction selector a chance to replace
103 // target constant nodes that would otherwise remain unchanged with ADDiu
104 // nodes. Without these wrapper node patterns, the following conditional move
105 // instruction is emitted when function cmov2 in test/CodeGen/Mips/cmov.ll is
107 // movn %got(d)($gp), %got(c)($gp), $4
108 // This instruction is illegal since movn can take only register operands.
110 def MipsWrapper : SDNode<"MipsISD::Wrapper", SDTIntUnaryOp>;
112 // Pointer to dynamically allocated stack area.
113 def MipsDynAlloc : SDNode<"MipsISD::DynAlloc", SDT_MipsDynAlloc,
114 [SDNPHasChain, SDNPInGlue]>;
116 def MipsSync : SDNode<"MipsISD::Sync", SDT_Sync, [SDNPHasChain]>;
118 def MipsExt : SDNode<"MipsISD::Ext", SDT_Ext>;
119 def MipsIns : SDNode<"MipsISD::Ins", SDT_Ins>;
121 //===----------------------------------------------------------------------===//
122 // Mips Instruction Predicate Definitions.
123 //===----------------------------------------------------------------------===//
// Subtarget feature predicates, evaluated against the MipsSubtarget.
124 def HasSEInReg : Predicate<"Subtarget.hasSEInReg()">;
125 def HasBitCount : Predicate<"Subtarget.hasBitCount()">;
126 def HasSwap : Predicate<"Subtarget.hasSwap()">;
127 def HasCondMov : Predicate<"Subtarget.hasCondMov()">;
// ISA revision predicates.
128 def HasMips32 : Predicate<"Subtarget.hasMips32()">;
129 def HasMips32r2 : Predicate<"Subtarget.hasMips32r2()">;
130 def HasMips64 : Predicate<"Subtarget.hasMips64()">;
131 def NotMips64 : Predicate<"!Subtarget.hasMips64()">;
132 def HasMips64r2 : Predicate<"Subtarget.hasMips64r2()">;
// ABI predicates; IsN64 selects the "_P8" instruction variants, which take
// a 64-bit pointer (mem64) operand.
133 def IsN64 : Predicate<"Subtarget.isABI_N64()">;
134 def NotN64 : Predicate<"!Subtarget.isABI_N64()">;
// Relocation-model predicates, evaluated against the TargetMachine.
135 def RelocStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
136 def RelocPIC : Predicate<"TM.getRelocationModel() == Reloc::PIC_">;
138 //===----------------------------------------------------------------------===//
139 // Mips Operand, Complex Patterns and Transformations Definitions.
140 //===----------------------------------------------------------------------===//
142 // Instruction operand types
143 def jmptarget : Operand<OtherVT> {
144 let EncoderMethod = "getJumpTargetOpValue";
146 def brtarget : Operand<OtherVT> {
147 let EncoderMethod = "getBranchTargetOpValue";
148 let OperandType = "OPERAND_PCREL";
150 def calltarget : Operand<iPTR> {
151 let EncoderMethod = "getJumpTargetOpValue";
153 def calltarget64: Operand<i64>;
154 def simm16 : Operand<i32>;
155 def simm16_64 : Operand<i64>;
156 def shamt : Operand<i32>;
159 def uimm16 : Operand<i32> {
160 let PrintMethod = "printUnsignedImm";
164 def mem : Operand<i32> {
165 let PrintMethod = "printMemOperand";
166 let MIOperandInfo = (ops CPURegs, simm16);
167 let EncoderMethod = "getMemEncoding";
170 def mem64 : Operand<i64> {
171 let PrintMethod = "printMemOperand";
172 let MIOperandInfo = (ops CPU64Regs, simm16_64);
175 def mem_ea : Operand<i32> {
176 let PrintMethod = "printMemOperandEA";
177 let MIOperandInfo = (ops CPURegs, simm16);
178 let EncoderMethod = "getMemEncoding";
181 def mem_ea_64 : Operand<i64> {
182 let PrintMethod = "printMemOperandEA";
183 let MIOperandInfo = (ops CPU64Regs, simm16_64);
184 let EncoderMethod = "getMemEncoding";
187 // size operand of ext instruction
188 def size_ext : Operand<i32> {
189 let EncoderMethod = "getSizeExtEncoding";
192 // size operand of ins instruction
193 def size_ins : Operand<i32> {
194 let EncoderMethod = "getSizeInsEncoding";
197 // Transformation Function - get the lower 16 bits.
198 def LO16 : SDNodeXForm<imm, [{
199 return getImm(N, N->getZExtValue() & 0xFFFF);
202 // Transformation Function - get the higher 16 bits.
203 def HI16 : SDNodeXForm<imm, [{
204 return getImm(N, (N->getZExtValue() >> 16) & 0xFFFF);
207 // Node immediate fits as 16-bit sign extended on target immediate.
209 def immSExt16 : PatLeaf<(imm), [{ return isInt<16>(N->getSExtValue()); }]>;
211 // Node immediate fits as 16-bit zero extended on target immediate.
212 // The LO16 param means that only the lower 16 bits of the node
213 // immediate are caught.
215 def immZExt16 : PatLeaf<(imm), [{
216 if (N->getValueType(0) == MVT::i32)
217 return (uint32_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
219 return (uint64_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
222 // Immediate can be loaded with LUi (32-bit int with lower 16-bit cleared).
223 def immLow16Zero : PatLeaf<(imm), [{
224 int64_t Val = N->getSExtValue();
225 return isInt<32>(Val) && !(Val & 0xffff);
228 // shamt field must fit in 5 bits.
229 def immZExt5 : ImmLeaf<i32, [{return Imm == (Imm & 0x1f);}]>;
231 // Mips Address Mode! SDNode frameindex could possibly be a match
232 // since load and store instructions that access the stack use it.
233 def addr : ComplexPattern<iPTR, 2, "SelectAddr", [frameindex], []>;
235 //===----------------------------------------------------------------------===//
236 // Pattern fragment for load/store
237 //===----------------------------------------------------------------------===//
238 class UnalignedLoad<PatFrag Node> :
239 PatFrag<(ops node:$ptr), (Node node:$ptr), [{
240 LoadSDNode *LD = cast<LoadSDNode>(N);
241 return LD->getMemoryVT().getSizeInBits()/8 > LD->getAlignment();
244 class AlignedLoad<PatFrag Node> :
245 PatFrag<(ops node:$ptr), (Node node:$ptr), [{
246 LoadSDNode *LD = cast<LoadSDNode>(N);
247 return LD->getMemoryVT().getSizeInBits()/8 <= LD->getAlignment();
250 class UnalignedStore<PatFrag Node> :
251 PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{
252 StoreSDNode *SD = cast<StoreSDNode>(N);
253 return SD->getMemoryVT().getSizeInBits()/8 > SD->getAlignment();
256 class AlignedStore<PatFrag Node> :
257 PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{
258 StoreSDNode *SD = cast<StoreSDNode>(N);
259 return SD->getMemoryVT().getSizeInBits()/8 <= SD->getAlignment();
262 // Load/Store PatFrags.
// "_a" (aligned) fragments match accesses whose alignment is at least the
// access size; "_u" (unaligned) fragments match under-aligned accesses.
// See the AlignedLoad/UnalignedLoad/AlignedStore/UnalignedStore classes.
263 def sextloadi16_a : AlignedLoad<sextloadi16>;
264 def zextloadi16_a : AlignedLoad<zextloadi16>;
265 def extloadi16_a : AlignedLoad<extloadi16>;
266 def load_a : AlignedLoad<load>;
267 def sextloadi32_a : AlignedLoad<sextloadi32>;
268 def zextloadi32_a : AlignedLoad<zextloadi32>;
269 def extloadi32_a : AlignedLoad<extloadi32>;
270 def truncstorei16_a : AlignedStore<truncstorei16>;
271 def store_a : AlignedStore<store>;
272 def truncstorei32_a : AlignedStore<truncstorei32>;
273 def sextloadi16_u : UnalignedLoad<sextloadi16>;
274 def zextloadi16_u : UnalignedLoad<zextloadi16>;
275 def extloadi16_u : UnalignedLoad<extloadi16>;
276 def load_u : UnalignedLoad<load>;
277 def sextloadi32_u : UnalignedLoad<sextloadi32>;
278 def zextloadi32_u : UnalignedLoad<zextloadi32>;
279 def extloadi32_u : UnalignedLoad<extloadi32>;
280 def truncstorei16_u : UnalignedStore<truncstorei16>;
281 def store_u : UnalignedStore<store>;
282 def truncstorei32_u : UnalignedStore<truncstorei32>;
284 //===----------------------------------------------------------------------===//
285 // Instructions specific format
286 //===----------------------------------------------------------------------===//
288 // Arithmetic and logical instructions with 3 register operands.
289 class ArithLogicR<bits<6> op, bits<6> func, string instr_asm, SDNode OpNode,
290 InstrItinClass itin, RegisterClass RC, bit isComm = 0>:
291 FR<op, func, (outs RC:$rd), (ins RC:$rs, RC:$rt),
292 !strconcat(instr_asm, "\t$rd, $rs, $rt"),
293 [(set RC:$rd, (OpNode RC:$rs, RC:$rt))], itin> {
295 let isCommutable = isComm;
298 class ArithOverflowR<bits<6> op, bits<6> func, string instr_asm,
299 InstrItinClass itin, RegisterClass RC, bit isComm = 0>:
300 FR<op, func, (outs RC:$rd), (ins RC:$rs, RC:$rt),
301 !strconcat(instr_asm, "\t$rd, $rs, $rt"), [], itin> {
303 let isCommutable = isComm;
306 // Arithmetic and logical instructions with 2 register operands.
307 class ArithLogicI<bits<6> op, string instr_asm, SDNode OpNode,
308 Operand Od, PatLeaf imm_type, RegisterClass RC> :
309 FI<op, (outs RC:$rt), (ins RC:$rs, Od:$imm16),
310 !strconcat(instr_asm, "\t$rt, $rs, $imm16"),
311 [(set RC:$rt, (OpNode RC:$rs, imm_type:$imm16))], IIAlu>;
313 class ArithOverflowI<bits<6> op, string instr_asm, SDNode OpNode,
314 Operand Od, PatLeaf imm_type, RegisterClass RC> :
315 FI<op, (outs RC:$rt), (ins RC:$rs, Od:$imm16),
316 !strconcat(instr_asm, "\t$rt, $rs, $imm16"), [], IIAlu>;
318 // Arithmetic Multiply ADD/SUB
319 let rd = 0, shamt = 0, Defs = [HI, LO], Uses = [HI, LO] in
320 class MArithR<bits<6> func, string instr_asm, SDNode op, bit isComm = 0> :
321 FR<0x1c, func, (outs), (ins CPURegs:$rs, CPURegs:$rt),
322 !strconcat(instr_asm, "\t$rs, $rt"),
323 [(op CPURegs:$rs, CPURegs:$rt, LO, HI)], IIImul> {
326 let isCommutable = isComm;
330 class LogicNOR<bits<6> op, bits<6> func, string instr_asm, RegisterClass RC>:
331 FR<op, func, (outs RC:$rd), (ins RC:$rs, RC:$rt),
332 !strconcat(instr_asm, "\t$rd, $rs, $rt"),
333 [(set RC:$rd, (not (or RC:$rs, RC:$rt)))], IIAlu> {
335 let isCommutable = 1;
339 class shift_rotate_imm<bits<6> func, bits<5> isRotate, string instr_asm,
340 SDNode OpNode, PatFrag PF, Operand ImmOpnd,
342 FR<0x00, func, (outs RC:$rd), (ins RC:$rt, ImmOpnd:$shamt),
343 !strconcat(instr_asm, "\t$rd, $rt, $shamt"),
344 [(set RC:$rd, (OpNode RC:$rt, PF:$shamt))], IIAlu> {
348 // 32-bit shift instructions.
349 class shift_rotate_imm32<bits<6> func, bits<5> isRotate, string instr_asm,
351 shift_rotate_imm<func, isRotate, instr_asm, OpNode, immZExt5, shamt, CPURegs>;
353 class shift_rotate_reg<bits<6> func, bits<5> isRotate, string instr_asm,
354 SDNode OpNode, RegisterClass RC>:
355 FR<0x00, func, (outs RC:$rd), (ins CPURegs:$rs, RC:$rt),
356 !strconcat(instr_asm, "\t$rd, $rt, $rs"),
357 [(set RC:$rd, (OpNode RC:$rt, CPURegs:$rs))], IIAlu> {
358 let shamt = isRotate;
361 // Load Upper Immediate
362 class LoadUpper<bits<6> op, string instr_asm, RegisterClass RC, Operand Imm>:
363 FI<op, (outs RC:$rt), (ins Imm:$imm16),
364 !strconcat(instr_asm, "\t$rt, $imm16"), [], IIAlu> {
368 class FMem<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
369 InstrItinClass itin>: FFI<op, outs, ins, asmstr, pattern> {
371 let Inst{25-21} = addr{20-16};
372 let Inst{15-0} = addr{15-0};
376 let canFoldAsLoad = 1 in
377 class LoadM<bits<6> op, string instr_asm, PatFrag OpNode, RegisterClass RC,
378 Operand MemOpnd, bit Pseudo>:
379 FMem<op, (outs RC:$rt), (ins MemOpnd:$addr),
380 !strconcat(instr_asm, "\t$rt, $addr"),
381 [(set RC:$rt, (OpNode addr:$addr))], IILoad> {
382 let isPseudo = Pseudo;
385 class StoreM<bits<6> op, string instr_asm, PatFrag OpNode, RegisterClass RC,
386 Operand MemOpnd, bit Pseudo>:
387 FMem<op, (outs), (ins RC:$rt, MemOpnd:$addr),
388 !strconcat(instr_asm, "\t$rt, $addr"),
389 [(OpNode RC:$rt, addr:$addr)], IIStore> {
390 let isPseudo = Pseudo;
393 // Unaligned Memory Load/Store
394 let canFoldAsLoad = 1 in
395 class LoadUnAlign<bits<6> op, RegisterClass RC, Operand MemOpnd>:
396 FMem<op, (outs RC:$rt), (ins MemOpnd:$addr), "", [], IILoad> {}
398 class StoreUnAlign<bits<6> op, RegisterClass RC, Operand MemOpnd>:
399 FMem<op, (outs), (ins RC:$rt, MemOpnd:$addr), "", [], IIStore> {}
402 multiclass LoadM32<bits<6> op, string instr_asm, PatFrag OpNode,
404 def #NAME# : LoadM<op, instr_asm, OpNode, CPURegs, mem, Pseudo>,
406 def _P8 : LoadM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>,
411 multiclass LoadM64<bits<6> op, string instr_asm, PatFrag OpNode,
413 def #NAME# : LoadM<op, instr_asm, OpNode, CPU64Regs, mem, Pseudo>,
415 def _P8 : LoadM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>,
420 multiclass LoadUnAlign32<bits<6> op> {
421 def #NAME# : LoadUnAlign<op, CPURegs, mem>,
423 def _P8 : LoadUnAlign<op, CPURegs, mem64>,
427 multiclass StoreM32<bits<6> op, string instr_asm, PatFrag OpNode,
429 def #NAME# : StoreM<op, instr_asm, OpNode, CPURegs, mem, Pseudo>,
431 def _P8 : StoreM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>,
436 multiclass StoreM64<bits<6> op, string instr_asm, PatFrag OpNode,
438 def #NAME# : StoreM<op, instr_asm, OpNode, CPU64Regs, mem, Pseudo>,
440 def _P8 : StoreM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>,
445 multiclass StoreUnAlign32<bits<6> op> {
446 def #NAME# : StoreUnAlign<op, CPURegs, mem>,
448 def _P8 : StoreUnAlign<op, CPURegs, mem64>,
452 // Conditional Branch
453 class CBranch<bits<6> op, string instr_asm, PatFrag cond_op, RegisterClass RC>:
454 BranchBase<op, (outs), (ins RC:$rs, RC:$rt, brtarget:$imm16),
455 !strconcat(instr_asm, "\t$rs, $rt, $imm16"),
456 [(brcond (i32 (cond_op RC:$rs, RC:$rt)), bb:$imm16)], IIBranch> {
458 let isTerminator = 1;
459 let hasDelaySlot = 1;
462 class CBranchZero<bits<6> op, bits<5> _rt, string instr_asm, PatFrag cond_op,
464 BranchBase<op, (outs), (ins RC:$rs, brtarget:$imm16),
465 !strconcat(instr_asm, "\t$rs, $imm16"),
466 [(brcond (i32 (cond_op RC:$rs, 0)), bb:$imm16)], IIBranch> {
469 let isTerminator = 1;
470 let hasDelaySlot = 1;
474 class SetCC_R<bits<6> op, bits<6> func, string instr_asm, PatFrag cond_op,
476 FR<op, func, (outs CPURegs:$rd), (ins RC:$rs, RC:$rt),
477 !strconcat(instr_asm, "\t$rd, $rs, $rt"),
478 [(set CPURegs:$rd, (cond_op RC:$rs, RC:$rt))],
483 class SetCC_I<bits<6> op, string instr_asm, PatFrag cond_op, Operand Od,
484 PatLeaf imm_type, RegisterClass RC>:
485 FI<op, (outs CPURegs:$rt), (ins RC:$rs, Od:$imm16),
486 !strconcat(instr_asm, "\t$rt, $rs, $imm16"),
487 [(set CPURegs:$rt, (cond_op RC:$rs, imm_type:$imm16))],
491 class JumpFJ<bits<6> op, string instr_asm>:
492 FJ<op, (outs), (ins jmptarget:$target),
493 !strconcat(instr_asm, "\t$target"), [(br bb:$target)], IIBranch> {
497 let hasDelaySlot = 1;
498 let Predicates = [RelocStatic];
501 // Unconditional branch
502 class UncondBranch<bits<6> op, string instr_asm>:
503 BranchBase<op, (outs), (ins brtarget:$imm16),
504 !strconcat(instr_asm, "\t$imm16"), [(br bb:$imm16)], IIBranch> {
508 let isTerminator = 1;
510 let hasDelaySlot = 1;
511 let Predicates = [RelocPIC];
514 let isBranch=1, isTerminator=1, isBarrier=1, rd=0, hasDelaySlot = 1,
515 isIndirectBranch = 1 in
516 class JumpFR<bits<6> op, bits<6> func, string instr_asm, RegisterClass RC>:
517 FR<op, func, (outs), (ins RC:$rs),
518 !strconcat(instr_asm, "\t$rs"), [(brind RC:$rs)], IIBranch> {
524 // Jump and Link (Call)
525 let isCall=1, hasDelaySlot=1 in {
526 class JumpLink<bits<6> op, string instr_asm>:
527 FJ<op, (outs), (ins calltarget:$target, variable_ops),
528 !strconcat(instr_asm, "\t$target"), [(MipsJmpLink imm:$target)],
531 class JumpLinkReg<bits<6> op, bits<6> func, string instr_asm,
533 FR<op, func, (outs), (ins RC:$rs, variable_ops),
534 !strconcat(instr_asm, "\t$rs"), [(MipsJmpLink RC:$rs)], IIBranch> {
540 class BranchLink<string instr_asm, bits<5> _rt, RegisterClass RC>:
541 FI<0x1, (outs), (ins RC:$rs, brtarget:$imm16, variable_ops),
542 !strconcat(instr_asm, "\t$rs, $imm16"), [], IIBranch> {
548 class Mult<bits<6> func, string instr_asm, InstrItinClass itin,
549 RegisterClass RC, list<Register> DefRegs>:
550 FR<0x00, func, (outs), (ins RC:$rs, RC:$rt),
551 !strconcat(instr_asm, "\t$rs, $rt"), [], itin> {
554 let isCommutable = 1;
558 class Mult32<bits<6> func, string instr_asm, InstrItinClass itin>:
559 Mult<func, instr_asm, itin, CPURegs, [HI, LO]>;
561 class Div<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin,
562 RegisterClass RC, list<Register> DefRegs>:
563 FR<0x00, func, (outs), (ins RC:$rs, RC:$rt),
564 !strconcat(instr_asm, "\t$$zero, $rs, $rt"),
565 [(op RC:$rs, RC:$rt)], itin> {
571 class Div32<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>:
572 Div<op, func, instr_asm, itin, CPURegs, [HI, LO]>;
575 class MoveFromLOHI<bits<6> func, string instr_asm, RegisterClass RC,
576 list<Register> UseRegs>:
577 FR<0x00, func, (outs RC:$rd), (ins),
578 !strconcat(instr_asm, "\t$rd"), [], IIHiLo> {
585 class MoveToLOHI<bits<6> func, string instr_asm, RegisterClass RC,
586 list<Register> DefRegs>:
587 FR<0x00, func, (outs), (ins RC:$rs),
588 !strconcat(instr_asm, "\t$rs"), [], IIHiLo> {
595 class EffectiveAddress<string instr_asm, RegisterClass RC, Operand Mem> :
596 FMem<0x09, (outs RC:$rt), (ins Mem:$addr),
597 instr_asm, [(set RC:$rt, addr:$addr)], IIAlu>;
599 // Count Leading Ones/Zeros in Word
600 class CountLeading0<bits<6> func, string instr_asm, RegisterClass RC>:
601 FR<0x1c, func, (outs RC:$rd), (ins RC:$rs),
602 !strconcat(instr_asm, "\t$rd, $rs"),
603 [(set RC:$rd, (ctlz RC:$rs))], IIAlu>,
604 Requires<[HasBitCount]> {
609 class CountLeading1<bits<6> func, string instr_asm, RegisterClass RC>:
610 FR<0x1c, func, (outs RC:$rd), (ins RC:$rs),
611 !strconcat(instr_asm, "\t$rd, $rs"),
612 [(set RC:$rd, (ctlz (not RC:$rs)))], IIAlu>,
613 Requires<[HasBitCount]> {
618 // Sign Extend in Register.
619 class SignExtInReg<bits<5> sa, string instr_asm, ValueType vt>:
620 FR<0x1f, 0x20, (outs CPURegs:$rd), (ins CPURegs:$rt),
621 !strconcat(instr_asm, "\t$rd, $rt"),
622 [(set CPURegs:$rd, (sext_inreg CPURegs:$rt, vt))], NoItinerary> {
625 let Predicates = [HasSEInReg];
629 class SubwordSwap<bits<6> func, bits<5> sa, string instr_asm, RegisterClass RC>:
630 FR<0x1f, func, (outs RC:$rd), (ins RC:$rt),
631 !strconcat(instr_asm, "\t$rd, $rt"), [], NoItinerary> {
634 let Predicates = [HasSwap];
638 class ReadHardware<RegisterClass CPURegClass, RegisterClass HWRegClass>
639 : FR<0x1f, 0x3b, (outs CPURegClass:$rt), (ins HWRegClass:$rd),
640 "rdhwr\t$rt, $rd", [], IIAlu> {
646 class ExtBase<bits<6> _funct, string instr_asm, RegisterClass RC>:
647 FR<0x1f, _funct, (outs RC:$rt), (ins RC:$rs, uimm16:$pos, size_ext:$sz),
648 !strconcat(instr_asm, " $rt, $rs, $pos, $sz"),
649 [(set RC:$rt, (MipsExt RC:$rs, imm:$pos, imm:$sz))], NoItinerary> {
654 let Predicates = [HasMips32r2];
657 class InsBase<bits<6> _funct, string instr_asm, RegisterClass RC>:
658 FR<0x1f, _funct, (outs RC:$rt),
659 (ins RC:$rs, uimm16:$pos, size_ins:$sz, RC:$src),
660 !strconcat(instr_asm, " $rt, $rs, $pos, $sz"),
661 [(set RC:$rt, (MipsIns RC:$rs, imm:$pos, imm:$sz, RC:$src))],
667 let Predicates = [HasMips32r2];
668 let Constraints = "$src = $rt";
671 // Atomic instructions with 2 source operands (ATOMIC_SWAP & ATOMIC_LOAD_*).
672 class Atomic2Ops<PatFrag Op, string Opstr, RegisterClass DRC,
674 MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$incr),
675 !strconcat("atomic_", Opstr, "\t$dst, $ptr, $incr"),
676 [(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>;
678 multiclass Atomic2Ops32<PatFrag Op, string Opstr> {
679 def #NAME# : Atomic2Ops<Op, Opstr, CPURegs, CPURegs>, Requires<[NotN64]>;
680 def _P8 : Atomic2Ops<Op, Opstr, CPURegs, CPU64Regs>, Requires<[IsN64]>;
683 // Atomic Compare & Swap.
684 class AtomicCmpSwap<PatFrag Op, string Width, RegisterClass DRC,
686 MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$cmp, DRC:$swap),
687 !strconcat("atomic_cmp_swap_", Width, "\t$dst, $ptr, $cmp, $swap"),
688 [(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>;
690 multiclass AtomicCmpSwap32<PatFrag Op, string Width> {
691 def #NAME# : AtomicCmpSwap<Op, Width, CPURegs, CPURegs>, Requires<[NotN64]>;
692 def _P8 : AtomicCmpSwap<Op, Width, CPURegs, CPU64Regs>, Requires<[IsN64]>;
695 class LLBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> :
696 FMem<Opc, (outs RC:$rt), (ins Mem:$addr),
697 !strconcat(opstring, "\t$rt, $addr"), [], IILoad> {
701 class SCBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> :
702 FMem<Opc, (outs RC:$dst), (ins RC:$rt, Mem:$addr),
703 !strconcat(opstring, "\t$rt, $addr"), [], IIStore> {
705 let Constraints = "$rt = $dst";
708 //===----------------------------------------------------------------------===//
709 // Pseudo instructions
710 //===----------------------------------------------------------------------===//
712 // As stack alignment is always done with addiu, we need a 16-bit immediate
713 let Defs = [SP], Uses = [SP] in {
714 def ADJCALLSTACKDOWN : MipsPseudo<(outs), (ins uimm16:$amt),
715 "!ADJCALLSTACKDOWN $amt",
716 [(callseq_start timm:$amt)]>;
717 def ADJCALLSTACKUP : MipsPseudo<(outs), (ins uimm16:$amt1, uimm16:$amt2),
718 "!ADJCALLSTACKUP $amt1",
719 [(callseq_end timm:$amt1, timm:$amt2)]>;
722 // Some assembly macros need to avoid pseudoinstructions and assembler
723 // automatic reordering, so we should reorder ourselves.
724 def MACRO : MipsPseudo<(outs), (ins), ".set\tmacro", []>;
725 def REORDER : MipsPseudo<(outs), (ins), ".set\treorder", []>;
726 def NOMACRO : MipsPseudo<(outs), (ins), ".set\tnomacro", []>;
727 def NOREORDER : MipsPseudo<(outs), (ins), ".set\tnoreorder", []>;
729 // These macros are inserted to prevent GAS from complaining
730 // when using the AT register.
731 def NOAT : MipsPseudo<(outs), (ins), ".set\tnoat", []>;
732 def ATMACRO : MipsPseudo<(outs), (ins), ".set\tat", []>;
734 // When handling PIC code the assembler needs .cpload and .cprestore
735 // directives. If the real instructions corresponding to these directives
736 // are used, we get the same behavior, but also a bunch of warnings
737 // from the assembler.
738 def CPLOAD : MipsPseudo<(outs), (ins CPURegs:$picreg), ".cpload\t$picreg", []>;
739 def CPRESTORE : MipsPseudo<(outs), (ins i32imm:$loc), ".cprestore\t$loc", []>;
741 let usesCustomInserter = 1 in {
742 defm ATOMIC_LOAD_ADD_I8 : Atomic2Ops32<atomic_load_add_8, "load_add_8">;
743 defm ATOMIC_LOAD_ADD_I16 : Atomic2Ops32<atomic_load_add_16, "load_add_16">;
744 defm ATOMIC_LOAD_ADD_I32 : Atomic2Ops32<atomic_load_add_32, "load_add_32">;
745 defm ATOMIC_LOAD_SUB_I8 : Atomic2Ops32<atomic_load_sub_8, "load_sub_8">;
746 defm ATOMIC_LOAD_SUB_I16 : Atomic2Ops32<atomic_load_sub_16, "load_sub_16">;
747 defm ATOMIC_LOAD_SUB_I32 : Atomic2Ops32<atomic_load_sub_32, "load_sub_32">;
748 defm ATOMIC_LOAD_AND_I8 : Atomic2Ops32<atomic_load_and_8, "load_and_8">;
749 defm ATOMIC_LOAD_AND_I16 : Atomic2Ops32<atomic_load_and_16, "load_and_16">;
750 defm ATOMIC_LOAD_AND_I32 : Atomic2Ops32<atomic_load_and_32, "load_and_32">;
751 defm ATOMIC_LOAD_OR_I8 : Atomic2Ops32<atomic_load_or_8, "load_or_8">;
752 defm ATOMIC_LOAD_OR_I16 : Atomic2Ops32<atomic_load_or_16, "load_or_16">;
753 defm ATOMIC_LOAD_OR_I32 : Atomic2Ops32<atomic_load_or_32, "load_or_32">;
754 defm ATOMIC_LOAD_XOR_I8 : Atomic2Ops32<atomic_load_xor_8, "load_xor_8">;
755 defm ATOMIC_LOAD_XOR_I16 : Atomic2Ops32<atomic_load_xor_16, "load_xor_16">;
756 defm ATOMIC_LOAD_XOR_I32 : Atomic2Ops32<atomic_load_xor_32, "load_xor_32">;
757 defm ATOMIC_LOAD_NAND_I8 : Atomic2Ops32<atomic_load_nand_8, "load_nand_8">;
758 defm ATOMIC_LOAD_NAND_I16 : Atomic2Ops32<atomic_load_nand_16, "load_nand_16">;
759 defm ATOMIC_LOAD_NAND_I32 : Atomic2Ops32<atomic_load_nand_32, "load_nand_32">;
761 defm ATOMIC_SWAP_I8 : Atomic2Ops32<atomic_swap_8, "swap_8">;
762 defm ATOMIC_SWAP_I16 : Atomic2Ops32<atomic_swap_16, "swap_16">;
763 defm ATOMIC_SWAP_I32 : Atomic2Ops32<atomic_swap_32, "swap_32">;
765 defm ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap32<atomic_cmp_swap_8, "8">;
766 defm ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap32<atomic_cmp_swap_16, "16">;
767 defm ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap32<atomic_cmp_swap_32, "32">;
770 //===----------------------------------------------------------------------===//
771 // Instruction definition
772 //===----------------------------------------------------------------------===//
774 //===----------------------------------------------------------------------===//
775 // MipsI Instructions
776 //===----------------------------------------------------------------------===//
778 /// Arithmetic Instructions (ALU Immediate)
// ADDiu matches 'add' with a sign-extended 16-bit immediate.  ADDi is built
// from ArithOverflowI and so has no isel pattern -- presumably because it
// traps on overflow; TODO confirm against the MIPS ISA manual.
779 def ADDiu : ArithLogicI<0x09, "addiu", add, simm16, immSExt16, CPURegs>;
780 def ADDi : ArithOverflowI<0x08, "addi", add, simm16, immSExt16, CPURegs>;
// Set-on-less-than against a sign-extended immediate (signed / unsigned).
781 def SLTi : SetCC_I<0x0a, "slti", setlt, simm16, immSExt16, CPURegs>;
782 def SLTiu : SetCC_I<0x0b, "sltiu", setult, simm16, immSExt16, CPURegs>;
// Logical immediates are zero-extended (uimm16 / immZExt16).
783 def ANDi : ArithLogicI<0x0c, "andi", and, uimm16, immZExt16, CPURegs>;
784 def ORi : ArithLogicI<0x0d, "ori", or, uimm16, immZExt16, CPURegs>;
785 def XORi : ArithLogicI<0x0e, "xori", xor, uimm16, immZExt16, CPURegs>;
// LUi places a 16-bit immediate in the upper half, lower 16 bits cleared
// (see immLow16Zero above).
786 def LUi : LoadUpper<0x0f, "lui", CPURegs, uimm16>;
788 /// Arithmetic Instructions (3-Operand, R-Type)
// Only ADDu/SUBu carry isel patterns; ADD/SUB use ArithOverflowR, whose
// pattern list is empty.  The trailing 1 marks commutative operations.
789 def ADDu : ArithLogicR<0x00, 0x21, "addu", add, IIAlu, CPURegs, 1>;
790 def SUBu : ArithLogicR<0x00, 0x23, "subu", sub, IIAlu, CPURegs>;
791 def ADD : ArithOverflowR<0x00, 0x20, "add", IIAlu, CPURegs, 1>;
792 def SUB : ArithOverflowR<0x00, 0x22, "sub", IIAlu, CPURegs>;
// Register-register set-on-less-than.
793 def SLT : SetCC_R<0x00, 0x2a, "slt", setlt, CPURegs>;
794 def SLTu : SetCC_R<0x00, 0x2b, "sltu", setult, CPURegs>;
// Bitwise logic; NOR's pattern is (not (or a, b)) -- see LogicNOR.
795 def AND : ArithLogicR<0x00, 0x24, "and", and, IIAlu, CPURegs, 1>;
796 def OR : ArithLogicR<0x00, 0x25, "or", or, IIAlu, CPURegs, 1>;
797 def XOR : ArithLogicR<0x00, 0x26, "xor", xor, IIAlu, CPURegs, 1>;
798 def NOR : LogicNOR<0x00, 0x27, "nor", CPURegs>;
800 /// Shift Instructions
// Immediate-count shifts (count in the shamt field) and variable-count
// shifts (count taken from a register).
801 def SLL : shift_rotate_imm32<0x00, 0x00, "sll", shl>;
802 def SRL : shift_rotate_imm32<0x02, 0x00, "srl", srl>;
803 def SRA : shift_rotate_imm32<0x03, 0x00, "sra", sra>;
804 def SLLV : shift_rotate_reg<0x04, 0x00, "sllv", shl, CPURegs>;
805 def SRLV : shift_rotate_reg<0x06, 0x00, "srlv", srl, CPURegs>;
806 def SRAV : shift_rotate_reg<0x07, 0x00, "srav", sra, CPURegs>;
808 // Rotate Instructions
809 let Predicates = [HasMips32r2] in {
810 def ROTR : shift_rotate_imm32<0x02, 0x01, "rotr", rotr>;
811 def ROTRV : shift_rotate_reg<0x06, 0x01, "rotrv", rotr, CPURegs>;
814 /// Load and Store Instructions
// Aligned loads/stores select the "_a" PatFrags; each defm also produces a
// "_P8" variant with a 64-bit pointer (mem64) operand for the N64 ABI.
816 defm LB : LoadM32<0x20, "lb", sextloadi8>;
817 defm LBu : LoadM32<0x24, "lbu", zextloadi8>;
818 defm LH : LoadM32<0x21, "lh", sextloadi16_a>;
819 defm LHu : LoadM32<0x25, "lhu", zextloadi16_a>;
820 defm LW : LoadM32<0x23, "lw", load_a>;
821 defm SB : StoreM32<0x28, "sb", truncstorei8>;
822 defm SH : StoreM32<0x29, "sh", truncstorei16_a>;
823 defm SW : StoreM32<0x2b, "sw", store_a>;
// Unaligned variants select the "_u" PatFrags; the trailing 1 sets the
// Pseudo bit (see LoadM/StoreM).
826 defm ULH : LoadM32<0x21, "ulh", sextloadi16_u, 1>;
827 defm ULHu : LoadM32<0x25, "ulhu", zextloadi16_u, 1>;
828 defm ULW : LoadM32<0x23, "ulw", load_u, 1>;
829 defm USH : StoreM32<0x29, "ush", truncstorei16_u, 1>;
830 defm USW : StoreM32<0x2b, "usw", store_u, 1>;
832 /// Primitives for unaligned
833 defm LWL : LoadUnAlign32<0x22>;
834 defm LWR : LoadUnAlign32<0x26>;
835 defm SWL : StoreUnAlign32<0x2A>;
836 defm SWR : StoreUnAlign32<0x2E>;
838 let hasSideEffects = 1 in
839 def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype",
840 [(MipsSync imm:$stype)], NoItinerary, FrmOther>
845 let Inst{10-6} = stype;
849 /// Load-linked, Store-conditional
// Load-linked / store-conditional; the "_P8" variants take a 64-bit address
// operand (mem64) and are selected only under the N64 ABI.
850 def LL : LLBase<0x30, "ll", CPURegs, mem>, Requires<[NotN64]>;
851 def LL_P8 : LLBase<0x30, "ll", CPURegs, mem64>, Requires<[IsN64]>;
852 def SC : SCBase<0x38, "sc", CPURegs, mem>, Requires<[NotN64]>;
853 def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>, Requires<[IsN64]>;
855 /// Jump and Branch Instructions
// Absolute jump (restricted to static relocation -- see JumpFJ's
// Predicates) and register-indirect jump.
856 def J : JumpFJ<0x02, "j">;
857 def JR : JumpFR<0x00, 0x08, "jr", CPURegs>;
// Unconditional branch, used under PIC (see UncondBranch's Predicates).
858 def B : UncondBranch<0x04, "b">;
// Two-register compare-and-branch.
859 def BEQ : CBranch<0x04, "beq", seteq, CPURegs>;
860 def BNE : CBranch<0x05, "bne", setne, CPURegs>;
// Compare-against-zero branches; the second template argument is the value
// encoded in the rt field (see CBranchZero's _rt parameter).
861 def BGEZ : CBranchZero<0x01, 1, "bgez", setge, CPURegs>;
862 def BGTZ : CBranchZero<0x07, 0, "bgtz", setgt, CPURegs>;
863 def BLEZ : CBranchZero<0x06, 0, "blez", setle, CPURegs>;
864 def BLTZ : CBranchZero<0x01, 0, "bltz", setlt, CPURegs>;
866 // All calls clobber the non-callee saved registers...
867 let Defs = [AT, V0, V1, A0, A1, A2, A3, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9,
868 K0, K1, GP, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9] in {
869 def JAL : JumpLink<0x03, "jal">;
870 def JALR : JumpLinkReg<0x00, 0x09, "jalr", CPURegs>;
871 def BGEZAL : BranchLink<"bgezal", 0x11, CPURegs>;
872 def BLTZAL : BranchLink<"bltzal", 0x10, CPURegs>;
875 let isReturn=1, isTerminator=1, hasDelaySlot=1,
876 isBarrier=1, hasCtrlDep=1, rd=0, rt=0, shamt=0 in
877 def RET : FR <0x00, 0x08, (outs), (ins CPURegs:$target),
878 "jr\t$target", [(MipsRet CPURegs:$target)], IIBranch>;
880 /// Multiply and Divide Instructions.
// Multiply/divide deposit their results in the HI/LO register pair
// (Mult32/Div32 pass [HI, LO] as DefRegs).
881 def MULT : Mult32<0x18, "mult", IIImul>;
882 def MULTu : Mult32<0x19, "multu", IIImul>;
883 def SDIV : Div32<MipsDivRem, 0x1a, "div", IIIdiv>;
884 def UDIV : Div32<MipsDivRemU, 0x1b, "divu", IIIdiv>;
// Moves between general registers and the HI/LO pair.
886 def MTHI : MoveToLOHI<0x11, "mthi", CPURegs, [HI]>;
887 def MTLO : MoveToLOHI<0x13, "mtlo", CPURegs, [LO]>;
888 def MFHI : MoveFromLOHI<0x10, "mfhi", CPURegs, [HI]>;
889 def MFLO : MoveFromLOHI<0x12, "mflo", CPURegs, [LO]>;
891 /// Sign Ext In Register Instructions.
// SEB/SEH sign-extend the low 8/16 bits of a register in place.
892 def SEB : SignExtInReg<0x10, "seb", i8>;
893 def SEH : SignExtInReg<0x18, "seh", i16>;
// Count leading zeros / leading ones.
896 def CLZ : CountLeading0<0x20, "clz", CPURegs>;
897 def CLO : CountLeading1<0x21, "clo", CPURegs>;
899 /// Word Swap Bytes Within Halfwords
900 def WSBH : SubwordSwap<0x20, 0x2, "wsbh", CPURegs>;
// NOP: an all-zero FJ encoding with no operands or pattern.
904 def NOP : FJ<0, (outs), (ins), "nop", [], IIAlu>;
906 // FrameIndexes are legalized when they are operands from load/store
907 // instructions. The same does not happen for stack address copies, so an
908 // add op with mem ComplexPattern is used and the stack address copy
909 // can be matched. It's similar to Sparc LEA_ADDRi
910 def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>;
912 // DynAlloc node points to dynamically allocated stack space.
913 // $sp is added to the list of implicitly used registers to prevent dead code
914 // elimination from removing instructions that modify $sp.
// Same addiu-based effective-address form as LEA_ADDiu, but matched only
// from the MipsDynAlloc node (see the Pat near the end of this file).
916 def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>;
// Multiply-accumulate into HI/LO. The extra trailing '1' on MADD/MADDU is a
// flag the MSUB forms do not pass — presumably commutativity; TODO confirm
// against the MArithR class definition.
919 def MADD : MArithR<0, "madd", MipsMAdd, 1>;
920 def MADDU : MArithR<1, "maddu", MipsMAddu, 1>;
921 def MSUB : MArithR<4, "msub", MipsMSub>;
922 def MSUBU : MArithR<5, "msubu", MipsMSubu>;
924 // MUL is an assembly macro in the current used ISAs. In recent ISA's
925 // it is a real instruction.
926 def MUL : ArithLogicR<0x1c, 0x02, "mul", mul, IIImul, CPURegs, 1>,
927 Requires<[HasMips32]>;
// Read a hardware register (rdhwr), used e.g. for the TLS pointer.
929 def RDHWR : ReadHardware<CPURegs, HWRegs>;
// Bit-field extract and insert.
931 def EXT : ExtBase<0, "ext", CPURegs>;
932 def INS : InsBase<4, "ins", CPURegs>;
934 //===----------------------------------------------------------------------===//
935 // Arbitrary patterns that map to one or more instructions
936 //===----------------------------------------------------------------------===//
// Materializing i32 immediates:
//  - sign-extendable 16-bit: a single addiu from $zero
//  - zero-extendable 16-bit: a single ori from $zero
//  - low 16 bits all zero:   a single lui of the high half
939 def : Pat<(i32 immSExt16:$in),
940 (ADDiu ZERO, imm:$in)>;
941 def : Pat<(i32 immZExt16:$in),
942 (ORi ZERO, imm:$in)>;
943 def : Pat<(i32 immLow16Zero:$in),
944 (LUi (HI16 imm:$in))>;
946 // Arbitrary immediates
// General case: lui of the high half, then ori in the low half.
947 def : Pat<(i32 imm:$imm),
948 (ORi (LUi (HI16 imm:$imm)), (LO16 imm:$imm))>;
// Carry-producing add/sub nodes are lowered to the plain unsigned forms;
// Mips has no flags register, so subc/addc carry outputs are not modeled.
951 def : Pat<(subc CPURegs:$lhs, CPURegs:$rhs),
952 (SUBu CPURegs:$lhs, CPURegs:$rhs)>;
953 def : Pat<(addc CPURegs:$lhs, CPURegs:$rhs),
954 (ADDu CPURegs:$lhs, CPURegs:$rhs)>;
955 def : Pat<(addc CPURegs:$src, immSExt16:$imm),
956 (ADDiu CPURegs:$src, imm:$imm)>;
// Direct calls to global symbols or external symbols select to JAL.
959 def : Pat<(MipsJmpLink (i32 tglobaladdr:$dst)),
960 (JAL tglobaladdr:$dst)>;
961 def : Pat<(MipsJmpLink (i32 texternalsym:$dst)),
962 (JAL texternalsym:$dst)>;
// Indirect-call pattern kept disabled; register calls are handled elsewhere.
963 //def : Pat<(MipsJmpLink CPURegs:$dst),
964 // (JALR CPURegs:$dst)>;
// Address materialization: MipsHi of a symbol selects to LUi (upper half).
967 def : Pat<(MipsHi tglobaladdr:$in), (LUi tglobaladdr:$in)>;
968 def : Pat<(MipsHi tblockaddress:$in), (LUi tblockaddress:$in)>;
969 def : Pat<(MipsHi tjumptable:$in), (LUi tjumptable:$in)>;
970 def : Pat<(MipsHi tconstpool:$in), (LUi tconstpool:$in)>;
971 def : Pat<(MipsHi tglobaltlsaddr:$in), (LUi tglobaltlsaddr:$in)>;
// MipsLo standing alone: add the low half to $zero.
973 def : Pat<(MipsLo tglobaladdr:$in), (ADDiu ZERO, tglobaladdr:$in)>;
974 def : Pat<(MipsLo tblockaddress:$in), (ADDiu ZERO, tblockaddress:$in)>;
975 def : Pat<(MipsLo tjumptable:$in), (ADDiu ZERO, tjumptable:$in)>;
976 def : Pat<(MipsLo tconstpool:$in), (ADDiu ZERO, tconstpool:$in)>;
977 def : Pat<(MipsLo tglobaltlsaddr:$in), (ADDiu ZERO, tglobaltlsaddr:$in)>;
// Common hi+lo combination: fold the MipsLo addend into a single ADDiu.
979 def : Pat<(add CPURegs:$hi, (MipsLo tglobaladdr:$lo)),
980 (ADDiu CPURegs:$hi, tglobaladdr:$lo)>;
981 def : Pat<(add CPURegs:$hi, (MipsLo tblockaddress:$lo)),
982 (ADDiu CPURegs:$hi, tblockaddress:$lo)>;
983 def : Pat<(add CPURegs:$hi, (MipsLo tjumptable:$lo)),
984 (ADDiu CPURegs:$hi, tjumptable:$lo)>;
985 def : Pat<(add CPURegs:$hi, (MipsLo tconstpool:$lo)),
986 (ADDiu CPURegs:$hi, tconstpool:$lo)>;
987 def : Pat<(add CPURegs:$hi, (MipsLo tglobaltlsaddr:$lo)),
988 (ADDiu CPURegs:$hi, tglobaltlsaddr:$lo)>;
// GP-relative addressing: add the gp_rel offset to the incoming $gp value.
991 def : Pat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)),
992 (ADDiu CPURegs:$gp, tglobaladdr:$in)>;
993 def : Pat<(add CPURegs:$gp, (MipsGPRel tconstpool:$in)),
994 (ADDiu CPURegs:$gp, tconstpool:$in)>;
// MipsWrapper nodes (GOT-style symbol references) select to an add of the
// symbol operand onto a base register; every instantiation below uses
// ADDiu with $gp as the base, one per symbol operand kind.
997 class WrapperPat<SDNode node, Instruction ADDiuOp, Register GPReg>:
998 Pat<(MipsWrapper node:$in),
999 (ADDiuOp GPReg, node:$in)>;
1001 def : WrapperPat<tglobaladdr, ADDiu, GP>;
1002 def : WrapperPat<tconstpool, ADDiu, GP>;
1003 def : WrapperPat<texternalsym, ADDiu, GP>;
1004 def : WrapperPat<tblockaddress, ADDiu, GP>;
1005 def : WrapperPat<tjumptable, ADDiu, GP>;
1006 def : WrapperPat<tglobaltlsaddr, ADDiu, GP>;
1008 // Mips does not have "not", so we expand our way
// not x  ==>  nor x, $zero
1009 def : Pat<(not CPURegs:$in),
1010 (NOR CPURegs:$in, ZERO)>;
// Extending loads: anyext from i1/i8/i16 is implemented with the
// zero-extending load instructions (aligned _a vs unaligned _u for i16).
// The _P8 instruction variants are the N64-ABI (64-bit pointer) forms.
1013 let Predicates = [NotN64] in {
1014 def : Pat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
1015 def : Pat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
1016 def : Pat<(i32 (extloadi16_a addr:$src)), (LHu addr:$src)>;
1017 def : Pat<(i32 (extloadi16_u addr:$src)), (ULHu addr:$src)>;
1019 let Predicates = [IsN64] in {
1020 def : Pat<(i32 (extloadi1 addr:$src)), (LBu_P8 addr:$src)>;
1021 def : Pat<(i32 (extloadi8 addr:$src)), (LBu_P8 addr:$src)>;
1022 def : Pat<(i32 (extloadi16_a addr:$src)), (LHu_P8 addr:$src)>;
1023 def : Pat<(i32 (extloadi16_u addr:$src)), (ULHu_P8 addr:$src)>;
// Storing a zero constant: store $zero directly instead of materializing 0.
1027 let Predicates = [NotN64] in {
1028 def : Pat<(store_a (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
1029 def : Pat<(store_u (i32 0), addr:$dst), (USW ZERO, addr:$dst)>;
1031 let Predicates = [IsN64] in {
1032 def : Pat<(store_a (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>;
1033 def : Pat<(store_u (i32 0), addr:$dst), (USW_P8 ZERO, addr:$dst)>;
// Conditional-branch selection patterns, parameterized over the register
// class and the compare/branch instructions so they can be reused for
// other register widths.
1037 multiclass BrcondPats<RegisterClass RC, Instruction BEQOp, Instruction BNEOp,
1038 Instruction SLTOp, Instruction SLTuOp, Instruction SLTiOp,
1039 Instruction SLTiuOp, Register ZEROReg> {
// Compare against zero maps directly onto bne/beq with $zero.
1040 def : Pat<(brcond (i32 (setne RC:$lhs, 0)), bb:$dst),
1041 (BNEOp RC:$lhs, ZEROReg, bb:$dst)>;
1042 def : Pat<(brcond (i32 (seteq RC:$lhs, 0)), bb:$dst),
1043 (BEQOp RC:$lhs, ZEROReg, bb:$dst)>;
// setge/setuge: branch when the slt result is 0 (lhs NOT less than rhs).
1045 def : Pat<(brcond (i32 (setge RC:$lhs, RC:$rhs)), bb:$dst),
1046 (BEQ (SLTOp RC:$lhs, RC:$rhs), ZERO, bb:$dst)>;
1047 def : Pat<(brcond (i32 (setuge RC:$lhs, RC:$rhs)), bb:$dst),
1048 (BEQ (SLTuOp RC:$lhs, RC:$rhs), ZERO, bb:$dst)>;
1049 def : Pat<(brcond (i32 (setge RC:$lhs, immSExt16:$rhs)), bb:$dst),
1050 (BEQ (SLTiOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
1051 def : Pat<(brcond (i32 (setuge RC:$lhs, immSExt16:$rhs)), bb:$dst),
1052 (BEQ (SLTiuOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
// setle/setule: swap the operands (lhs <= rhs  ==  !(rhs < lhs)) and
// branch on a zero slt result.
1054 def : Pat<(brcond (i32 (setle RC:$lhs, RC:$rhs)), bb:$dst),
1055 (BEQ (SLTOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>;
1056 def : Pat<(brcond (i32 (setule RC:$lhs, RC:$rhs)), bb:$dst),
1057 (BEQ (SLTuOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>;
// Generic boolean condition: branch when the value is non-zero.
1059 def : Pat<(brcond RC:$cond, bb:$dst),
1060 (BNEOp RC:$cond, ZEROReg, bb:$dst)>;
1063 defm : BrcondPats<CPURegs, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>;
// setcc selection, built from slt/sltu/slti/sltiu plus xor/xori:
//   seteq a,b  -> sltiu (xor a,b), 1    (xor is 0 iff equal)
//   setne a,b  -> sltu $zero, (xor a,b) (non-zero iff different)
1066 multiclass SeteqPats<RegisterClass RC, Instruction SLTiuOp, Instruction XOROp,
1067 Instruction SLTuOp, Register ZEROReg> {
1068 def : Pat<(seteq RC:$lhs, RC:$rhs),
1069 (SLTiuOp (XOROp RC:$lhs, RC:$rhs), 1)>;
1070 def : Pat<(setne RC:$lhs, RC:$rhs),
1071 (SLTuOp ZEROReg, (XOROp RC:$lhs, RC:$rhs))>;
// setle a,b  ==  !(b < a): swap the slt operands and invert with xori 1.
1074 multiclass SetlePats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> {
1075 def : Pat<(setle RC:$lhs, RC:$rhs),
1076 (XORi (SLTOp RC:$rhs, RC:$lhs), 1)>;
1077 def : Pat<(setule RC:$lhs, RC:$rhs),
1078 (XORi (SLTuOp RC:$rhs, RC:$lhs), 1)>;
// setgt a,b  ==  b < a: just swap the slt operands.
1081 multiclass SetgtPats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> {
1082 def : Pat<(setgt RC:$lhs, RC:$rhs),
1083 (SLTOp RC:$rhs, RC:$lhs)>;
1084 def : Pat<(setugt RC:$lhs, RC:$rhs),
1085 (SLTuOp RC:$rhs, RC:$lhs)>;
// setge a,b  ==  !(a < b): slt then invert with xori 1.
1088 multiclass SetgePats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> {
1089 def : Pat<(setge RC:$lhs, RC:$rhs),
1090 (XORi (SLTOp RC:$lhs, RC:$rhs), 1)>;
1091 def : Pat<(setuge RC:$lhs, RC:$rhs),
1092 (XORi (SLTuOp RC:$lhs, RC:$rhs), 1)>;
// Same as SetgePats, but with a 16-bit signed immediate on the rhs so the
// slti/sltiu forms can be used.
1095 multiclass SetgeImmPats<RegisterClass RC, Instruction SLTiOp,
1096 Instruction SLTiuOp> {
1097 def : Pat<(setge RC:$lhs, immSExt16:$rhs),
1098 (XORi (SLTiOp RC:$lhs, immSExt16:$rhs), 1)>;
1099 def : Pat<(setuge RC:$lhs, immSExt16:$rhs),
1100 (XORi (SLTiuOp RC:$lhs, immSExt16:$rhs), 1)>;
// Instantiate all of the above for the 32-bit integer register class.
1103 defm : SeteqPats<CPURegs, SLTiu, XOR, SLTu, ZERO>;
1104 defm : SetlePats<CPURegs, SLT, SLTu>;
1105 defm : SetgtPats<CPURegs, SLT, SLTu>;
1106 defm : SetgePats<CPURegs, SLT, SLTu>;
1107 defm : SetgeImmPats<CPURegs, SLTi, SLTiu>;
1109 // select MipsDynAlloc
1110 def : Pat<(MipsDynAlloc addr:$f), (DynAlloc addr:$f)>;
// bswap on a 32-bit value: wsbh swaps bytes within each halfword, then
// rotr by 16 swaps the two halfwords.
1113 def : Pat<(bswap CPURegs:$rt), (ROTR (WSBH CPURegs:$rt), 16)>;
1115 //===----------------------------------------------------------------------===//
1116 // Floating Point Support
1117 //===----------------------------------------------------------------------===//
1119 include "MipsInstrFPU.td"
1120 include "Mips64InstrInfo.td"
1121 include "MipsCondMov.td"