1 //===- X86InstrInfo.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 instruction set, defining the instructions, and
11 // properties of the instructions which are needed for code generation, machine
12 // code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // X86 specific DAG Nodes.
20 def SDTIntShiftDOp: SDTypeProfile<1, 3,
21 [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
22 SDTCisInt<0>, SDTCisInt<3>]>;
24 def SDTX86CmpTest : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
26 def SDTX86Cmov : SDTypeProfile<1, 4,
27 [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
28 SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
30 def SDTX86BrCond : SDTypeProfile<0, 3,
31 [SDTCisVT<0, OtherVT>,
32 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
34 def SDTX86SetCC : SDTypeProfile<1, 2,
36 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
38 def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
40 def SDTX86cas8 : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
42 def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i16>]>;
44 def SDT_X86CallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
45 def SDT_X86CallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
48 def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
50 def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;
52 def SDTX86RdTsc : SDTypeProfile<0, 0, []>;
54 def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
56 def SDT_X86TLSADDR : SDTypeProfile<1, 1, [SDTCisPtrTy<0>, SDTCisInt<1>]>;
58 def SDT_X86TLSTP : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;
60 def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
62 def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
64 def X86bsf : SDNode<"X86ISD::BSF", SDTIntUnaryOp>;
65 def X86bsr : SDNode<"X86ISD::BSR", SDTIntUnaryOp>;
66 def X86shld : SDNode<"X86ISD::SHLD", SDTIntShiftDOp>;
67 def X86shrd : SDNode<"X86ISD::SHRD", SDTIntShiftDOp>;
69 def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
71 def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
72 def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
74 def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
76 def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
77 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
79 def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86cas8,
80 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
83 def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
84 [SDNPHasChain, SDNPOptInFlag]>;
86 def X86callseq_start :
87 SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
88 [SDNPHasChain, SDNPOutFlag]>;
90 SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
91 [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
93 def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
94 [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
96 def X86tailcall: SDNode<"X86ISD::TAILCALL", SDT_X86Call,
97 [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
99 def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
100 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore]>;
101 def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
102 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
105 def X86rdtsc : SDNode<"X86ISD::RDTSC_DAG",SDTX86RdTsc,
106 [SDNPHasChain, SDNPOutFlag, SDNPSideEffect]>;
108 def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
109 def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
111 def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
112 [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
113 def X86TLStp : SDNode<"X86ISD::THREAD_POINTER", SDT_X86TLSTP, []>;
115 def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
118 def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
119 [SDNPHasChain, SDNPOptInFlag]>;
121 //===----------------------------------------------------------------------===//
122 // X86 Operand Definitions.
125 // *mem - Operand definitions for the funky X86 addressing mode operands.
127 class X86MemOperand<string printMethod> : Operand<iPTR> {
128 let PrintMethod = printMethod;
129 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
132 def i8mem : X86MemOperand<"printi8mem">;
133 def i16mem : X86MemOperand<"printi16mem">;
134 def i32mem : X86MemOperand<"printi32mem">;
135 def i64mem : X86MemOperand<"printi64mem">;
136 def i128mem : X86MemOperand<"printi128mem">;
137 def f32mem : X86MemOperand<"printf32mem">;
138 def f64mem : X86MemOperand<"printf64mem">;
139 def f80mem : X86MemOperand<"printf80mem">;
140 def f128mem : X86MemOperand<"printf128mem">;
142 def lea32mem : Operand<i32> {
143 let PrintMethod = "printi32mem";
144 let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
147 def SSECC : Operand<i8> {
148 let PrintMethod = "printSSECC";
151 def piclabel: Operand<i32> {
152 let PrintMethod = "printPICLabel";
155 // A couple of more descriptive operand definitions.
156 // 16-bits but only 8 bits are significant.
157 def i16i8imm : Operand<i16>;
158 // 32-bits but only 8 bits are significant.
159 def i32i8imm : Operand<i32>;
161 // Branch targets have OtherVT type.
162 def brtarget : Operand<OtherVT>;
164 //===----------------------------------------------------------------------===//
165 // X86 Complex Pattern Definitions.
168 // Define X86 specific addressing mode.
169 def addr : ComplexPattern<iPTR, 4, "SelectAddr", [], []>;
170 def lea32addr : ComplexPattern<i32, 4, "SelectLEAAddr",
171 [add, mul, shl, or, frameindex], []>;
173 //===----------------------------------------------------------------------===//
174 // X86 Instruction Predicate Definitions.
175 def HasMMX : Predicate<"Subtarget->hasMMX()">;
176 def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
177 def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
178 def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
179 def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
180 def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
181 def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
182 def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
183 def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
184 def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
185 def In64BitMode : Predicate<"Subtarget->is64Bit()">;
186 def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
187 def NotSmallCode : Predicate<"TM.getCodeModel() != CodeModel::Small">;
188 def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
190 //===----------------------------------------------------------------------===//
191 // X86 Instruction Format Definitions.
194 include "X86InstrFormats.td"
196 //===----------------------------------------------------------------------===//
197 // Pattern fragments...
200 // X86 specific condition code. These correspond to CondCode in
201 // X86InstrInfo.h. They must be kept in synch.
202 def X86_COND_A : PatLeaf<(i8 0)>;
203 def X86_COND_AE : PatLeaf<(i8 1)>;
204 def X86_COND_B : PatLeaf<(i8 2)>;
205 def X86_COND_BE : PatLeaf<(i8 3)>;
206 def X86_COND_E : PatLeaf<(i8 4)>;
207 def X86_COND_G : PatLeaf<(i8 5)>;
208 def X86_COND_GE : PatLeaf<(i8 6)>;
209 def X86_COND_L : PatLeaf<(i8 7)>;
210 def X86_COND_LE : PatLeaf<(i8 8)>;
211 def X86_COND_NE : PatLeaf<(i8 9)>;
212 def X86_COND_NO : PatLeaf<(i8 10)>;
213 def X86_COND_NP : PatLeaf<(i8 11)>;
214 def X86_COND_NS : PatLeaf<(i8 12)>;
215 def X86_COND_O : PatLeaf<(i8 13)>;
216 def X86_COND_P : PatLeaf<(i8 14)>;
217 def X86_COND_S : PatLeaf<(i8 15)>;
219 def i16immSExt8 : PatLeaf<(i16 imm), [{
220 // i16immSExt8 predicate - True if the 16-bit immediate fits in a 8-bit
221 // sign extended field.
222 return (int16_t)N->getValue() == (int8_t)N->getValue();
225 def i32immSExt8 : PatLeaf<(i32 imm), [{
226 // i32immSExt8 predicate - True if the 32-bit immediate fits in a 8-bit
227 // sign extended field.
228 return (int32_t)N->getValue() == (int8_t)N->getValue();
231 // Helper fragments for loads.
232 // It's always safe to treat a anyext i16 load as a i32 load if the i16 is
233 // known to be 32-bit aligned or better. Ditto for i8 to i16.
234 def loadi16 : PatFrag<(ops node:$ptr), (i16 (ld node:$ptr)), [{
235 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
236 if (LD->getAddressingMode() != ISD::UNINDEXED)
238 ISD::LoadExtType ExtType = LD->getExtensionType();
239 if (ExtType == ISD::NON_EXTLOAD)
241 if (ExtType == ISD::EXTLOAD)
242 return LD->getAlignment() >= 2 && !LD->isVolatile();
247 def loadi32 : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
248 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
249 if (LD->getAddressingMode() != ISD::UNINDEXED)
251 ISD::LoadExtType ExtType = LD->getExtensionType();
252 if (ExtType == ISD::NON_EXTLOAD)
254 if (ExtType == ISD::EXTLOAD)
255 return LD->getAlignment() >= 4 && !LD->isVolatile();
260 def loadi8 : PatFrag<(ops node:$ptr), (i8 (load node:$ptr))>;
261 def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
263 def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
264 def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
265 def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
267 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
268 def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
269 def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
271 def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
272 def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
273 def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
274 def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
275 def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
276 def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
278 def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
279 def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
280 def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
281 def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
282 def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
283 def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
286 // An 'and' node with a single use.
287 def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
288 return N->hasOneUse();
291 //===----------------------------------------------------------------------===//
292 // Instruction list...
295 // ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
296 // a stack adjustment and the codegen must know that they may modify the stack
297 // pointer before prolog-epilog rewriting occurs.
298 // Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
299 // sub / add which can clobber EFLAGS.
300 let Defs = [ESP, EFLAGS], Uses = [ESP] in {
301 def ADJCALLSTACKDOWN : I<0, Pseudo, (outs), (ins i32imm:$amt),
303 [(X86callseq_start imm:$amt)]>;
304 def ADJCALLSTACKUP : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
306 [(X86callseq_end imm:$amt1, imm:$amt2)]>;
310 let neverHasSideEffects = 1 in
311 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
314 let neverHasSideEffects = 1, isNotDuplicable = 1 in
315 def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins piclabel:$label),
316 "call\t$label\n\tpop{l}\t$reg", []>;
318 //===----------------------------------------------------------------------===//
319 // Control Flow Instructions...
322 // Return instructions.
323 let isTerminator = 1, isReturn = 1, isBarrier = 1,
324 hasCtrlDep = 1, FPForm = SpecialFP, FPFormBits = SpecialFP.Value in {
325 def RET : I <0xC3, RawFrm, (outs), (ins variable_ops),
328 def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
330 [(X86retflag imm:$amt)]>;
333 // All branches are RawFrm, Void, Branch, and Terminators
334 let isBranch = 1, isTerminator = 1 in
335 class IBr<bits<8> opcode, dag ins, string asm, list<dag> pattern> :
336 I<opcode, RawFrm, (outs), ins, asm, pattern>;
338 let isBranch = 1, isBarrier = 1 in
339 def JMP : IBr<0xE9, (ins brtarget:$dst), "jmp\t$dst", [(br bb:$dst)]>;
342 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
343 def JMP32r : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
344 [(brind GR32:$dst)]>;
345 def JMP32m : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
346 [(brind (loadi32 addr:$dst))]>;
349 // Conditional branches
350 let Uses = [EFLAGS] in {
351 def JE : IBr<0x84, (ins brtarget:$dst), "je\t$dst",
352 [(X86brcond bb:$dst, X86_COND_E, EFLAGS)]>, TB;
353 def JNE : IBr<0x85, (ins brtarget:$dst), "jne\t$dst",
354 [(X86brcond bb:$dst, X86_COND_NE, EFLAGS)]>, TB;
355 def JL : IBr<0x8C, (ins brtarget:$dst), "jl\t$dst",
356 [(X86brcond bb:$dst, X86_COND_L, EFLAGS)]>, TB;
357 def JLE : IBr<0x8E, (ins brtarget:$dst), "jle\t$dst",
358 [(X86brcond bb:$dst, X86_COND_LE, EFLAGS)]>, TB;
359 def JG : IBr<0x8F, (ins brtarget:$dst), "jg\t$dst",
360 [(X86brcond bb:$dst, X86_COND_G, EFLAGS)]>, TB;
361 def JGE : IBr<0x8D, (ins brtarget:$dst), "jge\t$dst",
362 [(X86brcond bb:$dst, X86_COND_GE, EFLAGS)]>, TB;
364 def JB : IBr<0x82, (ins brtarget:$dst), "jb\t$dst",
365 [(X86brcond bb:$dst, X86_COND_B, EFLAGS)]>, TB;
366 def JBE : IBr<0x86, (ins brtarget:$dst), "jbe\t$dst",
367 [(X86brcond bb:$dst, X86_COND_BE, EFLAGS)]>, TB;
368 def JA : IBr<0x87, (ins brtarget:$dst), "ja\t$dst",
369 [(X86brcond bb:$dst, X86_COND_A, EFLAGS)]>, TB;
370 def JAE : IBr<0x83, (ins brtarget:$dst), "jae\t$dst",
371 [(X86brcond bb:$dst, X86_COND_AE, EFLAGS)]>, TB;
373 def JS : IBr<0x88, (ins brtarget:$dst), "js\t$dst",
374 [(X86brcond bb:$dst, X86_COND_S, EFLAGS)]>, TB;
375 def JNS : IBr<0x89, (ins brtarget:$dst), "jns\t$dst",
376 [(X86brcond bb:$dst, X86_COND_NS, EFLAGS)]>, TB;
377 def JP : IBr<0x8A, (ins brtarget:$dst), "jp\t$dst",
378 [(X86brcond bb:$dst, X86_COND_P, EFLAGS)]>, TB;
379 def JNP : IBr<0x8B, (ins brtarget:$dst), "jnp\t$dst",
380 [(X86brcond bb:$dst, X86_COND_NP, EFLAGS)]>, TB;
381 def JO : IBr<0x80, (ins brtarget:$dst), "jo\t$dst",
382 [(X86brcond bb:$dst, X86_COND_O, EFLAGS)]>, TB;
383 def JNO : IBr<0x81, (ins brtarget:$dst), "jno\t$dst",
384 [(X86brcond bb:$dst, X86_COND_NO, EFLAGS)]>, TB;
387 //===----------------------------------------------------------------------===//
388 // Call Instructions...
391 // All calls clobber the non-callee saved registers...
392 let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
393 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
394 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, EFLAGS] in {
395 def CALLpcrel32 : Ii32<0xE8, RawFrm, (outs), (ins i32imm:$dst,variable_ops),
396 "call\t${dst:call}", []>;
397 def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst, variable_ops),
398 "call\t{*}$dst", [(X86call GR32:$dst)]>;
399 def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
400 "call\t{*}$dst", [(X86call (loadi32 addr:$dst))]>;
405 def TAILCALL : I<0, Pseudo, (outs), (ins),
409 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
410 def TCRETURNdi : I<0, Pseudo, (outs), (ins i32imm:$dst, i32imm:$offset, variable_ops),
411 "#TC_RETURN $dst $offset",
414 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
415 def TCRETURNri : I<0, Pseudo, (outs), (ins GR32:$dst, i32imm:$offset, variable_ops),
416 "#TC_RETURN $dst $offset",
419 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
421 def TAILJMPd : IBr<0xE9, (ins i32imm:$dst), "jmp\t${dst:call} # TAILCALL",
423 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
424 def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst # TAILCALL",
426 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
427 def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem:$dst),
428 "jmp\t{*}$dst # TAILCALL", []>;
430 //===----------------------------------------------------------------------===//
431 // Miscellaneous Instructions...
433 let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in
434 def LEAVE : I<0xC9, RawFrm,
435 (outs), (ins), "leave", []>;
437 let Defs = [ESP], Uses = [ESP], neverHasSideEffects=1 in {
439 def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>;
442 def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>;
445 let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, neverHasSideEffects=1 in
446 def POPFD : I<0x9D, RawFrm, (outs), (ins), "popf", []>;
447 let Defs = [ESP], Uses = [ESP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
448 def PUSHFD : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;
450 let isTwoAddress = 1 in // GR32 = bswap GR32
451 def BSWAP32r : I<0xC8, AddRegFrm,
452 (outs GR32:$dst), (ins GR32:$src),
454 [(set GR32:$dst, (bswap GR32:$src))]>, TB;
457 // Bit scan instructions.
458 let Defs = [EFLAGS] in {
459 def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
460 "bsf{w}\t{$src, $dst|$dst, $src}",
461 [(set GR16:$dst, (X86bsf GR16:$src)), (implicit EFLAGS)]>, TB;
462 def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
463 "bsf{w}\t{$src, $dst|$dst, $src}",
464 [(set GR16:$dst, (X86bsf (loadi16 addr:$src))),
465 (implicit EFLAGS)]>, TB;
466 def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
467 "bsf{l}\t{$src, $dst|$dst, $src}",
468 [(set GR32:$dst, (X86bsf GR32:$src)), (implicit EFLAGS)]>, TB;
469 def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
470 "bsf{l}\t{$src, $dst|$dst, $src}",
471 [(set GR32:$dst, (X86bsf (loadi32 addr:$src))),
472 (implicit EFLAGS)]>, TB;
474 def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
475 "bsr{w}\t{$src, $dst|$dst, $src}",
476 [(set GR16:$dst, (X86bsr GR16:$src)), (implicit EFLAGS)]>, TB;
477 def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
478 "bsr{w}\t{$src, $dst|$dst, $src}",
479 [(set GR16:$dst, (X86bsr (loadi16 addr:$src))),
480 (implicit EFLAGS)]>, TB;
481 def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
482 "bsr{l}\t{$src, $dst|$dst, $src}",
483 [(set GR32:$dst, (X86bsr GR32:$src)), (implicit EFLAGS)]>, TB;
484 def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
485 "bsr{l}\t{$src, $dst|$dst, $src}",
486 [(set GR32:$dst, (X86bsr (loadi32 addr:$src))),
487 (implicit EFLAGS)]>, TB;
490 let neverHasSideEffects = 1 in
491 def LEA16r : I<0x8D, MRMSrcMem,
492 (outs GR16:$dst), (ins i32mem:$src),
493 "lea{w}\t{$src|$dst}, {$dst|$src}", []>, OpSize;
494 let isReMaterializable = 1 in
495 def LEA32r : I<0x8D, MRMSrcMem,
496 (outs GR32:$dst), (ins lea32mem:$src),
497 "lea{l}\t{$src|$dst}, {$dst|$src}",
498 [(set GR32:$dst, lea32addr:$src)]>, Requires<[In32BitMode]>;
500 let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI] in {
501 def REP_MOVSB : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
502 [(X86rep_movs i8)]>, REP;
503 def REP_MOVSW : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
504 [(X86rep_movs i16)]>, REP, OpSize;
505 def REP_MOVSD : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
506 [(X86rep_movs i32)]>, REP;
509 let Defs = [ECX,EDI], Uses = [AL,ECX,EDI] in
510 def REP_STOSB : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
511 [(X86rep_stos i8)]>, REP;
512 let Defs = [ECX,EDI], Uses = [AX,ECX,EDI] in
513 def REP_STOSW : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
514 [(X86rep_stos i16)]>, REP, OpSize;
515 let Defs = [ECX,EDI], Uses = [EAX,ECX,EDI] in
516 def REP_STOSD : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
517 [(X86rep_stos i32)]>, REP;
519 let Defs = [RAX, RDX] in
520 def RDTSC : I<0x31, RawFrm, (outs), (ins), "rdtsc", [(X86rdtsc)]>,
523 let isBarrier = 1, hasCtrlDep = 1 in {
524 def TRAP : I<0x0B, RawFrm, (outs), (ins), "ud2", [(trap)]>, TB;
527 //===----------------------------------------------------------------------===//
528 // Input/Output Instructions...
530 let Defs = [AL], Uses = [DX] in
531 def IN8rr : I<0xEC, RawFrm, (outs), (ins),
532 "in{b}\t{%dx, %al|%AL, %DX}", []>;
533 let Defs = [AX], Uses = [DX] in
534 def IN16rr : I<0xED, RawFrm, (outs), (ins),
535 "in{w}\t{%dx, %ax|%AX, %DX}", []>, OpSize;
536 let Defs = [EAX], Uses = [DX] in
537 def IN32rr : I<0xED, RawFrm, (outs), (ins),
538 "in{l}\t{%dx, %eax|%EAX, %DX}", []>;
541 def IN8ri : Ii8<0xE4, RawFrm, (outs), (ins i16i8imm:$port),
542 "in{b}\t{$port, %al|%AL, $port}", []>;
544 def IN16ri : Ii8<0xE5, RawFrm, (outs), (ins i16i8imm:$port),
545 "in{w}\t{$port, %ax|%AX, $port}", []>, OpSize;
547 def IN32ri : Ii8<0xE5, RawFrm, (outs), (ins i16i8imm:$port),
548 "in{l}\t{$port, %eax|%EAX, $port}", []>;
550 let Uses = [DX, AL] in
551 def OUT8rr : I<0xEE, RawFrm, (outs), (ins),
552 "out{b}\t{%al, %dx|%DX, %AL}", []>;
553 let Uses = [DX, AX] in
554 def OUT16rr : I<0xEF, RawFrm, (outs), (ins),
555 "out{w}\t{%ax, %dx|%DX, %AX}", []>, OpSize;
556 let Uses = [DX, EAX] in
557 def OUT32rr : I<0xEF, RawFrm, (outs), (ins),
558 "out{l}\t{%eax, %dx|%DX, %EAX}", []>;
561 def OUT8ir : Ii8<0xE6, RawFrm, (outs), (ins i16i8imm:$port),
562 "out{b}\t{%al, $port|$port, %AL}", []>;
564 def OUT16ir : Ii8<0xE7, RawFrm, (outs), (ins i16i8imm:$port),
565 "out{w}\t{%ax, $port|$port, %AX}", []>, OpSize;
567 def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i16i8imm:$port),
568 "out{l}\t{%eax, $port|$port, %EAX}", []>;
570 //===----------------------------------------------------------------------===//
571 // Move Instructions...
573 let neverHasSideEffects = 1 in {
574 def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
575 "mov{b}\t{$src, $dst|$dst, $src}", []>;
576 def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
577 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
578 def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
579 "mov{l}\t{$src, $dst|$dst, $src}", []>;
581 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
582 def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
583 "mov{b}\t{$src, $dst|$dst, $src}",
584 [(set GR8:$dst, imm:$src)]>;
585 def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
586 "mov{w}\t{$src, $dst|$dst, $src}",
587 [(set GR16:$dst, imm:$src)]>, OpSize;
588 def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
589 "mov{l}\t{$src, $dst|$dst, $src}",
590 [(set GR32:$dst, imm:$src)]>;
592 def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
593 "mov{b}\t{$src, $dst|$dst, $src}",
594 [(store (i8 imm:$src), addr:$dst)]>;
595 def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
596 "mov{w}\t{$src, $dst|$dst, $src}",
597 [(store (i16 imm:$src), addr:$dst)]>, OpSize;
598 def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
599 "mov{l}\t{$src, $dst|$dst, $src}",
600 [(store (i32 imm:$src), addr:$dst)]>;
602 let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
603 def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
604 "mov{b}\t{$src, $dst|$dst, $src}",
605 [(set GR8:$dst, (load addr:$src))]>;
606 def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
607 "mov{w}\t{$src, $dst|$dst, $src}",
608 [(set GR16:$dst, (load addr:$src))]>, OpSize;
609 def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
610 "mov{l}\t{$src, $dst|$dst, $src}",
611 [(set GR32:$dst, (load addr:$src))]>;
614 def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
615 "mov{b}\t{$src, $dst|$dst, $src}",
616 [(store GR8:$src, addr:$dst)]>;
617 def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
618 "mov{w}\t{$src, $dst|$dst, $src}",
619 [(store GR16:$src, addr:$dst)]>, OpSize;
620 def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
621 "mov{l}\t{$src, $dst|$dst, $src}",
622 [(store GR32:$src, addr:$dst)]>;
624 //===----------------------------------------------------------------------===//
625 // Fixed-Register Multiplication and Division Instructions...
628 // Extra precision multiplication
629 let Defs = [AL,AH,EFLAGS], Uses = [AL] in
630 def MUL8r : I<0xF6, MRM4r, (outs), (ins GR8:$src), "mul{b}\t$src",
631 // FIXME: Used for 8-bit mul, ignore result upper 8 bits.
632 // This probably ought to be moved to a def : Pat<> if the
633 // syntax can be accepted.
634 [(set AL, (mul AL, GR8:$src))]>; // AL,AH = AL*GR8
635 let Defs = [AX,DX,EFLAGS], Uses = [AX], neverHasSideEffects = 1 in
636 def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src), "mul{w}\t$src", []>,
637 OpSize; // AX,DX = AX*GR16
638 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX], neverHasSideEffects = 1 in
639 def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src), "mul{l}\t$src", []>;
640 // EAX,EDX = EAX*GR32
641 let Defs = [AL,AH,EFLAGS], Uses = [AL] in
642 def MUL8m : I<0xF6, MRM4m, (outs), (ins i8mem :$src),
644 // FIXME: Used for 8-bit mul, ignore result upper 8 bits.
645 // This probably ought to be moved to a def : Pat<> if the
646 // syntax can be accepted.
647 [(set AL, (mul AL, (loadi8 addr:$src)))]>; // AL,AH = AL*[mem8]
648 let mayLoad = 1, neverHasSideEffects = 1 in {
649 let Defs = [AX,DX,EFLAGS], Uses = [AX] in
650 def MUL16m : I<0xF7, MRM4m, (outs), (ins i16mem:$src),
651 "mul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16]
652 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
653 def MUL32m : I<0xF7, MRM4m, (outs), (ins i32mem:$src),
654 "mul{l}\t$src", []>; // EAX,EDX = EAX*[mem32]
657 let neverHasSideEffects = 1 in {
658 let Defs = [AL,AH,EFLAGS], Uses = [AL] in
659 def IMUL8r : I<0xF6, MRM5r, (outs), (ins GR8:$src), "imul{b}\t$src", []>;
661 let Defs = [AX,DX,EFLAGS], Uses = [AX] in
662 def IMUL16r : I<0xF7, MRM5r, (outs), (ins GR16:$src), "imul{w}\t$src", []>,
663 OpSize; // AX,DX = AX*GR16
664 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
665 def IMUL32r : I<0xF7, MRM5r, (outs), (ins GR32:$src), "imul{l}\t$src", []>;
666 // EAX,EDX = EAX*GR32
668 let Defs = [AL,AH,EFLAGS], Uses = [AL] in
669 def IMUL8m : I<0xF6, MRM5m, (outs), (ins i8mem :$src),
670 "imul{b}\t$src", []>; // AL,AH = AL*[mem8]
671 let Defs = [AX,DX,EFLAGS], Uses = [AX] in
672 def IMUL16m : I<0xF7, MRM5m, (outs), (ins i16mem:$src),
673 "imul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16]
674 let Defs = [EAX,EDX], Uses = [EAX] in
675 def IMUL32m : I<0xF7, MRM5m, (outs), (ins i32mem:$src),
676 "imul{l}\t$src", []>; // EAX,EDX = EAX*[mem32]
679 // unsigned division/remainder
680 let Defs = [AX,EFLAGS], Uses = [AL,AH] in
681 def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
683 let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
684 def DIV16r : I<0xF7, MRM6r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
685 "div{w}\t$src", []>, OpSize;
686 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
687 def DIV32r : I<0xF7, MRM6r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
690 let Defs = [AX,EFLAGS], Uses = [AL,AH] in
691 def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
693 let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
694 def DIV16m : I<0xF7, MRM6m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
695 "div{w}\t$src", []>, OpSize;
696 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
697 def DIV32m : I<0xF7, MRM6m, (outs), (ins i32mem:$src), // EDX:EAX/[mem32] = EAX,EDX
701 // Signed division/remainder.
702 let Defs = [AX,EFLAGS], Uses = [AL,AH] in
703 def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
704 "idiv{b}\t$src", []>;
705 let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
706 def IDIV16r: I<0xF7, MRM7r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
707 "idiv{w}\t$src", []>, OpSize;
708 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
709 def IDIV32r: I<0xF7, MRM7r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
710 "idiv{l}\t$src", []>;
711 let mayLoad = 1, mayLoad = 1 in {
712 let Defs = [AX,EFLAGS], Uses = [AL,AH] in
713 def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
714 "idiv{b}\t$src", []>;
715 let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
716 def IDIV16m: I<0xF7, MRM7m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
717 "idiv{w}\t$src", []>, OpSize;
718 let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
719 def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src), // EDX:EAX/[mem32] = EAX,EDX
720 "idiv{l}\t$src", []>;
722 } // neverHasSideEffects
724 //===----------------------------------------------------------------------===//
725 // Two address Instructions.
727 let isTwoAddress = 1 in {
730 let Uses = [EFLAGS] in {
731 let isCommutable = 1 in {
732 def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16
733 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
734 "cmovb\t{$src2, $dst|$dst, $src2}",
735 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
736 X86_COND_B, EFLAGS))]>,
738 def CMOVB32rr : I<0x42, MRMSrcReg, // if <u, GR32 = GR32
739 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
740 "cmovb\t{$src2, $dst|$dst, $src2}",
741 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
742 X86_COND_B, EFLAGS))]>,
745 def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16
746 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
747 "cmovae\t{$src2, $dst|$dst, $src2}",
748 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
749 X86_COND_AE, EFLAGS))]>,
751 def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32
752 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
753 "cmovae\t{$src2, $dst|$dst, $src2}",
754 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
755 X86_COND_AE, EFLAGS))]>,
757 def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16
758 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
759 "cmove\t{$src2, $dst|$dst, $src2}",
760 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
761 X86_COND_E, EFLAGS))]>,
763 def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32
764 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
765 "cmove\t{$src2, $dst|$dst, $src2}",
766 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
767 X86_COND_E, EFLAGS))]>,
769 def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16
770 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
771 "cmovne\t{$src2, $dst|$dst, $src2}",
772 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
773 X86_COND_NE, EFLAGS))]>,
775 def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32
776 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
777 "cmovne\t{$src2, $dst|$dst, $src2}",
778 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
779 X86_COND_NE, EFLAGS))]>,
781 def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16
782 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
783 "cmovbe\t{$src2, $dst|$dst, $src2}",
784 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
785 X86_COND_BE, EFLAGS))]>,
787 def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32
788 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
789 "cmovbe\t{$src2, $dst|$dst, $src2}",
790 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
791 X86_COND_BE, EFLAGS))]>,
793 def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16
794 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
795 "cmova\t{$src2, $dst|$dst, $src2}",
796 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
797 X86_COND_A, EFLAGS))]>,
799 def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32
800 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
801 "cmova\t{$src2, $dst|$dst, $src2}",
802 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
803 X86_COND_A, EFLAGS))]>,
805 def CMOVL16rr : I<0x4C, MRMSrcReg, // if <s, GR16 = GR16
806 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
807 "cmovl\t{$src2, $dst|$dst, $src2}",
808 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
809 X86_COND_L, EFLAGS))]>,
811 def CMOVL32rr : I<0x4C, MRMSrcReg, // if <s, GR32 = GR32
812 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
813 "cmovl\t{$src2, $dst|$dst, $src2}",
814 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
815 X86_COND_L, EFLAGS))]>,
817 def CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, GR16 = GR16
818 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
819 "cmovge\t{$src2, $dst|$dst, $src2}",
820 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
821 X86_COND_GE, EFLAGS))]>,
823 def CMOVGE32rr: I<0x4D, MRMSrcReg, // if >=s, GR32 = GR32
824 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
825 "cmovge\t{$src2, $dst|$dst, $src2}",
826 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
827 X86_COND_GE, EFLAGS))]>,
829 def CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, GR16 = GR16
830 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
831 "cmovle\t{$src2, $dst|$dst, $src2}",
832 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
833 X86_COND_LE, EFLAGS))]>,
835 def CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, GR32 = GR32
836 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
837 "cmovle\t{$src2, $dst|$dst, $src2}",
838 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
839 X86_COND_LE, EFLAGS))]>,
841 def CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, GR16 = GR16
842 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
843 "cmovg\t{$src2, $dst|$dst, $src2}",
844 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
845 X86_COND_G, EFLAGS))]>,
847 def CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, GR32 = GR32
848 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
849 "cmovg\t{$src2, $dst|$dst, $src2}",
850 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
851 X86_COND_G, EFLAGS))]>,
853 def CMOVS16rr : I<0x48, MRMSrcReg, // if signed, GR16 = GR16
854 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
855 "cmovs\t{$src2, $dst|$dst, $src2}",
856 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
857 X86_COND_S, EFLAGS))]>,
859 def CMOVS32rr : I<0x48, MRMSrcReg, // if signed, GR32 = GR32
860 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
861 "cmovs\t{$src2, $dst|$dst, $src2}",
862 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
863 X86_COND_S, EFLAGS))]>,
865 def CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, GR16 = GR16
866 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
867 "cmovns\t{$src2, $dst|$dst, $src2}",
868 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
869 X86_COND_NS, EFLAGS))]>,
871 def CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, GR32 = GR32
872 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
873 "cmovns\t{$src2, $dst|$dst, $src2}",
874 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
875 X86_COND_NS, EFLAGS))]>,
877 def CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, GR16 = GR16
878 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
879 "cmovp\t{$src2, $dst|$dst, $src2}",
880 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
881 X86_COND_P, EFLAGS))]>,
883 def CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, GR32 = GR32
884 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
885 "cmovp\t{$src2, $dst|$dst, $src2}",
886 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
887 X86_COND_P, EFLAGS))]>,
889 def CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, GR16 = GR16
890 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
891 "cmovnp\t{$src2, $dst|$dst, $src2}",
892 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
893 X86_COND_NP, EFLAGS))]>,
895 def CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, GR32 = GR32
896 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
897 "cmovnp\t{$src2, $dst|$dst, $src2}",
898 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
899 X86_COND_NP, EFLAGS))]>,
901 } // isCommutable = 1
903 def CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32]
904 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
905 "cmovnp\t{$src2, $dst|$dst, $src2}",
906 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
907 X86_COND_NP, EFLAGS))]>,
910 def CMOVB16rm : I<0x42, MRMSrcMem, // if <u, GR16 = [mem16]
911 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
912 "cmovb\t{$src2, $dst|$dst, $src2}",
913 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
914 X86_COND_B, EFLAGS))]>,
916 def CMOVB32rm : I<0x42, MRMSrcMem, // if <u, GR32 = [mem32]
917 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
918 "cmovb\t{$src2, $dst|$dst, $src2}",
919 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
920 X86_COND_B, EFLAGS))]>,
922 def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16]
923 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
924 "cmovae\t{$src2, $dst|$dst, $src2}",
925 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
926 X86_COND_AE, EFLAGS))]>,
928 def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32]
929 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
930 "cmovae\t{$src2, $dst|$dst, $src2}",
931 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
932 X86_COND_AE, EFLAGS))]>,
934 def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16]
935 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
936 "cmove\t{$src2, $dst|$dst, $src2}",
937 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
938 X86_COND_E, EFLAGS))]>,
940 def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32]
941 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
942 "cmove\t{$src2, $dst|$dst, $src2}",
943 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
944 X86_COND_E, EFLAGS))]>,
946 def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16]
947 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
948 "cmovne\t{$src2, $dst|$dst, $src2}",
949 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
950 X86_COND_NE, EFLAGS))]>,
952 def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32]
953 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
954 "cmovne\t{$src2, $dst|$dst, $src2}",
955 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
956 X86_COND_NE, EFLAGS))]>,
958 def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16]
959 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
960 "cmovbe\t{$src2, $dst|$dst, $src2}",
961 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
962 X86_COND_BE, EFLAGS))]>,
964 def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32]
965 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
966 "cmovbe\t{$src2, $dst|$dst, $src2}",
967 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
968 X86_COND_BE, EFLAGS))]>,
970 def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16]
971 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
972 "cmova\t{$src2, $dst|$dst, $src2}",
973 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
974 X86_COND_A, EFLAGS))]>,
976 def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32]
977 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
978 "cmova\t{$src2, $dst|$dst, $src2}",
979 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
980 X86_COND_A, EFLAGS))]>,
982 def CMOVL16rm : I<0x4C, MRMSrcMem, // if <s, GR16 = [mem16]
983 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
984 "cmovl\t{$src2, $dst|$dst, $src2}",
985 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
986 X86_COND_L, EFLAGS))]>,
988 def CMOVL32rm : I<0x4C, MRMSrcMem, // if <s, GR32 = [mem32]
989 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
990 "cmovl\t{$src2, $dst|$dst, $src2}",
991 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
992 X86_COND_L, EFLAGS))]>,
994 def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = [mem16]
995 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
996 "cmovge\t{$src2, $dst|$dst, $src2}",
997 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
998 X86_COND_GE, EFLAGS))]>,
1000 def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32]
1001 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1002 "cmovge\t{$src2, $dst|$dst, $src2}",
1003 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1004 X86_COND_GE, EFLAGS))]>,
1006 def CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16]
1007 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1008 "cmovle\t{$src2, $dst|$dst, $src2}",
1009 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1010 X86_COND_LE, EFLAGS))]>,
1012 def CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32]
1013 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1014 "cmovle\t{$src2, $dst|$dst, $src2}",
1015 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1016 X86_COND_LE, EFLAGS))]>,
1018 def CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16]
1019 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1020 "cmovg\t{$src2, $dst|$dst, $src2}",
1021 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1022 X86_COND_G, EFLAGS))]>,
1024 def CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32]
1025 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1026 "cmovg\t{$src2, $dst|$dst, $src2}",
1027 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1028 X86_COND_G, EFLAGS))]>,
1030 def CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16]
1031 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1032 "cmovs\t{$src2, $dst|$dst, $src2}",
1033 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1034 X86_COND_S, EFLAGS))]>,
1036 def CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32]
1037 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1038 "cmovs\t{$src2, $dst|$dst, $src2}",
1039 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1040 X86_COND_S, EFLAGS))]>,
1042 def CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16]
1043 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1044 "cmovns\t{$src2, $dst|$dst, $src2}",
1045 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1046 X86_COND_NS, EFLAGS))]>,
1048 def CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32]
1049 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1050 "cmovns\t{$src2, $dst|$dst, $src2}",
1051 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1052 X86_COND_NS, EFLAGS))]>,
1054 def CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16]
1055 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1056 "cmovp\t{$src2, $dst|$dst, $src2}",
1057 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1058 X86_COND_P, EFLAGS))]>,
1060 def CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32]
1061 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1062 "cmovp\t{$src2, $dst|$dst, $src2}",
1063 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1064 X86_COND_P, EFLAGS))]>,
1066 def CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16]
1067 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1068 "cmovnp\t{$src2, $dst|$dst, $src2}",
1069 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1070 X86_COND_NP, EFLAGS))]>,
1072 } // Uses = [EFLAGS]
1075 // unary instructions
1076 let CodeSize = 2 in {
1077 let Defs = [EFLAGS] in {
1078 def NEG8r : I<0xF6, MRM3r, (outs GR8 :$dst), (ins GR8 :$src), "neg{b}\t$dst",
1079 [(set GR8:$dst, (ineg GR8:$src))]>;
1080 def NEG16r : I<0xF7, MRM3r, (outs GR16:$dst), (ins GR16:$src), "neg{w}\t$dst",
1081 [(set GR16:$dst, (ineg GR16:$src))]>, OpSize;
1082 def NEG32r : I<0xF7, MRM3r, (outs GR32:$dst), (ins GR32:$src), "neg{l}\t$dst",
1083 [(set GR32:$dst, (ineg GR32:$src))]>;
1084 let isTwoAddress = 0 in {
1085 def NEG8m : I<0xF6, MRM3m, (outs), (ins i8mem :$dst), "neg{b}\t$dst",
1086 [(store (ineg (loadi8 addr:$dst)), addr:$dst)]>;
1087 def NEG16m : I<0xF7, MRM3m, (outs), (ins i16mem:$dst), "neg{w}\t$dst",
1088 [(store (ineg (loadi16 addr:$dst)), addr:$dst)]>, OpSize;
1089 def NEG32m : I<0xF7, MRM3m, (outs), (ins i32mem:$dst), "neg{l}\t$dst",
1090 [(store (ineg (loadi32 addr:$dst)), addr:$dst)]>;
1093 } // Defs = [EFLAGS]
1095 def NOT8r : I<0xF6, MRM2r, (outs GR8 :$dst), (ins GR8 :$src), "not{b}\t$dst",
1096 [(set GR8:$dst, (not GR8:$src))]>;
1097 def NOT16r : I<0xF7, MRM2r, (outs GR16:$dst), (ins GR16:$src), "not{w}\t$dst",
1098 [(set GR16:$dst, (not GR16:$src))]>, OpSize;
1099 def NOT32r : I<0xF7, MRM2r, (outs GR32:$dst), (ins GR32:$src), "not{l}\t$dst",
1100 [(set GR32:$dst, (not GR32:$src))]>;
1101 let isTwoAddress = 0 in {
1102 def NOT8m : I<0xF6, MRM2m, (outs), (ins i8mem :$dst), "not{b}\t$dst",
1103 [(store (not (loadi8 addr:$dst)), addr:$dst)]>;
1104 def NOT16m : I<0xF7, MRM2m, (outs), (ins i16mem:$dst), "not{w}\t$dst",
1105 [(store (not (loadi16 addr:$dst)), addr:$dst)]>, OpSize;
1106 def NOT32m : I<0xF7, MRM2m, (outs), (ins i32mem:$dst), "not{l}\t$dst",
1107 [(store (not (loadi32 addr:$dst)), addr:$dst)]>;
1111 // TODO: inc/dec is slow for P4, but fast for Pentium-M.
1112 let Defs = [EFLAGS] in {
1114 def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src), "inc{b}\t$dst",
1115 [(set GR8:$dst, (add GR8:$src, 1))]>;
1116 let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
1117 def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
1118 [(set GR16:$dst, (add GR16:$src, 1))]>,
1119 OpSize, Requires<[In32BitMode]>;
1120 def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
1121 [(set GR32:$dst, (add GR32:$src, 1))]>, Requires<[In32BitMode]>;
1123 let isTwoAddress = 0, CodeSize = 2 in {
1124 def INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst), "inc{b}\t$dst",
1125 [(store (add (loadi8 addr:$dst), 1), addr:$dst)]>;
1126 def INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
1127 [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
1128 OpSize, Requires<[In32BitMode]>;
1129 def INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
1130 [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
1131 Requires<[In32BitMode]>;
1135 def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src), "dec{b}\t$dst",
1136 [(set GR8:$dst, (add GR8:$src, -1))]>;
1137 let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
1138 def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
1139 [(set GR16:$dst, (add GR16:$src, -1))]>,
1140 OpSize, Requires<[In32BitMode]>;
1141 def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
1142 [(set GR32:$dst, (add GR32:$src, -1))]>, Requires<[In32BitMode]>;
1145 let isTwoAddress = 0, CodeSize = 2 in {
1146 def DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst), "dec{b}\t$dst",
1147 [(store (add (loadi8 addr:$dst), -1), addr:$dst)]>;
1148 def DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
1149 [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
1150 OpSize, Requires<[In32BitMode]>;
1151 def DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
1152 [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
1153 Requires<[In32BitMode]>;
1155 } // Defs = [EFLAGS]
1157 // Logical operators...
1158 let Defs = [EFLAGS] in {
1159 let isCommutable = 1 in { // X = AND Y, Z --> X = AND Z, Y
1160 def AND8rr : I<0x20, MRMDestReg,
1161 (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
1162 "and{b}\t{$src2, $dst|$dst, $src2}",
1163 [(set GR8:$dst, (and GR8:$src1, GR8:$src2))]>;
1164 def AND16rr : I<0x21, MRMDestReg,
1165 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1166 "and{w}\t{$src2, $dst|$dst, $src2}",
1167 [(set GR16:$dst, (and GR16:$src1, GR16:$src2))]>, OpSize;
1168 def AND32rr : I<0x21, MRMDestReg,
1169 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1170 "and{l}\t{$src2, $dst|$dst, $src2}",
1171 [(set GR32:$dst, (and GR32:$src1, GR32:$src2))]>;
1174 def AND8rm : I<0x22, MRMSrcMem,
1175 (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
1176 "and{b}\t{$src2, $dst|$dst, $src2}",
1177 [(set GR8:$dst, (and GR8:$src1, (load addr:$src2)))]>;
1178 def AND16rm : I<0x23, MRMSrcMem,
1179 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1180 "and{w}\t{$src2, $dst|$dst, $src2}",
1181 [(set GR16:$dst, (and GR16:$src1, (load addr:$src2)))]>, OpSize;
1182 def AND32rm : I<0x23, MRMSrcMem,
1183 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1184 "and{l}\t{$src2, $dst|$dst, $src2}",
1185 [(set GR32:$dst, (and GR32:$src1, (load addr:$src2)))]>;
1187 def AND8ri : Ii8<0x80, MRM4r,
1188 (outs GR8 :$dst), (ins GR8 :$src1, i8imm :$src2),
1189 "and{b}\t{$src2, $dst|$dst, $src2}",
1190 [(set GR8:$dst, (and GR8:$src1, imm:$src2))]>;
1191 def AND16ri : Ii16<0x81, MRM4r,
1192 (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
1193 "and{w}\t{$src2, $dst|$dst, $src2}",
1194 [(set GR16:$dst, (and GR16:$src1, imm:$src2))]>, OpSize;
1195 def AND32ri : Ii32<0x81, MRM4r,
1196 (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1197 "and{l}\t{$src2, $dst|$dst, $src2}",
1198 [(set GR32:$dst, (and GR32:$src1, imm:$src2))]>;
1199 def AND16ri8 : Ii8<0x83, MRM4r,
1200 (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1201 "and{w}\t{$src2, $dst|$dst, $src2}",
1202 [(set GR16:$dst, (and GR16:$src1, i16immSExt8:$src2))]>,
1204 def AND32ri8 : Ii8<0x83, MRM4r,
1205 (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1206 "and{l}\t{$src2, $dst|$dst, $src2}",
1207 [(set GR32:$dst, (and GR32:$src1, i32immSExt8:$src2))]>;
1209 let isTwoAddress = 0 in {
1210 def AND8mr : I<0x20, MRMDestMem,
1211 (outs), (ins i8mem :$dst, GR8 :$src),
1212 "and{b}\t{$src, $dst|$dst, $src}",
1213 [(store (and (load addr:$dst), GR8:$src), addr:$dst)]>;
1214 def AND16mr : I<0x21, MRMDestMem,
1215 (outs), (ins i16mem:$dst, GR16:$src),
1216 "and{w}\t{$src, $dst|$dst, $src}",
1217 [(store (and (load addr:$dst), GR16:$src), addr:$dst)]>,
1219 def AND32mr : I<0x21, MRMDestMem,
1220 (outs), (ins i32mem:$dst, GR32:$src),
1221 "and{l}\t{$src, $dst|$dst, $src}",
1222 [(store (and (load addr:$dst), GR32:$src), addr:$dst)]>;
1223 def AND8mi : Ii8<0x80, MRM4m,
1224 (outs), (ins i8mem :$dst, i8imm :$src),
1225 "and{b}\t{$src, $dst|$dst, $src}",
1226 [(store (and (loadi8 addr:$dst), imm:$src), addr:$dst)]>;
1227 def AND16mi : Ii16<0x81, MRM4m,
1228 (outs), (ins i16mem:$dst, i16imm:$src),
1229 "and{w}\t{$src, $dst|$dst, $src}",
1230 [(store (and (loadi16 addr:$dst), imm:$src), addr:$dst)]>,
1232 def AND32mi : Ii32<0x81, MRM4m,
1233 (outs), (ins i32mem:$dst, i32imm:$src),
1234 "and{l}\t{$src, $dst|$dst, $src}",
1235 [(store (and (loadi32 addr:$dst), imm:$src), addr:$dst)]>;
1236 def AND16mi8 : Ii8<0x83, MRM4m,
1237 (outs), (ins i16mem:$dst, i16i8imm :$src),
1238 "and{w}\t{$src, $dst|$dst, $src}",
1239 [(store (and (load addr:$dst), i16immSExt8:$src), addr:$dst)]>,
1241 def AND32mi8 : Ii8<0x83, MRM4m,
1242 (outs), (ins i32mem:$dst, i32i8imm :$src),
1243 "and{l}\t{$src, $dst|$dst, $src}",
1244 [(store (and (load addr:$dst), i32immSExt8:$src), addr:$dst)]>;
1248 let isCommutable = 1 in { // X = OR Y, Z --> X = OR Z, Y
1249 def OR8rr : I<0x08, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
1250 "or{b}\t{$src2, $dst|$dst, $src2}",
1251 [(set GR8:$dst, (or GR8:$src1, GR8:$src2))]>;
1252 def OR16rr : I<0x09, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1253 "or{w}\t{$src2, $dst|$dst, $src2}",
1254 [(set GR16:$dst, (or GR16:$src1, GR16:$src2))]>, OpSize;
1255 def OR32rr : I<0x09, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1256 "or{l}\t{$src2, $dst|$dst, $src2}",
1257 [(set GR32:$dst, (or GR32:$src1, GR32:$src2))]>;
1259 def OR8rm : I<0x0A, MRMSrcMem , (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
1260 "or{b}\t{$src2, $dst|$dst, $src2}",
1261 [(set GR8:$dst, (or GR8:$src1, (load addr:$src2)))]>;
1262 def OR16rm : I<0x0B, MRMSrcMem , (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1263 "or{w}\t{$src2, $dst|$dst, $src2}",
1264 [(set GR16:$dst, (or GR16:$src1, (load addr:$src2)))]>, OpSize;
1265 def OR32rm : I<0x0B, MRMSrcMem , (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1266 "or{l}\t{$src2, $dst|$dst, $src2}",
1267 [(set GR32:$dst, (or GR32:$src1, (load addr:$src2)))]>;
1269 def OR8ri : Ii8 <0x80, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
1270 "or{b}\t{$src2, $dst|$dst, $src2}",
1271 [(set GR8:$dst, (or GR8:$src1, imm:$src2))]>;
1272 def OR16ri : Ii16<0x81, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
1273 "or{w}\t{$src2, $dst|$dst, $src2}",
1274 [(set GR16:$dst, (or GR16:$src1, imm:$src2))]>, OpSize;
1275 def OR32ri : Ii32<0x81, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1276 "or{l}\t{$src2, $dst|$dst, $src2}",
1277 [(set GR32:$dst, (or GR32:$src1, imm:$src2))]>;
1279 def OR16ri8 : Ii8<0x83, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1280 "or{w}\t{$src2, $dst|$dst, $src2}",
1281 [(set GR16:$dst, (or GR16:$src1, i16immSExt8:$src2))]>, OpSize;
1282 def OR32ri8 : Ii8<0x83, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1283 "or{l}\t{$src2, $dst|$dst, $src2}",
1284 [(set GR32:$dst, (or GR32:$src1, i32immSExt8:$src2))]>;
1285 let isTwoAddress = 0 in {
1286 def OR8mr : I<0x08, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
1287 "or{b}\t{$src, $dst|$dst, $src}",
1288 [(store (or (load addr:$dst), GR8:$src), addr:$dst)]>;
1289 def OR16mr : I<0x09, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1290 "or{w}\t{$src, $dst|$dst, $src}",
1291 [(store (or (load addr:$dst), GR16:$src), addr:$dst)]>, OpSize;
1292 def OR32mr : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1293 "or{l}\t{$src, $dst|$dst, $src}",
1294 [(store (or (load addr:$dst), GR32:$src), addr:$dst)]>;
1295 def OR8mi : Ii8<0x80, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src),
1296 "or{b}\t{$src, $dst|$dst, $src}",
1297 [(store (or (loadi8 addr:$dst), imm:$src), addr:$dst)]>;
1298 def OR16mi : Ii16<0x81, MRM1m, (outs), (ins i16mem:$dst, i16imm:$src),
1299 "or{w}\t{$src, $dst|$dst, $src}",
1300 [(store (or (loadi16 addr:$dst), imm:$src), addr:$dst)]>,
1302 def OR32mi : Ii32<0x81, MRM1m, (outs), (ins i32mem:$dst, i32imm:$src),
1303 "or{l}\t{$src, $dst|$dst, $src}",
1304 [(store (or (loadi32 addr:$dst), imm:$src), addr:$dst)]>;
1305 def OR16mi8 : Ii8<0x83, MRM1m, (outs), (ins i16mem:$dst, i16i8imm:$src),
1306 "or{w}\t{$src, $dst|$dst, $src}",
1307 [(store (or (load addr:$dst), i16immSExt8:$src), addr:$dst)]>,
1309 def OR32mi8 : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$src),
1310 "or{l}\t{$src, $dst|$dst, $src}",
1311 [(store (or (load addr:$dst), i32immSExt8:$src), addr:$dst)]>;
1312 } // isTwoAddress = 0
1315 let isCommutable = 1 in { // X = XOR Y, Z --> X = XOR Z, Y
1316 def XOR8rr : I<0x30, MRMDestReg,
1317 (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
1318 "xor{b}\t{$src2, $dst|$dst, $src2}",
1319 [(set GR8:$dst, (xor GR8:$src1, GR8:$src2))]>;
1320 def XOR16rr : I<0x31, MRMDestReg,
1321 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1322 "xor{w}\t{$src2, $dst|$dst, $src2}",
1323 [(set GR16:$dst, (xor GR16:$src1, GR16:$src2))]>, OpSize;
1324 def XOR32rr : I<0x31, MRMDestReg,
1325 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1326 "xor{l}\t{$src2, $dst|$dst, $src2}",
1327 [(set GR32:$dst, (xor GR32:$src1, GR32:$src2))]>;
1328 } // isCommutable = 1
1330 def XOR8rm : I<0x32, MRMSrcMem ,
1331 (outs GR8 :$dst), (ins GR8:$src1, i8mem :$src2),
1332 "xor{b}\t{$src2, $dst|$dst, $src2}",
1333 [(set GR8:$dst, (xor GR8:$src1, (load addr:$src2)))]>;
1334 def XOR16rm : I<0x33, MRMSrcMem ,
1335 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1336 "xor{w}\t{$src2, $dst|$dst, $src2}",
1337 [(set GR16:$dst, (xor GR16:$src1, (load addr:$src2)))]>,
1339 def XOR32rm : I<0x33, MRMSrcMem ,
1340 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1341 "xor{l}\t{$src2, $dst|$dst, $src2}",
1342 [(set GR32:$dst, (xor GR32:$src1, (load addr:$src2)))]>;
1344 def XOR8ri : Ii8<0x80, MRM6r,
1345 (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
1346 "xor{b}\t{$src2, $dst|$dst, $src2}",
1347 [(set GR8:$dst, (xor GR8:$src1, imm:$src2))]>;
1348 def XOR16ri : Ii16<0x81, MRM6r,
1349 (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
1350 "xor{w}\t{$src2, $dst|$dst, $src2}",
1351 [(set GR16:$dst, (xor GR16:$src1, imm:$src2))]>, OpSize;
1352 def XOR32ri : Ii32<0x81, MRM6r,
1353 (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1354 "xor{l}\t{$src2, $dst|$dst, $src2}",
1355 [(set GR32:$dst, (xor GR32:$src1, imm:$src2))]>;
1356 def XOR16ri8 : Ii8<0x83, MRM6r,
1357 (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1358 "xor{w}\t{$src2, $dst|$dst, $src2}",
1359 [(set GR16:$dst, (xor GR16:$src1, i16immSExt8:$src2))]>,
1361 def XOR32ri8 : Ii8<0x83, MRM6r,
1362 (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1363 "xor{l}\t{$src2, $dst|$dst, $src2}",
1364 [(set GR32:$dst, (xor GR32:$src1, i32immSExt8:$src2))]>;
1366 let isTwoAddress = 0 in {
1367 def XOR8mr : I<0x30, MRMDestMem,
1368 (outs), (ins i8mem :$dst, GR8 :$src),
1369 "xor{b}\t{$src, $dst|$dst, $src}",
1370 [(store (xor (load addr:$dst), GR8:$src), addr:$dst)]>;
1371 def XOR16mr : I<0x31, MRMDestMem,
1372 (outs), (ins i16mem:$dst, GR16:$src),
1373 "xor{w}\t{$src, $dst|$dst, $src}",
1374 [(store (xor (load addr:$dst), GR16:$src), addr:$dst)]>,
1376 def XOR32mr : I<0x31, MRMDestMem,
1377 (outs), (ins i32mem:$dst, GR32:$src),
1378 "xor{l}\t{$src, $dst|$dst, $src}",
1379 [(store (xor (load addr:$dst), GR32:$src), addr:$dst)]>;
1380 def XOR8mi : Ii8<0x80, MRM6m,
1381 (outs), (ins i8mem :$dst, i8imm :$src),
1382 "xor{b}\t{$src, $dst|$dst, $src}",
1383 [(store (xor (loadi8 addr:$dst), imm:$src), addr:$dst)]>;
1384 def XOR16mi : Ii16<0x81, MRM6m,
1385 (outs), (ins i16mem:$dst, i16imm:$src),
1386 "xor{w}\t{$src, $dst|$dst, $src}",
1387                  [(store (xor (loadi16 addr:$dst), imm:$src), addr:$dst)]>,
1388                  OpSize;
1389 def XOR32mi : Ii32<0x81, MRM6m,
1390 (outs), (ins i32mem:$dst, i32imm:$src),
1391 "xor{l}\t{$src, $dst|$dst, $src}",
1392 [(store (xor (loadi32 addr:$dst), imm:$src), addr:$dst)]>;
1393 def XOR16mi8 : Ii8<0x83, MRM6m,
1394 (outs), (ins i16mem:$dst, i16i8imm :$src),
1395 "xor{w}\t{$src, $dst|$dst, $src}",
1396                  [(store (xor (load addr:$dst), i16immSExt8:$src), addr:$dst)]>,
1397                  OpSize;
1398 def XOR32mi8 : Ii8<0x83, MRM6m,
1399 (outs), (ins i32mem:$dst, i32i8imm :$src),
1400 "xor{l}\t{$src, $dst|$dst, $src}",
1401 [(store (xor (load addr:$dst), i32immSExt8:$src), addr:$dst)]>;
1402 } // isTwoAddress = 0
1403 } // Defs = [EFLAGS]
1405 // Shift instructions
1406 let Defs = [EFLAGS] in {
1407 let Uses = [CL] in {
1408 def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src),
1409 "shl{b}\t{%cl, $dst|$dst, %CL}",
1410 [(set GR8:$dst, (shl GR8:$src, CL))]>;
1411 def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src),
1412 "shl{w}\t{%cl, $dst|$dst, %CL}",
1413 [(set GR16:$dst, (shl GR16:$src, CL))]>, OpSize;
1414 def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src),
1415 "shl{l}\t{%cl, $dst|$dst, %CL}",
1416 [(set GR32:$dst, (shl GR32:$src, CL))]>;
1419 def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
1420 "shl{b}\t{$src2, $dst|$dst, $src2}",
1421 [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;
1422 let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
1423 def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
1424 "shl{w}\t{$src2, $dst|$dst, $src2}",
1425 [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
1426 def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
1427 "shl{l}\t{$src2, $dst|$dst, $src2}",
1428 [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))]>;
1429 // NOTE: We don't use shifts of a register by one, because 'add reg,reg' is
1430 // cheaper.
1431 } // isConvertibleToThreeAddress = 1
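// (For example, a 32-bit shift-left-by-one is instead selected as ADD32rr of a
// register with itself, via the (shl x, (i8 1)) -> (add x, x) patterns near the
// end of this file; both forms are two bytes, and the add is generally at least
// as cheap on current implementations.)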
1433 let isTwoAddress = 0 in {
1434 let Uses = [CL] in {
1435 def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
1436 "shl{b}\t{%cl, $dst|$dst, %CL}",
1437 [(store (shl (loadi8 addr:$dst), CL), addr:$dst)]>;
1438 def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
1439 "shl{w}\t{%cl, $dst|$dst, %CL}",
1440 [(store (shl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
1441 def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
1442 "shl{l}\t{%cl, $dst|$dst, %CL}",
1443 [(store (shl (loadi32 addr:$dst), CL), addr:$dst)]>;
1445 def SHL8mi : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, i8imm:$src),
1446 "shl{b}\t{$src, $dst|$dst, $src}",
1447 [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1448 def SHL16mi : Ii8<0xC1, MRM4m, (outs), (ins i16mem:$dst, i8imm:$src),
1449 "shl{w}\t{$src, $dst|$dst, $src}",
1450                [(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
1451                OpSize;
1452 def SHL32mi : Ii8<0xC1, MRM4m, (outs), (ins i32mem:$dst, i8imm:$src),
1453 "shl{l}\t{$src, $dst|$dst, $src}",
1454 [(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1457 def SHL8m1   : I<0xD0, MRM4m, (outs), (ins i8mem :$dst),
1458                  "shl{b}\t$dst",
1459                  [(store (shl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
1460 def SHL16m1  : I<0xD1, MRM4m, (outs), (ins i16mem:$dst),
1461                  "shl{w}\t$dst",
1462                  [(store (shl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
1463                  OpSize;
1464 def SHL32m1  : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
1465                  "shl{l}\t$dst",
1466                  [(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
1469 let Uses = [CL] in {
1470 def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src),
1471 "shr{b}\t{%cl, $dst|$dst, %CL}",
1472 [(set GR8:$dst, (srl GR8:$src, CL))]>;
1473 def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src),
1474 "shr{w}\t{%cl, $dst|$dst, %CL}",
1475 [(set GR16:$dst, (srl GR16:$src, CL))]>, OpSize;
1476 def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src),
1477 "shr{l}\t{%cl, $dst|$dst, %CL}",
1478 [(set GR32:$dst, (srl GR32:$src, CL))]>;
1481 def SHR8ri : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
1482 "shr{b}\t{$src2, $dst|$dst, $src2}",
1483 [(set GR8:$dst, (srl GR8:$src1, (i8 imm:$src2)))]>;
1484 def SHR16ri : Ii8<0xC1, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
1485 "shr{w}\t{$src2, $dst|$dst, $src2}",
1486 [(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
1487 def SHR32ri : Ii8<0xC1, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
1488 "shr{l}\t{$src2, $dst|$dst, $src2}",
1489 [(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))]>;
1492 def SHR8r1   : I<0xD0, MRM5r, (outs GR8:$dst), (ins GR8:$src1),
1493                  "shr{b}\t$dst",
1494                  [(set GR8:$dst, (srl GR8:$src1, (i8 1)))]>;
1495 def SHR16r1  : I<0xD1, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
1496                  "shr{w}\t$dst",
1497                  [(set GR16:$dst, (srl GR16:$src1, (i8 1)))]>, OpSize;
1498 def SHR32r1  : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
1499                  "shr{l}\t$dst",
1500                  [(set GR32:$dst, (srl GR32:$src1, (i8 1)))]>;
1502 let isTwoAddress = 0 in {
1503 let Uses = [CL] in {
1504 def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
1505 "shr{b}\t{%cl, $dst|$dst, %CL}",
1506 [(store (srl (loadi8 addr:$dst), CL), addr:$dst)]>;
1507 def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
1508 "shr{w}\t{%cl, $dst|$dst, %CL}",
1509                 [(store (srl (loadi16 addr:$dst), CL), addr:$dst)]>,
1510                 OpSize;
1511 def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
1512 "shr{l}\t{%cl, $dst|$dst, %CL}",
1513 [(store (srl (loadi32 addr:$dst), CL), addr:$dst)]>;
1515 def SHR8mi : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src),
1516 "shr{b}\t{$src, $dst|$dst, $src}",
1517 [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1518 def SHR16mi : Ii8<0xC1, MRM5m, (outs), (ins i16mem:$dst, i8imm:$src),
1519 "shr{w}\t{$src, $dst|$dst, $src}",
1520                [(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
1521                OpSize;
1522 def SHR32mi : Ii8<0xC1, MRM5m, (outs), (ins i32mem:$dst, i8imm:$src),
1523 "shr{l}\t{$src, $dst|$dst, $src}",
1524 [(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1527 def SHR8m1   : I<0xD0, MRM5m, (outs), (ins i8mem :$dst),
1528                  "shr{b}\t$dst",
1529                  [(store (srl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
1530 def SHR16m1  : I<0xD1, MRM5m, (outs), (ins i16mem:$dst),
1531                  "shr{w}\t$dst",
1532                  [(store (srl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,OpSize;
1533 def SHR32m1  : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
1534                  "shr{l}\t$dst",
1535                  [(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
1538 let Uses = [CL] in {
1539 def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src),
1540 "sar{b}\t{%cl, $dst|$dst, %CL}",
1541 [(set GR8:$dst, (sra GR8:$src, CL))]>;
1542 def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src),
1543 "sar{w}\t{%cl, $dst|$dst, %CL}",
1544 [(set GR16:$dst, (sra GR16:$src, CL))]>, OpSize;
1545 def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src),
1546 "sar{l}\t{%cl, $dst|$dst, %CL}",
1547 [(set GR32:$dst, (sra GR32:$src, CL))]>;
1550 def SAR8ri : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
1551 "sar{b}\t{$src2, $dst|$dst, $src2}",
1552 [(set GR8:$dst, (sra GR8:$src1, (i8 imm:$src2)))]>;
1553 def SAR16ri : Ii8<0xC1, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
1554 "sar{w}\t{$src2, $dst|$dst, $src2}",
1555                  [(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))]>,
1556                  OpSize;
1557 def SAR32ri : Ii8<0xC1, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
1558 "sar{l}\t{$src2, $dst|$dst, $src2}",
1559 [(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))]>;
1562 def SAR8r1   : I<0xD0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
1563                  "sar{b}\t$dst",
1564                  [(set GR8:$dst, (sra GR8:$src1, (i8 1)))]>;
1565 def SAR16r1  : I<0xD1, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
1566                  "sar{w}\t$dst",
1567                  [(set GR16:$dst, (sra GR16:$src1, (i8 1)))]>, OpSize;
1568 def SAR32r1  : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
1569                  "sar{l}\t$dst",
1570                  [(set GR32:$dst, (sra GR32:$src1, (i8 1)))]>;
1572 let isTwoAddress = 0 in {
1573 let Uses = [CL] in {
1574 def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
1575 "sar{b}\t{%cl, $dst|$dst, %CL}",
1576 [(store (sra (loadi8 addr:$dst), CL), addr:$dst)]>;
1577 def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
1578 "sar{w}\t{%cl, $dst|$dst, %CL}",
1579 [(store (sra (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
1580 def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
1581 "sar{l}\t{%cl, $dst|$dst, %CL}",
1582 [(store (sra (loadi32 addr:$dst), CL), addr:$dst)]>;
1584 def SAR8mi : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, i8imm:$src),
1585 "sar{b}\t{$src, $dst|$dst, $src}",
1586 [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1587 def SAR16mi : Ii8<0xC1, MRM7m, (outs), (ins i16mem:$dst, i8imm:$src),
1588 "sar{w}\t{$src, $dst|$dst, $src}",
1589                [(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
1590                OpSize;
1591 def SAR32mi : Ii8<0xC1, MRM7m, (outs), (ins i32mem:$dst, i8imm:$src),
1592 "sar{l}\t{$src, $dst|$dst, $src}",
1593 [(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1596 def SAR8m1   : I<0xD0, MRM7m, (outs), (ins i8mem :$dst),
1597                  "sar{b}\t$dst",
1598                  [(store (sra (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
1599 def SAR16m1  : I<0xD1, MRM7m, (outs), (ins i16mem:$dst),
1600                  "sar{w}\t$dst",
1601                  [(store (sra (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
1602                  OpSize;
1603 def SAR32m1  : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
1604                  "sar{l}\t$dst",
1605                  [(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
1608 // Rotate instructions
1609 // FIXME: provide shorter instructions when imm8 == 1
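// (Illustrative encodings: the C0/C1 immediate forms below always carry an imm8
// byte, while the D0/D1 rotate-by-one opcodes do not -- e.g. 'rolb $1, %al' is
// C0 C0 01 as an immediate rotate but only D0 C0 as a rotate-by-one, saving a
// byte.)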
1610 let Uses = [CL] in {
1611 def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src),
1612 "rol{b}\t{%cl, $dst|$dst, %CL}",
1613 [(set GR8:$dst, (rotl GR8:$src, CL))]>;
1614 def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src),
1615 "rol{w}\t{%cl, $dst|$dst, %CL}",
1616 [(set GR16:$dst, (rotl GR16:$src, CL))]>, OpSize;
1617 def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src),
1618 "rol{l}\t{%cl, $dst|$dst, %CL}",
1619 [(set GR32:$dst, (rotl GR32:$src, CL))]>;
1622 def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
1623 "rol{b}\t{$src2, $dst|$dst, $src2}",
1624 [(set GR8:$dst, (rotl GR8:$src1, (i8 imm:$src2)))]>;
1625 def ROL16ri : Ii8<0xC1, MRM0r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
1626 "rol{w}\t{$src2, $dst|$dst, $src2}",
1627 [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
1628 def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
1629 "rol{l}\t{$src2, $dst|$dst, $src2}",
1630 [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))]>;
1633 def ROL8r1   : I<0xD0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
1634                  "rol{b}\t$dst",
1635                  [(set GR8:$dst, (rotl GR8:$src1, (i8 1)))]>;
1636 def ROL16r1  : I<0xD1, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
1637                  "rol{w}\t$dst",
1638                  [(set GR16:$dst, (rotl GR16:$src1, (i8 1)))]>, OpSize;
1639 def ROL32r1  : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
1640                  "rol{l}\t$dst",
1641                  [(set GR32:$dst, (rotl GR32:$src1, (i8 1)))]>;
1643 let isTwoAddress = 0 in {
1644 let Uses = [CL] in {
1645 def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
1646 "rol{b}\t{%cl, $dst|$dst, %CL}",
1647 [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)]>;
1648 def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
1649 "rol{w}\t{%cl, $dst|$dst, %CL}",
1650 [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
1651 def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
1652 "rol{l}\t{%cl, $dst|$dst, %CL}",
1653 [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)]>;
1655 def ROL8mi : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, i8imm:$src),
1656 "rol{b}\t{$src, $dst|$dst, $src}",
1657 [(store (rotl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1658 def ROL16mi : Ii8<0xC1, MRM0m, (outs), (ins i16mem:$dst, i8imm:$src),
1659 "rol{w}\t{$src, $dst|$dst, $src}",
1660                [(store (rotl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
1661                OpSize;
1662 def ROL32mi : Ii8<0xC1, MRM0m, (outs), (ins i32mem:$dst, i8imm:$src),
1663 "rol{l}\t{$src, $dst|$dst, $src}",
1664 [(store (rotl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1667 def ROL8m1   : I<0xD0, MRM0m, (outs), (ins i8mem :$dst),
1668                  "rol{b}\t$dst",
1669                  [(store (rotl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
1670 def ROL16m1  : I<0xD1, MRM0m, (outs), (ins i16mem:$dst),
1671                  "rol{w}\t$dst",
1672                  [(store (rotl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
1673                  OpSize;
1674 def ROL32m1  : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
1675                  "rol{l}\t$dst",
1676                  [(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
1679 let Uses = [CL] in {
1680 def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src),
1681 "ror{b}\t{%cl, $dst|$dst, %CL}",
1682 [(set GR8:$dst, (rotr GR8:$src, CL))]>;
1683 def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src),
1684 "ror{w}\t{%cl, $dst|$dst, %CL}",
1685 [(set GR16:$dst, (rotr GR16:$src, CL))]>, OpSize;
1686 def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src),
1687 "ror{l}\t{%cl, $dst|$dst, %CL}",
1688 [(set GR32:$dst, (rotr GR32:$src, CL))]>;
1691 def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
1692 "ror{b}\t{$src2, $dst|$dst, $src2}",
1693 [(set GR8:$dst, (rotr GR8:$src1, (i8 imm:$src2)))]>;
1694 def ROR16ri : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
1695 "ror{w}\t{$src2, $dst|$dst, $src2}",
1696 [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))]>, OpSize;
1697 def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
1698 "ror{l}\t{$src2, $dst|$dst, $src2}",
1699 [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))]>;
1702 def ROR8r1   : I<0xD0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
1703                  "ror{b}\t$dst",
1704                  [(set GR8:$dst, (rotr GR8:$src1, (i8 1)))]>;
1705 def ROR16r1  : I<0xD1, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
1706                  "ror{w}\t$dst",
1707                  [(set GR16:$dst, (rotr GR16:$src1, (i8 1)))]>, OpSize;
1708 def ROR32r1  : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
1709                  "ror{l}\t$dst",
1710                  [(set GR32:$dst, (rotr GR32:$src1, (i8 1)))]>;
1712 let isTwoAddress = 0 in {
1713 let Uses = [CL] in {
1714 def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
1715 "ror{b}\t{%cl, $dst|$dst, %CL}",
1716 [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)]>;
1717 def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
1718 "ror{w}\t{%cl, $dst|$dst, %CL}",
1719 [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
1720 def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
1721 "ror{l}\t{%cl, $dst|$dst, %CL}",
1722 [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)]>;
1724 def ROR8mi : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src),
1725 "ror{b}\t{$src, $dst|$dst, $src}",
1726 [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1727 def ROR16mi : Ii8<0xC1, MRM1m, (outs), (ins i16mem:$dst, i8imm:$src),
1728 "ror{w}\t{$src, $dst|$dst, $src}",
1729                [(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
1730                OpSize;
1731 def ROR32mi : Ii8<0xC1, MRM1m, (outs), (ins i32mem:$dst, i8imm:$src),
1732 "ror{l}\t{$src, $dst|$dst, $src}",
1733 [(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
1736 def ROR8m1   : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
1737                  "ror{b}\t$dst",
1738                  [(store (rotr (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
1739 def ROR16m1  : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
1740                  "ror{w}\t$dst",
1741                  [(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
1742                  OpSize;
1743 def ROR32m1  : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
1744                  "ror{l}\t$dst",
1745                  [(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
1750 // Double shift instructions (generalizations of rotate)
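// SHLD shifts the destination left and fills the vacated low-order bits from the
// high-order bits of the second operand; SHRD shifts right and fills the vacated
// high-order bits from the low-order bits of the second operand. For example,
// with EAX = 0x80000001 and EBX = 0xF0000000, 'shldl $4, %ebx, %eax' leaves
// EAX = 0x0000001F.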
1751 let Uses = [CL] in {
1752 def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1753 "shld{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1754 [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))]>, TB;
1755 def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1756 "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1757 [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))]>, TB;
1758 def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1759 "shld{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1760                      [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))]>,
1761                      TB, OpSize;
1762 def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1763 "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1764                      [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))]>,
1765                      TB, OpSize;
1768 let isCommutable = 1 in { // These instructions commute to each other.
1769 def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
1770 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$src3),
1771 "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1772                      [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2,
1773                                        (i8 imm:$src3)))]>,
1774                      TB;
1775 def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
1776 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$src3),
1777 "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1778                      [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2,
1779                                        (i8 imm:$src3)))]>,
1780                      TB;
1781 def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
1782 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$src3),
1783 "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1784                      [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2,
1785                                        (i8 imm:$src3)))]>,
1786                      TB, OpSize;
1787 def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
1788 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$src3),
1789 "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1790                      [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2,
1791                                        (i8 imm:$src3)))]>,
1792                      TB, OpSize;
1795 let isTwoAddress = 0 in {
1796 let Uses = [CL] in {
1797 def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
1798 "shld{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1799                      [(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL),
1800                              addr:$dst)]>, TB;
1801 def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
1802 "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1803                      [(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL),
1804                              addr:$dst)]>, TB;
1806 def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
1807 (outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
1808 "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1809 [(store (X86shld (loadi32 addr:$dst), GR32:$src2,
1810                             (i8 imm:$src3)), addr:$dst)]>,
1811                        TB;
1812 def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
1813 (outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
1814 "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1815 [(store (X86shrd (loadi32 addr:$dst), GR32:$src2,
1816                             (i8 imm:$src3)), addr:$dst)]>,
1817                        TB;
1819 let Uses = [CL] in {
1820 def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
1821 "shld{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1822 [(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL),
1823 addr:$dst)]>, TB, OpSize;
1824 def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
1825 "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
1826 [(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL),
1827 addr:$dst)]>, TB, OpSize;
1829 def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
1830 (outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
1831 "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1832 [(store (X86shld (loadi16 addr:$dst), GR16:$src2,
1833                             (i8 imm:$src3)), addr:$dst)]>,
1834                        TB, OpSize;
1835 def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
1836 (outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
1837 "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1838 [(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
1839                             (i8 imm:$src3)), addr:$dst)]>,
1840                        TB, OpSize;
1842 } // Defs = [EFLAGS]
1846 let Defs = [EFLAGS] in {
1847 let isCommutable = 1 in { // X = ADD Y, Z --> X = ADD Z, Y
1848 def ADD8rr : I<0x00, MRMDestReg, (outs GR8 :$dst),
1849 (ins GR8 :$src1, GR8 :$src2),
1850 "add{b}\t{$src2, $dst|$dst, $src2}",
1851 [(set GR8:$dst, (add GR8:$src1, GR8:$src2))]>;
1852 let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
1853 def ADD16rr : I<0x01, MRMDestReg, (outs GR16:$dst),
1854 (ins GR16:$src1, GR16:$src2),
1855 "add{w}\t{$src2, $dst|$dst, $src2}",
1856 [(set GR16:$dst, (add GR16:$src1, GR16:$src2))]>, OpSize;
1857 def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst),
1858 (ins GR32:$src1, GR32:$src2),
1859 "add{l}\t{$src2, $dst|$dst, $src2}",
1860 [(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>;
1861 } // end isConvertibleToThreeAddress
1862 } // end isCommutable
1863 def ADD8rm : I<0x02, MRMSrcMem, (outs GR8 :$dst),
1864 (ins GR8 :$src1, i8mem :$src2),
1865 "add{b}\t{$src2, $dst|$dst, $src2}",
1866 [(set GR8:$dst, (add GR8:$src1, (load addr:$src2)))]>;
1867 def ADD16rm : I<0x03, MRMSrcMem, (outs GR16:$dst),
1868 (ins GR16:$src1, i16mem:$src2),
1869 "add{w}\t{$src2, $dst|$dst, $src2}",
1870 [(set GR16:$dst, (add GR16:$src1, (load addr:$src2)))]>,OpSize;
1871 def ADD32rm : I<0x03, MRMSrcMem, (outs GR32:$dst),
1872 (ins GR32:$src1, i32mem:$src2),
1873 "add{l}\t{$src2, $dst|$dst, $src2}",
1874 [(set GR32:$dst, (add GR32:$src1, (load addr:$src2)))]>;
1876 def ADD8ri : Ii8<0x80, MRM0r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
1877 "add{b}\t{$src2, $dst|$dst, $src2}",
1878 [(set GR8:$dst, (add GR8:$src1, imm:$src2))]>;
1880 let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
1881 def ADD16ri : Ii16<0x81, MRM0r, (outs GR16:$dst),
1882 (ins GR16:$src1, i16imm:$src2),
1883 "add{w}\t{$src2, $dst|$dst, $src2}",
1884 [(set GR16:$dst, (add GR16:$src1, imm:$src2))]>, OpSize;
1885 def ADD32ri : Ii32<0x81, MRM0r, (outs GR32:$dst),
1886 (ins GR32:$src1, i32imm:$src2),
1887 "add{l}\t{$src2, $dst|$dst, $src2}",
1888 [(set GR32:$dst, (add GR32:$src1, imm:$src2))]>;
1889 def ADD16ri8 : Ii8<0x83, MRM0r, (outs GR16:$dst),
1890 (ins GR16:$src1, i16i8imm:$src2),
1891 "add{w}\t{$src2, $dst|$dst, $src2}",
1892 [(set GR16:$dst, (add GR16:$src1, i16immSExt8:$src2))]>, OpSize;
1893 def ADD32ri8 : Ii8<0x83, MRM0r, (outs GR32:$dst),
1894 (ins GR32:$src1, i32i8imm:$src2),
1895 "add{l}\t{$src2, $dst|$dst, $src2}",
1896 [(set GR32:$dst, (add GR32:$src1, i32immSExt8:$src2))]>;
1899 let isTwoAddress = 0 in {
1900 def ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
1901 "add{b}\t{$src2, $dst|$dst, $src2}",
1902 [(store (add (load addr:$dst), GR8:$src2), addr:$dst)]>;
1903 def ADD16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
1904 "add{w}\t{$src2, $dst|$dst, $src2}",
1905                  [(store (add (load addr:$dst), GR16:$src2), addr:$dst)]>,
1906                  OpSize;
1907 def ADD32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
1908 "add{l}\t{$src2, $dst|$dst, $src2}",
1909 [(store (add (load addr:$dst), GR32:$src2), addr:$dst)]>;
1910 def ADD8mi : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
1911 "add{b}\t{$src2, $dst|$dst, $src2}",
1912 [(store (add (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
1913 def ADD16mi : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
1914 "add{w}\t{$src2, $dst|$dst, $src2}",
1915                   [(store (add (loadi16 addr:$dst), imm:$src2), addr:$dst)]>,
1916                   OpSize;
1917 def ADD32mi : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
1918 "add{l}\t{$src2, $dst|$dst, $src2}",
1919 [(store (add (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
1920 def ADD16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
1921 "add{w}\t{$src2, $dst|$dst, $src2}",
1922                  [(store (add (load addr:$dst), i16immSExt8:$src2), addr:$dst)]>,
1923                  OpSize;
1924 def ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
1925 "add{l}\t{$src2, $dst|$dst, $src2}",
1926 [(store (add (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
1929 let Uses = [EFLAGS] in {
1930 let isCommutable = 1 in { // X = ADC Y, Z --> X = ADC Z, Y
1931 def ADC32rr : I<0x11, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1932 "adc{l}\t{$src2, $dst|$dst, $src2}",
1933 [(set GR32:$dst, (adde GR32:$src1, GR32:$src2))]>;
1935 def ADC32rm : I<0x13, MRMSrcMem , (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1936 "adc{l}\t{$src2, $dst|$dst, $src2}",
1937 [(set GR32:$dst, (adde GR32:$src1, (load addr:$src2)))]>;
1938 def ADC32ri : Ii32<0x81, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1939 "adc{l}\t{$src2, $dst|$dst, $src2}",
1940 [(set GR32:$dst, (adde GR32:$src1, imm:$src2))]>;
1941 def ADC32ri8 : Ii8<0x83, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1942 "adc{l}\t{$src2, $dst|$dst, $src2}",
1943 [(set GR32:$dst, (adde GR32:$src1, i32immSExt8:$src2))]>;
1945 let isTwoAddress = 0 in {
1946 def ADC32mr : I<0x11, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
1947 "adc{l}\t{$src2, $dst|$dst, $src2}",
1948 [(store (adde (load addr:$dst), GR32:$src2), addr:$dst)]>;
1949 def ADC32mi : Ii32<0x81, MRM2m, (outs), (ins i32mem:$dst, i32imm:$src2),
1950 "adc{l}\t{$src2, $dst|$dst, $src2}",
1951 [(store (adde (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
1952 def ADC32mi8 : Ii8<0x83, MRM2m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
1953 "adc{l}\t{$src2, $dst|$dst, $src2}",
1954 [(store (adde (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
1956 } // Uses = [EFLAGS]
1958 def SUB8rr : I<0x28, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
1959 "sub{b}\t{$src2, $dst|$dst, $src2}",
1960 [(set GR8:$dst, (sub GR8:$src1, GR8:$src2))]>;
1961 def SUB16rr : I<0x29, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1962 "sub{w}\t{$src2, $dst|$dst, $src2}",
1963 [(set GR16:$dst, (sub GR16:$src1, GR16:$src2))]>, OpSize;
1964 def SUB32rr : I<0x29, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1965 "sub{l}\t{$src2, $dst|$dst, $src2}",
1966 [(set GR32:$dst, (sub GR32:$src1, GR32:$src2))]>;
1967 def SUB8rm : I<0x2A, MRMSrcMem, (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
1968 "sub{b}\t{$src2, $dst|$dst, $src2}",
1969 [(set GR8:$dst, (sub GR8:$src1, (load addr:$src2)))]>;
1970 def SUB16rm : I<0x2B, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1971 "sub{w}\t{$src2, $dst|$dst, $src2}",
1972 [(set GR16:$dst, (sub GR16:$src1, (load addr:$src2)))]>, OpSize;
1973 def SUB32rm : I<0x2B, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1974 "sub{l}\t{$src2, $dst|$dst, $src2}",
1975 [(set GR32:$dst, (sub GR32:$src1, (load addr:$src2)))]>;
1977 def SUB8ri : Ii8 <0x80, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
1978 "sub{b}\t{$src2, $dst|$dst, $src2}",
1979 [(set GR8:$dst, (sub GR8:$src1, imm:$src2))]>;
1980 def SUB16ri : Ii16<0x81, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
1981 "sub{w}\t{$src2, $dst|$dst, $src2}",
1982 [(set GR16:$dst, (sub GR16:$src1, imm:$src2))]>, OpSize;
1983 def SUB32ri : Ii32<0x81, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
1984 "sub{l}\t{$src2, $dst|$dst, $src2}",
1985 [(set GR32:$dst, (sub GR32:$src1, imm:$src2))]>;
1986 def SUB16ri8 : Ii8<0x83, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1987 "sub{w}\t{$src2, $dst|$dst, $src2}",
1988                   [(set GR16:$dst, (sub GR16:$src1, i16immSExt8:$src2))]>,
1989                   OpSize;
1990 def SUB32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1991 "sub{l}\t{$src2, $dst|$dst, $src2}",
1992 [(set GR32:$dst, (sub GR32:$src1, i32immSExt8:$src2))]>;
1993 let isTwoAddress = 0 in {
1994 def SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
1995 "sub{b}\t{$src2, $dst|$dst, $src2}",
1996 [(store (sub (load addr:$dst), GR8:$src2), addr:$dst)]>;
1997 def SUB16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
1998 "sub{w}\t{$src2, $dst|$dst, $src2}",
1999                  [(store (sub (load addr:$dst), GR16:$src2), addr:$dst)]>,
2000                  OpSize;
2001 def SUB32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
2002 "sub{l}\t{$src2, $dst|$dst, $src2}",
2003 [(store (sub (load addr:$dst), GR32:$src2), addr:$dst)]>;
2004 def SUB8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
2005 "sub{b}\t{$src2, $dst|$dst, $src2}",
2006 [(store (sub (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
2007 def SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
2008 "sub{w}\t{$src2, $dst|$dst, $src2}",
2009                   [(store (sub (loadi16 addr:$dst), imm:$src2), addr:$dst)]>,
2010                   OpSize;
2011 def SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
2012 "sub{l}\t{$src2, $dst|$dst, $src2}",
2013 [(store (sub (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
2014 def SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
2015 "sub{w}\t{$src2, $dst|$dst, $src2}",
2016                  [(store (sub (load addr:$dst), i16immSExt8:$src2), addr:$dst)]>,
2017                  OpSize;
2018 def SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
2019 "sub{l}\t{$src2, $dst|$dst, $src2}",
2020 [(store (sub (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
2023 let Uses = [EFLAGS] in {
2024 def SBB32rr : I<0x19, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
2025 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2026 [(set GR32:$dst, (sube GR32:$src1, GR32:$src2))]>;
2028 let isTwoAddress = 0 in {
2029 def SBB32mr : I<0x19, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
2030 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2031 [(store (sube (load addr:$dst), GR32:$src2), addr:$dst)]>;
2032 def SBB8mi : Ii32<0x80, MRM3m, (outs), (ins i8mem:$dst, i8imm:$src2),
2033 "sbb{b}\t{$src2, $dst|$dst, $src2}",
2034 [(store (sube (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
2035 def SBB32mi : Ii32<0x81, MRM3m, (outs), (ins i32mem:$dst, i32imm:$src2),
2036 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2037 [(store (sube (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
2038 def SBB32mi8 : Ii8<0x83, MRM3m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
2039 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2040 [(store (sube (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
2042 def SBB32rm : I<0x1B, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
2043 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2044 [(set GR32:$dst, (sube GR32:$src1, (load addr:$src2)))]>;
2045 def SBB32ri : Ii32<0x81, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
2046 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2047 [(set GR32:$dst, (sube GR32:$src1, imm:$src2))]>;
2048 def SBB32ri8 : Ii8<0x83, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
2049 "sbb{l}\t{$src2, $dst|$dst, $src2}",
2050 [(set GR32:$dst, (sube GR32:$src1, i32immSExt8:$src2))]>;
2051 } // Uses = [EFLAGS]
2052 } // Defs = [EFLAGS]
2054 let Defs = [EFLAGS] in {
2055 let isCommutable = 1 in { // X = IMUL Y, Z --> X = IMUL Z, Y
2056 def IMUL16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
2057 "imul{w}\t{$src2, $dst|$dst, $src2}",
2058 [(set GR16:$dst, (mul GR16:$src1, GR16:$src2))]>, TB, OpSize;
2059 def IMUL32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
2060 "imul{l}\t{$src2, $dst|$dst, $src2}",
2061 [(set GR32:$dst, (mul GR32:$src1, GR32:$src2))]>, TB;
2063 def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
2064 "imul{w}\t{$src2, $dst|$dst, $src2}",
2065                  [(set GR16:$dst, (mul GR16:$src1, (load addr:$src2)))]>,
2066                  TB, OpSize;
2067 def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
2068 "imul{l}\t{$src2, $dst|$dst, $src2}",
2069 [(set GR32:$dst, (mul GR32:$src1, (load addr:$src2)))]>, TB;
2070 } // Defs = [EFLAGS]
2071 } // end Two Address instructions
2073 // Surprisingly enough, these are not two address instructions!
2074 let Defs = [EFLAGS] in {
2075 def IMUL16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
2076 (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
2077 "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2078 [(set GR16:$dst, (mul GR16:$src1, imm:$src2))]>, OpSize;
2079 def IMUL32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
2080 (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
2081 "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2082 [(set GR32:$dst, (mul GR32:$src1, imm:$src2))]>;
2083 def IMUL16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
2084 (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
2085 "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2086                      [(set GR16:$dst, (mul GR16:$src1, i16immSExt8:$src2))]>,
2087                      OpSize;
2088 def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
2089 (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
2090 "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2091 [(set GR32:$dst, (mul GR32:$src1, i32immSExt8:$src2))]>;
2093 def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
2094 (outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2),
2095 "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2096                      [(set GR16:$dst, (mul (load addr:$src1), imm:$src2))]>,
2097                      OpSize;
2098 def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
2099 (outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
2100 "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2101 [(set GR32:$dst, (mul (load addr:$src1), imm:$src2))]>;
2102 def IMUL16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
2103 (outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
2104 "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2105                      [(set GR16:$dst, (mul (load addr:$src1), i16immSExt8:$src2))]>,
2106                      OpSize;
2107 def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
2108 (outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
2109 "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2110 [(set GR32:$dst, (mul (load addr:$src1), i32immSExt8:$src2))]>;
2111 } // Defs = [EFLAGS]
2113 //===----------------------------------------------------------------------===//
2114 // Test instructions are just like AND, except they don't generate a result.
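// For example, 'testl %eax, %eax' sets EFLAGS from EAX & EAX without writing a
// general-purpose result. The and_su fragment used below only matches an AND
// whose sole use is the compare, so the AND value itself never has to be
// materialized in a register.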
2116 let Defs = [EFLAGS] in {
2117 let isCommutable = 1 in { // TEST X, Y --> TEST Y, X
2118 def TEST8rr : I<0x84, MRMDestReg, (outs), (ins GR8:$src1, GR8:$src2),
2119 "test{b}\t{$src2, $src1|$src1, $src2}",
2120 [(X86cmp (and_su GR8:$src1, GR8:$src2), 0),
2121 (implicit EFLAGS)]>;
2122 def TEST16rr : I<0x85, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
2123 "test{w}\t{$src2, $src1|$src1, $src2}",
2124 [(X86cmp (and_su GR16:$src1, GR16:$src2), 0),
2125                      (implicit EFLAGS)]>,
2126                      OpSize;
2127 def TEST32rr : I<0x85, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
2128 "test{l}\t{$src2, $src1|$src1, $src2}",
2129 [(X86cmp (and_su GR32:$src1, GR32:$src2), 0),
2130 (implicit EFLAGS)]>;
2133 def TEST8rm : I<0x84, MRMSrcMem, (outs), (ins GR8 :$src1, i8mem :$src2),
2134 "test{b}\t{$src2, $src1|$src1, $src2}",
2135 [(X86cmp (and GR8:$src1, (loadi8 addr:$src2)), 0),
2136 (implicit EFLAGS)]>;
2137 def TEST16rm : I<0x85, MRMSrcMem, (outs), (ins GR16:$src1, i16mem:$src2),
2138 "test{w}\t{$src2, $src1|$src1, $src2}",
2139 [(X86cmp (and GR16:$src1, (loadi16 addr:$src2)), 0),
2140 (implicit EFLAGS)]>, OpSize;
2141 def TEST32rm : I<0x85, MRMSrcMem, (outs), (ins GR32:$src1, i32mem:$src2),
2142 "test{l}\t{$src2, $src1|$src1, $src2}",
2143 [(X86cmp (and GR32:$src1, (loadi32 addr:$src2)), 0),
2144 (implicit EFLAGS)]>;
2146 def TEST8ri : Ii8 <0xF6, MRM0r, // flags = GR8 & imm8
2147 (outs), (ins GR8:$src1, i8imm:$src2),
2148 "test{b}\t{$src2, $src1|$src1, $src2}",
2149 [(X86cmp (and_su GR8:$src1, imm:$src2), 0),
2150 (implicit EFLAGS)]>;
2151 def TEST16ri : Ii16<0xF7, MRM0r, // flags = GR16 & imm16
2152 (outs), (ins GR16:$src1, i16imm:$src2),
2153 "test{w}\t{$src2, $src1|$src1, $src2}",
2154 [(X86cmp (and_su GR16:$src1, imm:$src2), 0),
2155 (implicit EFLAGS)]>, OpSize;
2156 def TEST32ri : Ii32<0xF7, MRM0r, // flags = GR32 & imm32
2157 (outs), (ins GR32:$src1, i32imm:$src2),
2158 "test{l}\t{$src2, $src1|$src1, $src2}",
2159 [(X86cmp (and_su GR32:$src1, imm:$src2), 0),
2160 (implicit EFLAGS)]>;
2162 def TEST8mi : Ii8 <0xF6, MRM0m, // flags = [mem8] & imm8
2163 (outs), (ins i8mem:$src1, i8imm:$src2),
2164 "test{b}\t{$src2, $src1|$src1, $src2}",
2165 [(X86cmp (and (loadi8 addr:$src1), imm:$src2), 0),
2166 (implicit EFLAGS)]>;
2167 def TEST16mi : Ii16<0xF7, MRM0m, // flags = [mem16] & imm16
2168 (outs), (ins i16mem:$src1, i16imm:$src2),
2169 "test{w}\t{$src2, $src1|$src1, $src2}",
2170 [(X86cmp (and (loadi16 addr:$src1), imm:$src2), 0),
2171 (implicit EFLAGS)]>, OpSize;
2172 def TEST32mi : Ii32<0xF7, MRM0m, // flags = [mem32] & imm32
2173 (outs), (ins i32mem:$src1, i32imm:$src2),
2174 "test{l}\t{$src2, $src1|$src1, $src2}",
2175 [(X86cmp (and (loadi32 addr:$src1), imm:$src2), 0),
2176 (implicit EFLAGS)]>;
2177 } // Defs = [EFLAGS]
2180 // Condition code ops, incl. set if equal/not equal/...
2181 let Defs = [EFLAGS], Uses = [AH], neverHasSideEffects = 1 in
2182 def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>; // flags = AH
2183 let Defs = [AH], Uses = [EFLAGS], neverHasSideEffects = 1 in
2184 def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>; // AH = flags
2186 let Uses = [EFLAGS] in {
2187 def SETEr : I<0x94, MRM0r,
2188 (outs GR8 :$dst), (ins),
2190 [(set GR8:$dst, (X86setcc X86_COND_E, EFLAGS))]>,
2192 def SETEm : I<0x94, MRM0m,
2193 (outs), (ins i8mem:$dst),
2195 [(store (X86setcc X86_COND_E, EFLAGS), addr:$dst)]>,
2197 def SETNEr : I<0x95, MRM0r,
2198 (outs GR8 :$dst), (ins),
2200 [(set GR8:$dst, (X86setcc X86_COND_NE, EFLAGS))]>,
2202 def SETNEm : I<0x95, MRM0m,
2203 (outs), (ins i8mem:$dst),
2205 [(store (X86setcc X86_COND_NE, EFLAGS), addr:$dst)]>,
2207 def SETLr : I<0x9C, MRM0r,
2208 (outs GR8 :$dst), (ins),
2210 [(set GR8:$dst, (X86setcc X86_COND_L, EFLAGS))]>,
2211 TB; // GR8 = < signed
2212 def SETLm : I<0x9C, MRM0m,
2213 (outs), (ins i8mem:$dst),
2215 [(store (X86setcc X86_COND_L, EFLAGS), addr:$dst)]>,
2216 TB; // [mem8] = < signed
2217 def SETGEr : I<0x9D, MRM0r,
2218 (outs GR8 :$dst), (ins),
2220 [(set GR8:$dst, (X86setcc X86_COND_GE, EFLAGS))]>,
2221 TB; // GR8 = >= signed
2222 def SETGEm : I<0x9D, MRM0m,
2223 (outs), (ins i8mem:$dst),
2225 [(store (X86setcc X86_COND_GE, EFLAGS), addr:$dst)]>,
2226 TB; // [mem8] = >= signed
2227 def SETLEr : I<0x9E, MRM0r,
2228 (outs GR8 :$dst), (ins),
2230 [(set GR8:$dst, (X86setcc X86_COND_LE, EFLAGS))]>,
2231 TB; // GR8 = <= signed
2232 def SETLEm : I<0x9E, MRM0m,
2233 (outs), (ins i8mem:$dst),
2235 [(store (X86setcc X86_COND_LE, EFLAGS), addr:$dst)]>,
2236 TB; // [mem8] = <= signed
2237 def SETGr : I<0x9F, MRM0r,
2238 (outs GR8 :$dst), (ins),
2240 [(set GR8:$dst, (X86setcc X86_COND_G, EFLAGS))]>,
2241 TB; // GR8 = > signed
2242 def SETGm : I<0x9F, MRM0m,
2243 (outs), (ins i8mem:$dst),
2245 [(store (X86setcc X86_COND_G, EFLAGS), addr:$dst)]>,
2246 TB; // [mem8] = > signed
2248 def SETBr : I<0x92, MRM0r,
2249 (outs GR8 :$dst), (ins),
2251 [(set GR8:$dst, (X86setcc X86_COND_B, EFLAGS))]>,
2252 TB; // GR8 = < unsign
2253 def SETBm : I<0x92, MRM0m,
2254 (outs), (ins i8mem:$dst),
2256 [(store (X86setcc X86_COND_B, EFLAGS), addr:$dst)]>,
2257 TB; // [mem8] = < unsign
2258 def SETAEr : I<0x93, MRM0r,
2259 (outs GR8 :$dst), (ins),
2261 [(set GR8:$dst, (X86setcc X86_COND_AE, EFLAGS))]>,
2262 TB; // GR8 = >= unsign
2263 def SETAEm : I<0x93, MRM0m,
2264 (outs), (ins i8mem:$dst),
2266 [(store (X86setcc X86_COND_AE, EFLAGS), addr:$dst)]>,
2267 TB; // [mem8] = >= unsign
2268 def SETBEr : I<0x96, MRM0r,
2269 (outs GR8 :$dst), (ins),
2271 [(set GR8:$dst, (X86setcc X86_COND_BE, EFLAGS))]>,
2272 TB; // GR8 = <= unsign
2273 def SETBEm : I<0x96, MRM0m,
2274 (outs), (ins i8mem:$dst),
2276 [(store (X86setcc X86_COND_BE, EFLAGS), addr:$dst)]>,
2277 TB; // [mem8] = <= unsign
2278 def SETAr : I<0x97, MRM0r,
2279 (outs GR8 :$dst), (ins),
2281 [(set GR8:$dst, (X86setcc X86_COND_A, EFLAGS))]>,
2282                TB; // GR8 = > unsigned
2283 def SETAm : I<0x97, MRM0m,
2284 (outs), (ins i8mem:$dst),
2286 [(store (X86setcc X86_COND_A, EFLAGS), addr:$dst)]>,
2287                TB; // [mem8] = > unsigned
2289 def SETSr : I<0x98, MRM0r,
2290 (outs GR8 :$dst), (ins),
2292 [(set GR8:$dst, (X86setcc X86_COND_S, EFLAGS))]>,
2293 TB; // GR8 = <sign bit>
2294 def SETSm : I<0x98, MRM0m,
2295 (outs), (ins i8mem:$dst),
2297 [(store (X86setcc X86_COND_S, EFLAGS), addr:$dst)]>,
2298 TB; // [mem8] = <sign bit>
2299 def SETNSr : I<0x99, MRM0r,
2300 (outs GR8 :$dst), (ins),
2302 [(set GR8:$dst, (X86setcc X86_COND_NS, EFLAGS))]>,
2303 TB; // GR8 = !<sign bit>
2304 def SETNSm : I<0x99, MRM0m,
2305 (outs), (ins i8mem:$dst),
2307 [(store (X86setcc X86_COND_NS, EFLAGS), addr:$dst)]>,
2308 TB; // [mem8] = !<sign bit>
2309 def SETPr : I<0x9A, MRM0r,
2310 (outs GR8 :$dst), (ins),
2312 [(set GR8:$dst, (X86setcc X86_COND_P, EFLAGS))]>,
2314 def SETPm : I<0x9A, MRM0m,
2315 (outs), (ins i8mem:$dst),
2317 [(store (X86setcc X86_COND_P, EFLAGS), addr:$dst)]>,
2318 TB; // [mem8] = parity
2319 def SETNPr : I<0x9B, MRM0r,
2320 (outs GR8 :$dst), (ins),
2322 [(set GR8:$dst, (X86setcc X86_COND_NP, EFLAGS))]>,
2323 TB; // GR8 = not parity
2324 def SETNPm : I<0x9B, MRM0m,
2325 (outs), (ins i8mem:$dst),
2327 [(store (X86setcc X86_COND_NP, EFLAGS), addr:$dst)]>,
2328 TB; // [mem8] = not parity
2329 } // Uses = [EFLAGS]
2332 // Integer comparisons
2333 let Defs = [EFLAGS] in {
2334 def CMP8rr : I<0x38, MRMDestReg,
2335 (outs), (ins GR8 :$src1, GR8 :$src2),
2336 "cmp{b}\t{$src2, $src1|$src1, $src2}",
2337 [(X86cmp GR8:$src1, GR8:$src2), (implicit EFLAGS)]>;
2338 def CMP16rr : I<0x39, MRMDestReg,
2339 (outs), (ins GR16:$src1, GR16:$src2),
2340 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2341 [(X86cmp GR16:$src1, GR16:$src2), (implicit EFLAGS)]>, OpSize;
2342 def CMP32rr : I<0x39, MRMDestReg,
2343 (outs), (ins GR32:$src1, GR32:$src2),
2344 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2345 [(X86cmp GR32:$src1, GR32:$src2), (implicit EFLAGS)]>;
2346 def CMP8mr : I<0x38, MRMDestMem,
2347 (outs), (ins i8mem :$src1, GR8 :$src2),
2348 "cmp{b}\t{$src2, $src1|$src1, $src2}",
2349 [(X86cmp (loadi8 addr:$src1), GR8:$src2),
2350 (implicit EFLAGS)]>;
2351 def CMP16mr : I<0x39, MRMDestMem,
2352 (outs), (ins i16mem:$src1, GR16:$src2),
2353 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2354 [(X86cmp (loadi16 addr:$src1), GR16:$src2),
2355 (implicit EFLAGS)]>, OpSize;
2356 def CMP32mr : I<0x39, MRMDestMem,
2357 (outs), (ins i32mem:$src1, GR32:$src2),
2358 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2359 [(X86cmp (loadi32 addr:$src1), GR32:$src2),
2360 (implicit EFLAGS)]>;
2361 def CMP8rm : I<0x3A, MRMSrcMem,
2362 (outs), (ins GR8 :$src1, i8mem :$src2),
2363 "cmp{b}\t{$src2, $src1|$src1, $src2}",
2364 [(X86cmp GR8:$src1, (loadi8 addr:$src2)),
2365 (implicit EFLAGS)]>;
2366 def CMP16rm : I<0x3B, MRMSrcMem,
2367 (outs), (ins GR16:$src1, i16mem:$src2),
2368 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2369 [(X86cmp GR16:$src1, (loadi16 addr:$src2)),
2370 (implicit EFLAGS)]>, OpSize;
2371 def CMP32rm : I<0x3B, MRMSrcMem,
2372 (outs), (ins GR32:$src1, i32mem:$src2),
2373 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2374 [(X86cmp GR32:$src1, (loadi32 addr:$src2)),
2375 (implicit EFLAGS)]>;
2376 def CMP8ri : Ii8<0x80, MRM7r,
2377 (outs), (ins GR8:$src1, i8imm:$src2),
2378 "cmp{b}\t{$src2, $src1|$src1, $src2}",
2379 [(X86cmp GR8:$src1, imm:$src2), (implicit EFLAGS)]>;
2380 def CMP16ri : Ii16<0x81, MRM7r,
2381 (outs), (ins GR16:$src1, i16imm:$src2),
2382 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2383 [(X86cmp GR16:$src1, imm:$src2),
2384 (implicit EFLAGS)]>, OpSize;
2385 def CMP32ri : Ii32<0x81, MRM7r,
2386 (outs), (ins GR32:$src1, i32imm:$src2),
2387 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2388 [(X86cmp GR32:$src1, imm:$src2), (implicit EFLAGS)]>;
2389 def CMP8mi : Ii8 <0x80, MRM7m,
2390 (outs), (ins i8mem :$src1, i8imm :$src2),
2391 "cmp{b}\t{$src2, $src1|$src1, $src2}",
2392 [(X86cmp (loadi8 addr:$src1), imm:$src2),
2393 (implicit EFLAGS)]>;
2394 def CMP16mi : Ii16<0x81, MRM7m,
2395 (outs), (ins i16mem:$src1, i16imm:$src2),
2396 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2397 [(X86cmp (loadi16 addr:$src1), imm:$src2),
2398 (implicit EFLAGS)]>, OpSize;
2399 def CMP32mi : Ii32<0x81, MRM7m,
2400 (outs), (ins i32mem:$src1, i32imm:$src2),
2401 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2402 [(X86cmp (loadi32 addr:$src1), imm:$src2),
2403 (implicit EFLAGS)]>;
2404 def CMP16ri8 : Ii8<0x83, MRM7r,
2405 (outs), (ins GR16:$src1, i16i8imm:$src2),
2406 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2407 [(X86cmp GR16:$src1, i16immSExt8:$src2),
2408 (implicit EFLAGS)]>, OpSize;
2409 def CMP16mi8 : Ii8<0x83, MRM7m,
2410 (outs), (ins i16mem:$src1, i16i8imm:$src2),
2411 "cmp{w}\t{$src2, $src1|$src1, $src2}",
2412 [(X86cmp (loadi16 addr:$src1), i16immSExt8:$src2),
2413 (implicit EFLAGS)]>, OpSize;
2414 def CMP32mi8 : Ii8<0x83, MRM7m,
2415 (outs), (ins i32mem:$src1, i32i8imm:$src2),
2416 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2417 [(X86cmp (loadi32 addr:$src1), i32immSExt8:$src2),
2418 (implicit EFLAGS)]>;
2419 def CMP32ri8 : Ii8<0x83, MRM7r,
2420 (outs), (ins GR32:$src1, i32i8imm:$src2),
2421 "cmp{l}\t{$src2, $src1|$src1, $src2}",
2422 [(X86cmp GR32:$src1, i32immSExt8:$src2),
2423 (implicit EFLAGS)]>;
2424 } // Defs = [EFLAGS]
2426 // Sign/Zero extenders
2427 // Use movsbl instead of movsbw; we don't care about the high 16 bits
2428 // of the register here. This has a smaller encoding and avoids a
2429 // partial-register update.
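// (Illustrative encodings: 'movsbw %al, %cx' needs the 0x66 operand-size prefix,
// 66 0F BE C8, and writes only CX, while 'movsbl %al, %ecx' is 0F BE C8 and
// writes all of ECX, so a later read of the full register never waits on a
// partial write.)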
2430 def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
2431 "movs{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
2432 [(set GR16:$dst, (sext GR8:$src))]>, TB;
2433 def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
2434 "movs{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
2435 [(set GR16:$dst, (sextloadi16i8 addr:$src))]>, TB;
2436 def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
2437 "movs{bl|x}\t{$src, $dst|$dst, $src}",
2438 [(set GR32:$dst, (sext GR8:$src))]>, TB;
2439 def MOVSX32rm8 : I<0xBE, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
2440 "movs{bl|x}\t{$src, $dst|$dst, $src}",
2441 [(set GR32:$dst, (sextloadi32i8 addr:$src))]>, TB;
2442 def MOVSX32rr16: I<0xBF, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
2443 "movs{wl|x}\t{$src, $dst|$dst, $src}",
2444 [(set GR32:$dst, (sext GR16:$src))]>, TB;
2445 def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
2446 "movs{wl|x}\t{$src, $dst|$dst, $src}",
2447 [(set GR32:$dst, (sextloadi32i16 addr:$src))]>, TB;
2449 // Use movzbl instead of movzbw; we don't care about the high 16 bits
2450 // of the register here. This has a smaller encoding and avoids a
2451 // partial-register update.
2452 def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
2453 "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
2454 [(set GR16:$dst, (zext GR8:$src))]>, TB;
2455 def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
2456 "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
2457 [(set GR16:$dst, (zextloadi16i8 addr:$src))]>, TB;
2458 def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
2459 "movz{bl|x}\t{$src, $dst|$dst, $src}",
2460 [(set GR32:$dst, (zext GR8:$src))]>, TB;
2461 def MOVZX32rm8 : I<0xB6, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
2462 "movz{bl|x}\t{$src, $dst|$dst, $src}",
2463 [(set GR32:$dst, (zextloadi32i8 addr:$src))]>, TB;
2464 def MOVZX32rr16: I<0xB7, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
2465 "movz{wl|x}\t{$src, $dst|$dst, $src}",
2466 [(set GR32:$dst, (zext GR16:$src))]>, TB;
2467 def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
2468 "movz{wl|x}\t{$src, $dst|$dst, $src}",
2469 [(set GR32:$dst, (zextloadi32i16 addr:$src))]>, TB;
2471 let neverHasSideEffects = 1 in {
2472 let Defs = [AX], Uses = [AL] in
2473 def CBW : I<0x98, RawFrm, (outs), (ins),
2474 "{cbtw|cbw}", []>, OpSize; // AX = signext(AL)
2475 let Defs = [EAX], Uses = [AX] in
2476 def CWDE : I<0x98, RawFrm, (outs), (ins),
2477 "{cwtl|cwde}", []>; // EAX = signext(AX)
2479 let Defs = [AX,DX], Uses = [AX] in
2480 def CWD : I<0x99, RawFrm, (outs), (ins),
2481 "{cwtd|cwd}", []>, OpSize; // DX:AX = signext(AX)
2482 let Defs = [EAX,EDX], Uses = [EAX] in
2483 def CDQ : I<0x99, RawFrm, (outs), (ins),
2484 "{cltd|cdq}", []>; // EDX:EAX = signext(EAX)
2487 //===----------------------------------------------------------------------===//
2488 // Alias Instructions
2489 //===----------------------------------------------------------------------===//
2491 // Alias instructions that map movr0 to xor.
2492 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
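// (For example, 'movl $0, %eax' encodes as B8 00 00 00 00 -- five bytes -- while
// 'xorl %eax, %eax' is just 31 C0. The trade-off is that xor clobbers EFLAGS,
// which is why the definitions below are under Defs = [EFLAGS].)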
2493 let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1 in {
2494 def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins),
2495 "xor{b}\t$dst, $dst",
2496 [(set GR8:$dst, 0)]>;
2497 // Use xorl instead of xorw since we don't care about the high 16 bits,
2498 // it's smaller, and it avoids a partial-register update.
2499 def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
2500 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
2501 [(set GR16:$dst, 0)]>;
2502 def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins),
2503 "xor{l}\t$dst, $dst",
2504 [(set GR32:$dst, 0)]>;
2507 // Basic operations on GR16 / GR32 subclasses GR16_ and GR32_, which contain only
2508 // those registers that have GR8 sub-registers (i.e. AX - DX, EAX - EDX).
2509 let neverHasSideEffects = 1 in {
2510 def MOV16to16_ : I<0x89, MRMDestReg, (outs GR16_:$dst), (ins GR16:$src),
2511 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
2512 def MOV32to32_ : I<0x89, MRMDestReg, (outs GR32_:$dst), (ins GR32:$src),
2513 "mov{l}\t{$src, $dst|$dst, $src}", []>;
2515 def MOV16_rr : I<0x89, MRMDestReg, (outs GR16_:$dst), (ins GR16_:$src),
2516 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
2517 def MOV32_rr : I<0x89, MRMDestReg, (outs GR32_:$dst), (ins GR32_:$src),
2518 "mov{l}\t{$src, $dst|$dst, $src}", []>;
2519 } // neverHasSideEffects
2521 let isSimpleLoad = 1, mayLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
2522 def MOV16_rm : I<0x8B, MRMSrcMem, (outs GR16_:$dst), (ins i16mem:$src),
2523 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
2524 def MOV32_rm : I<0x8B, MRMSrcMem, (outs GR32_:$dst), (ins i32mem:$src),
2525 "mov{l}\t{$src, $dst|$dst, $src}", []>;
2527 let mayStore = 1, neverHasSideEffects = 1 in {
2528 def MOV16_mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16_:$src),
2529 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
2530 def MOV32_mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32_:$src),
2531 "mov{l}\t{$src, $dst|$dst, $src}", []>;
2534 //===----------------------------------------------------------------------===//
2535 // Thread Local Storage Instructions
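// The pseudos below are printed as %gs-relative accesses: TLS_tp reads the
// thread pointer from %gs:0, TLS_gs_rr / TLS_gs_ri fold a register or symbolic
// offset from the thread pointer into a single %gs-relative load, and
// TLS_addr32 lowers X86tlsaddr to a leal relative to %ebx (the GOT pointer in
// PIC code).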
2539 def TLS_addr32 : I<0, Pseudo, (outs GR32:$dst), (ins i32imm:$sym),
2540 "leal\t${sym:mem}(,%ebx,1), $dst",
2541 [(set GR32:$dst, (X86tlsaddr tglobaltlsaddr:$sym))]>;
2543 let AddedComplexity = 10 in
2544 def TLS_gs_rr : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
2545 "movl\t%gs:($src), $dst",
2546 [(set GR32:$dst, (load (add X86TLStp, GR32:$src)))]>;
2548 let AddedComplexity = 15 in
2549 def TLS_gs_ri : I<0, Pseudo, (outs GR32:$dst), (ins i32imm:$src),
2550 "movl\t%gs:${src:mem}, $dst",
2551                   [(set GR32:$dst,
2552                    (load (add X86TLStp, (X86Wrapper tglobaltlsaddr:$src))))]>;
2554 def TLS_tp : I<0, Pseudo, (outs GR32:$dst), (ins),
2555 "movl\t%gs:0, $dst",
2556 [(set GR32:$dst, X86TLStp)]>;
2558 //===----------------------------------------------------------------------===//
2559 // DWARF Pseudo Instructions
2562 def DWARF_LOC : I<0, Pseudo, (outs),
2563 (ins i32imm:$line, i32imm:$col, i32imm:$file),
2564 ".loc\t${file:debug} ${line:debug} ${col:debug}",
2565                     [(dwarf_loc (i32 imm:$line), (i32 imm:$col),
2566                                 (i32 imm:$file))]>;
2568 //===----------------------------------------------------------------------===//
2569 // EH Pseudo Instructions
2571 let isTerminator = 1, isReturn = 1, isBarrier = 1,
2572     hasCtrlDep = 1 in {
2573 def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
2574 "ret\t#eh_return, addr: $addr",
2575 [(X86ehret GR32:$addr)]>;
2579 //===----------------------------------------------------------------------===//
2583 // Atomic swap. These are just normal xchg instructions; with a memory operand
2584 // the processor asserts LOCK# implicitly, so the exchange is atomic.
2585 let Constraints = "$val = $dst", Defs = [EFLAGS] in {
2586 def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
2587 "xchg{l}\t{$val, $ptr|$ptr, $val}",
2588 [(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))]>;
2589 def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
2590 "xchg{w}\t{$val, $ptr|$ptr, $val}",
2591                  [(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))]>,
2592                  OpSize;
2593 def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
2594 "xchg{b}\t{$val, $ptr|$ptr, $val}",
2595 [(set GR8:$dst, (atomic_swap_8 addr:$ptr, GR8:$val))]>;
2598 // Atomic compare and swap.
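// cmpxchg compares the accumulator (AL/AX/EAX) with the memory operand: if they
// are equal it stores $swap there and sets ZF, otherwise it loads the memory
// value into the accumulator and clears ZF -- hence the Defs/Uses of the
// accumulator below. cmpxchg8b does the same for a 64-bit value held in EDX:EAX,
// with the replacement in ECX:EBX.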
2599 let Defs = [EAX, EFLAGS], Uses = [EAX] in {
2600 def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
2601 "lock cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
2602 [(X86cas addr:$ptr, GR32:$swap, 4)]>, TB, LOCK;
2604 let Defs = [EAX, EBX, ECX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in {
2605 def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i32mem:$ptr),
2606 "lock cmpxchg8b\t$ptr",
2607 [(X86cas8 addr:$ptr)]>, TB, LOCK;
2610 let Defs = [AX, EFLAGS], Uses = [AX] in {
2611 def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
2612 "lock cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
2613 [(X86cas addr:$ptr, GR16:$swap, 2)]>, TB, OpSize, LOCK;
2615 let Defs = [AL, EFLAGS], Uses = [AL] in {
2616 def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
2617 "lock cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
2618 [(X86cas addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
2621 // Atomic exchange and add
2622 let Constraints = "$val = $dst", Defs = [EFLAGS] in {
2623 def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
2624 "lock xadd{l}\t{$val, $ptr|$ptr, $val}",
2625                  [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
2626                  TB, LOCK;
2627 def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
2628 "lock xadd{w}\t{$val, $ptr|$ptr, $val}",
2629                  [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
2630                  TB, OpSize, LOCK;
2631 def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
2632 "lock xadd{b}\t{$val, $ptr|$ptr, $val}",
2633                  [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
2634                  TB, LOCK;
2637 // Atomic exchange, and, or, xor
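// These pseudos have no fixed encoding: the custom inserter hook in
// X86ISelLowering expands each one into a load / operate / lock-cmpxchg retry
// loop, roughly (sketch only; register choices are made by the inserter):
//     movl  (%ptr), %eax
//   retry:
//     movl  %eax, %tmp
//     andl  %val, %tmp            // or or/xor/nand/min/max/umin/umax
//     lock cmpxchgl %tmp, (%ptr)
//     jne   retry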
2638 let Constraints = "$val = $dst", Defs = [EFLAGS],
2639 usesCustomDAGSchedInserter = 1 in {
2640 def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2641                    "#ATOMAND32 PSEUDO!",
2642 [(set GR32:$dst, (atomic_load_and addr:$ptr, GR32:$val))]>;
2643 def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2644                    "#ATOMOR32 PSEUDO!",
2645 [(set GR32:$dst, (atomic_load_or addr:$ptr, GR32:$val))]>;
2646 def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2647                    "#ATOMXOR32 PSEUDO!",
2648 [(set GR32:$dst, (atomic_load_xor addr:$ptr, GR32:$val))]>;
2649 def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2650                    "#ATOMNAND32 PSEUDO!",
2651 [(set GR32:$dst, (atomic_load_nand addr:$ptr, GR32:$val))]>;
2653 def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
2654                    "#ATOMMIN32 PSEUDO!",
2655 [(set GR32:$dst, (atomic_load_min addr:$ptr, GR32:$val))]>;
2656 def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2657                    "#ATOMMAX32 PSEUDO!",
2658 [(set GR32:$dst, (atomic_load_max addr:$ptr, GR32:$val))]>;
2659 def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2660                    "#ATOMUMIN32 PSEUDO!",
2661 [(set GR32:$dst, (atomic_load_umin addr:$ptr, GR32:$val))]>;
2662 def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
2663                    "#ATOMUMAX32 PSEUDO!",
2664 [(set GR32:$dst, (atomic_load_umax addr:$ptr, GR32:$val))]>;
2667 //===----------------------------------------------------------------------===//
2668 // Non-Instruction Patterns
2669 //===----------------------------------------------------------------------===//
2671 // ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
2672 def : Pat<(i32 (X86Wrapper tconstpool :$dst)), (MOV32ri tconstpool :$dst)>;
2673 def : Pat<(i32 (X86Wrapper tjumptable :$dst)), (MOV32ri tjumptable :$dst)>;
2674 def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
2675 def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
2676 def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
2678 def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
2679 (ADD32ri GR32:$src1, tconstpool:$src2)>;
2680 def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
2681 (ADD32ri GR32:$src1, tjumptable:$src2)>;
2682 def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
2683 (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
2684 def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
2685 (ADD32ri GR32:$src1, texternalsym:$src2)>;
2687 def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
2688 (MOV32mi addr:$dst, tglobaladdr:$src)>;
2689 def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
2690 (MOV32mi addr:$dst, texternalsym:$src)>;
2694 def : Pat<(X86tailcall GR32:$dst),
2695           (TAILJMPr GR32:$dst)>;
2697 def : Pat<(X86tailcall (i32 tglobaladdr:$dst)),
2698           (TAILJMPd tglobaladdr:$dst)>;
2699 def : Pat<(X86tailcall (i32 texternalsym:$dst)),
2700           (TAILJMPd texternalsym:$dst)>;
2702 def : Pat<(X86tcret GR32:$dst, imm:$off),
2703 (TCRETURNri GR32:$dst, imm:$off)>;
2705 def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
2706           (TCRETURNdi tglobaladdr:$dst, imm:$off)>;
2708 def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
2709 (TCRETURNdi texternalsym:$dst, imm:$off)>;
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;

// X86-specific add/sub which also produce a flag (carry out in EFLAGS).
def : Pat<(addc GR32:$src1, GR32:$src2),
          (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(addc GR32:$src1, (load addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(addc GR32:$src1, imm:$src2),
          (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(addc GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

def : Pat<(subc GR32:$src1, GR32:$src2),
          (SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(subc GR32:$src1, (load addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(subc GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(subc GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
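// Editorial note: addc/subc model the carry-producing halves of multi-word
// arithmetic. The plain ADD32*/SUB32* forms are usable here because they
// always set CF in EFLAGS, which the carry-consuming ADC/SBB forms can then
// pick up. For example, a 64-bit add lowered to 32-bit code becomes
// (illustrative only):
//
//     addl %esi, %eax      // addc: low words, sets CF
//     adcl %edi, %edx      // adde: high words, consumes CF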
// TEST R,R is smaller than CMP R,0
def : Pat<(parallel (X86cmp GR8:$src1, 0), (implicit EFLAGS)),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(parallel (X86cmp GR16:$src1, 0), (implicit EFLAGS)),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(parallel (X86cmp GR32:$src1, 0), (implicit EFLAGS)),
          (TEST32rr GR32:$src1, GR32:$src1)>;
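// Editorial note: the size claim above is about encodings, e.g. (not taken
// from this file):
//
//     85 c0        testl %eax, %eax    // 2 bytes
//     83 f8 00     cmpl  $0, %eax      // 3 bytes (imm8 form)
//
// For a comparison against zero both set the flag bits that the later branch
// or setcc reads, so the shorter TEST form is preferred.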
// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;

// extload bool -> extload byte
def : Pat<(extloadi8i1  addr:$src),  (MOV8rm     addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8 addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8 addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
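// Editorial note (reasoning not stated in the original): extload only
// requires the low bits to be meaningful, so a plain narrow load would be
// legal here; MOVZX is used instead so the whole destination register is
// written, avoiding a false dependency / partial-register stall on its
// previous contents.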
// anyext
def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8  GR8 :$src)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8  GR8 :$src)>;
def : Pat<(i32 (anyext GR16:$src)), (MOVZX32rr16 GR16:$src)>;
def : Pat<(i16 (anyext (loadi8  addr:$src))), (MOVZX16rm8  addr:$src)>;
def : Pat<(i32 (anyext (loadi8  addr:$src))), (MOVZX32rm8  addr:$src)>;
def : Pat<(i32 (anyext (loadi16 addr:$src))), (MOVZX32rm16 addr:$src)>;

// (and (i32 load), 255) -> (zextload i8)
def : Pat<(i32 (and (loadi32 addr:$src), (i32 255))),   (MOVZX32rm8  addr:$src)>;
def : Pat<(i32 (and (loadi32 addr:$src), (i32 65535))), (MOVZX32rm16 addr:$src)>;
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR32:$src1, x86_subreg_16bit)))>;
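// Editorial note, encodings for illustration (not taken from this file):
//
//     25 ff ff 00 00   andl   $0xffff, %eax   // 5 bytes
//     0f b7 c0         movzwl %ax, %eax       // 3 bytes
//
// Extracting the 16-bit subregister and zero-extending it back produces the
// same value in fewer bytes, which is why the pattern above prefers
// MOVZX32rr16 over an and-with-immediate.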
// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;

// (or (x >> c) | (y << (32 - c))) ==> (shrd32 x, y, c)
def : Pat<(or (srl GR32:$src1, CL:$amt),
              (shl GR32:$src2, (sub 32, CL:$amt))),
          (SHRD32rrCL GR32:$src1, GR32:$src2)>;
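// Editorial note: SHRD shifts $src1 right by CL and fills the vacated high
// bits from $src2, which is exactly the OR-of-shifts shape matched above.
// A worked example (illustrative only), with the shift amount c = 8:
//
//     src1 = 0x12345678, src2 = 0x9ABCDEF0
//     (src1 >> 8)        = 0x00123456
//     (src2 << (32 - 8)) = 0xF0000000
//     or                 = 0xF0123456   == shrd $8, src2, src1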
def : Pat<(store (or (srl (loadi32 addr:$dst), CL:$amt),
                     (shl GR32:$src2, (sub 32, CL:$amt))), addr:$dst),
          (SHRD32mrCL addr:$dst, GR32:$src2)>;

// (or (x << c) | (y >> (32 - c))) ==> (shld32 x, y, c)
def : Pat<(or (shl GR32:$src1, CL:$amt),
              (srl GR32:$src2, (sub 32, CL:$amt))),
          (SHLD32rrCL GR32:$src1, GR32:$src2)>;

def : Pat<(store (or (shl (loadi32 addr:$dst), CL:$amt),
                     (srl GR32:$src2, (sub 32, CL:$amt))), addr:$dst),
          (SHLD32mrCL addr:$dst, GR32:$src2)>;

// (or (x >> c) | (y << (16 - c))) ==> (shrd16 x, y, c)
def : Pat<(or (srl GR16:$src1, CL:$amt),
              (shl GR16:$src2, (sub 16, CL:$amt))),
          (SHRD16rrCL GR16:$src1, GR16:$src2)>;

def : Pat<(store (or (srl (loadi16 addr:$dst), CL:$amt),
                     (shl GR16:$src2, (sub 16, CL:$amt))), addr:$dst),
          (SHRD16mrCL addr:$dst, GR16:$src2)>;

// (or (x << c) | (y >> (16 - c))) ==> (shld16 x, y, c)
def : Pat<(or (shl GR16:$src1, CL:$amt),
              (srl GR16:$src2, (sub 16, CL:$amt))),
          (SHLD16rrCL GR16:$src1, GR16:$src2)>;

def : Pat<(store (or (shl (loadi16 addr:$dst), CL:$amt),
                     (srl GR16:$src2, (sub 16, CL:$amt))), addr:$dst),
          (SHLD16mrCL addr:$dst, GR16:$src2)>;
//===----------------------------------------------------------------------===//
// Floating Point Stack Support
//===----------------------------------------------------------------------===//

include "X86InstrFPStack.td"
//===----------------------------------------------------------------------===//
// X86-64 Support
//===----------------------------------------------------------------------===//

include "X86Instr64bit.td"
//===----------------------------------------------------------------------===//
// XMM Floating point support (requires SSE / SSE2)
//===----------------------------------------------------------------------===//

include "X86InstrSSE.td"

//===----------------------------------------------------------------------===//
// MMX and XMM Packed Integer support (requires MMX, SSE, and SSE2)
//===----------------------------------------------------------------------===//

include "X86InstrMMX.td"