1 //===- X86InstrInfo.td - Main X86 Instruction Definition ---*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// This file describes the X86 instruction set, defining the instructions and
// the properties of the instructions that are needed for code generation,
// machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // X86 specific DAG Nodes.
20 def SDTIntShiftDOp: SDTypeProfile<1, 3,
21 [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
22 SDTCisInt<0>, SDTCisInt<3>]>;
24 def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisSameAs<1, 2>]>;
26 def SDTX86Cmov : SDTypeProfile<1, 4,
27 [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
28 SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
30 // Unary and binary operator instructions that set EFLAGS as a side-effect.
31 def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
32 [SDTCisInt<0>, SDTCisVT<1, i32>]>;
34 def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
37 SDTCisInt<0>, SDTCisVT<1, i32>]>;
38 // RES1, RES2, FLAGS = op LHS, RHS
39 def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
43 SDTCisInt<0>, SDTCisVT<1, i32>]>;
44 def SDTX86BrCond : SDTypeProfile<0, 3,
45 [SDTCisVT<0, OtherVT>,
46 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
48 def SDTX86SetCC : SDTypeProfile<1, 2,
50 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
51 def SDTX86SetCC_C : SDTypeProfile<1, 2,
53 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
55 def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
57 def SDTX86cas8 : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
59 def SDTX86atomicBinary : SDTypeProfile<2, 3, [SDTCisInt<0>, SDTCisInt<1>,
60 SDTCisPtrTy<2>, SDTCisInt<3>,SDTCisInt<4>]>;
61 def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i16>]>;
63 def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
64 def SDT_X86CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
67 def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
69 def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
73 def SDT_X86VAARG_64 : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
79 def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;
81 def SDTX86Void : SDTypeProfile<0, 0, []>;
83 def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
85 def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
87 def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
89 def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
91 def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
93 def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;
94 def SDT_X86MEMBARRIERNoSSE : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
96 def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
98 def X86MemBarrierNoSSE : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIERNoSSE,
100 def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
102 def X86SFence : SDNode<"X86ISD::SFENCE", SDT_X86MEMBARRIER,
104 def X86LFence : SDNode<"X86ISD::LFENCE", SDT_X86MEMBARRIER,
108 def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
109 def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>;
110 def X86shld : SDNode<"X86ISD::SHLD", SDTIntShiftDOp>;
111 def X86shrd : SDNode<"X86ISD::SHRD", SDTIntShiftDOp>;
113 def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
114 def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;
116 def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
117 def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
119 def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
120 def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;
122 def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
123 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
124 SDNPMayLoad, SDNPMemOperand]>;
125 def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86cas8,
126 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
127 SDNPMayLoad, SDNPMemOperand]>;
128 def X86AtomAdd64 : SDNode<"X86ISD::ATOMADD64_DAG", SDTX86atomicBinary,
129 [SDNPHasChain, SDNPMayStore,
130 SDNPMayLoad, SDNPMemOperand]>;
131 def X86AtomSub64 : SDNode<"X86ISD::ATOMSUB64_DAG", SDTX86atomicBinary,
132 [SDNPHasChain, SDNPMayStore,
133 SDNPMayLoad, SDNPMemOperand]>;
134 def X86AtomOr64 : SDNode<"X86ISD::ATOMOR64_DAG", SDTX86atomicBinary,
135 [SDNPHasChain, SDNPMayStore,
136 SDNPMayLoad, SDNPMemOperand]>;
137 def X86AtomXor64 : SDNode<"X86ISD::ATOMXOR64_DAG", SDTX86atomicBinary,
138 [SDNPHasChain, SDNPMayStore,
139 SDNPMayLoad, SDNPMemOperand]>;
140 def X86AtomAnd64 : SDNode<"X86ISD::ATOMAND64_DAG", SDTX86atomicBinary,
141 [SDNPHasChain, SDNPMayStore,
142 SDNPMayLoad, SDNPMemOperand]>;
143 def X86AtomNand64 : SDNode<"X86ISD::ATOMNAND64_DAG", SDTX86atomicBinary,
144 [SDNPHasChain, SDNPMayStore,
145 SDNPMayLoad, SDNPMemOperand]>;
146 def X86AtomSwap64 : SDNode<"X86ISD::ATOMSWAP64_DAG", SDTX86atomicBinary,
147 [SDNPHasChain, SDNPMayStore,
148 SDNPMayLoad, SDNPMemOperand]>;
149 def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
150 [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
152 def X86vastart_save_xmm_regs :
153 SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
154 SDT_X86VASTART_SAVE_XMM_REGS,
155 [SDNPHasChain, SDNPVariadic]>;
def X86vaarg64 :
                 SDNode<"X86ISD::VAARG_64", SDT_X86VAARG_64,
                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
                         SDNPMemOperand]>;
160 def X86callseq_start :
161 SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
162 [SDNPHasChain, SDNPOutFlag]>;
def X86callseq_end :
                 SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
165 [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
167 def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
168 [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag,
171 def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
172 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore]>;
173 def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
174 [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore,
177 def X86rdtsc : SDNode<"X86ISD::RDTSC_DAG", SDTX86Void,
178 [SDNPHasChain, SDNPOutFlag, SDNPSideEffect]>;
180 def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
181 def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
183 def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
184 [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
186 def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
189 def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
190 [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
192 def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
194 def X86sub_flag : SDNode<"X86ISD::SUB", SDTBinaryArithWithFlags>;
195 def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
197 def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
200 def X86inc_flag : SDNode<"X86ISD::INC", SDTUnaryArithWithFlags>;
201 def X86dec_flag : SDNode<"X86ISD::DEC", SDTUnaryArithWithFlags>;
202 def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
204 def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
206 def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
209 def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
211 def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDTX86Void,
212 [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
214 def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
215 [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
217 //===----------------------------------------------------------------------===//
218 // X86 Operand Definitions.
221 // A version of ptr_rc which excludes SP, ESP, and RSP. This is used for
222 // the index operand of an address, to conform to x86 encoding restrictions.
223 def ptr_rc_nosp : PointerLikeRegClass<1>;
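// Illustrative example (an assumed snippet, not a definition from this file):
// in "movl 4(%esp,%ebx,2), %eax" the base is ESP and the index is EBX; the
// SIB byte has no encoding for ESP as an index register, which is why index
// operands use ptr_rc_nosp rather than plain ptr_rc.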
225 // *mem - Operand definitions for the funky X86 addressing mode operands.
227 def X86MemAsmOperand : AsmOperandClass {
229 let SuperClasses = [];
231 def X86AbsMemAsmOperand : AsmOperandClass {
233 let SuperClasses = [X86MemAsmOperand];
class X86MemOperand<string printMethod> : Operand<iPTR> {
  let PrintMethod = printMethod;
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
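// As an illustration (assumed example, not new behavior): the five
// sub-operands above model base register, scale immediate, index register,
// displacement, and segment, so "movl %eax, 16(%ebx,%ecx,4)" decomposes into
// base=EBX, scale=4, index=ECX, disp=16, and an empty segment operand.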
241 def opaque32mem : X86MemOperand<"printopaquemem">;
242 def opaque48mem : X86MemOperand<"printopaquemem">;
243 def opaque80mem : X86MemOperand<"printopaquemem">;
244 def opaque512mem : X86MemOperand<"printopaquemem">;
246 def i8mem : X86MemOperand<"printi8mem">;
247 def i16mem : X86MemOperand<"printi16mem">;
248 def i32mem : X86MemOperand<"printi32mem">;
249 def i64mem : X86MemOperand<"printi64mem">;
250 def i128mem : X86MemOperand<"printi128mem">;
251 def i256mem : X86MemOperand<"printi256mem">;
252 def f32mem : X86MemOperand<"printf32mem">;
253 def f64mem : X86MemOperand<"printf64mem">;
254 def f80mem : X86MemOperand<"printf80mem">;
255 def f128mem : X86MemOperand<"printf128mem">;
256 def f256mem : X86MemOperand<"printf256mem">;
258 // A version of i8mem for use on x86-64 that uses GR64_NOREX instead of
259 // plain GR64, so that it doesn't potentially require a REX prefix.
260 def i8mem_NOREX : Operand<i64> {
261 let PrintMethod = "printi8mem";
262 let MIOperandInfo = (ops GR64_NOREX, i8imm, GR64_NOREX_NOSP, i32imm, i8imm);
263 let ParserMatchClass = X86MemAsmOperand;
// Special i32mem for addresses of load-folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved registers are popped.
269 def i32mem_TC : Operand<i32> {
270 let PrintMethod = "printi32mem";
271 let MIOperandInfo = (ops GR32_TC, i8imm, GR32_TC, i32imm, i8imm);
272 let ParserMatchClass = X86MemAsmOperand;
// Special i64mem for addresses of load-folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved registers are popped.
278 def i64mem_TC : Operand<i64> {
279 let PrintMethod = "printi64mem";
280 let MIOperandInfo = (ops GR64_TC, i8imm, GR64_TC, i32imm, i8imm);
281 let ParserMatchClass = X86MemAsmOperand;
284 let ParserMatchClass = X86AbsMemAsmOperand,
285 PrintMethod = "print_pcrel_imm" in {
286 def i32imm_pcrel : Operand<i32>;
287 def i16imm_pcrel : Operand<i16>;
289 def offset8 : Operand<i64>;
290 def offset16 : Operand<i64>;
291 def offset32 : Operand<i64>;
292 def offset64 : Operand<i64>;
294 // Branch targets have OtherVT type and print as pc-relative values.
295 def brtarget : Operand<OtherVT>;
296 def brtarget8 : Operand<OtherVT>;
300 def SSECC : Operand<i8> {
301 let PrintMethod = "printSSECC";
304 class ImmSExtAsmOperandClass : AsmOperandClass {
305 let SuperClasses = [ImmAsmOperand];
306 let RenderMethod = "addImmOperands";
// Sign-extended immediate classes. We don't need to define the full lattice
// here because there is no instruction with an ambiguity between ImmSExti64i32
// and ImmSExti32i8.
//
// The strange ranges come from the fact that the assembler always works with
// 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
// (which will be a -1ULL) and "0xFFFF" (-1 in 16 bits).
// [0, 0x7FFFFFFF] |
// [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
319 def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
320 let Name = "ImmSExti64i32";
323 // [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
324 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
325 def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
326 let Name = "ImmSExti16i8";
327 let SuperClasses = [ImmSExti64i32AsmOperand];
330 // [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
331 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
332 def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
333 let Name = "ImmSExti32i8";
// [0, 0x0000007F] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
338 def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
339 let Name = "ImmSExti64i8";
  let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
                      ImmSExti64i32AsmOperand];
}
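// A worked illustration of the ranges (assumed example): for a 16-bit
// instruction with a sign-extended 8-bit immediate, "addw $-1, %ax" arrives
// as 0xFFFFFFFFFFFFFFFF (third ImmSExti16i8 range) and "addw $0xFFFF, %ax"
// as 0xFFFF (second range); both should match ImmSExti16i8 and encode as the
// imm8 form.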
344 // A couple of more descriptive operand definitions.
345 // 16-bits but only 8 bits are significant.
346 def i16i8imm : Operand<i16> {
347 let ParserMatchClass = ImmSExti16i8AsmOperand;
349 // 32-bits but only 8 bits are significant.
350 def i32i8imm : Operand<i32> {
351 let ParserMatchClass = ImmSExti32i8AsmOperand;
354 // 64-bits but only 32 bits are significant.
355 def i64i32imm : Operand<i64> {
356 let ParserMatchClass = ImmSExti64i32AsmOperand;
// 64-bits but only 32 bits are significant, and those bits are treated as
// being pc relative.
361 def i64i32imm_pcrel : Operand<i64> {
362 let PrintMethod = "print_pcrel_imm";
363 let ParserMatchClass = X86AbsMemAsmOperand;
366 // 64-bits but only 8 bits are significant.
367 def i64i8imm : Operand<i64> {
368 let ParserMatchClass = ImmSExti64i8AsmOperand;
371 def lea64_32mem : Operand<i32> {
372 let PrintMethod = "printi32mem";
373 let AsmOperandLowerMethod = "lower_lea64_32mem";
374 let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm, i8imm);
375 let ParserMatchClass = X86MemAsmOperand;
379 //===----------------------------------------------------------------------===//
380 // X86 Complex Pattern Definitions.
383 // Define X86 specific addressing mode.
384 def addr : ComplexPattern<iPTR, 5, "SelectAddr", [], [SDNPWantParent]>;
385 def lea32addr : ComplexPattern<i32, 5, "SelectLEAAddr",
386 [add, sub, mul, X86mul_imm, shl, or, frameindex],
388 def tls32addr : ComplexPattern<i32, 5, "SelectTLSADDRAddr",
389 [tglobaltlsaddr], []>;
391 def lea64addr : ComplexPattern<i64, 5, "SelectLEAAddr",
392 [add, sub, mul, X86mul_imm, shl, or, frameindex,
395 def tls64addr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
396 [tglobaltlsaddr], []>;
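// For illustration only (an assumed example, not a pattern defined here):
// SelectLEAAddr lets a DAG such as (add (add (shl %idx, 2), %base), 8) be
// matched by lea32addr, so it can be selected as a single
// "leal 8(%base,%idx,4), %dst" instead of separate shift and add instructions.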
398 //===----------------------------------------------------------------------===//
399 // X86 Instruction Predicate Definitions.
400 def HasCMov : Predicate<"Subtarget->hasCMov()">;
401 def NoCMov : Predicate<"!Subtarget->hasCMov()">;
// FIXME: temporary hack to let codegen assert or generate poor code in case
// no AVX version of the desired instructions is present; this is better for
// incremental dev (without fallbacks it's easier to spot what's missing).
406 def HasMMX : Predicate<"Subtarget->hasMMX() && !Subtarget->hasAVX()">;
407 def Has3DNow : Predicate<"Subtarget->has3DNow()">;
408 def Has3DNowA : Predicate<"Subtarget->has3DNowA()">;
409 def HasSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
410 def HasSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
411 def HasSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
412 def HasSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
413 def HasSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
414 def HasSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
415 def HasSSE4A : Predicate<"Subtarget->hasSSE4A() && !Subtarget->hasAVX()">;
417 def HasAVX : Predicate<"Subtarget->hasAVX()">;
418 def HasCLMUL : Predicate<"Subtarget->hasCLMUL()">;
419 def HasFMA3 : Predicate<"Subtarget->hasFMA3()">;
420 def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
421 def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
422 def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
423 def In32BitMode : Predicate<"!Subtarget->is64Bit()">, AssemblerPredicate;
424 def In64BitMode : Predicate<"Subtarget->is64Bit()">, AssemblerPredicate;
425 def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
426 def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
427 def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
428 def KernelCode : Predicate<"TM.getCodeModel() == CodeModel::Kernel">;
429 def FarData : Predicate<"TM.getCodeModel() != CodeModel::Small &&"
430 "TM.getCodeModel() != CodeModel::Kernel">;
431 def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
432 "TM.getCodeModel() == CodeModel::Kernel">;
433 def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
434 def IsNotPIC : Predicate<"TM.getRelocationModel() != Reloc::PIC_">;
435 def OptForSize : Predicate<"OptForSize">;
436 def OptForSpeed : Predicate<"!OptForSize">;
437 def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">;
438 def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">;
439 def HasAES : Predicate<"Subtarget->hasAES()">;
441 //===----------------------------------------------------------------------===//
442 // X86 Instruction Format Definitions.
445 include "X86InstrFormats.td"
447 //===----------------------------------------------------------------------===//
448 // Pattern fragments...
// X86 specific condition codes. These correspond to CondCode in
// X86InstrInfo.h. They must be kept in sync.
453 def X86_COND_A : PatLeaf<(i8 0)>; // alt. COND_NBE
454 def X86_COND_AE : PatLeaf<(i8 1)>; // alt. COND_NC
455 def X86_COND_B : PatLeaf<(i8 2)>; // alt. COND_C
456 def X86_COND_BE : PatLeaf<(i8 3)>; // alt. COND_NA
457 def X86_COND_E : PatLeaf<(i8 4)>; // alt. COND_Z
458 def X86_COND_G : PatLeaf<(i8 5)>; // alt. COND_NLE
459 def X86_COND_GE : PatLeaf<(i8 6)>; // alt. COND_NL
460 def X86_COND_L : PatLeaf<(i8 7)>; // alt. COND_NGE
461 def X86_COND_LE : PatLeaf<(i8 8)>; // alt. COND_NG
462 def X86_COND_NE : PatLeaf<(i8 9)>; // alt. COND_NZ
463 def X86_COND_NO : PatLeaf<(i8 10)>;
464 def X86_COND_NP : PatLeaf<(i8 11)>; // alt. COND_PO
465 def X86_COND_NS : PatLeaf<(i8 12)>;
466 def X86_COND_O : PatLeaf<(i8 13)>;
467 def X86_COND_P : PatLeaf<(i8 14)>; // alt. COND_PE
468 def X86_COND_S : PatLeaf<(i8 15)>;
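// Illustrative note (based on the encodings above): these leaf values are the
// i8 condition operands of X86setcc/X86cmov/X86brcond nodes, so e.g. a
// "sete %al" is selected from (X86setcc X86_COND_E, EFLAGS), and X86_COND_E
// here must stay equal to X86::COND_E in X86InstrInfo.h.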
470 def immSext8 : PatLeaf<(imm), [{ return immSext8(N); }]>;
472 def i16immSExt8 : PatLeaf<(i16 immSext8)>;
473 def i32immSExt8 : PatLeaf<(i32 immSext8)>;
474 def i64immSExt8 : PatLeaf<(i64 immSext8)>;
475 def i64immSExt32 : PatLeaf<(i64 imm), [{ return i64immSExt32(N); }]>;
476 def i64immZExt32 : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // unsigned (zero-extended) field.
479 return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
482 def i64immZExt32SExt8 : PatLeaf<(i64 imm), [{
483 uint64_t v = N->getZExtValue();
484 return v == (uint32_t)v && (int32_t)v == (int8_t)v;
487 // Helper fragments for loads.
// It's always safe to treat an anyext i16 load as an i32 load if the i16 is
// known to be 32-bit aligned or better. Ditto for i8 to i16.
490 def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
491 LoadSDNode *LD = cast<LoadSDNode>(N);
492 ISD::LoadExtType ExtType = LD->getExtensionType();
493 if (ExtType == ISD::NON_EXTLOAD)
495 if (ExtType == ISD::EXTLOAD)
496 return LD->getAlignment() >= 2 && !LD->isVolatile();
500 def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)),[{
501 LoadSDNode *LD = cast<LoadSDNode>(N);
502 ISD::LoadExtType ExtType = LD->getExtensionType();
503 if (ExtType == ISD::EXTLOAD)
504 return LD->getAlignment() >= 2 && !LD->isVolatile();
508 def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
509 LoadSDNode *LD = cast<LoadSDNode>(N);
510 ISD::LoadExtType ExtType = LD->getExtensionType();
511 if (ExtType == ISD::NON_EXTLOAD)
513 if (ExtType == ISD::EXTLOAD)
514 return LD->getAlignment() >= 4 && !LD->isVolatile();
518 def loadi8 : PatFrag<(ops node:$ptr), (i8 (load node:$ptr))>;
519 def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
520 def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
521 def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
522 def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
524 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
525 def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
526 def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
527 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
528 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
529 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
531 def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
532 def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
533 def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
534 def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
535 def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
536 def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
537 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
538 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
539 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
540 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
542 def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
543 def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
544 def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
545 def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
546 def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
547 def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
548 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
549 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
550 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
551 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
554 // An 'and' node with a single use.
555 def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
556 return N->hasOneUse();
558 // An 'srl' node with a single use.
559 def srl_su : PatFrag<(ops node:$lhs, node:$rhs), (srl node:$lhs, node:$rhs), [{
560 return N->hasOneUse();
// A 'trunc' node with a single use.
def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
  return N->hasOneUse();
}]>;
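// Illustrative note (an assumption about how these fragments are used
// elsewhere in the backend): the single-use restriction lets a pattern fold
// a node into a flag-setting instruction only when nothing else consumes its
// value, e.g. matching (X86cmp (and_su x, y), 0) as a TEST without also
// materializing the AND result.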
567 //===----------------------------------------------------------------------===//
572 let neverHasSideEffects = 1 in {
573 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
574 def NOOPW : I<0x1f, MRM0m, (outs), (ins i16mem:$zero),
575 "nop{w}\t$zero", []>, TB, OpSize;
576 def NOOPL : I<0x1f, MRM0m, (outs), (ins i32mem:$zero),
577 "nop{l}\t$zero", []>, TB;
581 // Constructing a stack frame.
582 def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
583 "enter\t$len, $lvl", []>;
585 let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in
586 def LEAVE : I<0xC9, RawFrm,
587 (outs), (ins), "leave", []>, Requires<[In32BitMode]>;
589 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
590 def LEAVE64 : I<0xC9, RawFrm,
591 (outs), (ins), "leave", []>, Requires<[In64BitMode]>;
593 //===----------------------------------------------------------------------===//
594 // Miscellaneous Instructions.
597 let Defs = [ESP], Uses = [ESP], neverHasSideEffects=1 in {
599 def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
601 def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>;
602 def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
604 def POP16rmm: I<0x8F, MRM0m, (outs i16mem:$dst), (ins), "pop{w}\t$dst", []>,
606 def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>;
607 def POP32rmm: I<0x8F, MRM0m, (outs i32mem:$dst), (ins), "pop{l}\t$dst", []>;
609 def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize;
610 def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>,
611 Requires<[In32BitMode]>;
614 let mayStore = 1 in {
615 def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
617 def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>;
618 def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
620 def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src",[]>,
622 def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>;
623 def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[]>;
625 def PUSHi8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
626 "push{l}\t$imm", []>;
627 def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
628 "push{w}\t$imm", []>, OpSize;
629 def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
630 "push{l}\t$imm", []>;
632 def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize;
633 def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>,
634 Requires<[In32BitMode]>;
639 let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
641 def POP64r : I<0x58, AddRegFrm,
642 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
643 def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
644 def POP64rmm: I<0x8F, MRM0m, (outs i64mem:$dst), (ins), "pop{q}\t$dst", []>;
646 let mayStore = 1 in {
647 def PUSH64r : I<0x50, AddRegFrm,
648 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
649 def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
650 def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>;
654 let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in {
655 def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i8imm:$imm),
656 "push{q}\t$imm", []>;
657 def PUSH64i16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
658 "push{q}\t$imm", []>;
659 def PUSH64i32 : Ii32<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
660 "push{q}\t$imm", []>;
663 let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in
664 def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
665 Requires<[In64BitMode]>;
666 let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
667 def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
668 Requires<[In64BitMode]>;
672 let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
673 mayLoad=1, neverHasSideEffects=1 in {
674 def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l}", []>,
675 Requires<[In32BitMode]>;
677 let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
678 mayStore=1, neverHasSideEffects=1 in {
679 def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l}", []>,
680 Requires<[In32BitMode]>;
683 let Constraints = "$src = $dst" in { // GR32 = bswap GR32
684 def BSWAP32r : I<0xC8, AddRegFrm,
685 (outs GR32:$dst), (ins GR32:$src),
687 [(set GR32:$dst, (bswap GR32:$src))]>, TB;
689 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
691 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
692 } // Constraints = "$src = $dst"
694 // Bit scan instructions.
695 let Defs = [EFLAGS] in {
696 def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
697 "bsf{w}\t{$src, $dst|$dst, $src}",
698 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>, TB, OpSize;
699 def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
700 "bsf{w}\t{$src, $dst|$dst, $src}",
701 [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>, TB,
703 def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
704 "bsf{l}\t{$src, $dst|$dst, $src}",
705 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>, TB;
706 def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
707 "bsf{l}\t{$src, $dst|$dst, $src}",
708 [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>, TB;
709 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
710 "bsf{q}\t{$src, $dst|$dst, $src}",
711 [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>, TB;
712 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
713 "bsf{q}\t{$src, $dst|$dst, $src}",
714 [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>, TB;
716 def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
717 "bsr{w}\t{$src, $dst|$dst, $src}",
718 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>, TB, OpSize;
719 def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
720 "bsr{w}\t{$src, $dst|$dst, $src}",
721 [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>, TB,
723 def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
724 "bsr{l}\t{$src, $dst|$dst, $src}",
725 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>, TB;
726 def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
727 "bsr{l}\t{$src, $dst|$dst, $src}",
728 [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>, TB;
729 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
730 "bsr{q}\t{$src, $dst|$dst, $src}",
731 [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>, TB;
732 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
733 "bsr{q}\t{$src, $dst|$dst, $src}",
734 [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>, TB;
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI.
739 let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in {
740 def MOVSB : I<0xA4, RawFrm, (outs), (ins), "{movsb}", []>;
741 def MOVSW : I<0xA5, RawFrm, (outs), (ins), "{movsw}", []>, OpSize;
742 def MOVSD : I<0xA5, RawFrm, (outs), (ins), "{movsl|movsd}", []>;
743 def MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "movsq", []>;
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI.
747 let Defs = [EDI], Uses = [AL,EDI,EFLAGS] in
748 def STOSB : I<0xAA, RawFrm, (outs), (ins), "{stosb}", []>;
749 let Defs = [EDI], Uses = [AX,EDI,EFLAGS] in
750 def STOSW : I<0xAB, RawFrm, (outs), (ins), "{stosw}", []>, OpSize;
751 let Defs = [EDI], Uses = [EAX,EDI,EFLAGS] in
752 def STOSD : I<0xAB, RawFrm, (outs), (ins), "{stosl|stosd}", []>;
753 let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI,EFLAGS] in
754 def STOSQ : RI<0xAB, RawFrm, (outs), (ins), "stosq", []>;
756 def SCAS8 : I<0xAE, RawFrm, (outs), (ins), "scas{b}", []>;
757 def SCAS16 : I<0xAF, RawFrm, (outs), (ins), "scas{w}", []>, OpSize;
758 def SCAS32 : I<0xAF, RawFrm, (outs), (ins), "scas{l}", []>;
759 def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scasq", []>;
761 def CMPS8 : I<0xA6, RawFrm, (outs), (ins), "cmps{b}", []>;
762 def CMPS16 : I<0xA7, RawFrm, (outs), (ins), "cmps{w}", []>, OpSize;
763 def CMPS32 : I<0xA7, RawFrm, (outs), (ins), "cmps{l}", []>;
764 def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", []>;
767 //===----------------------------------------------------------------------===//
768 // Move Instructions.
771 let neverHasSideEffects = 1 in {
772 def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
773 "mov{b}\t{$src, $dst|$dst, $src}", []>;
774 def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
775 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
776 def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
777 "mov{l}\t{$src, $dst|$dst, $src}", []>;
778 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
779 "mov{q}\t{$src, $dst|$dst, $src}", []>;
781 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
782 def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
783 "mov{b}\t{$src, $dst|$dst, $src}",
784 [(set GR8:$dst, imm:$src)]>;
785 def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
786 "mov{w}\t{$src, $dst|$dst, $src}",
787 [(set GR16:$dst, imm:$src)]>, OpSize;
788 def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
789 "mov{l}\t{$src, $dst|$dst, $src}",
790 [(set GR32:$dst, imm:$src)]>;
791 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
792 "movabs{q}\t{$src, $dst|$dst, $src}",
793 [(set GR64:$dst, imm:$src)]>;
794 def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
795 "mov{q}\t{$src, $dst|$dst, $src}",
796 [(set GR64:$dst, i64immSExt32:$src)]>;
799 def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
800 "mov{b}\t{$src, $dst|$dst, $src}",
801 [(store (i8 imm:$src), addr:$dst)]>;
802 def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
803 "mov{w}\t{$src, $dst|$dst, $src}",
804 [(store (i16 imm:$src), addr:$dst)]>, OpSize;
805 def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
806 "mov{l}\t{$src, $dst|$dst, $src}",
807 [(store (i32 imm:$src), addr:$dst)]>;
808 def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
809 "mov{q}\t{$src, $dst|$dst, $src}",
810 [(store i64immSExt32:$src, addr:$dst)]>;
/// moffs8, moffs16 and moffs32 versions of moves. The immediate is a
/// 32-bit absolute offset from the segment base (it is not pc-relative).
/// These are only valid in x86-32 mode.
814 def MOV8o8a : Ii32 <0xA0, RawFrm, (outs), (ins offset8:$src),
815 "mov{b}\t{$src, %al|%al, $src}", []>,
816 Requires<[In32BitMode]>;
817 def MOV16o16a : Ii32 <0xA1, RawFrm, (outs), (ins offset16:$src),
818 "mov{w}\t{$src, %ax|%ax, $src}", []>, OpSize,
819 Requires<[In32BitMode]>;
820 def MOV32o32a : Ii32 <0xA1, RawFrm, (outs), (ins offset32:$src),
821 "mov{l}\t{$src, %eax|%eax, $src}", []>,
822 Requires<[In32BitMode]>;
823 def MOV8ao8 : Ii32 <0xA2, RawFrm, (outs offset8:$dst), (ins),
824 "mov{b}\t{%al, $dst|$dst, %al}", []>,
825 Requires<[In32BitMode]>;
826 def MOV16ao16 : Ii32 <0xA3, RawFrm, (outs offset16:$dst), (ins),
827 "mov{w}\t{%ax, $dst|$dst, %ax}", []>, OpSize,
828 Requires<[In32BitMode]>;
829 def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs offset32:$dst), (ins),
830 "mov{l}\t{%eax, $dst|$dst, %eax}", []>,
831 Requires<[In32BitMode]>;
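// Illustrative (assumed) examples of when the moffs forms apply:
// "movl 0x12345678, %eax" can use the A1 moffs encoding, which has no ModRM
// byte, while "movl 0x12345678(%ebx), %eax" must use the 8B ModRM form.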
833 // FIXME: These definitions are utterly broken
834 // Just leave them commented out for now because they're useless outside
835 // of the large code model, and most compilers won't generate the instructions
838 def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
839 "mov{q}\t{$src, %rax|%rax, $src}", []>;
840 def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
841 "mov{q}\t{$src, %rax|%rax, $src}", []>;
842 def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
843 "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
844 def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
845 "mov{q}\t{%rax, $dst|$dst, %rax}", []>;
849 let isCodeGenOnly = 1 in {
850 def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
851 "mov{b}\t{$src, $dst|$dst, $src}", []>;
852 def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
853 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
854 def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
855 "mov{l}\t{$src, $dst|$dst, $src}", []>;
856 def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
857 "mov{q}\t{$src, $dst|$dst, $src}", []>;
860 let canFoldAsLoad = 1, isReMaterializable = 1 in {
861 def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
862 "mov{b}\t{$src, $dst|$dst, $src}",
863 [(set GR8:$dst, (loadi8 addr:$src))]>;
864 def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
865 "mov{w}\t{$src, $dst|$dst, $src}",
866 [(set GR16:$dst, (loadi16 addr:$src))]>, OpSize;
867 def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
868 "mov{l}\t{$src, $dst|$dst, $src}",
869 [(set GR32:$dst, (loadi32 addr:$src))]>;
870 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
871 "mov{q}\t{$src, $dst|$dst, $src}",
872 [(set GR64:$dst, (load addr:$src))]>;
875 def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
876 "mov{b}\t{$src, $dst|$dst, $src}",
877 [(store GR8:$src, addr:$dst)]>;
878 def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
879 "mov{w}\t{$src, $dst|$dst, $src}",
880 [(store GR16:$src, addr:$dst)]>, OpSize;
881 def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
882 "mov{l}\t{$src, $dst|$dst, $src}",
883 [(store GR32:$src, addr:$dst)]>;
884 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
885 "mov{q}\t{$src, $dst|$dst, $src}",
886 [(store GR64:$src, addr:$dst)]>;
888 // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
889 // that they can be used for copying and storing h registers, which can't be
890 // encoded when a REX prefix is present.
891 let isCodeGenOnly = 1 in {
892 let neverHasSideEffects = 1 in
893 def MOV8rr_NOREX : I<0x88, MRMDestReg,
894 (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
895 "mov{b}\t{$src, $dst|$dst, $src} # NOREX", []>;
897 def MOV8mr_NOREX : I<0x88, MRMDestMem,
898 (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
899 "mov{b}\t{$src, $dst|$dst, $src} # NOREX", []>;
901 canFoldAsLoad = 1, isReMaterializable = 1 in
902 def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
903 (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
904 "mov{b}\t{$src, $dst|$dst, $src} # NOREX", []>;
908 // Condition code ops, incl. set if equal/not equal/...
909 let Defs = [EFLAGS], Uses = [AH], neverHasSideEffects = 1 in
910 def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>; // flags = AH
911 let Defs = [AH], Uses = [EFLAGS], neverHasSideEffects = 1 in
912 def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>; // AH = flags
915 //===----------------------------------------------------------------------===//
// Bit test instructions: BT, BTS, BTR, BTC.
918 let Defs = [EFLAGS] in {
919 def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
920 "bt{w}\t{$src2, $src1|$src1, $src2}",
921 [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>, OpSize, TB;
922 def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
923 "bt{l}\t{$src2, $src1|$src1, $src2}",
924 [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>, TB;
925 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
926 "bt{q}\t{$src2, $src1|$src1, $src2}",
927 [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;
// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
// perspective, this is pretty bizarre. Make these instructions disassembly
// only for now.
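// Illustrative (assumed) example of the difference: with %eax == 100,
// "btl %eax, %ebx" tests bit 100 mod 32 == 4 of %ebx, whereas
// "btl %eax, (%ecx)" tests bit 4 of the dword at 12(%ecx), i.e. the memory
// form lets the bit index reach beyond the nominal operand size.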
934 def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
935 "bt{w}\t{$src2, $src1|$src1, $src2}",
936 // [(X86bt (loadi16 addr:$src1), GR16:$src2),
937 // (implicit EFLAGS)]
939 >, OpSize, TB, Requires<[FastBTMem]>;
940 def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
941 "bt{l}\t{$src2, $src1|$src1, $src2}",
942 // [(X86bt (loadi32 addr:$src1), GR32:$src2),
943 // (implicit EFLAGS)]
945 >, TB, Requires<[FastBTMem]>;
946 def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
947 "bt{q}\t{$src2, $src1|$src1, $src2}",
948 // [(X86bt (loadi64 addr:$src1), GR64:$src2),
949 // (implicit EFLAGS)]
953 def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2),
954 "bt{w}\t{$src2, $src1|$src1, $src2}",
955 [(set EFLAGS, (X86bt GR16:$src1, i16immSExt8:$src2))]>,
957 def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32i8imm:$src2),
958 "bt{l}\t{$src2, $src1|$src1, $src2}",
959 [(set EFLAGS, (X86bt GR32:$src1, i32immSExt8:$src2))]>, TB;
960 def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
961 "bt{q}\t{$src2, $src1|$src1, $src2}",
962 [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
964 // Note that these instructions don't need FastBTMem because that
965 // only applies when the other operand is in a register. When it's
966 // an immediate, bt is still fast.
967 def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
968 "bt{w}\t{$src2, $src1|$src1, $src2}",
969 [(set EFLAGS, (X86bt (loadi16 addr:$src1), i16immSExt8:$src2))
971 def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
972 "bt{l}\t{$src2, $src1|$src1, $src2}",
973 [(set EFLAGS, (X86bt (loadi32 addr:$src1), i32immSExt8:$src2))
975 def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
976 "bt{q}\t{$src2, $src1|$src1, $src2}",
977 [(set EFLAGS, (X86bt (loadi64 addr:$src1),
978 i64immSExt8:$src2))]>, TB;
981 def BTC16rr : I<0xBB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
982 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
983 def BTC32rr : I<0xBB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
984 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
985 def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
986 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
987 def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
988 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
989 def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
990 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
991 def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
992 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
993 def BTC16ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR16:$src1, i16i8imm:$src2),
994 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
995 def BTC32ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR32:$src1, i32i8imm:$src2),
996 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
997 def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
998 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
999 def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1000 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
1001 def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1002 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
1003 def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1004 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1006 def BTR16rr : I<0xB3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
1007 "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
1008 def BTR32rr : I<0xB3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
1009 "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
1010 def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
1011 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1012 def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1013 "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
1014 def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1015 "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
1016 def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1017 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1018 def BTR16ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR16:$src1, i16i8imm:$src2),
1019 "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
1020 def BTR32ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR32:$src1, i32i8imm:$src2),
1021 "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
1022 def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
1023 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1024 def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1025 "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
1026 def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1027 "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
1028 def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1029 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1031 def BTS16rr : I<0xAB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
1032 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
1033 def BTS32rr : I<0xAB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
1034 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
1035 def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
1036 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1037 def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1038 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
1039 def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1040 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
1041 def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1042 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1043 def BTS16ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR16:$src1, i16i8imm:$src2),
1044 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
1045 def BTS32ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR32:$src1, i32i8imm:$src2),
1046 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
1047 def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
1048 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1049 def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1050 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
1051 def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1052 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
1053 def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1054 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1055 } // Defs = [EFLAGS]
1058 //===----------------------------------------------------------------------===//
1063 // Atomic swap. These are just normal xchg instructions. But since a memory
1064 // operand is referenced, the atomicity is ensured.
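// Illustrative note (a property of the ISA, not new behavior defined here):
// an xchg with a memory operand, e.g. "xchgl %eax, (%ecx)", is implicitly
// locked, so the atomic_swap patterns below need no explicit LOCK prefix.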
1065 let Constraints = "$val = $dst" in {
1066 def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
1067 "xchg{b}\t{$val, $ptr|$ptr, $val}",
1068 [(set GR8:$dst, (atomic_swap_8 addr:$ptr, GR8:$val))]>;
1069 def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst),(ins GR16:$val, i16mem:$ptr),
1070 "xchg{w}\t{$val, $ptr|$ptr, $val}",
1071 [(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))]>,
1073 def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst),(ins GR32:$val, i32mem:$ptr),
1074 "xchg{l}\t{$val, $ptr|$ptr, $val}",
1075 [(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))]>;
1076 def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst),(ins GR64:$val,i64mem:$ptr),
1077 "xchg{q}\t{$val, $ptr|$ptr, $val}",
1078 [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
1080 def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst), (ins GR8:$val, GR8:$src),
1081 "xchg{b}\t{$val, $src|$src, $val}", []>;
1082 def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src),
1083 "xchg{w}\t{$val, $src|$src, $val}", []>, OpSize;
1084 def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst), (ins GR32:$val, GR32:$src),
1085 "xchg{l}\t{$val, $src|$src, $val}", []>;
1086 def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
1087 "xchg{q}\t{$val, $src|$src, $val}", []>;
1090 def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src),
1091 "xchg{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
1092 def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src),
1093 "xchg{l}\t{$src, %eax|%eax, $src}", []>;
1094 def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
1095 "xchg{q}\t{$src, %rax|%rax, $src}", []>;
1099 def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
1100 "xadd{b}\t{$src, $dst|$dst, $src}", []>, TB;
1101 def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
1102 "xadd{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
1103 def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
1104 "xadd{l}\t{$src, $dst|$dst, $src}", []>, TB;
1105 def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
1106 "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
1108 let mayLoad = 1, mayStore = 1 in {
1109 def XADD8rm : I<0xC0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
1110 "xadd{b}\t{$src, $dst|$dst, $src}", []>, TB;
1111 def XADD16rm : I<0xC1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1112 "xadd{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
1113 def XADD32rm : I<0xC1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1114 "xadd{l}\t{$src, $dst|$dst, $src}", []>, TB;
1115 def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
1116 "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
1120 def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
1121 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB;
1122 def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
1123 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
1124 def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
1125 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB;
1126 def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
1127 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
1129 let mayLoad = 1, mayStore = 1 in {
1130 def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
1131 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB;
1132 def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1133 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
1134 def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1135 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB;
1136 def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
1137 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
1140 let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
1141 def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
1142 "cmpxchg8b\t$dst", []>, TB;
1144 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
1145 def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
1146 "cmpxchg16b\t$dst", []>, TB;
1150 // Lock instruction prefix
1151 def LOCK_PREFIX : I<0xF0, RawFrm, (outs), (ins), "lock", []>;
1153 // Rex64 instruction prefix
1154 def REX64_PREFIX : I<0x48, RawFrm, (outs), (ins), "rex64", []>;
1156 // Data16 instruction prefix
1157 def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>;
// Repeat string operation instruction prefixes.
// These use ECX as the iteration count; the DF flag in EFLAGS controls the
// direction of the prefixed string operation.
1161 let Defs = [ECX], Uses = [ECX,EFLAGS] in {
1162 // Repeat (used with INS, OUTS, MOVS, LODS and STOS)
1163 def REP_PREFIX : I<0xF3, RawFrm, (outs), (ins), "rep", []>;
1164 // Repeat while not equal (used with CMPS and SCAS)
1165 def REPNE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "repne", []>;
1169 // String manipulation instructions
1170 def LODSB : I<0xAC, RawFrm, (outs), (ins), "lodsb", []>;
1171 def LODSW : I<0xAD, RawFrm, (outs), (ins), "lodsw", []>, OpSize;
1172 def LODSD : I<0xAD, RawFrm, (outs), (ins), "lods{l|d}", []>;
1173 def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", []>;
1175 def OUTSB : I<0x6E, RawFrm, (outs), (ins), "outsb", []>;
1176 def OUTSW : I<0x6F, RawFrm, (outs), (ins), "outsw", []>, OpSize;
1177 def OUTSD : I<0x6F, RawFrm, (outs), (ins), "outs{l|d}", []>;
1180 // Flag instructions
1181 def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>;
1182 def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>;
1183 def CLI : I<0xFA, RawFrm, (outs), (ins), "cli", []>;
1184 def STI : I<0xFB, RawFrm, (outs), (ins), "sti", []>;
1185 def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>;
1186 def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>;
1187 def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>;
1189 def CLTS : I<0x06, RawFrm, (outs), (ins), "clts", []>, TB;
1191 // Table lookup instructions
1192 def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>;
1194 // ASCII Adjust After Addition
1195 // sets AL, AH and CF and AF of EFLAGS and uses AL and AF of EFLAGS
1196 def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", []>, Requires<[In32BitMode]>;
1198 // ASCII Adjust AX Before Division
1199 // sets AL, AH and EFLAGS and uses AL and AH
1200 def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
1201 "aad\t$src", []>, Requires<[In32BitMode]>;
1203 // ASCII Adjust AX After Multiply
1204 // sets AL, AH and EFLAGS and uses AL
1205 def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
1206 "aam\t$src", []>, Requires<[In32BitMode]>;
// ASCII Adjust AL After Subtraction
// sets AL, AH and CF and AF of EFLAGS and uses AL and AF of EFLAGS
1210 def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", []>, Requires<[In32BitMode]>;
1212 // Decimal Adjust AL after Addition
1213 // sets AL, CF and AF of EFLAGS and uses AL, CF and AF of EFLAGS
1214 def DAA : I<0x27, RawFrm, (outs), (ins), "daa", []>, Requires<[In32BitMode]>;
1216 // Decimal Adjust AL after Subtraction
1217 // sets AL, CF and AF of EFLAGS and uses AL, CF and AF of EFLAGS
1218 def DAS : I<0x2F, RawFrm, (outs), (ins), "das", []>, Requires<[In32BitMode]>;
1220 // Check Array Index Against Bounds
1221 def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1222 "bound\t{$src, $dst|$dst, $src}", []>, OpSize,
1223 Requires<[In32BitMode]>;
1224 def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1225 "bound\t{$src, $dst|$dst, $src}", []>,
1226 Requires<[In32BitMode]>;
1228 // Adjust RPL Field of Segment Selector
1229 def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$src), (ins GR16:$dst),
1230 "arpl\t{$src, $dst|$dst, $src}", []>, Requires<[In32BitMode]>;
1231 def ARPL16mr : I<0x63, MRMSrcMem, (outs GR16:$src), (ins i16mem:$dst),
1232 "arpl\t{$src, $dst|$dst, $src}", []>, Requires<[In32BitMode]>;
1234 //===----------------------------------------------------------------------===//
1236 //===----------------------------------------------------------------------===//
1238 include "X86InstrArithmetic.td"
1239 include "X86InstrCMovSetCC.td"
1240 include "X86InstrExtension.td"
1241 include "X86InstrControl.td"
1242 include "X86InstrShiftRotate.td"
1244 // X87 Floating Point Stack.
1245 include "X86InstrFPStack.td"
1247 // SIMD support (SSE, MMX and AVX)
1248 include "X86InstrFragmentsSIMD.td"
1250 // FMA - Fused Multiply-Add support (requires FMA)
1251 include "X86InstrFMA.td"
1253 // SSE, MMX and 3DNow! vector support.
1254 include "X86InstrSSE.td"
1255 include "X86InstrMMX.td"
1256 include "X86Instr3DNow.td"
1258 include "X86InstrVMX.td"
1260 // System instructions.
1261 include "X86InstrSystem.td"
1263 // Compiler Pseudo Instructions and Pat Patterns
1264 include "X86InstrCompiler.td"
1266 //===----------------------------------------------------------------------===//
1267 // Assembler Mnemonic Aliases
1268 //===----------------------------------------------------------------------===//
1270 def : MnemonicAlias<"call", "calll">, Requires<[In32BitMode]>;
1271 def : MnemonicAlias<"call", "callq">, Requires<[In64BitMode]>;
1273 def : MnemonicAlias<"cbw", "cbtw">;
1274 def : MnemonicAlias<"cwd", "cwtd">;
1275 def : MnemonicAlias<"cdq", "cltd">;
1276 def : MnemonicAlias<"cwde", "cwtl">;
1277 def : MnemonicAlias<"cdqe", "cltq">;
// lret maps to lretl; it is not ambiguous with lretq.
1280 def : MnemonicAlias<"lret", "lretl">;
1282 def : MnemonicAlias<"pop", "popl">, Requires<[In32BitMode]>;
1283 def : MnemonicAlias<"pop", "popq">, Requires<[In64BitMode]>;
1284 def : MnemonicAlias<"popf", "popfl">, Requires<[In32BitMode]>;
1285 def : MnemonicAlias<"popf", "popfq">, Requires<[In64BitMode]>;
1286 def : MnemonicAlias<"popfd", "popfl">;
// FIXME: This is wrong for "push reg". "push %bx" should turn into pushw in
// all modes. However, "push (addr)" and "push $42" should default to
// pushl/pushq depending on the current mode. Similar for "pop %bx".
1291 def : MnemonicAlias<"push", "pushl">, Requires<[In32BitMode]>;
1292 def : MnemonicAlias<"push", "pushq">, Requires<[In64BitMode]>;
1293 def : MnemonicAlias<"pushf", "pushfl">, Requires<[In32BitMode]>;
1294 def : MnemonicAlias<"pushf", "pushfq">, Requires<[In64BitMode]>;
1295 def : MnemonicAlias<"pushfd", "pushfl">;
def : MnemonicAlias<"repe",  "rep">;
def : MnemonicAlias<"repz",  "rep">;
def : MnemonicAlias<"repnz", "repne">;

def : MnemonicAlias<"retl", "ret">, Requires<[In32BitMode]>;
def : MnemonicAlias<"retq", "ret">, Requires<[In64BitMode]>;

def : MnemonicAlias<"salb", "shlb">;
def : MnemonicAlias<"salw", "shlw">;
def : MnemonicAlias<"sall", "shll">;
def : MnemonicAlias<"salq", "shlq">;

def : MnemonicAlias<"smovb", "movsb">;
def : MnemonicAlias<"smovw", "movsw">;
def : MnemonicAlias<"smovl", "movsl">;
def : MnemonicAlias<"smovq", "movsq">;

def : MnemonicAlias<"ud2a",  "ud2">;
def : MnemonicAlias<"verrw", "verr">;

// System instruction aliases.
def : MnemonicAlias<"iret",   "iretl">;
def : MnemonicAlias<"sysret", "sysretl">;

def : MnemonicAlias<"lgdtl", "lgdt">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lgdtq", "lgdt">, Requires<[In64BitMode]>;
def : MnemonicAlias<"lidtl", "lidt">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lidtq", "lidt">, Requires<[In64BitMode]>;
def : MnemonicAlias<"sgdtl", "sgdt">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sgdtq", "sgdt">, Requires<[In64BitMode]>;
def : MnemonicAlias<"sidtl", "sidt">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sidtq", "sidt">, Requires<[In64BitMode]>;

// Floating point stack aliases.
def : MnemonicAlias<"fcmovz",   "fcmove">;
def : MnemonicAlias<"fcmova",   "fcmovnbe">;
def : MnemonicAlias<"fcmovnae", "fcmovb">;
def : MnemonicAlias<"fcmovna",  "fcmovbe">;
def : MnemonicAlias<"fcmovae",  "fcmovnb">;
def : MnemonicAlias<"fcomip",   "fcompi">;
def : MnemonicAlias<"fildq",    "fildll">;
def : MnemonicAlias<"fldcww",   "fldcw">;
def : MnemonicAlias<"fnstcww",  "fnstcw">;
def : MnemonicAlias<"fnstsww",  "fnstsw">;
def : MnemonicAlias<"fucomip",  "fucompi">;
def : MnemonicAlias<"fwait",    "wait">;
class CondCodeAlias<string Prefix, string Suffix, string OldCond, string NewCond>
  : MnemonicAlias<!strconcat(Prefix, OldCond, Suffix),
                  !strconcat(Prefix, NewCond, Suffix)>;

/// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of
/// MnemonicAlias's that canonicalize the condition code in a mnemonic, for
/// example "setz" -> "sete".
multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix> {
  def C   : CondCodeAlias<Prefix, Suffix, "c",   "b">;  // setc   -> setb
  def Z   : CondCodeAlias<Prefix, Suffix, "z",   "e">;  // setz   -> sete
  def NA  : CondCodeAlias<Prefix, Suffix, "na",  "be">; // setna  -> setbe
  def NB  : CondCodeAlias<Prefix, Suffix, "nb",  "ae">; // setnb  -> setae
  def NC  : CondCodeAlias<Prefix, Suffix, "nc",  "ae">; // setnc  -> setae
  def NG  : CondCodeAlias<Prefix, Suffix, "ng",  "le">; // setng  -> setle
  def NL  : CondCodeAlias<Prefix, Suffix, "nl",  "ge">; // setnl  -> setge
  def NZ  : CondCodeAlias<Prefix, Suffix, "nz",  "ne">; // setnz  -> setne
  def PE  : CondCodeAlias<Prefix, Suffix, "pe",  "p">;  // setpe  -> setp
  def PO  : CondCodeAlias<Prefix, Suffix, "po",  "np">; // setpo  -> setnp

  def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b">;  // setnae -> setb
  def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a">;  // setnbe -> seta
  def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l">;  // setnge -> setl
  def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g">;  // setnle -> setg
}

// Aliases for set<CC>
defm : IntegerCondCodeMnemonicAlias<"set", "">;
// Aliases for j<CC>
defm : IntegerCondCodeMnemonicAlias<"j", "">;
// Aliases for cmov<CC>{w,l,q}
defm : IntegerCondCodeMnemonicAlias<"cmov", "w">;
defm : IntegerCondCodeMnemonicAlias<"cmov", "l">;
defm : IntegerCondCodeMnemonicAlias<"cmov", "q">;
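// For illustration, each defm above stamps out one MnemonicAlias per entry in
// the multiclass: the <"cmov", "w"> instance yields "cmovzw" -> "cmovew" and
// "cmovnaw" -> "cmovbew", while the <"set", ""> instance yields
// "setz" -> "sete", "setnae" -> "setb", and so on.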
//===----------------------------------------------------------------------===//
// Assembler Instruction Aliases
//===----------------------------------------------------------------------===//

// aad/aam default to base 10 if no operand is specified.
def : InstAlias<"aad", (AAD8i8 10)>;
def : InstAlias<"aam", (AAM8i8 10)>;
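// That is, a bare "aad" or "aam" is matched as if the immediate operand 10
// had been written explicitly ("aad $10" / "aam $10").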

// "clr <reg>" is an alias for xor'ing the register with itself.
def : InstAlias<"clrb $reg", (XOR8rr  GR8 :$reg, GR8 :$reg)>;
def : InstAlias<"clrw $reg", (XOR16rr GR16:$reg, GR16:$reg)>;
def : InstAlias<"clrl $reg", (XOR32rr GR32:$reg, GR32:$reg)>;
def : InstAlias<"clrq $reg", (XOR64rr GR64:$reg, GR64:$reg)>;

// div and idiv aliases for explicit A register.
def : InstAlias<"divb $src, %al",  (DIV8r  GR8 :$src)>;
def : InstAlias<"divw $src, %ax",  (DIV16r GR16:$src)>;
def : InstAlias<"divl $src, %eax", (DIV32r GR32:$src)>;
def : InstAlias<"divq $src, %rax", (DIV64r GR64:$src)>;
def : InstAlias<"divb $src, %al",  (DIV8m  i8mem :$src)>;
def : InstAlias<"divw $src, %ax",  (DIV16m i16mem:$src)>;
def : InstAlias<"divl $src, %eax", (DIV32m i32mem:$src)>;
def : InstAlias<"divq $src, %rax", (DIV64m i64mem:$src)>;
def : InstAlias<"idivb $src, %al",  (IDIV8r  GR8 :$src)>;
def : InstAlias<"idivw $src, %ax",  (IDIV16r GR16:$src)>;
def : InstAlias<"idivl $src, %eax", (IDIV32r GR32:$src)>;
def : InstAlias<"idivq $src, %rax", (IDIV64r GR64:$src)>;
def : InstAlias<"idivb $src, %al",  (IDIV8m  i8mem :$src)>;
def : InstAlias<"idivw $src, %ax",  (IDIV16m i16mem:$src)>;
def : InstAlias<"idivl $src, %eax", (IDIV32m i32mem:$src)>;
def : InstAlias<"idivq $src, %rax", (IDIV64m i64mem:$src)>;
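// These accept the redundant A-register operand and drop it, e.g.
// "divl %ecx, %eax" is matched as the plain one-operand "divl %ecx".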

// Various unary fpstack operations default to operating on ST1.
// For example, "fxch" -> "fxch %st(1)"
def : InstAlias<"faddp",   (ADD_FPrST0  ST1)>;
def : InstAlias<"fsubp",   (SUBR_FPrST0 ST1)>;
def : InstAlias<"fsubrp",  (SUB_FPrST0  ST1)>;
def : InstAlias<"fmulp",   (MUL_FPrST0  ST1)>;
def : InstAlias<"fdivp",   (DIVR_FPrST0 ST1)>;
def : InstAlias<"fdivrp",  (DIV_FPrST0  ST1)>;
def : InstAlias<"fxch",    (XCH_F     ST1)>;
def : InstAlias<"fcomi",   (COM_FIr   ST1)>;
def : InstAlias<"fcompi",  (COM_FIPr  ST1)>;
def : InstAlias<"fucom",   (UCOM_Fr   ST1)>;
def : InstAlias<"fucomp",  (UCOM_FPr  ST1)>;
def : InstAlias<"fucomi",  (UCOM_FIr  ST1)>;
def : InstAlias<"fucompi", (UCOM_FIPr ST1)>;

// Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op.
// For example, "fadd %st(4), %st(0)" -> "fadd %st(4)".  We also disambiguate
// instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
// gas.
multiclass FpUnaryAlias<string Mnemonic, Instruction Inst> {
  def : InstAlias<!strconcat(Mnemonic, " $op, %st(0)"),    (Inst RST:$op)>;
  def : InstAlias<!strconcat(Mnemonic, " %st(0), %st(0)"), (Inst ST0)>;
}

defm : FpUnaryAlias<"fadd",    ADD_FST0r>;
defm : FpUnaryAlias<"faddp",   ADD_FPrST0>;
defm : FpUnaryAlias<"fsub",    SUB_FST0r>;
defm : FpUnaryAlias<"fsubp",   SUBR_FPrST0>;
defm : FpUnaryAlias<"fsubr",   SUBR_FST0r>;
defm : FpUnaryAlias<"fsubrp",  SUB_FPrST0>;
defm : FpUnaryAlias<"fmul",    MUL_FST0r>;
defm : FpUnaryAlias<"fmulp",   MUL_FPrST0>;
defm : FpUnaryAlias<"fdiv",    DIV_FST0r>;
defm : FpUnaryAlias<"fdivp",   DIVR_FPrST0>;
defm : FpUnaryAlias<"fdivr",   DIVR_FST0r>;
defm : FpUnaryAlias<"fdivrp",  DIV_FPrST0>;
defm : FpUnaryAlias<"fcomi",   COM_FIr>;
defm : FpUnaryAlias<"fucomi",  UCOM_FIr>;
defm : FpUnaryAlias<"fcompi",  COM_FIPr>;
defm : FpUnaryAlias<"fucompi", UCOM_FIPr>;
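// For illustration, each FpUnaryAlias instantiation above adds two aliases:
// "<mnem> $op, %st(0)" is matched as the one-operand form (e.g.
// "fadd %st(4), %st(0)" -> "fadd %st(4)"), and the degenerate
// "<mnem> %st(0), %st(0)" is matched against %st(0) itself.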

// Handle "f{mulp,addp} st(0), $op" the same as "f{mulp,addp} $op", since they
// commute.  We also allow fdivrp/fsubrp even though they don't commute, solely
// because gas supports it.
def : InstAlias<"faddp %st(0), $op",  (ADD_FPrST0 RST:$op)>;
def : InstAlias<"fmulp %st(0), $op",  (MUL_FPrST0 RST:$op)>;
def : InstAlias<"fsubrp %st(0), $op", (SUB_FPrST0 RST:$op)>;
def : InstAlias<"fdivrp %st(0), $op", (DIV_FPrST0 RST:$op)>;

// We accept "fnstsw %eax" even though it only writes %ax.
def : InstAlias<"fnstsw %eax", (FNSTSW8r)>;
def : InstAlias<"fnstsw %al",  (FNSTSW8r)>;
def : InstAlias<"fnstsw",      (FNSTSW8r)>;

// lcall and ljmp aliases.  This seems to be an odd mapping in 64-bit mode, but
// this is compatible with what GAS does.
def : InstAlias<"lcall $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>;
def : InstAlias<"ljmp $seg, $off",  (FARJMP32i  i32imm:$off, i16imm:$seg)>;
def : InstAlias<"lcall *$dst",      (FARCALL32m opaque48mem:$dst)>;
def : InstAlias<"ljmp *$dst",       (FARJMP32m  opaque48mem:$dst)>;

// "imul <imm>, B" is an alias for "imul <imm>, B, B".
def : InstAlias<"imulw $imm, $r", (IMUL16rri  GR16:$r, GR16:$r, i16imm:$imm)>;
def : InstAlias<"imulw $imm, $r", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm)>;
def : InstAlias<"imull $imm, $r", (IMUL32rri  GR32:$r, GR32:$r, i32imm:$imm)>;
def : InstAlias<"imull $imm, $r", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm)>;
def : InstAlias<"imulq $imm, $r",(IMUL64rri32 GR64:$r, GR64:$r,i64i32imm:$imm)>;
def : InstAlias<"imulq $imm, $r", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm)>;
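// For example, "imull $4, %ecx" is accepted and matched as the three-operand
// form "imull $4, %ecx, %ecx".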

// inb %dx -> inb %al, %dx
def : InstAlias<"inb %dx", (IN8rr)>;
def : InstAlias<"inw %dx", (IN16rr)>;
def : InstAlias<"inl %dx", (IN32rr)>;
def : InstAlias<"inb $port", (IN8ri  i8imm:$port)>;
def : InstAlias<"inw $port", (IN16ri i8imm:$port)>;
def : InstAlias<"inl $port", (IN32ri i8imm:$port)>;

// jmp and call aliases for lcall and ljmp.  "jmp $42, $5" -> ljmp.
def : InstAlias<"call $seg, $off",  (FARCALL32i i32imm:$off, i16imm:$seg)>;
def : InstAlias<"jmp $seg, $off",   (FARJMP32i  i32imm:$off, i16imm:$seg)>;
def : InstAlias<"callw $seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>;
def : InstAlias<"jmpw $seg, $off",  (FARJMP16i  i16imm:$off, i16imm:$seg)>;
def : InstAlias<"calll $seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>;
def : InstAlias<"jmpl $seg, $off",  (FARJMP32i  i32imm:$off, i16imm:$seg)>;

// Force mov without a suffix with a segment and mem to prefer the 'l' form of
// the move.  All segment/mem forms are equivalent; this one has the shortest
// encoding.
def : InstAlias<"mov $mem, $seg", (MOV32sm SEGMENT_REG:$seg, i32mem:$mem)>;
def : InstAlias<"mov $seg, $mem", (MOV32ms i32mem:$mem, SEGMENT_REG:$seg)>;

// Match 'movq <largeimm>, <reg>' as an alias for movabsq.
def : InstAlias<"movq $imm, $reg", (MOV64ri GR64:$reg, i64imm:$imm)>;

// Match 'movq GR64, MMX' as an alias for movd.
def : InstAlias<"movq $src, $dst", (MMX_MOVD64to64rr   VR64:$dst, GR64:$src)>;
def : InstAlias<"movq $src, $dst", (MMX_MOVD64from64rr GR64:$dst, VR64:$src)>;

// movsd with no operands (as opposed to the SSE scalar move of a double) is an
// alias for movsl (as in "rep; movsd").
def : InstAlias<"movsd", (MOVSD)>;

// movsx aliases
def : InstAlias<"movsx $src, $dst", (MOVSX16rr8W GR16:$dst, GR8:$src)>;
def : InstAlias<"movsx $src, $dst", (MOVSX16rm8W GR16:$dst, i8mem:$src)>;
def : InstAlias<"movsx $src, $dst", (MOVSX32rr8  GR32:$dst, GR8:$src)>;
def : InstAlias<"movsx $src, $dst", (MOVSX32rr16 GR32:$dst, GR16:$src)>;
def : InstAlias<"movsx $src, $dst", (MOVSX64rr8  GR64:$dst, GR8:$src)>;
def : InstAlias<"movsx $src, $dst", (MOVSX64rr16 GR64:$dst, GR16:$src)>;
def : InstAlias<"movsx $src, $dst", (MOVSX64rr32 GR64:$dst, GR32:$src)>;

// movzx aliases
def : InstAlias<"movzx $src, $dst", (MOVZX16rr8W GR16:$dst, GR8:$src)>;
def : InstAlias<"movzx $src, $dst", (MOVZX16rm8W GR16:$dst, i8mem:$src)>;
def : InstAlias<"movzx $src, $dst", (MOVZX32rr8  GR32:$dst, GR8:$src)>;
def : InstAlias<"movzx $src, $dst", (MOVZX32rr16 GR32:$dst, GR16:$src)>;
def : InstAlias<"movzx $src, $dst", (MOVZX64rr8_Q  GR64:$dst, GR8:$src)>;
def : InstAlias<"movzx $src, $dst", (MOVZX64rr16_Q GR64:$dst, GR16:$src)>;
// Note: No GR32->GR64 movzx form; a 32-bit mov already zero-extends the
// destination implicitly.

// outb %dx -> outb %al, %dx
def : InstAlias<"outb %dx", (OUT8rr)>;
def : InstAlias<"outw %dx", (OUT16rr)>;
def : InstAlias<"outl %dx", (OUT32rr)>;
def : InstAlias<"outb $port", (OUT8ir  i8imm:$port)>;
def : InstAlias<"outw $port", (OUT16ir i8imm:$port)>;
def : InstAlias<"outl $port", (OUT32ir i8imm:$port)>;

// 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
// effect (both store to a 16-bit mem).  Force to sldtw to avoid ambiguity
// errors, since its encoding is the most compact.
def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem)>;

// shld/shrd op,op -> shld op, op, 1
def : InstAlias<"shldw $r1, $r2", (SHLD16rri8 GR16:$r1, GR16:$r2, 1)>;
def : InstAlias<"shldl $r1, $r2", (SHLD32rri8 GR32:$r1, GR32:$r2, 1)>;
def : InstAlias<"shldq $r1, $r2", (SHLD64rri8 GR64:$r1, GR64:$r2, 1)>;
def : InstAlias<"shrdw $r1, $r2", (SHRD16rri8 GR16:$r1, GR16:$r2, 1)>;
def : InstAlias<"shrdl $r1, $r2", (SHRD32rri8 GR32:$r1, GR32:$r2, 1)>;
def : InstAlias<"shrdq $r1, $r2", (SHRD64rri8 GR64:$r1, GR64:$r2, 1)>;

def : InstAlias<"shldw $mem, $reg", (SHLD16mri8 i16mem:$mem, GR16:$reg, 1)>;
def : InstAlias<"shldl $mem, $reg", (SHLD32mri8 i32mem:$mem, GR32:$reg, 1)>;
def : InstAlias<"shldq $mem, $reg", (SHLD64mri8 i64mem:$mem, GR64:$reg, 1)>;
def : InstAlias<"shrdw $mem, $reg", (SHRD16mri8 i16mem:$mem, GR16:$reg, 1)>;
def : InstAlias<"shrdl $mem, $reg", (SHRD32mri8 i32mem:$mem, GR32:$reg, 1)>;
def : InstAlias<"shrdq $mem, $reg", (SHRD64mri8 i64mem:$mem, GR64:$reg, 1)>;
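// In other words, when the shift count is omitted it defaults to 1, so e.g.
// "shldl %ebx, %eax" is accepted as "shldl $1, %ebx, %eax".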

/* FIXME: This is disabled because the asm matcher is currently incapable of
 * matching a fixed immediate like $1.
// "shl X, $1" is an alias for "shl X".
multiclass ShiftRotateByOneAlias<string Mnemonic, string Opc> {
  def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "8r1")) GR8:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "16r1")) GR16:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "32r1")) GR32:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "64r1")) GR64:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "8m1")) i8mem:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "16m1")) i16mem:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "32m1")) i32mem:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "64m1")) i64mem:$op)>;
}

defm : ShiftRotateByOneAlias<"rcl", "RCL">;
defm : ShiftRotateByOneAlias<"rcr", "RCR">;
defm : ShiftRotateByOneAlias<"rol", "ROL">;
defm : ShiftRotateByOneAlias<"ror", "ROR">;
FIXME */

// test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
def : InstAlias<"testb $val, $mem", (TEST8rm  GR8 :$val, i8mem :$mem)>;
def : InstAlias<"testw $val, $mem", (TEST16rm GR16:$val, i16mem:$mem)>;
def : InstAlias<"testl $val, $mem", (TEST32rm GR32:$val, i32mem:$mem)>;
def : InstAlias<"testq $val, $mem", (TEST64rm GR64:$val, i64mem:$mem)>;

// xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
def : InstAlias<"xchgb $mem, $val", (XCHG8rm  GR8 :$val, i8mem :$mem)>;
def : InstAlias<"xchgw $mem, $val", (XCHG16rm GR16:$val, i16mem:$mem)>;
def : InstAlias<"xchgl $mem, $val", (XCHG32rm GR32:$val, i32mem:$mem)>;
def : InstAlias<"xchgq $mem, $val", (XCHG64rm GR64:$val, i64mem:$mem)>;