1 //===- X86InstrControl.td - Control Flow Instructions ------*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 jump, return, call, and related instructions.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // Control Flow Instructions.
18 // Return instructions.
// All return forms are terminators/barriers with control dependence; the
// SpecialFP form lets the FP stackifier give them special treatment.
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, FPForm = SpecialFP in {
  // Near return; RETI additionally pops $amt bytes of arguments.
  def RET    : I   <0xC3, RawFrm, (outs), (ins variable_ops),
                    "ret",
                    [(X86retflag 0)]>;
  def RETI   : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
                    "ret\t$amt",
                    [(X86retflag timm:$amt)]>;
  def RETIW  : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
                    "retw\t$amt",
                    []>, OpSize;
  // Far returns (pop CS as well); no selection patterns — assembler only.
  def LRETL  : I   <0xCB, RawFrm, (outs), (ins),
                    "lretl", []>;
  def LRETQ  : RI  <0xCB, RawFrm, (outs), (ins),
                    "lretq", []>;
  def LRETI  : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
                    "lret\t$amt", []>;
  def LRETIW : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
                    "lretw\t$amt", []>, OpSize;
}
// Unconditional branches.
let isBarrier = 1, isBranch = 1, isTerminator = 1 in {
  // PC-relative direct jumps: 32-bit (JMP_4) and 8-bit (JMP_1) displacement
  // forms.  Only JMP_4 carries a selection pattern.
  def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
                        "jmp\t$dst", [(br bb:$dst)]>;
  def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
                       "jmp\t$dst", []>;
  // FIXME : Intel syntax for JMP64pcrel32 such that it is not ambiguous
  // with JMP_1.
  def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
                       "jmp{q}\t$dst", []>;
}
// Conditional Branches.
let isBranch = 1, isTerminator = 1, Uses = [EFLAGS] in {
  // For each condition code, define a short (8-bit pcrel, opc1) and a near
  // (32-bit pcrel, opc4 with the 0F/TB prefix) form.  Only the near form has
  // a selection pattern.
  multiclass ICBr<bits<8> opc1, bits<8> opc4, string asm, PatFrag Cond> {
    def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm, []>;
    def _4 : Ii32PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm,
                       [(X86brcond bb:$dst, Cond, EFLAGS)]>, TB;
  }
}
// One Jcc pair per condition code (opcodes 0x70-0x7F / 0x0F 0x80-0x8F).
defm JO  : ICBr<0x70, 0x80, "jo\t$dst" , X86_COND_O>;
defm JNO : ICBr<0x71, 0x81, "jno\t$dst" , X86_COND_NO>;
defm JB  : ICBr<0x72, 0x82, "jb\t$dst" , X86_COND_B>;
defm JAE : ICBr<0x73, 0x83, "jae\t$dst", X86_COND_AE>;
defm JE  : ICBr<0x74, 0x84, "je\t$dst" , X86_COND_E>;
defm JNE : ICBr<0x75, 0x85, "jne\t$dst", X86_COND_NE>;
defm JBE : ICBr<0x76, 0x86, "jbe\t$dst", X86_COND_BE>;
defm JA  : ICBr<0x77, 0x87, "ja\t$dst" , X86_COND_A>;
defm JS  : ICBr<0x78, 0x88, "js\t$dst" , X86_COND_S>;
defm JNS : ICBr<0x79, 0x89, "jns\t$dst", X86_COND_NS>;
defm JP  : ICBr<0x7A, 0x8A, "jp\t$dst" , X86_COND_P>;
defm JNP : ICBr<0x7B, 0x8B, "jnp\t$dst", X86_COND_NP>;
defm JL  : ICBr<0x7C, 0x8C, "jl\t$dst" , X86_COND_L>;
defm JGE : ICBr<0x7D, 0x8D, "jge\t$dst", X86_COND_GE>;
defm JLE : ICBr<0x7E, 0x8E, "jle\t$dst", X86_COND_LE>;
defm JG  : ICBr<0x7F, 0x8F, "jg\t$dst" , X86_COND_G>;
// jcx/jecx/jrcx instructions.
let isAsmParserOnly = 1, isBranch = 1, isTerminator = 1 in {
  // These are the 32-bit versions of this instruction for the asmparser.  In
  // 32-bit mode, the address size prefix is jcxz and the unprefixed version is
  // jecxz.
  let Uses = [CX] in
    def JCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
                        "jcxz\t$dst", []>, AdSize, Requires<[In32BitMode]>;
  let Uses = [ECX] in
    def JECXZ_32 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
                            "jecxz\t$dst", []>, Requires<[In32BitMode]>;

  // J*CXZ instruction: 64-bit versions of this instruction for the asmparser.
  // In 64-bit mode, the address size prefix is jecxz and the unprefixed version
  // is jrcxz.
  let Uses = [ECX] in
    def JECXZ_64 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
                            "jecxz\t$dst", []>, AdSize, Requires<[In64BitMode]>;
  let Uses = [RCX] in
    def JRCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
                         "jrcxz\t$dst", []>, Requires<[In64BitMode]>;
}
// Indirect branches.
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
  // Indirect near jumps through a register or memory operand.
  def JMP32r     : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
                     [(brind GR32:$dst)]>, Requires<[In32BitMode]>;
  def JMP32m     : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
                     [(brind (loadi32 addr:$dst))]>, Requires<[In32BitMode]>;

  def JMP64r     : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                     [(brind GR64:$dst)]>, Requires<[In64BitMode]>;
  def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                     [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>;

  // Far jumps: direct seg:off immediates and indirect through an opaque
  // far-pointer memory operand.  Assembler/disassembler only (no patterns).
  def FARJMP16i  : Iseg16<0xEA, RawFrmImm16, (outs),
                          (ins i16imm:$off, i16imm:$seg),
                          "ljmp{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
  def FARJMP32i  : Iseg32<0xEA, RawFrmImm16, (outs),
                          (ins i32imm:$off, i16imm:$seg),
                          "ljmp{l}\t{$seg, $off|$off, $seg}", []>;
  def FARJMP64   : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
                      "ljmp{q}\t{*}$dst", []>;

  def FARJMP16m  : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst),
                     "ljmp{w}\t{*}$dst", []>, OpSize;
  def FARJMP32m  : I<0xFF, MRM5m, (outs), (ins opaque48mem:$dst),
                     "ljmp{l}\t{*}$dst", []>;
}
// Loop instructions (8-bit pcrel target; assembler only, no patterns).
def LOOP   : Ii8PCRel<0xE2, RawFrm, (outs), (ins brtarget8:$dst), "loop\t$dst", []>;
def LOOPE  : Ii8PCRel<0xE1, RawFrm, (outs), (ins brtarget8:$dst), "loope\t$dst", []>;
def LOOPNE : Ii8PCRel<0xE0, RawFrm, (outs), (ins brtarget8:$dst), "loopne\t$dst", []>;
135 //===----------------------------------------------------------------------===//
136 // Call Instructions...
let isCall = 1 in
  // All calls clobber the non-callee saved registers. ESP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [ESP] in {
    // Direct pc-relative and indirect (register/memory) near calls.
    def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm,
                           (outs), (ins i32imm_pcrel:$dst,variable_ops),
                           "call{l}\t$dst", []>, Requires<[In32BitMode]>;
    def CALL32r     : I<0xFF, MRM2r, (outs), (ins GR32:$dst, variable_ops),
                        "call{l}\t{*}$dst", [(X86call GR32:$dst)]>,
                      Requires<[In32BitMode]>;
    def CALL32m     : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
                        "call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))]>,
                      Requires<[In32BitMode]>;

    // Far calls: direct seg:off immediates and indirect through an opaque
    // far-pointer memory operand (assembler only).
    def FARCALL16i  : Iseg16<0x9A, RawFrmImm16, (outs),
                             (ins i16imm:$off, i16imm:$seg),
                             "lcall{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
    def FARCALL32i  : Iseg32<0x9A, RawFrmImm16, (outs),
                             (ins i32imm:$off, i16imm:$seg),
                             "lcall{l}\t{$seg, $off|$off, $seg}", []>;

    def FARCALL16m  : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst),
                        "lcall{w}\t{*}$dst", []>, OpSize;
    def FARCALL32m  : I<0xFF, MRM3m, (outs), (ins opaque48mem:$dst),
                        "lcall{l}\t{*}$dst", []>;

    // callw for 16 bit code for the assembler.
    let isAsmParserOnly = 1 in
      def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
                             (outs), (ins i16imm_pcrel:$dst, variable_ops),
                             "callw\t$dst", []>, OpSize;
  }
// Tail call stuff.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
    isCodeGenOnly = 1 in
  let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [ESP] in {
  // Pseudos carrying the tail-call target plus a stack-adjustment $offset.
  def TCRETURNdi : PseudoI<(outs),
                     (ins i32imm_pcrel:$dst, i32imm:$offset, variable_ops), []>;
  def TCRETURNri : PseudoI<(outs),
                     (ins GR32_TC:$dst, i32imm:$offset, variable_ops), []>;
  let mayLoad = 1 in
  def TCRETURNmi : PseudoI<(outs),
                     (ins i32mem_TC:$dst, i32imm:$offset, variable_ops), []>;

  // FIXME: These should be pseudo instructions that are lowered when going to
  // the MC layer.
  def TAILJMPd : Ii32PCRel<0xE9, RawFrm, (outs),
                           (ins i32imm_pcrel:$dst, variable_ops),
                           "jmp\t$dst # TAILCALL",
                           []>;
  def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops),
                   "", []>;  // FIXME: Remove encoding when JIT is dead.
  let mayLoad = 1 in
  def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
                   "jmp{l}\t{*}$dst # TAILCALL", []>;
}
209 //===----------------------------------------------------------------------===//
210 // Call Instructions...
let isCall = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [RSP] in {

    // NOTE: this pattern doesn't match "X86call imm", because we do not know
    // that the offset between an arbitrary immediate and the call will fit in
    // the 32-bit pcrel field that we have.
    def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
                          (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                          "call{q}\t$dst", []>,
                        Requires<[In64BitMode, NotWin64]>;
    def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                          "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
                        Requires<[In64BitMode, NotWin64]>;
    def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                          "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
                        Requires<[In64BitMode, NotWin64]>;

    // Indirect far call through an opaque 80-bit far pointer (assembler only).
    def FARCALL64   : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
                         "lcall{q}\t{*}$dst", []>;
  }
// FIXME: We need to teach codegen about single list of call-clobbered
// registers.
let isCall = 1, isCodeGenOnly = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [RAX, RCX, RDX, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, YMM_HI_6_15, EFLAGS],
      Uses = [RSP] in {
    // Win64 flavors of the 64-bit calls, with the Win64 clobber list above.
    def WINCALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
                             (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                             "call{q}\t$dst", []>,
                           Requires<[IsWin64]>;
    def WINCALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                             "call{q}\t{*}$dst",
                             [(X86call GR64:$dst)]>, Requires<[IsWin64]>;
    def WINCALL64m       : I<0xFF, MRM2m, (outs),
                             (ins i64mem:$dst,variable_ops),
                             "call{q}\t{*}$dst",
                             [(X86call (loadi64 addr:$dst))]>,
                           Requires<[IsWin64]>;
  }
let isCall = 1, isCodeGenOnly = 1 in
  // __chkstk(MSVC):     clobber R10, R11 and EFLAGS.
  // ___chkstk(Mingw64): clobber R10, R11, RAX and EFLAGS, and update RSP.
  let Defs = [RAX, R10, R11, RSP, EFLAGS],
      Uses = [RSP] in
  def W64ALLOCA : Ii32PCRel<0xE8, RawFrm,
                    (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                    "call{q}\t$dst", []>,
                  Requires<[IsWin64]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
    isCodeGenOnly = 1 in
  // AMD64 cc clobbers RSI, RDI, XMM6-XMM15.
  let Defs = [RAX, RCX, RDX, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS],
      Uses = [RSP],
      usesCustomInserter = 1 in {
  // Pseudos carrying the tail-call target plus a stack-adjustment $offset.
  def TCRETURNdi64 : PseudoI<(outs),
                      (ins i64i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
                      []>;
  def TCRETURNri64 : PseudoI<(outs),
                      (ins ptr_rc_tailcall:$dst, i32imm:$offset, variable_ops),
                      []>;
  let mayLoad = 1 in
  def TCRETURNmi64 : PseudoI<(outs),
                       (ins i64mem_TC:$dst, i32imm:$offset, variable_ops), []>;

  def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
                             (ins i64i32imm_pcrel:$dst, variable_ops),
                             "jmp\t$dst # TAILCALL", []>;
  def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst, variable_ops),
                     "jmp{q}\t{*}$dst # TAILCALL", []>;

  let mayLoad = 1 in
  def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
                     "jmp{q}\t{*}$dst # TAILCALL", []>;
}