//===-- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Operand Definitions.
//
// 64-bits but only 32 bits are significant.
def i64i32imm : Operand<i64>;

// 64-bits but only 8 bits are significant.
def i64i8imm : Operand<i64>;
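
// These pair with the i64immSExt32 / i64immSExt8 pattern leaves below so that
// 64-bit operations can use the 4- and 1-byte immediate encodings, which the
// hardware sign extends to 64 bits at execution time.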
def lea64mem : Operand<i64> {
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
}

def lea64_32mem : Operand<i32> {
  let PrintMethod = "printlea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}
//===----------------------------------------------------------------------===//
// Complex Pattern Definitions.
//
def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                               [add, mul, shl, or, frameindex, X86Wrapper],
                               []>;

//===----------------------------------------------------------------------===//
// Pattern Fragments.
//
def i64immSExt32 : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int32_t)N->getValue();
}]>;

def i64immZExt32 : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // unsigned field.
  return (uint64_t)N->getValue() == (uint32_t)N->getValue();
}]>;

def i64immSExt8 : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int8_t)N->getValue();
}]>;
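
// For example, -1 (all 64 bits set) satisfies all three predicates above,
// while 0x00000000FFFFFFFF satisfies only i64immZExt32: sign extending its
// low 32 bits would set bits 63:32, but zero extending reproduces it exactly.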
def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
//===----------------------------------------------------------------------===//
// Instruction list...
//

let isImplicitDef = 1 in
def IMPLICIT_DEF_GR64 : I<0, Pseudo, (outs GR64:$dst), (ins),
                          "#IMPLICIT_DEF $dst",
                          [(set GR64:$dst, (undef))]>;
//===----------------------------------------------------------------------===//
// Call Instructions...
//
let isCall = 1 in
  // All calls clobber the non-callee saved registers...
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS] in {
    def CALL64pcrel32 : I<0xE8, RawFrm, (outs), (ins i64imm:$dst, variable_ops),
                          "call\t${dst:call}", []>;
    def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                          "call\t{*}$dst", [(X86call GR64:$dst)]>;
    def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                          "call\t{*}$dst", []>;
  }
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset),
                     "#TC_RETURN $dst $offset",
                     []>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset),
                     "#TC_RETURN $dst $offset",
                     []>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst),
                   "jmp{q}\t{*}$dst  # TAILCALL", []>;

let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
  def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                 [(brind GR64:$dst)]>;
  def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                 [(brind (loadi64 addr:$dst))]>;
}
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions...
//
let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64 : I<0xC9, RawFrm,
                (outs), (ins), "leave", []>;
let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1 in {
let mayLoad = 1 in
def POP64r  : I<0x58, AddRegFrm,
                (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
let mayStore = 1 in
def PUSH64r : I<0x50, AddRegFrm,
                (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
}

let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
def POPFQ  : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W;
let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
def PUSHFQ : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;
def LEA64_32r : I<0x8D, MRMSrcMem,
                  (outs GR32:$dst), (ins lea64_32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
                "lea{q}\t{$src|$dst}, {$dst|$src}",
                [(set GR64:$dst, lea64addr:$src)]>;

let isTwoAddress = 1 in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;
// Bit scan instructions.
let Defs = [EFLAGS] in {
def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                 "bsf{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "bsf{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
                  (implicit EFLAGS)]>, TB;

def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                 "bsr{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "bsr{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
                  (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]
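
// "rep movsq" copies RCX quadwords from [RSI] to [RDI]; "rep stosq" stores
// RAX into RCX quadwords starting at [RDI]. That is why those registers
// appear only in the Defs/Uses lists of the definitions below.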
let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;
let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;
//===----------------------------------------------------------------------===//
// Move Instructions...
//

let neverHasSideEffects = 1 in
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

let isReMaterializable = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                    "movabs{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}

let isSimpleLoad = 1 in
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;
// Sign/Zero extenders

def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;

def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movz{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movz{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
def MOVZX64rr16: RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movz{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movz{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
let neverHasSideEffects = 1 in {
  let Defs = [RAX], Uses = [EAX] in
  def CDQE : RI<0x98, RawFrm, (outs), (ins),
                "{cltq|cdqe}", []>;     // RAX = signext(EAX)

  let Defs = [RAX,RDX], Uses = [RAX] in
  def CQO  : RI<0x99, RawFrm, (outs), (ins),
                "{cqto|cqo}", []>;      // RDX:RAX = signext(RAX)
}
//===----------------------------------------------------------------------===//
// Arithmetic Instructions...
//

let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                 "add{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;

def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
} // isConvertibleToThreeAddress

def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                 "add{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
} // isTwoAddress

def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                 "add{q}\t{$src2, $dst|$dst, $src2}",
                 [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                 "adc{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                 "adc{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                 "adc{q}\t{$src2, $dst|$dst, $src2}",
                 [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
let isTwoAddress = 1 in {
def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                 "sub{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;

def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                 "sub{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;

def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                 "sub{q}\t{$src2, $dst|$dst, $src2}",
                 [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                 "sbb{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                 "sbb{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                 "sbb{q}\t{$src2, $dst|$dst, $src2}",
                 [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
// Unsigned multiplication
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
                "mul{q}\t$src", []>;          // RAX,RDX = RAX*GR64

def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
                "mul{q}\t$src", []>;          // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
                 "imul{q}\t$src", []>;        // RAX,RDX = RAX*GR64

def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
                 "imul{q}\t$src", []>;        // RAX,RDX = RAX*[mem64]
}
let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;

def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
} // isTwoAddress

// Surprisingly enough, these are not two address instructions!
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                    // GR64 = [mem64]*I32
                        (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                      // GR64 = [mem64]*I8
                      (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
} // Defs = [EFLAGS]
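
// For example, in AT&T syntax "imulq $5, %rsi, %rdi" computes rdi = rsi * 5
// without touching rsi, which is why the immediate forms above take separate
// source and destination operands instead of a tied pair.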
// Unsigned division / remainder
let neverHasSideEffects = 1 in {
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),   // RDX:RAX/r64 = RAX,RDX
                "div{q}\t$src", []>;
// Signed division / remainder
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),   // RDX:RAX/r64 = RAX,RDX
                "idiv{q}\t$src", []>;
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
                "div{q}\t$src", []>;
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
                "idiv{q}\t$src", []>;
}
} // neverHasSideEffects
// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
let isTwoAddress = 1 in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
                [(set GR64:$dst, (ineg GR64:$src))]>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, 1))]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, -1))]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;
// In 64-bit mode, single byte INC and DEC cannot be encoded.
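// (Their one-byte 0x40-0x4F encodings are repurposed as REX prefixes, so the
// two-byte 0xFF /0 and /1 forms are used instead.)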
let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, 1))]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, 1))]>,
                Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, -1))]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, -1))]>,
                Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress
// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
let isTwoAddress = 0, CodeSize = 2 in {
  def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
                    [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
                  OpSize, Requires<[In64BitMode]>;
  def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
                    [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
                  Requires<[In64BitMode]>;
  def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
                    [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
                  OpSize, Requires<[In64BitMode]>;
  def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
                    [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}
} // Defs = [EFLAGS], CodeSize
let Defs = [EFLAGS] in {
// Shift instructions
let isTwoAddress = 1 in {
let Uses = [CL] in
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (shl GR64:$src, CL))]>;
let isConvertibleToThreeAddress = 1 in   // Can transform into LEA.
def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "shl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
// NOTE: We don't use shifts of a register by one, because 'add reg,reg' is
// cheaper.
} // isTwoAddress

let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shl{q}\t{$src, $dst|$dst, $src}",
                   [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (srl GR64:$src, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "shr{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                 "shr{q}\t$dst",
                 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shr{q}\t{$src, $dst|$dst, $src}",
                   [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (sra GR64:$src, CL))]>;
def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "sar{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                 "sar{q}\t$dst",
                 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "sar{q}\t{$src, $dst|$dst, $src}",
                   [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Rotate instructions
let isTwoAddress = 1 in {
let Uses = [CL] in
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotl GR64:$src, CL))]>;
def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "rol{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                 "rol{q}\t$dst",
                 [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "rol{q}\t{$src, $dst|$dst, $src}",
                   [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                 "rol{q}\t$dst",
                 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotr GR64:$src, CL))]>;
def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "ror{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                 "ror{q}\t$dst",
                 [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "ror{q}\t{$src, $dst|$dst, $src}",
                   [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                 "ror{q}\t$dst",
                 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Double shift instructions (generalizations of rotate)
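// SHLD shifts $dst left by the count, filling the vacated low bits from the
// high end of $src2; SHRD shifts $dst right, filling the vacated high bits
// from the low end of $src2.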
let isTwoAddress = 1 in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB;
} // Uses = [CL]

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
} // isCommutable
} // isTwoAddress

let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
} // Uses = [CL]

def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
// Logical Instructions...
//

let isTwoAddress = 1 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
def AND64rr : RI<0x21, MRMDestReg,
                 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                 "and{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
def AND64rm : RI<0x23, MRMSrcMem,
                 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                 "and{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
def AND64ri32 : RIi32<0x81, MRM4r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "and{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def AND64mr : RI<0x21, MRMDestMem,
                 (outs), (ins i64mem:$dst, GR64:$src),
                 "and{q}\t{$src, $dst|$dst, $src}",
                 [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
def AND64mi32 : RIi32<0x81, MRM4m,
                      (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "and{q}\t{$src, $dst|$dst, $src}",
                      [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "and{q}\t{$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                "or{q}\t{$src2, $dst|$dst, $src2}",
                [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                "or{q}\t{$src2, $dst|$dst, $src2}",
                [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                   "or{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                "or{q}\t{$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                     "or{q}\t{$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
                   "or{q}\t{$src, $dst|$dst, $src}",
                   [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                 "xor{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                 "xor{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "xor{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "xor{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "xor{q}\t{$src, $dst|$dst, $src}",
                 [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "xor{q}\t{$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "xor{q}\t{$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
// Comparison Instructions...
//

// Integer comparison
let Defs = [EFLAGS] in {
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, GR64:$src2), 0),
                   (implicit EFLAGS)]>;
def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
                   (implicit EFLAGS)]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
                       (ins GR64:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
                       (ins i64mem:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;

def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, GR64:$src2),
                  (implicit EFLAGS)]>;
def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp (loadi64 addr:$src1), GR64:$src2),
                  (implicit EFLAGS)]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
                  (implicit EFLAGS)]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp GR64:$src1, i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
                      (ins i64mem:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp GR64:$src1, i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Conditional moves
let Uses = [EFLAGS], isTwoAddress = 1 in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "cmovnp\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                      X86_COND_NP, EFLAGS))]>, TB;
} // isCommutable = 1
def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                    "cmovnp\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                      X86_COND_NP, EFLAGS))]>, TB;
} // Uses = [EFLAGS], isTwoAddress
//===----------------------------------------------------------------------===//
// Conversion Instructions...
//

// f64 -> signed i64
def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
                                             (load addr:$src)))]>;
def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64
                               (load addr:$src)))]>;
// Signed i64 -> f64
def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let isTwoAddress = 1 in {
def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              GR64:$src2))]>;
def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              (loadi64 addr:$src2)))]>;
} // isTwoAddress
// Signed i64 -> f32
def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let isTwoAddress = 1 in {
def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
                            (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               GR64:$src2))]>;
def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
                            (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               (loadi64 addr:$src2)))]>;
} // isTwoAddress
// f32 -> signed i64
def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse_cvtss2si64 VR128:$src))]>;
def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse_cvtss2si64
                                             (load addr:$src)))]>;
def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 VR128:$src))]>;
def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 (load addr:$src)))]>;
//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// TODO: Remove this after proper i32 -> i64 zext support.
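// Writing a 32-bit register on x86-64 implicitly zeroes bits 63:32 of the
// full register, so a plain movl of the subregister performs the zext.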
def PsMOVZX64rr32: I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
                     "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                     [(set GR64:$dst, (zext GR32:$src))]>;
def PsMOVZX64rm32: I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                     "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                     [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
// Alias instructions that map movr0 to xor. Use xorl instead of xorq; it's
// equivalent due to implicit zero-extending, and it sometimes has a smaller
// encoding.
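// For example, "xorl %eax, %eax" encodes as 31 C0 (two bytes), while
// "xorq %rax, %rax" needs a REX.W prefix: 48 31 C0 (three bytes).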
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS], AddedComplexity = 1, isReMaterializable = 1 in
def MOV64r0 : RI<0x31, MRMInitReg, (outs GR64:$dst), (ins),
                 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
                 [(set GR64:$dst, 0)]>;
// Materialize i64 constant where top 32-bits are zero.
let AddedComplexity = 1, isReMaterializable = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                        [(set GR64:$dst, i64immZExt32:$src)]>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;

def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[SmallCode, HasLow4G, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[SmallCode, HasLow4G, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[SmallCode, HasLow4G, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[SmallCode, HasLow4G, IsStatic]>;
// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall GR64:$dst),
          (CALL64r GR64:$dst)>;
// Lowered with the 32-bit tail-jump instructions from X86InstrInfo.td.
def : Pat<(X86tailcall GR32:$dst),
          (TAILJMPr GR32:$dst)>;
def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (TAILJMPd tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (TAILJMPd texternalsym:$dst)>;
def : Pat<(X86tcret GR64:$dst, imm:$off),
          (TCRETURNri64 GR64:$dst, imm:$off)>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>;
// TEST R,R is smaller than CMP R,0
def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
          (TEST64rr GR64:$src1, GR64:$src1)>;
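// ("test %rax, %rax" is 48 85 C0, three bytes; "cmp $0, %rax" also needs an
// immediate byte: 48 83 F8 00.)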
// zextload bool -> zextload byte
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload
def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8 addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8 addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
def : Pat<(extloadi64i32 addr:$src), (PsMOVZX64rm32 addr:$src)>;

// anyext -> zext
def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8 :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
def : Pat<(i64 (anyext GR32:$src)), (PsMOVZX64rr32 GR32:$src)>;
def : Pat<(i64 (anyext (loadi8  addr:$src))), (MOVZX64rm8  addr:$src)>;
def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
def : Pat<(i64 (anyext (loadi32 addr:$src))), (PsMOVZX64rm32 addr:$src)>;
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

// (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
def : Pat<(or (srl GR64:$src1, CL:$amt),
              (shl GR64:$src2, (sub 64, CL:$amt))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
                     (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

// (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
def : Pat<(or (shl GR64:$src1, CL:$amt),
              (srl GR64:$src2, (sub 64, CL:$amt))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
                     (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;
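
// These recognize the two-instruction funnel-shift idiom, replacing a shift,
// a complementary shift, and an or with a single shrd/shld through %cl.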
// X86 specific add which produces a flag.
def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, imm:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...

def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                          (iPTR 0)))]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;