1 //====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86-64 instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // Operand Definitions.
20 // 64-bits but only 32 bits are significant.
21 def i64i32imm : Operand<i64>;
22 // 64-bits but only 8 bits are significant.
23 def i64i8imm : Operand<i64>;
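
// Background (added commentary, not in the original source): x86-64 has no
// 64-bit immediate encodings apart from movabs, so 64-bit ALU instructions
// carry an imm32 that the CPU sign-extends at runtime, or an imm8. These
// operand types let an instruction hold an i64 value that is known to fit
// the narrower field.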
25 def lea64mem : Operand<i64> {
26 let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
}
30 def lea64_32mem : Operand<i32> {
31 let PrintMethod = "printlea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}
35 //===----------------------------------------------------------------------===//
36 // Complex Pattern Definitions.
38 def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                               [add, mul, shl, or, frameindex, X86Wrapper],
                               []>;
42 //===----------------------------------------------------------------------===//
46 def i64immSExt32 : PatLeaf<(i64 imm), [{
47 // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
48 // sign extended field.
  return (int64_t)N->getValue() == (int32_t)N->getValue();
}]>;
52 def i64immZExt32 : PatLeaf<(i64 imm), [{
53 // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // zero extended field.
  return (uint64_t)N->getValue() == (uint32_t)N->getValue();
}]>;
58 def i64immSExt8 : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
60 // sign extended field.
  return (int64_t)N->getValue() == (int8_t)N->getValue();
}]>;
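
// Worked examples for the immediate predicates above (illustrative notes,
// not part of the original source):
//   -1 (0xFFFFFFFFFFFFFFFF)  passes i64immSExt32 and i64immSExt8: truncating
//       to 32 (or 8) bits and sign extending back reproduces the value.
//   0x7FFFFFFF               passes i64immSExt32 but fails i64immSExt8.
//   0x80000000 (2147483648)  fails i64immSExt32 (sign extending the low 32
//       bits yields 0xFFFFFFFF80000000) but passes i64immZExt32.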
64 def sextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (sextloadi1 node:$ptr))>;
65 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
66 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
67 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
69 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
70 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
71 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
72 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
74 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
75 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
76 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
77 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
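
// Illustrative note (added): these fragments are selected by the
// MOVSX64rm*/MOVZX64rm* load definitions below and by the extload/anyext
// Pat<> rules near the end of this file; e.g. an (i64 (sextloadi8 addr))
// node is folded into a single movsbq load.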
79 //===----------------------------------------------------------------------===//
80 // Instruction list...
83 let isImplicitDef = 1 in
84 def IMPLICIT_DEF_GR64 : I<0, Pseudo, (outs GR64:$dst), (ins),
                          "#IMPLICIT_DEF $dst",
                          [(set GR64:$dst, (undef))]>;
88 //===----------------------------------------------------------------------===//
89 // Call Instructions...
92 // All calls clobber the non-callee saved registers...
93 let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
94 FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
95 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
96 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
97 XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS] in {
98 def CALL64pcrel32 : I<0xE8, RawFrm, (outs), (ins i64imm:$dst, variable_ops),
99 "call\t${dst:call}", []>;
100 def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
101 "call\t{*}$dst", [(X86call GR64:$dst)]>;
102 def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
103 "call\t{*}$dst", []>;
108 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
109 def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset),
110 "#TC_RETURN $dst $offset",
113 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
114 def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset),
115 "#TC_RETURN $dst $offset",
119 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst),
                   "jmp{q}\t{*}$dst # TAILCALL", []>;
124 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
125 def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
126 [(brind GR64:$dst)]>;
127 def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
               [(brind (loadi64 addr:$dst))]>;
}
131 //===----------------------------------------------------------------------===//
132 // Miscellaneous Instructions...
134 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
135 def LEAVE64 : I<0xC9, RawFrm,
136 (outs), (ins), "leave", []>;
137 let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
139 def POP64r : I<0x58, AddRegFrm,
140 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
142 def PUSH64r : I<0x50, AddRegFrm,
                (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
}
146 let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
147 def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W;
148 let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
149 def PUSHFQ : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;
151 def LEA64_32r : I<0x8D, MRMSrcMem,
152 (outs GR32:$dst), (ins lea64_32mem:$src),
153 "lea{l}\t{$src|$dst}, {$dst|$src}",
154 [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
156 def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
157 "lea{q}\t{$src|$dst}, {$dst|$src}",
158 [(set GR64:$dst, lea64addr:$src)]>;
160 let isTwoAddress = 1 in
161 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;
165 // Bit scan instructions.
166 let Defs = [EFLAGS] in {
167 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
168 "bsf{q}\t{$src, $dst|$dst, $src}",
169 [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
170 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
171 "bsf{q}\t{$src, $dst|$dst, $src}",
172 [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
173 (implicit EFLAGS)]>, TB;
175 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
176 "bsr{q}\t{$src, $dst|$dst, $src}",
177 [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
178 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
179 "bsr{q}\t{$src, $dst|$dst, $src}",
180 [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
                   (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]
185 let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
186 def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
187 [(X86rep_movs i64)]>, REP;
188 let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
189 def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
190 [(X86rep_stos i64)]>, REP;
192 //===----------------------------------------------------------------------===//
193 // Move Instructions...
196 let neverHasSideEffects = 1 in
197 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
198 "mov{q}\t{$src, $dst|$dst, $src}", []>;
200 let isReMaterializable = 1 in {
201 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
202 "movabs{q}\t{$src, $dst|$dst, $src}",
203 [(set GR64:$dst, imm:$src)]>;
204 def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
205 "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}
209 let isSimpleLoad = 1 in
210 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
211 "mov{q}\t{$src, $dst|$dst, $src}",
212 [(set GR64:$dst, (load addr:$src))]>;
214 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
215 "mov{q}\t{$src, $dst|$dst, $src}",
216 [(store GR64:$src, addr:$dst)]>;
217 def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
218 "mov{q}\t{$src, $dst|$dst, $src}",
219 [(store i64immSExt32:$src, addr:$dst)]>;
221 // Sign/Zero extenders
223 def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
224 "movs{bq|x}\t{$src, $dst|$dst, $src}",
225 [(set GR64:$dst, (sext GR8:$src))]>, TB;
226 def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
227 "movs{bq|x}\t{$src, $dst|$dst, $src}",
228 [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
229 def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
230 "movs{wq|x}\t{$src, $dst|$dst, $src}",
231 [(set GR64:$dst, (sext GR16:$src))]>, TB;
232 def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
233 "movs{wq|x}\t{$src, $dst|$dst, $src}",
234 [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
235 def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
236 "movs{lq|xd}\t{$src, $dst|$dst, $src}",
237 [(set GR64:$dst, (sext GR32:$src))]>;
238 def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
239 "movs{lq|xd}\t{$src, $dst|$dst, $src}",
240 [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
242 def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
243 "movz{bq|x}\t{$src, $dst|$dst, $src}",
244 [(set GR64:$dst, (zext GR8:$src))]>, TB;
245 def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
246 "movz{bq|x}\t{$src, $dst|$dst, $src}",
247 [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
248 def MOVZX64rr16: RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
249 "movz{wq|x}\t{$src, $dst|$dst, $src}",
250 [(set GR64:$dst, (zext GR16:$src))]>, TB;
251 def MOVZX64rm16: RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
252 "movz{wq|x}\t{$src, $dst|$dst, $src}",
253 [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
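
// Illustrative note (added): there is deliberately no MOVZX64rr32. In 64-bit
// mode a write to a 32-bit register implicitly zero-extends into bits 63:32,
// so a plain movl already performs i32 -> i64 zext; see PsMOVZX64rr32 among
// the alias instructions below.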
255 let neverHasSideEffects = 1 in {
256 let Defs = [RAX], Uses = [EAX] in
257 def CDQE : RI<0x98, RawFrm, (outs), (ins),
258 "{cltq|cdqe}", []>; // RAX = signext(EAX)
260 let Defs = [RAX,RDX], Uses = [RAX] in
261 def CQO : RI<0x99, RawFrm, (outs), (ins),
262 "{cqto|cqo}", []>; // RDX:RAX = signext(RAX)
265 //===----------------------------------------------------------------------===//
266 // Arithmetic Instructions...
269 let Defs = [EFLAGS] in {
270 let isTwoAddress = 1 in {
271 let isConvertibleToThreeAddress = 1 in {
272 let isCommutable = 1 in
273 def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
274 "add{q}\t{$src2, $dst|$dst, $src2}",
275 [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;
277 def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
278 "add{q}\t{$src2, $dst|$dst, $src2}",
279 [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
280 def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
281 "add{q}\t{$src2, $dst|$dst, $src2}",
282 [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
283 } // isConvertibleToThreeAddress
285 def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
286 "add{q}\t{$src2, $dst|$dst, $src2}",
                [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
} // isTwoAddress
290 def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
291 "add{q}\t{$src2, $dst|$dst, $src2}",
292 [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
293 def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
294 "add{q}\t{$src2, $dst|$dst, $src2}",
295 [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
296 def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
297 "add{q}\t{$src2, $dst|$dst, $src2}",
298 [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
300 let Uses = [EFLAGS] in {
301 let isTwoAddress = 1 in {
302 let isCommutable = 1 in
303 def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
304 "adc{q}\t{$src2, $dst|$dst, $src2}",
305 [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;
307 def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
308 "adc{q}\t{$src2, $dst|$dst, $src2}",
309 [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;
311 def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
312 "adc{q}\t{$src2, $dst|$dst, $src2}",
313 [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
314 def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
315 "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
319 def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
320 "adc{q}\t{$src2, $dst|$dst, $src2}",
321 [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
322 def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
323 "adc{q}\t{$src2, $dst|$dst, $src2}",
             [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
325 def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
326 "adc{q}\t{$src2, $dst|$dst, $src2}",
              [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
330 let isTwoAddress = 1 in {
331 def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
332 "sub{q}\t{$src2, $dst|$dst, $src2}",
333 [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
335 def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
336 "sub{q}\t{$src2, $dst|$dst, $src2}",
337 [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;
339 def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
340 "sub{q}\t{$src2, $dst|$dst, $src2}",
341 [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
342 def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
343 "sub{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
347 def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
348 "sub{q}\t{$src2, $dst|$dst, $src2}",
349 [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
350 def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
351 "sub{q}\t{$src2, $dst|$dst, $src2}",
352 [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
353 def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
354 "sub{q}\t{$src2, $dst|$dst, $src2}",
355 [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
357 let Uses = [EFLAGS] in {
358 let isTwoAddress = 1 in {
359 def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
360 "sbb{q}\t{$src2, $dst|$dst, $src2}",
361 [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;
363 def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
364 "sbb{q}\t{$src2, $dst|$dst, $src2}",
365 [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
367 def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
368 "sbb{q}\t{$src2, $dst|$dst, $src2}",
369 [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
370 def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
371 "sbb{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
375 def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
376 "sbb{q}\t{$src2, $dst|$dst, $src2}",
377 [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
378 def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
379 "sbb{q}\t{$src2, $dst|$dst, $src2}",
380 [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
381 def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
382 "sbb{q}\t{$src2, $dst|$dst, $src2}",
              [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
} // Defs = [EFLAGS]
387 // Unsigned multiplication
388 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
389 def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
390 "mul{q}\t$src", []>; // RAX,RDX = RAX*GR64
392 def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
393 "mul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
395 // Signed multiplication
396 def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
397 "imul{q}\t$src", []>; // RAX,RDX = RAX*GR64
399 def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
400 "imul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
403 let Defs = [EFLAGS] in {
404 let isTwoAddress = 1 in {
405 let isCommutable = 1 in
406 def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
407 "imul{q}\t{$src2, $dst|$dst, $src2}",
408 [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;
410 def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
411 "imul{q}\t{$src2, $dst|$dst, $src2}",
               [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
} // isTwoAddress
// Surprisingly enough, these are not two-address instructions!
416 def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
417 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
418 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
419 [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
420 def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
421 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
422 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
423 [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
424 def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
425 (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
426 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
427 [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
428 def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
429 (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
430 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                 [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
} // Defs = [EFLAGS]
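
// Illustrative example (added): IMUL64rri8 assembles to something like
//   imulq $42, %rcx, %rax    # rax = rcx * 42
// reading one register and writing another, which is why the forms above are
// not two-address.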
434 // Unsigned division / remainder
435 let neverHasSideEffects = 1 in {
436 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),  // RDX:RAX/r64 = RAX,RDX
                "div{q}\t$src", []>;
439 // Signed division / remainder
440 def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX
441 "idiv{q}\t$src", []>;
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
                "div{q}\t$src", []>;
445 def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
446 "idiv{q}\t$src", []>;
451 // Unary instructions
452 let Defs = [EFLAGS], CodeSize = 2 in {
453 let isTwoAddress = 1 in
454 def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
455 [(set GR64:$dst, (ineg GR64:$src))]>;
456 def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
457 [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;
459 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
460 def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
461 [(set GR64:$dst, (add GR64:$src, 1))]>;
462 def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
463 [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;
465 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
466 def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
467 [(set GR64:$dst, (add GR64:$src, -1))]>;
468 def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
469 [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;
471 // In 64-bit mode, single byte INC and DEC cannot be encoded.
472 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
473 // Can transform into LEA.
474 def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
475 [(set GR16:$dst, (add GR16:$src, 1))]>,
476 OpSize, Requires<[In64BitMode]>;
477 def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
478 [(set GR32:$dst, (add GR32:$src, 1))]>,
479 Requires<[In64BitMode]>;
480 def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
481 [(set GR16:$dst, (add GR16:$src, -1))]>,
482 OpSize, Requires<[In64BitMode]>;
483 def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
484 [(set GR32:$dst, (add GR32:$src, -1))]>,
485 Requires<[In64BitMode]>;
486 } // isConvertibleToThreeAddress
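
// Background note (added): the one-byte inc/dec opcodes 0x40-0x4F were
// repurposed as REX prefixes in 64-bit mode, so only the two-byte 0xFF /0
// and 0xFF /1 encodings remain available here.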
488 // These are duplicates of their 32-bit counterparts. Only needed so X86 knows
489 // how to unfold them.
490 let isTwoAddress = 0, CodeSize = 2 in {
491 def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
492 [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
493 OpSize, Requires<[In64BitMode]>;
494 def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
495 [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
496 Requires<[In64BitMode]>;
497 def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
498 [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
499 OpSize, Requires<[In64BitMode]>;
500 def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
501 [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
                    Requires<[In64BitMode]>;
}
504 } // Defs = [EFLAGS], CodeSize
507 let Defs = [EFLAGS] in {
508 // Shift instructions
509 let isTwoAddress = 1 in {
511 def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
512 "shl{q}\t{%cl, $dst|$dst, %CL}",
513 [(set GR64:$dst, (shl GR64:$src, CL))]>;
514 let isConvertibleToThreeAddress = 1 in // Can transform into LEA.
515 def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
516 "shl{q}\t{$src2, $dst|$dst, $src2}",
517 [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
// NOTE: We don't use shifts of a register by one, because 'add reg,reg' is
// cheaper.
} // isTwoAddress
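// (Added cross-reference: the "(shl x, 1) ==> (add x, x)" peephole Pat<>
// near the end of this file implements that preference.)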
523 def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
524 "shl{q}\t{%cl, $dst|$dst, %CL}",
525 [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
526 def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
527 "shl{q}\t{$src, $dst|$dst, $src}",
528 [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
529 def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
533 let isTwoAddress = 1 in {
535 def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
536 "shr{q}\t{%cl, $dst|$dst, %CL}",
537 [(set GR64:$dst, (srl GR64:$src, CL))]>;
538 def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
539 "shr{q}\t{$src2, $dst|$dst, $src2}",
540 [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
541 def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                 "shr{q}\t$dst",
                 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // isTwoAddress
547 def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
548 "shr{q}\t{%cl, $dst|$dst, %CL}",
549 [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
550 def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
551 "shr{q}\t{$src, $dst|$dst, $src}",
552 [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
553 def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
557 let isTwoAddress = 1 in {
559 def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
560 "sar{q}\t{%cl, $dst|$dst, %CL}",
561 [(set GR64:$dst, (sra GR64:$src, CL))]>;
562 def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
563 "sar{q}\t{$src2, $dst|$dst, $src2}",
564 [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
565 def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                 "sar{q}\t$dst",
                 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // isTwoAddress
571 def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
572 "sar{q}\t{%cl, $dst|$dst, %CL}",
573 [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
574 def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
575 "sar{q}\t{$src, $dst|$dst, $src}",
576 [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
577 def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
581 // Rotate instructions
582 let isTwoAddress = 1 in {
584 def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
585 "rol{q}\t{%cl, $dst|$dst, %CL}",
586 [(set GR64:$dst, (rotl GR64:$src, CL))]>;
587 def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
588 "rol{q}\t{$src2, $dst|$dst, $src2}",
589 [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
590 def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                 "rol{q}\t$dst",
                 [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // isTwoAddress
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
597 "rol{q}\t{%cl, $dst|$dst, %CL}",
598 [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
599 def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
600 "rol{q}\t{$src, $dst|$dst, $src}",
601 [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
602 def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                 "rol{q}\t$dst",
                 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
606 let isTwoAddress = 1 in {
608 def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
609 "ror{q}\t{%cl, $dst|$dst, %CL}",
610 [(set GR64:$dst, (rotr GR64:$src, CL))]>;
611 def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
612 "ror{q}\t{$src2, $dst|$dst, $src2}",
613 [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
614 def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                 "ror{q}\t$dst",
                 [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // isTwoAddress
620 def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
621 "ror{q}\t{%cl, $dst|$dst, %CL}",
622 [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
623 def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
624 "ror{q}\t{$src, $dst|$dst, $src}",
625 [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
626 def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                 "ror{q}\t$dst",
                 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
630 // Double shift instructions (generalizations of rotate)
631 let isTwoAddress = 1 in {
633 def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
634 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
635 [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB;
636 def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
637 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
638 [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB;
641 let isCommutable = 1 in { // FIXME: Update X86InstrInfo::commuteInstruction
642 def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
643 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
644 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
648 def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
649 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
650 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
} // isCommutable
} // isTwoAddress
658 def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
659 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                            addr:$dst)]>, TB;
662 def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
663 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                            addr:$dst)]>, TB;
667 def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
668 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
669 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
670 [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                              (i8 imm:$src3)), addr:$dst)]>,
                      TB;
673 def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
674 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
675 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
676 [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                              (i8 imm:$src3)), addr:$dst)]>,
                      TB;
} // Defs = [EFLAGS]
681 //===----------------------------------------------------------------------===//
682 // Logical Instructions...
685 let isTwoAddress = 1 in
686 def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
687 [(set GR64:$dst, (not GR64:$src))]>;
688 def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
689 [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
691 let Defs = [EFLAGS] in {
692 let isTwoAddress = 1 in {
693 let isCommutable = 1 in
694 def AND64rr : RI<0x21, MRMDestReg,
695 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
696 "and{q}\t{$src2, $dst|$dst, $src2}",
697 [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
698 def AND64rm : RI<0x23, MRMSrcMem,
699 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
700 "and{q}\t{$src2, $dst|$dst, $src2}",
701 [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
702 def AND64ri32 : RIi32<0x81, MRM4r,
703 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
704 "and{q}\t{$src2, $dst|$dst, $src2}",
705 [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
706 def AND64ri8 : RIi8<0x83, MRM4r,
707 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
708 "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
712 def AND64mr : RI<0x21, MRMDestMem,
713 (outs), (ins i64mem:$dst, GR64:$src),
714 "and{q}\t{$src, $dst|$dst, $src}",
715 [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
716 def AND64mi32 : RIi32<0x81, MRM4m,
717 (outs), (ins i64mem:$dst, i64i32imm:$src),
718 "and{q}\t{$src, $dst|$dst, $src}",
719 [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
720 def AND64mi8 : RIi8<0x83, MRM4m,
721 (outs), (ins i64mem:$dst, i64i8imm :$src),
722 "and{q}\t{$src, $dst|$dst, $src}",
723 [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
725 let isTwoAddress = 1 in {
726 let isCommutable = 1 in
727 def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
728 "or{q}\t{$src2, $dst|$dst, $src2}",
729 [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
730 def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
731 "or{q}\t{$src2, $dst|$dst, $src2}",
732 [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
733 def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
734 "or{q}\t{$src2, $dst|$dst, $src2}",
735 [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
736 def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
737 "or{q}\t{$src2, $dst|$dst, $src2}",
                [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
741 def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
742 "or{q}\t{$src, $dst|$dst, $src}",
743 [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
744 def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
745 "or{q}\t{$src, $dst|$dst, $src}",
746 [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
747 def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
748 "or{q}\t{$src, $dst|$dst, $src}",
749 [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
751 let isTwoAddress = 1 in {
752 let isCommutable = 1 in
753 def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
754 "xor{q}\t{$src2, $dst|$dst, $src2}",
755 [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
756 def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
757 "xor{q}\t{$src2, $dst|$dst, $src2}",
758 [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
759 def XOR64ri32 : RIi32<0x81, MRM6r,
760 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
761 "xor{q}\t{$src2, $dst|$dst, $src2}",
762 [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
763 def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
764 "xor{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
768 def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
769 "xor{q}\t{$src, $dst|$dst, $src}",
770 [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
771 def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
772 "xor{q}\t{$src, $dst|$dst, $src}",
773 [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
774 def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
775 "xor{q}\t{$src, $dst|$dst, $src}",
                [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
} // Defs = [EFLAGS]
779 //===----------------------------------------------------------------------===//
780 // Comparison Instructions...
783 // Integer comparison
784 let Defs = [EFLAGS] in {
785 let isCommutable = 1 in
786 def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
787 "test{q}\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp (and GR64:$src1, GR64:$src2), 0),
                    (implicit EFLAGS)]>;
790 def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
791 "test{q}\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
                    (implicit EFLAGS)]>;
794 def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
795 (ins GR64:$src1, i64i32imm:$src2),
796 "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;
799 def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
800 (ins i64mem:$src1, i64i32imm:$src2),
801 "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;
805 def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
806 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, GR64:$src2),
                  (implicit EFLAGS)]>;
809 def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
810 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp (loadi64 addr:$src1), GR64:$src2),
                  (implicit EFLAGS)]>;
813 def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
814 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
                  (implicit EFLAGS)]>;
817 def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
818 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp GR64:$src1, i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
821 def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
822 (ins i64mem:$src1, i64i32imm:$src2),
823 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
826 def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
827 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
830 def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
831 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp GR64:$src1, i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Conditional moves
837 let Uses = [EFLAGS], isTwoAddress = 1 in {
838 let isCommutable = 1 in {
839 def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
840 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
841 "cmovb\t{$src2, $dst|$dst, $src2}",
842 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
843 X86_COND_B, EFLAGS))]>, TB;
844 def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64
845 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
846 "cmovae\t{$src2, $dst|$dst, $src2}",
847 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
848 X86_COND_AE, EFLAGS))]>, TB;
849 def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64
850 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
851 "cmove\t{$src2, $dst|$dst, $src2}",
852 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
853 X86_COND_E, EFLAGS))]>, TB;
854 def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64
855 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
856 "cmovne\t{$src2, $dst|$dst, $src2}",
857 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
858 X86_COND_NE, EFLAGS))]>, TB;
859 def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64
860 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
861 "cmovbe\t{$src2, $dst|$dst, $src2}",
862 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
863 X86_COND_BE, EFLAGS))]>, TB;
864 def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64
865 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
866 "cmova\t{$src2, $dst|$dst, $src2}",
867 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
868 X86_COND_A, EFLAGS))]>, TB;
869 def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64
870 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
871 "cmovl\t{$src2, $dst|$dst, $src2}",
872 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
873 X86_COND_L, EFLAGS))]>, TB;
874 def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64
875 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
876 "cmovge\t{$src2, $dst|$dst, $src2}",
877 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
878 X86_COND_GE, EFLAGS))]>, TB;
879 def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64
880 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
881 "cmovle\t{$src2, $dst|$dst, $src2}",
882 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
883 X86_COND_LE, EFLAGS))]>, TB;
884 def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64
885 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
886 "cmovg\t{$src2, $dst|$dst, $src2}",
887 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
888 X86_COND_G, EFLAGS))]>, TB;
889 def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64
890 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
891 "cmovs\t{$src2, $dst|$dst, $src2}",
892 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
893 X86_COND_S, EFLAGS))]>, TB;
894 def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64
895 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
896 "cmovns\t{$src2, $dst|$dst, $src2}",
897 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
898 X86_COND_NS, EFLAGS))]>, TB;
899 def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64
900 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
901 "cmovp\t{$src2, $dst|$dst, $src2}",
902 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
903 X86_COND_P, EFLAGS))]>, TB;
904 def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64
905 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
906 "cmovnp\t{$src2, $dst|$dst, $src2}",
907 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
908 X86_COND_NP, EFLAGS))]>, TB;
909 } // isCommutable = 1
911 def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
912 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
913 "cmovb\t{$src2, $dst|$dst, $src2}",
914 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
915 X86_COND_B, EFLAGS))]>, TB;
916 def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
917 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
918 "cmovae\t{$src2, $dst|$dst, $src2}",
919 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
920 X86_COND_AE, EFLAGS))]>, TB;
921 def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
922 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
923 "cmove\t{$src2, $dst|$dst, $src2}",
924 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
925 X86_COND_E, EFLAGS))]>, TB;
926 def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
927 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
928 "cmovne\t{$src2, $dst|$dst, $src2}",
929 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
930 X86_COND_NE, EFLAGS))]>, TB;
931 def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
932 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
933 "cmovbe\t{$src2, $dst|$dst, $src2}",
934 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
935 X86_COND_BE, EFLAGS))]>, TB;
936 def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
937 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
938 "cmova\t{$src2, $dst|$dst, $src2}",
939 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
940 X86_COND_A, EFLAGS))]>, TB;
941 def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64]
942 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
943 "cmovl\t{$src2, $dst|$dst, $src2}",
944 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
945 X86_COND_L, EFLAGS))]>, TB;
946 def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
947 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
948 "cmovge\t{$src2, $dst|$dst, $src2}",
949 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
950 X86_COND_GE, EFLAGS))]>, TB;
951 def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
952 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
953 "cmovle\t{$src2, $dst|$dst, $src2}",
954 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
955 X86_COND_LE, EFLAGS))]>, TB;
956 def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
957 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
958 "cmovg\t{$src2, $dst|$dst, $src2}",
959 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
960 X86_COND_G, EFLAGS))]>, TB;
961 def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
962 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
963 "cmovs\t{$src2, $dst|$dst, $src2}",
964 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
965 X86_COND_S, EFLAGS))]>, TB;
966 def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
967 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
968 "cmovns\t{$src2, $dst|$dst, $src2}",
969 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
970 X86_COND_NS, EFLAGS))]>, TB;
971 def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
972 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
973 "cmovp\t{$src2, $dst|$dst, $src2}",
974 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
975 X86_COND_P, EFLAGS))]>, TB;
976 def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64]
977 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
978 "cmovnp\t{$src2, $dst|$dst, $src2}",
979 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NP, EFLAGS))]>, TB;
} // Uses = [EFLAGS], isTwoAddress
983 //===----------------------------------------------------------------------===//
// Conversion Instructions...

// f64 -> signed i64
988 def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
989 "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst,
                        (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
992 def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
993 "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
994 [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
995 (load addr:$src)))]>;
996 def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
997 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
998 [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
999 def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
1000 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1001 [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
1002 def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1003 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst,
                         (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
1006 def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
1007 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst,
                         (int_x86_sse2_cvttsd2si64
1010 (load addr:$src)))]>;
1012 // Signed i64 -> f64
1013 def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1014 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1015 [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
1016 def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
1017 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1018 [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
1020 let isTwoAddress = 1 in {
1021 def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
1022 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1023 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (int_x86_sse2_cvtsi642sd VR128:$src1,
                                                 GR64:$src2))]>;
1027 def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
1028 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1029 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (int_x86_sse2_cvtsi642sd VR128:$src1,
                                                 (loadi64 addr:$src2)))]>;
} // isTwoAddress
1035 // Signed i64 -> f32
1036 def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
1037 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1038 [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
1039 def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
1040 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1041 [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
1043 let isTwoAddress = 1 in {
1044 def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
1045 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1046 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (int_x86_sse_cvtsi642ss VR128:$src1,
                                                 GR64:$src2))]>;
1050 def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
1051 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1052 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (int_x86_sse_cvtsi642ss VR128:$src1,
                                                 (loadi64 addr:$src2)))]>;
} // isTwoAddress
1058 // f32 -> signed i64
1059 def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1060 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst,
                        (int_x86_sse_cvtss2si64 VR128:$src))]>;
1063 def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1064 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
1065 [(set GR64:$dst, (int_x86_sse_cvtss2si64
1066 (load addr:$src)))]>;
1067 def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
1068 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1069 [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
1070 def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1071 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1072 [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
1073 def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1074 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst,
                         (int_x86_sse_cvttss2si64 VR128:$src))]>;
1077 def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1078 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst,
                         (int_x86_sse_cvttss2si64 (load addr:$src)))]>;
1082 //===----------------------------------------------------------------------===//
1083 // Alias Instructions
1084 //===----------------------------------------------------------------------===//
1087 // TODO: Remove this after proper i32 -> i64 zext support.
1088 def PsMOVZX64rr32: I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
1089 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1090 [(set GR64:$dst, (zext GR32:$src))]>;
1091 def PsMOVZX64rm32: I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
1092 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1093 [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
1096 // Alias instructions that map movr0 to xor. Use xorl instead of xorq; it's
// equivalent due to implicit zero-extending, and it sometimes has a smaller
// encoding.
1099 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
1100 // FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
1101 // when we have a better way to specify isel priority.
1102 let Defs = [EFLAGS], AddedComplexity = 1, isReMaterializable = 1 in
1103 def MOV64r0 : RI<0x31, MRMInitReg, (outs GR64:$dst), (ins),
1104 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
1105 [(set GR64:$dst, 0)]>;
1107 // Materialize i64 constant where top 32-bits are zero.
1108 let AddedComplexity = 1, isReMaterializable = 1 in
1109 def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
1110 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1111 [(set GR64:$dst, i64immZExt32:$src)]>;
1113 //===----------------------------------------------------------------------===//
1114 // Non-Instruction Patterns
1115 //===----------------------------------------------------------------------===//
1117 // ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
1118 def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
1119 (MOV64ri tconstpool :$dst)>, Requires<[NotSmallCode]>;
1120 def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
1121 (MOV64ri tjumptable :$dst)>, Requires<[NotSmallCode]>;
1122 def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
1123 (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
1124 def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
1125 (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
1127 def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
1128 (MOV64mi32 addr:$dst, tconstpool:$src)>,
1129 Requires<[SmallCode, HasLow4G, IsStatic]>;
1130 def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
1131 (MOV64mi32 addr:$dst, tjumptable:$src)>,
1132 Requires<[SmallCode, HasLow4G, IsStatic]>;
1133 def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
1134 (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
1135 Requires<[SmallCode, HasLow4G, IsStatic]>;
1136 def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
1137 (MOV64mi32 addr:$dst, texternalsym:$src)>,
1138 Requires<[SmallCode, HasLow4G, IsStatic]>;
1141 // Direct PC relative function call for small code model. 32-bit displacement
1142 // sign extended to 64-bit.
1143 def : Pat<(X86call (i64 tglobaladdr:$dst)),
1144 (CALL64pcrel32 tglobaladdr:$dst)>;
1145 def : Pat<(X86call (i64 texternalsym:$dst)),
1146 (CALL64pcrel32 texternalsym:$dst)>;
1148 def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
1149 (CALL64pcrel32 tglobaladdr:$dst)>;
1150 def : Pat<(X86tailcall (i64 texternalsym:$dst)),
1151 (CALL64pcrel32 texternalsym:$dst)>;
1153 def : Pat<(X86tailcall GR64:$dst),
1154 (CALL64r GR64:$dst)>;
def : Pat<(X86tailcall GR32:$dst),
          (TAILJMPr GR32:$dst)>;
def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (TAILJMPd tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (TAILJMPd texternalsym:$dst)>;
1165 def : Pat<(X86tcret GR64:$dst, imm:$off),
1166 (TCRETURNri64 GR64:$dst, imm:$off)>;
1168 def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;
1171 def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
1172 (TCRETURNdi64 texternalsym:$dst, imm:$off)>;
1176 // TEST R,R is smaller than CMP R,0
1177 def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
1178 (TEST64rr GR64:$src1, GR64:$src1)>;
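
// Illustrative encoding note (added): "testq %rax, %rax" is REX.W + 85 /r
// (3 bytes), while even the shortest "cmpq $0, %rax" uses REX.W + 83 /7 ib
// (4 bytes).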
1180 // {s|z}extload bool -> {s|z}extload byte
1181 def : Pat<(sextloadi64i1 addr:$src), (MOVSX64rm8 addr:$src)>;
1182 def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
1185 def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
1186 def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>;
1187 def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
1188 def : Pat<(extloadi64i32 addr:$src), (PsMOVZX64rm32 addr:$src)>;
// anyext
def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>;
1192 def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
1193 def : Pat<(i64 (anyext GR32:$src)), (PsMOVZX64rr32 GR32:$src)>;
1194 def : Pat<(i64 (anyext (loadi8 addr:$src))), (MOVZX64rm8 addr:$src)>;
1195 def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
1196 def : Pat<(i64 (anyext (loadi32 addr:$src))), (PsMOVZX64rm32 addr:$src)>;
1198 //===----------------------------------------------------------------------===//
1200 //===----------------------------------------------------------------------===//
1202 // (shl x, 1) ==> (add x, x)
1203 def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
1205 // (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
1206 def : Pat<(or (srl GR64:$src1, CL:$amt),
1207 (shl GR64:$src2, (sub 64, CL:$amt))),
1208 (SHRD64rrCL GR64:$src1, GR64:$src2)>;
1210 def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
1211 (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
1212 (SHRD64mrCL addr:$dst, GR64:$src2)>;
1214 // (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
1215 def : Pat<(or (shl GR64:$src1, CL:$amt),
1216 (srl GR64:$src2, (sub 64, CL:$amt))),
1217 (SHLD64rrCL GR64:$src1, GR64:$src2)>;
1219 def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
1220 (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
1221 (SHLD64mrCL addr:$dst, GR64:$src2)>;
1223 // X86 specific add which produces a flag.
1224 def : Pat<(addc GR64:$src1, GR64:$src2),
1225 (ADD64rr GR64:$src1, GR64:$src2)>;
1226 def : Pat<(addc GR64:$src1, (load addr:$src2)),
1227 (ADD64rm GR64:$src1, addr:$src2)>;
1228 def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
1229 (ADD64ri32 GR64:$src1, imm:$src2)>;
1230 def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
1231 (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
1233 def : Pat<(subc GR64:$src1, GR64:$src2),
1234 (SUB64rr GR64:$src1, GR64:$src2)>;
1235 def : Pat<(subc GR64:$src1, (load addr:$src2)),
1236 (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
1238 (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
1239 def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
1240 (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
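
// Illustrative note (added): addc/adde (and subc/sube) pair up for
// multi-word arithmetic; an i128 add legalizes to an addc of the low halves
// (ADD64rr sets CF) followed by an adde of the high halves (ADC64rr
// consumes CF).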
1243 //===----------------------------------------------------------------------===//
1244 // X86-64 SSE Instructions
1245 //===----------------------------------------------------------------------===//
1247 // Move instructions...
1249 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
1250 "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
1253 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
1254 "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                         (iPTR 0)))]>;
1258 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1259 "mov{d|q}\t{$src, $dst|$dst, $src}",
1260 [(set FR64:$dst, (bitconvert GR64:$src))]>;
1261 def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
1262 "mov{d|q}\t{$src, $dst|$dst, $src}",
1263 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
1265 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
1266 "mov{d|q}\t{$src, $dst|$dst, $src}",
1267 [(set GR64:$dst, (bitconvert FR64:$src))]>;
1268 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
1269 "mov{d|q}\t{$src, $dst|$dst, $src}",
1270 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;