1 //====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86-64 instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // Operand Definitions.
// Immediate operands that occupy an i64 in the DAG but are emitted in a
// narrower encoding field.
// 64-bits but only 32 bits are significant.
def i64i32imm  : Operand<i64>;
// 64-bits but only 8 bits are significant.
def i64i8imm   : Operand<i64>;
25 def lea64mem : Operand<i64> {
26 let PrintMethod = "printi64mem";
27 let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
30 def lea64_32mem : Operand<i32> {
31 let PrintMethod = "printlea64_32mem";
32 let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
35 //===----------------------------------------------------------------------===//
36 // Complex Pattern Definitions.
38 def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
39 [add, mul, shl, or, frameindex, X86Wrapper],
42 //===----------------------------------------------------------------------===//
// Immediate-classification predicates used to select the compact
// sign/zero-extended immediate encodings of 64-bit instructions.
// NOTE(review): the closing "}]>;" line of each PatLeaf appears to have been
// lost in extraction; only comments were changed here.
def i64immSExt32  : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int32_t)N->getValue();

def i64immZExt32  : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // zero extended field.
  return (uint64_t)N->getValue() == (uint32_t)N->getValue();

def i64immSExt8  : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in a 8-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int8_t)N->getValue();

def i64immFFFFFFFF  : PatLeaf<(i64 imm), [{
  // i64immFFFFFFFF - True if this is a specific constant we can't write in
  // a smaller encoding (continuation of this comment lost in extraction).
  return N->getValue() == 0x00000000FFFFFFFFULL;
// Pattern fragments for loads whose result is extended to i64, keyed by the
// width of the memory operand.  Sign-extending loads:
def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

// Zero-extending loads (i1 through i32):
def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

// Any-extending loads (upper bits unspecified):
def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
85 //===----------------------------------------------------------------------===//
86 // Instruction list...
89 let isImplicitDef = 1 in
90 def IMPLICIT_DEF_GR64 : I<0, Pseudo, (outs GR64:$dst), (ins),
92 [(set GR64:$dst, (undef))]>;
94 //===----------------------------------------------------------------------===//
95 // Call Instructions...
98 // All calls clobber the non-callee saved registers...
99 let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
100 FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
101 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
102 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
103 XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS] in {
104 def CALL64pcrel32 : I<0xE8, RawFrm, (outs), (ins i64imm:$dst, variable_ops),
105 "call\t${dst:call}", []>;
106 def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
107 "call\t{*}$dst", [(X86call GR64:$dst)]>;
108 def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
109 "call\t{*}$dst", []>;
114 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
115 def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset),
116 "#TC_RETURN $dst $offset",
119 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
120 def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset),
121 "#TC_RETURN $dst $offset",
125 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
126 def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst # TAILCALL",
130 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
131 def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
132 [(brind GR64:$dst)]>;
133 def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
134 [(brind (loadi64 addr:$dst))]>;
137 //===----------------------------------------------------------------------===//
138 // Miscellaneous Instructions...
140 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
141 def LEAVE64 : I<0xC9, RawFrm,
142 (outs), (ins), "leave", []>;
143 let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
145 def POP64r : I<0x58, AddRegFrm,
146 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
148 def PUSH64r : I<0x50, AddRegFrm,
149 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
152 let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
153 def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W;
154 let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
155 def PUSHFQ : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;
157 def LEA64_32r : I<0x8D, MRMSrcMem,
158 (outs GR32:$dst), (ins lea64_32mem:$src),
159 "lea{l}\t{$src|$dst}, {$dst|$src}",
160 [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
162 def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
163 "lea{q}\t{$src|$dst}, {$dst|$src}",
164 [(set GR64:$dst, lea64addr:$src)]>;
166 let isTwoAddress = 1 in
167 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
169 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
171 // Bit scan instructions.
172 let Defs = [EFLAGS] in {
173 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
174 "bsf{q}\t{$src, $dst|$dst, $src}",
175 [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
176 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
177 "bsf{q}\t{$src, $dst|$dst, $src}",
178 [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
179 (implicit EFLAGS)]>, TB;
181 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
182 "bsr{q}\t{$src, $dst|$dst, $src}",
183 [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
184 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
185 "bsr{q}\t{$src, $dst|$dst, $src}",
186 [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
187 (implicit EFLAGS)]>, TB;
191 let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
192 def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
193 [(X86rep_movs i64)]>, REP;
194 let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
195 def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
196 [(X86rep_stos i64)]>, REP;
198 //===----------------------------------------------------------------------===//
199 // Move Instructions...
202 let neverHasSideEffects = 1 in
203 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
204 "mov{q}\t{$src, $dst|$dst, $src}", []>;
206 let isReMaterializable = 1 in {
207 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
208 "movabs{q}\t{$src, $dst|$dst, $src}",
209 [(set GR64:$dst, imm:$src)]>;
210 def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
211 "mov{q}\t{$src, $dst|$dst, $src}",
212 [(set GR64:$dst, i64immSExt32:$src)]>;
215 let isSimpleLoad = 1 in
216 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
217 "mov{q}\t{$src, $dst|$dst, $src}",
218 [(set GR64:$dst, (load addr:$src))]>;
220 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
221 "mov{q}\t{$src, $dst|$dst, $src}",
222 [(store GR64:$src, addr:$dst)]>;
223 def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
224 "mov{q}\t{$src, $dst|$dst, $src}",
225 [(store i64immSExt32:$src, addr:$dst)]>;
227 // Sign/Zero extenders
// Sign-extending moves into a 64-bit register.  The "rr" forms extend a
// smaller register; the "rm" forms fold a load of the narrower memory type
// via the sextloadi64i* fragments defined above.
def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
// The i32->i64 forms use opcode 0x63 with no TB (0x0F) escape, unlike the
// byte/word forms above.
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;

// Zero-extending moves.  NOTE(review): no 32->64 zero-extend is defined
// here — presumably because a plain 32-bit mov already clears the upper
// bits in 64-bit mode; confirm against the patterns that handle (zext i32).
def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movz{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movz{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
def MOVZX64rr16: RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movz{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movz{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
261 let neverHasSideEffects = 1 in {
262 let Defs = [RAX], Uses = [EAX] in
263 def CDQE : RI<0x98, RawFrm, (outs), (ins),
264 "{cltq|cdqe}", []>; // RAX = signext(EAX)
266 let Defs = [RAX,RDX], Uses = [RAX] in
267 def CQO : RI<0x99, RawFrm, (outs), (ins),
268 "{cqto|cqo}", []>; // RDX:RAX = signext(RAX)
271 //===----------------------------------------------------------------------===//
272 // Arithmetic Instructions...
275 let Defs = [EFLAGS] in {
276 let isTwoAddress = 1 in {
277 let isConvertibleToThreeAddress = 1 in {
278 let isCommutable = 1 in
279 def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
280 "add{q}\t{$src2, $dst|$dst, $src2}",
281 [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;
283 def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
284 "add{q}\t{$src2, $dst|$dst, $src2}",
285 [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
286 def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
287 "add{q}\t{$src2, $dst|$dst, $src2}",
288 [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
289 } // isConvertibleToThreeAddress
291 def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
292 "add{q}\t{$src2, $dst|$dst, $src2}",
293 [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
296 def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
297 "add{q}\t{$src2, $dst|$dst, $src2}",
298 [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
299 def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
300 "add{q}\t{$src2, $dst|$dst, $src2}",
301 [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
302 def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
303 "add{q}\t{$src2, $dst|$dst, $src2}",
304 [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
306 let Uses = [EFLAGS] in {
307 let isTwoAddress = 1 in {
308 let isCommutable = 1 in
309 def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
310 "adc{q}\t{$src2, $dst|$dst, $src2}",
311 [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;
313 def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
314 "adc{q}\t{$src2, $dst|$dst, $src2}",
315 [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;
317 def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
318 "adc{q}\t{$src2, $dst|$dst, $src2}",
319 [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
320 def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
321 "adc{q}\t{$src2, $dst|$dst, $src2}",
322 [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
325 def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
326 "adc{q}\t{$src2, $dst|$dst, $src2}",
327 [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
// ADC64mi32: add-with-carry of a sign-extended 32-bit immediate into a
// 64-bit memory operand (0x81 /2, imm32).
// Bug fix: the store pattern matched i64immSExt8:$src2, which disagrees
// with the i64i32imm operand and the RIi32 (imm32) encoding class, and
// would also make this pattern ambiguous with ADC64mi8.  It must match a
// 32-bit sign-extended immediate, as ADD64mi32/SUB64mi32/SBB64mi32 do.
def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                 [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
331 def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
332 "adc{q}\t{$src2, $dst|$dst, $src2}",
333 [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
336 let isTwoAddress = 1 in {
337 def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
338 "sub{q}\t{$src2, $dst|$dst, $src2}",
339 [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
341 def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
342 "sub{q}\t{$src2, $dst|$dst, $src2}",
343 [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;
345 def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
346 "sub{q}\t{$src2, $dst|$dst, $src2}",
347 [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
348 def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
349 "sub{q}\t{$src2, $dst|$dst, $src2}",
350 [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
353 def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
354 "sub{q}\t{$src2, $dst|$dst, $src2}",
355 [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
356 def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
357 "sub{q}\t{$src2, $dst|$dst, $src2}",
358 [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
359 def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
360 "sub{q}\t{$src2, $dst|$dst, $src2}",
361 [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
363 let Uses = [EFLAGS] in {
364 let isTwoAddress = 1 in {
365 def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
366 "sbb{q}\t{$src2, $dst|$dst, $src2}",
367 [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;
369 def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
370 "sbb{q}\t{$src2, $dst|$dst, $src2}",
371 [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
373 def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
374 "sbb{q}\t{$src2, $dst|$dst, $src2}",
375 [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
376 def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
377 "sbb{q}\t{$src2, $dst|$dst, $src2}",
378 [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
381 def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
382 "sbb{q}\t{$src2, $dst|$dst, $src2}",
383 [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
384 def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
385 "sbb{q}\t{$src2, $dst|$dst, $src2}",
386 [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
387 def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
388 "sbb{q}\t{$src2, $dst|$dst, $src2}",
389 [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
393 // Unsigned multiplication
394 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
395 def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
396 "mul{q}\t$src", []>; // RAX,RDX = RAX*GR64
398 def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
399 "mul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
401 // Signed multiplication
402 def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
403 "imul{q}\t$src", []>; // RAX,RDX = RAX*GR64
405 def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
406 "imul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
409 let Defs = [EFLAGS] in {
410 let isTwoAddress = 1 in {
411 let isCommutable = 1 in
412 def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
413 "imul{q}\t{$src2, $dst|$dst, $src2}",
414 [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;
416 def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
417 "imul{q}\t{$src2, $dst|$dst, $src2}",
418 [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
// Surprisingly enough, these are not two-address instructions!
422 def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
423 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
424 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
425 [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
426 def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
427 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
428 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
429 [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
430 def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
431 (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
432 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
433 [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
434 def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
435 (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
436 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
437 [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
440 // Unsigned division / remainder
441 let neverHasSideEffects = 1 in {
442 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
443 def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX
445 // Signed division / remainder
446 def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX
447 "idiv{q}\t$src", []>;
449 def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
451 def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
452 "idiv{q}\t$src", []>;
457 // Unary instructions
458 let Defs = [EFLAGS], CodeSize = 2 in {
459 let isTwoAddress = 1 in
460 def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
461 [(set GR64:$dst, (ineg GR64:$src))]>;
462 def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
463 [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;
465 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
466 def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
467 [(set GR64:$dst, (add GR64:$src, 1))]>;
468 def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
469 [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;
471 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
472 def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
473 [(set GR64:$dst, (add GR64:$src, -1))]>;
474 def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
475 [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;
477 // In 64-bit mode, single byte INC and DEC cannot be encoded.
478 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
479 // Can transform into LEA.
480 def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
481 [(set GR16:$dst, (add GR16:$src, 1))]>,
482 OpSize, Requires<[In64BitMode]>;
483 def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
484 [(set GR32:$dst, (add GR32:$src, 1))]>,
485 Requires<[In64BitMode]>;
486 def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
487 [(set GR16:$dst, (add GR16:$src, -1))]>,
488 OpSize, Requires<[In64BitMode]>;
489 def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
490 [(set GR32:$dst, (add GR32:$src, -1))]>,
491 Requires<[In64BitMode]>;
492 } // isConvertibleToThreeAddress
494 // These are duplicates of their 32-bit counterparts. Only needed so X86 knows
495 // how to unfold them.
496 let isTwoAddress = 0, CodeSize = 2 in {
497 def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
498 [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
499 OpSize, Requires<[In64BitMode]>;
500 def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
501 [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
502 Requires<[In64BitMode]>;
503 def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
504 [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
505 OpSize, Requires<[In64BitMode]>;
506 def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
507 [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
508 Requires<[In64BitMode]>;
510 } // Defs = [EFLAGS], CodeSize
513 let Defs = [EFLAGS] in {
514 // Shift instructions
515 let isTwoAddress = 1 in {
517 def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
518 "shl{q}\t{%cl, $dst|$dst, %CL}",
519 [(set GR64:$dst, (shl GR64:$src, CL))]>;
520 let isConvertibleToThreeAddress = 1 in // Can transform into LEA.
521 def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
522 "shl{q}\t{$src2, $dst|$dst, $src2}",
523 [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
524 // NOTE: We don't use shifts of a register by one, because 'add reg,reg' is
529 def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
530 "shl{q}\t{%cl, $dst|$dst, %CL}",
531 [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
532 def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
533 "shl{q}\t{$src, $dst|$dst, $src}",
534 [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
535 def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
537 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
539 let isTwoAddress = 1 in {
541 def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
542 "shr{q}\t{%cl, $dst|$dst, %CL}",
543 [(set GR64:$dst, (srl GR64:$src, CL))]>;
544 def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
545 "shr{q}\t{$src2, $dst|$dst, $src2}",
546 [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
547 def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
549 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
553 def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
554 "shr{q}\t{%cl, $dst|$dst, %CL}",
555 [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
556 def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
557 "shr{q}\t{$src, $dst|$dst, $src}",
558 [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
559 def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
561 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
563 let isTwoAddress = 1 in {
565 def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
566 "sar{q}\t{%cl, $dst|$dst, %CL}",
567 [(set GR64:$dst, (sra GR64:$src, CL))]>;
568 def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
569 "sar{q}\t{$src2, $dst|$dst, $src2}",
570 [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
571 def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
573 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
577 def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
578 "sar{q}\t{%cl, $dst|$dst, %CL}",
579 [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
580 def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
581 "sar{q}\t{$src, $dst|$dst, $src}",
582 [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
583 def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
585 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
587 // Rotate instructions
588 let isTwoAddress = 1 in {
590 def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
591 "rol{q}\t{%cl, $dst|$dst, %CL}",
592 [(set GR64:$dst, (rotl GR64:$src, CL))]>;
593 def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
594 "rol{q}\t{$src2, $dst|$dst, $src2}",
595 [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
596 def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
598 [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
// ROL64mCL: rotate a 64-bit memory operand left by %cl.
// Bug fix: this def used the plain "I" instruction class, omitting the
// REX.W prefix that every sibling 64-bit memory shift/rotate (SHL64mCL,
// SHR64mCL, SAR64mCL, ROR64mCL) gets from "RI"; without REX.W it would
// encode as the 32-bit rol.
def ROL64mCL :  RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                   "rol{q}\t{%cl, $dst|$dst, %CL}",
                   [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
605 def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
606 "rol{q}\t{$src, $dst|$dst, $src}",
607 [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
608 def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
610 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
612 let isTwoAddress = 1 in {
614 def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
615 "ror{q}\t{%cl, $dst|$dst, %CL}",
616 [(set GR64:$dst, (rotr GR64:$src, CL))]>;
617 def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
618 "ror{q}\t{$src2, $dst|$dst, $src2}",
619 [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
620 def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
622 [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
626 def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
627 "ror{q}\t{%cl, $dst|$dst, %CL}",
628 [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
629 def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
630 "ror{q}\t{$src, $dst|$dst, $src}",
631 [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
632 def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
634 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
636 // Double shift instructions (generalizations of rotate)
637 let isTwoAddress = 1 in {
639 def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
640 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
641 [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB;
642 def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
643 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
644 [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB;
647 let isCommutable = 1 in { // FIXME: Update X86InstrInfo::commuteInstruction
648 def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
649 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
650 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
651 [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
654 def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
655 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
656 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
657 [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
664 def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
665 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
666 [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
668 def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
669 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
670 [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
673 def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
674 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
675 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
676 [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
677 (i8 imm:$src3)), addr:$dst)]>,
679 def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
680 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
681 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
682 [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
683 (i8 imm:$src3)), addr:$dst)]>,
687 //===----------------------------------------------------------------------===//
688 // Logical Instructions...
691 let isTwoAddress = 1 in
692 def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
693 [(set GR64:$dst, (not GR64:$src))]>;
694 def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
695 [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
697 let Defs = [EFLAGS] in {
698 let isTwoAddress = 1 in {
699 let isCommutable = 1 in
700 def AND64rr : RI<0x21, MRMDestReg,
701 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
702 "and{q}\t{$src2, $dst|$dst, $src2}",
703 [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
704 def AND64rm : RI<0x23, MRMSrcMem,
705 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
706 "and{q}\t{$src2, $dst|$dst, $src2}",
707 [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
708 def AND64ri32 : RIi32<0x81, MRM4r,
709 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
710 "and{q}\t{$src2, $dst|$dst, $src2}",
711 [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
712 def AND64ri8 : RIi8<0x83, MRM4r,
713 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
714 "and{q}\t{$src2, $dst|$dst, $src2}",
715 [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
718 def AND64mr : RI<0x21, MRMDestMem,
719 (outs), (ins i64mem:$dst, GR64:$src),
720 "and{q}\t{$src, $dst|$dst, $src}",
721 [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
722 def AND64mi32 : RIi32<0x81, MRM4m,
723 (outs), (ins i64mem:$dst, i64i32imm:$src),
724 "and{q}\t{$src, $dst|$dst, $src}",
725 [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
726 def AND64mi8 : RIi8<0x83, MRM4m,
727 (outs), (ins i64mem:$dst, i64i8imm :$src),
728 "and{q}\t{$src, $dst|$dst, $src}",
729 [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
731 let isTwoAddress = 1 in {
732 let isCommutable = 1 in
733 def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
734 "or{q}\t{$src2, $dst|$dst, $src2}",
735 [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
736 def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
737 "or{q}\t{$src2, $dst|$dst, $src2}",
738 [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
739 def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
740 "or{q}\t{$src2, $dst|$dst, $src2}",
741 [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
742 def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
743 "or{q}\t{$src2, $dst|$dst, $src2}",
744 [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
747 def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
748 "or{q}\t{$src, $dst|$dst, $src}",
749 [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
750 def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
751 "or{q}\t{$src, $dst|$dst, $src}",
752 [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
753 def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
754 "or{q}\t{$src, $dst|$dst, $src}",
755 [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
// 64-bit bitwise XOR, structured exactly like the OR group above:
// two-address register/immediate forms, then memory-destination forms.
// NOTE(review): the closing '}' of this isTwoAddress block appears to be
// elided from this excerpt — confirm against the full source.
757 let isTwoAddress = 1 in {
758 let isCommutable = 1 in
759 def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
760 "xor{q}\t{$src2, $dst|$dst, $src2}",
761 [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
762 def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
763 "xor{q}\t{$src2, $dst|$dst, $src2}",
764 [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
// /6 (MRM6r / MRM6m) selects XOR in the 0x81/0x83 immediate group.
765 def XOR64ri32 : RIi32<0x81, MRM6r,
766 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
767 "xor{q}\t{$src2, $dst|$dst, $src2}",
768 [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
769 def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
770 "xor{q}\t{$src2, $dst|$dst, $src2}",
771 [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
// Memory-destination (read-modify-write) forms.
774 def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
775 "xor{q}\t{$src, $dst|$dst, $src}",
776 [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
777 def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
778 "xor{q}\t{$src, $dst|$dst, $src}",
779 [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
780 def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
781 "xor{q}\t{$src, $dst|$dst, $src}",
782 [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
785 //===----------------------------------------------------------------------===//
786 // Comparison Instructions...
789 // Integer comparison
// 64-bit compare/test instructions. All of them only produce EFLAGS (no
// register result): TEST computes an AND, CMP computes a subtraction, and the
// selection patterns match X86cmp.
// NOTE(review): every pattern below ends mid-list (e.g. "..., 0),"); the
// closing "(implicit EFLAGS)]>;"-style lines appear to be elided from this
// excerpt — confirm against the full source.
790 let Defs = [EFLAGS] in {
791 let isCommutable = 1 in
792 def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
793 "test{q}\t{$src2, $src1|$src1, $src2}",
794 [(X86cmp (and GR64:$src1, GR64:$src2), 0),
796 def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
797 "test{q}\t{$src2, $src1|$src1, $src2}",
798 [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
800 def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
801 (ins GR64:$src1, i64i32imm:$src2),
802 "test{q}\t{$src2, $src1|$src1, $src2}",
803 [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
805 def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
806 (ins i64mem:$src1, i64i32imm:$src2),
807 "test{q}\t{$src2, $src1|$src1, $src2}",
808 [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
// CMP: register/register, memory/register (both directions), and
// sign-extended 32-bit / 8-bit immediate forms (/7 in the 0x81/0x83 group).
811 def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
812 "cmp{q}\t{$src2, $src1|$src1, $src2}",
813 [(X86cmp GR64:$src1, GR64:$src2),
815 def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
816 "cmp{q}\t{$src2, $src1|$src1, $src2}",
817 [(X86cmp (loadi64 addr:$src1), GR64:$src2),
819 def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
820 "cmp{q}\t{$src2, $src1|$src1, $src2}",
821 [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
823 def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
824 "cmp{q}\t{$src2, $src1|$src1, $src2}",
825 [(X86cmp GR64:$src1, i64immSExt32:$src2),
827 def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
828 (ins i64mem:$src1, i64i32imm:$src2),
829 "cmp{q}\t{$src2, $src1|$src1, $src2}",
830 [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
832 def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
833 "cmp{q}\t{$src2, $src1|$src1, $src2}",
834 [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
836 def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
837 "cmp{q}\t{$src2, $src1|$src1, $src2}",
838 [(X86cmp GR64:$src1, i64immSExt8:$src2),
// 64-bit conditional moves, register-register forms. All read EFLAGS and are
// two-address ($src1 ties to $dst). The opcode 0x4x encodes the condition
// (0x42=B, 0x43=AE, ... 0x4F=G) and each pattern selects X86cmov with the
// matching X86_COND_* code. They are marked commutable: the condition can be
// inverted to swap the operands.
843 let Uses = [EFLAGS], isTwoAddress = 1 in {
844 let isCommutable = 1 in {
845 def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
846 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
847 "cmovb\t{$src2, $dst|$dst, $src2}",
848 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
849 X86_COND_B, EFLAGS))]>, TB;
850 def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64
851 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
852 "cmovae\t{$src2, $dst|$dst, $src2}",
853 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
854 X86_COND_AE, EFLAGS))]>, TB;
855 def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64
856 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
857 "cmove\t{$src2, $dst|$dst, $src2}",
858 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
859 X86_COND_E, EFLAGS))]>, TB;
860 def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64
861 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
862 "cmovne\t{$src2, $dst|$dst, $src2}",
863 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
864 X86_COND_NE, EFLAGS))]>, TB;
865 def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64
866 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
867 "cmovbe\t{$src2, $dst|$dst, $src2}",
868 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
869 X86_COND_BE, EFLAGS))]>, TB;
870 def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64
871 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
872 "cmova\t{$src2, $dst|$dst, $src2}",
873 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
874 X86_COND_A, EFLAGS))]>, TB;
875 def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64
876 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
877 "cmovl\t{$src2, $dst|$dst, $src2}",
878 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
879 X86_COND_L, EFLAGS))]>, TB;
880 def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64
881 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
882 "cmovge\t{$src2, $dst|$dst, $src2}",
883 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
884 X86_COND_GE, EFLAGS))]>, TB;
885 def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64
886 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
887 "cmovle\t{$src2, $dst|$dst, $src2}",
888 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
889 X86_COND_LE, EFLAGS))]>, TB;
890 def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64
891 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
892 "cmovg\t{$src2, $dst|$dst, $src2}",
893 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
894 X86_COND_G, EFLAGS))]>, TB;
895 def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64
896 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
897 "cmovs\t{$src2, $dst|$dst, $src2}",
898 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
899 X86_COND_S, EFLAGS))]>, TB;
900 def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64
901 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
902 "cmovns\t{$src2, $dst|$dst, $src2}",
903 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
904 X86_COND_NS, EFLAGS))]>, TB;
905 def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64
906 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
907 "cmovp\t{$src2, $dst|$dst, $src2}",
908 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
909 X86_COND_P, EFLAGS))]>, TB;
910 def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64
911 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
912 "cmovnp\t{$src2, $dst|$dst, $src2}",
913 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
914 X86_COND_NP, EFLAGS))]>, TB;
915 } // isCommutable = 1
// 64-bit conditional moves, register-memory forms: same condition encodings
// as the rr group above, but the "true" operand is loaded from memory
// (loadi64). Not commutable — the load side is fixed.
917 def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
918 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
919 "cmovb\t{$src2, $dst|$dst, $src2}",
920 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
921 X86_COND_B, EFLAGS))]>, TB;
922 def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
923 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
924 "cmovae\t{$src2, $dst|$dst, $src2}",
925 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
926 X86_COND_AE, EFLAGS))]>, TB;
927 def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
928 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
929 "cmove\t{$src2, $dst|$dst, $src2}",
930 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
931 X86_COND_E, EFLAGS))]>, TB;
932 def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
933 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
934 "cmovne\t{$src2, $dst|$dst, $src2}",
935 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
936 X86_COND_NE, EFLAGS))]>, TB;
937 def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
938 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
939 "cmovbe\t{$src2, $dst|$dst, $src2}",
940 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
941 X86_COND_BE, EFLAGS))]>, TB;
942 def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
943 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
944 "cmova\t{$src2, $dst|$dst, $src2}",
945 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
946 X86_COND_A, EFLAGS))]>, TB;
947 def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64]
948 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
949 "cmovl\t{$src2, $dst|$dst, $src2}",
950 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
951 X86_COND_L, EFLAGS))]>, TB;
952 def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
953 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
954 "cmovge\t{$src2, $dst|$dst, $src2}",
955 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
956 X86_COND_GE, EFLAGS))]>, TB;
957 def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
958 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
959 "cmovle\t{$src2, $dst|$dst, $src2}",
960 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
961 X86_COND_LE, EFLAGS))]>, TB;
962 def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
963 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
964 "cmovg\t{$src2, $dst|$dst, $src2}",
965 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
966 X86_COND_G, EFLAGS))]>, TB;
967 def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
968 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
969 "cmovs\t{$src2, $dst|$dst, $src2}",
970 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
971 X86_COND_S, EFLAGS))]>, TB;
972 def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
973 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
974 "cmovns\t{$src2, $dst|$dst, $src2}",
975 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
976 X86_COND_NS, EFLAGS))]>, TB;
977 def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
978 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
979 "cmovp\t{$src2, $dst|$dst, $src2}",
980 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
981 X86_COND_P, EFLAGS))]>, TB;
982 def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64]
983 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
984 "cmovnp\t{$src2, $dst|$dst, $src2}",
985 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
986 X86_COND_NP, EFLAGS))]>, TB;
989 //===----------------------------------------------------------------------===//
990 // Conversion Instructions...
// 64-bit scalar SSE conversions. The Int_* variants operate on VR128 and
// select the corresponding intrinsics; the plain variants operate on FR32/FR64
// scalars and select generic fp_to_sint / sint_to_fp nodes. CVTT* forms
// truncate toward zero.
// NOTE(review): several "[(set GR64:$dst," / "[(set VR128:$dst," opening lines
// of the patterns below appear to be elided from this excerpt — confirm
// against the full source.
994 def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
995 "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
997 (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
998 def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
999 "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1000 [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
1001 (load addr:$src)))]>;
1002 def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
1003 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1004 [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
1005 def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
1006 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1007 [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
1008 def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1009 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1011 (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
1012 def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
1013 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1015 (int_x86_sse2_cvttsd2si64
1016 (load addr:$src)))]>;
1018 // Signed i64 -> f64
1019 def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1020 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1021 [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
1022 def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
1023 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1024 [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
// Intrinsic forms are two-address: the low element of $src1 is replaced.
1026 let isTwoAddress = 1 in {
1027 def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
1028 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1029 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
1031 (int_x86_sse2_cvtsi642sd VR128:$src1,
1033 def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
1034 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1035 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
1037 (int_x86_sse2_cvtsi642sd VR128:$src1,
1038 (loadi64 addr:$src2)))]>;
1041 // Signed i64 -> f32
1042 def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
1043 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1044 [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
1045 def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
1046 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1047 [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
1049 let isTwoAddress = 1 in {
1050 def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
1051 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1052 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
1054 (int_x86_sse_cvtsi642ss VR128:$src1,
1056 def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
1057 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1058 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
1060 (int_x86_sse_cvtsi642ss VR128:$src1,
1061 (loadi64 addr:$src2)))]>;
1064 // f32 -> signed i64
1065 def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1066 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
1068 (int_x86_sse_cvtss2si64 VR128:$src))]>;
1069 def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1070 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
1071 [(set GR64:$dst, (int_x86_sse_cvtss2si64
1072 (load addr:$src)))]>;
1073 def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
1074 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1075 [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
1076 def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1077 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1078 [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
1079 def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1080 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1082 (int_x86_sse_cvttss2si64 VR128:$src))]>;
1083 def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1084 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1086 (int_x86_sse_cvttss2si64 (load addr:$src)))]>;
1088 //===----------------------------------------------------------------------===//
1089 // Alias Instructions
1090 //===----------------------------------------------------------------------===//
// Pseudo/alias instructions. The Ps* defs emit 32-bit "movl" encodings, which
// implicitly zero the upper 32 bits of the 64-bit destination — a free zext.
1093 // TODO: Remove this after proper i32 -> i64 zext support.
1094 def PsMOVZX64rr32: I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
1095 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1096 [(set GR64:$dst, (zext GR32:$src))]>;
1097 def PsMOVZX64rm32: I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
1098 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1099 [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
1101 /// PsAND64rrFFFFFFFF - r = r & (2^32-1)
1102 def PsAND64rrFFFFFFFF
1103 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
1104 "mov{l}\t{${src:subreg32}, ${dst:subreg32}|${dst:subreg32}, ${src:subreg32}}",
1105 [(set GR64:$dst, (and GR64:$src, i64immFFFFFFFF))]>;
1108 // Alias instructions that map movr0 to xor. Use xorl instead of xorq; it's
1109 // equivalent due to implicit zero-extending, and it sometimes has a smaller
1111 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
1112 // FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
1113 // when we have a better way to specify isel priority.
1114 let Defs = [EFLAGS], AddedComplexity = 1, isReMaterializable = 1 in
1115 def MOV64r0 : RI<0x31, MRMInitReg, (outs GR64:$dst), (ins),
1116 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
1117 [(set GR64:$dst, 0)]>;
1119 // Materialize i64 constant where top 32-bits are zero.
1120 let AddedComplexity = 1, isReMaterializable = 1 in
1121 def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
1122 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1123 [(set GR64:$dst, i64immZExt32:$src)]>;
1126 //===----------------------------------------------------------------------===//
1127 // Atomic Instructions
1128 //===----------------------------------------------------------------------===//
1130 // FIXME: Please check these formats; "Pseudo" is certainly wrong, but the
1131 // opcodes and prefixes should be correct.
// 64-bit atomic instructions. CMPXCHG implicitly uses/defines RAX as the
// comparand; the L* variants carry the LOCK prefix and have selection
// patterns, while the unprefixed variants are encoding-only ([]).
1133 let Defs = [RAX, EFLAGS], Uses = [RAX] in {
1134 def CMPXCHG64 : RI<0xB1, Pseudo, (outs), (ins i64mem:$ptr, GR64:$swap),
1135 "cmpxchgq $swap,$ptr", []>, TB;
1136 def LCMPXCHG64 : RI<0xB1, Pseudo, (outs), (ins i64mem:$ptr, GR64:$swap),
1137 "lock cmpxchgq $swap,$ptr",
1138 [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
// XADD/XCHG return the previous memory value in $dst, tied to $val.
1141 let Constraints = "$val = $dst", Defs = [EFLAGS] in {
1142 def LXADD64 : RI<0xC1, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
1143 "lock xadd $val, $ptr",
1144 [(set GR64:$dst, (atomic_las_64 addr:$ptr, GR64:$val))]>,
1146 def XADD64 : RI<0xC1, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
1147 "xadd $val, $ptr", []>, TB;
1148 def LXCHG64 : RI<0x87, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
1149 "lock xchg $val, $ptr",
1150 [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>, LOCK;
1151 def XCHG64 : RI<0x87, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
1152 "xchg $val, $ptr", []>;
1156 //===----------------------------------------------------------------------===//
1157 // Non-Instruction Patterns
1158 //===----------------------------------------------------------------------===//
1160 // ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
// Address-materialization and call selection patterns. X86Wrapper of a
// constant-pool / jump-table / global / external-symbol address becomes a
// 64-bit immediate move (non-small code model) or a 32-bit immediate store
// (small, static code model).
1161 def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
1162 (MOV64ri tconstpool :$dst)>, Requires<[NotSmallCode]>;
1163 def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
1164 (MOV64ri tjumptable :$dst)>, Requires<[NotSmallCode]>;
1165 def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
1166 (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
1167 def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
1168 (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
1170 def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
1171 (MOV64mi32 addr:$dst, tconstpool:$src)>,
1172 Requires<[SmallCode, IsStatic]>;
1173 def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
1174 (MOV64mi32 addr:$dst, tjumptable:$src)>,
1175 Requires<[SmallCode, IsStatic]>;
1176 def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
1177 (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
1178 Requires<[SmallCode, IsStatic]>;
1179 def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
1180 (MOV64mi32 addr:$dst, texternalsym:$src)>,
1181 Requires<[SmallCode, IsStatic]>;
1184 // Direct PC relative function call for small code model. 32-bit displacement
1185 // sign extended to 64-bit.
1186 def : Pat<(X86call (i64 tglobaladdr:$dst)),
1187 (CALL64pcrel32 tglobaladdr:$dst)>;
1188 def : Pat<(X86call (i64 texternalsym:$dst)),
1189 (CALL64pcrel32 texternalsym:$dst)>;
1191 def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
1192 (CALL64pcrel32 tglobaladdr:$dst)>;
1193 def : Pat<(X86tailcall (i64 texternalsym:$dst)),
1194 (CALL64pcrel32 texternalsym:$dst)>;
1196 def : Pat<(X86tailcall GR64:$dst),
1197 (CALL64r GR64:$dst)>;
// NOTE(review): the next three X86tailcall patterns are missing their output
// (result) lines in this excerpt, and the last two duplicate the source
// patterns at 1191/1193 — confirm against the full source.
1201 def : Pat<(X86tailcall GR32:$dst),
1203 def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
1205 def : Pat<(X86tailcall (i64 texternalsym:$dst)),
// Tail-call return (X86tcret) selection: a register callee uses TCRETURNri64;
// a direct global/external-symbol callee uses TCRETURNdi64.
1208 def : Pat<(X86tcret GR64:$dst, imm:$off),
1209 (TCRETURNri64 GR64:$dst, imm:$off)>;
// FIX: this pattern previously emitted texternalsym:$dst (copy/paste from the
// pattern below), rewriting the matched global address as an external symbol.
// Emit tglobaladdr so the operand kind is preserved.
1211 def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
1212 (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;
1214 def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
1215 (TCRETURNdi64 texternalsym:$dst, imm:$off)>;
// Miscellaneous selection patterns: flag tests, extending loads, anyext,
// shift-by-one, double-shifts via CL, and flag-producing adds.
1219 // TEST R,R is smaller than CMP R,0
1220 def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
1221 (TEST64rr GR64:$src1, GR64:$src1)>;
1223 // zextload bool -> zextload byte
1224 def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
// extload of a narrow value: any zero/garbage in the upper bits is acceptable,
// so use the zero-extending loads (and the movl-based pseudo for i32).
1227 def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
1228 def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>;
1229 def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
1230 def : Pat<(extloadi64i32 addr:$src), (PsMOVZX64rm32 addr:$src)>;
// anyext lowers to the corresponding zero-extending move.
1233 def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>;
1234 def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
1235 def : Pat<(i64 (anyext GR32:$src)), (PsMOVZX64rr32 GR32:$src)>;
1236 def : Pat<(i64 (anyext (loadi8 addr:$src))), (MOVZX64rm8 addr:$src)>;
1237 def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
1238 def : Pat<(i64 (anyext (loadi32 addr:$src))), (PsMOVZX64rm32 addr:$src)>;
1240 //===----------------------------------------------------------------------===//
1242 //===----------------------------------------------------------------------===//
1244 // (shl x, 1) ==> (add x, x)
1245 def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
1247 // (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
1248 def : Pat<(or (srl GR64:$src1, CL:$amt),
1249 (shl GR64:$src2, (sub 64, CL:$amt))),
1250 (SHRD64rrCL GR64:$src1, GR64:$src2)>;
1252 def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
1253 (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
1254 (SHRD64mrCL addr:$dst, GR64:$src2)>;
1256 // (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
1257 def : Pat<(or (shl GR64:$src1, CL:$amt),
1258 (srl GR64:$src2, (sub 64, CL:$amt))),
1259 (SHLD64rrCL GR64:$src1, GR64:$src2)>;
1261 def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
1262 (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
1263 (SHLD64mrCL addr:$dst, GR64:$src2)>;
1265 // X86 specific add which produces a flag.
1266 def : Pat<(addc GR64:$src1, GR64:$src2),
1267 (ADD64rr GR64:$src1, GR64:$src2)>;
1268 def : Pat<(addc GR64:$src1, (load addr:$src2)),
1269 (ADD64rm GR64:$src1, addr:$src2)>;
1270 def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
1271 (ADD64ri32 GR64:$src1, imm:$src2)>;
1272 def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
1273 (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
// Flag-producing subtract (subc) selection, parallel to the addc patterns
// above: register, memory, and sign-extended 32/8-bit immediate forms.
1275 def : Pat<(subc GR64:$src1, GR64:$src2),
1276 (SUB64rr GR64:$src1, GR64:$src2)>;
1277 def : Pat<(subc GR64:$src1, (load addr:$src2)),
1278 (SUB64rm GR64:$src1, addr:$src2)>;
// FIX: this pattern previously matched a bare 'imm' while emitting SUB64ri32,
// whose immediate field is sign-extended 32 bits — a 64-bit immediate that
// does not fit would be silently truncated. Restrict the match to
// i64immSExt32, mirroring the addc/ADD64ri32 pattern above.
1279 def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
1280 (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
1281 def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
1282 (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
1285 //===----------------------------------------------------------------------===//
1286 // X86-64 SSE Instructions
1287 //===----------------------------------------------------------------------===//
1289 // Move instructions...
// 64-bit GPR <-> SSE register moves (movd/movq encodings 0x6E/0x7E), covering
// GR64 <-> VR128 (as the low v2i64 element) and GR64 <-> FR64 bit-casts.
// NOTE(review): a few pattern lines (e.g. "[(set VR128:$dst," and the closing
// of MOVPQIto64rr) appear to be elided from this excerpt — confirm against the
// full source.
1291 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
1292 "mov{d|q}\t{$src, $dst|$dst, $src}",
1294 (v2i64 (scalar_to_vector GR64:$src)))]>;
1295 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
1296 "mov{d|q}\t{$src, $dst|$dst, $src}",
1297 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
1300 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1301 "mov{d|q}\t{$src, $dst|$dst, $src}",
1302 [(set FR64:$dst, (bitconvert GR64:$src))]>;
1303 def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
1304 "mov{d|q}\t{$src, $dst|$dst, $src}",
1305 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
1307 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
1308 "mov{d|q}\t{$src, $dst|$dst, $src}",
1309 [(set GR64:$dst, (bitconvert FR64:$src))]>;
1310 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
1311 "mov{d|q}\t{$src, $dst|$dst, $src}",
1312 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
1314 //===----------------------------------------------------------------------===//
1315 // X86-64 SSE4.1 Instructions
1316 //===----------------------------------------------------------------------===//
1318 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
1319 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
1320 def rr : SS4AI<opc, MRMSrcReg, (outs GR64:$dst),
1321 (ins VR128:$src1, i32i8imm:$src2),
1322 !strconcat(OpcodeStr,
1323 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1325 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
1326 def mr : SS4AI<opc, MRMDestMem, (outs),
1327 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
1328 !strconcat(OpcodeStr,
1329 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1330 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
1331 addr:$dst)]>, OpSize, REX_W;
1334 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
// SS41I_insert64 - SSE 4.1 insert a 64-bit value (register or memory source)
// into an element of a v2i64 vector, selected by an immediate.
// NOTE(review): some pattern-opening lines and the closing braces of this
// multiclass appear to be elided from this excerpt — confirm against the full
// source.
1336 let isTwoAddress = 1 in {
1337 multiclass SS41I_insert64<bits<8> opc, string OpcodeStr> {
1338 def rr : SS4AI<opc, MRMSrcReg, (outs VR128:$dst),
1339 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
1340 !strconcat(OpcodeStr,
1341 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
1343 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
1345 def rm : SS4AI<opc, MRMSrcMem, (outs VR128:$dst),
1346 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
1347 !strconcat(OpcodeStr,
1348 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
1350 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
1351 imm:$src3)))]>, OpSize, REX_W;
1355 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">;