//====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions
// and the instruction properties which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand Definitions.
//===----------------------------------------------------------------------===//

// 64-bits but only 32 bits are significant.
def i64i32imm  : Operand<i64>;
// 64-bits but only 8 bits are significant.
def i64i8imm   : Operand<i64>;
def lea64mem : Operand<i64> {
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
}

def lea64_32mem : Operand<i32> {
  let PrintMethod = "printlea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}
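// The four sub-operands follow the usual X86 memory-operand layout:
// base register, scale immediate, index register, and displacement.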
//===----------------------------------------------------------------------===//
// Complex Pattern Definitions.
//===----------------------------------------------------------------------===//

def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                               [add, mul, shl, or, frameindex, X86Wrapper],
                               []>;

//===----------------------------------------------------------------------===//
// Pattern Fragments.
//===----------------------------------------------------------------------===//
def i64immSExt32  : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int32_t)N->getValue();
}]>;

def i64immZExt32  : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // unsigned (zero extended) field.
  return (uint64_t)N->getValue() == (uint32_t)N->getValue();
}]>;

def i64immSExt8   : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int8_t)N->getValue();
}]>;

def i64immFFFFFFFF : PatLeaf<(i64 imm), [{
  // i64immFFFFFFFF - True if this is a specific constant we can't write in
  // tblgen files.
  return N->getValue() == 0x00000000FFFFFFFFULL;
}]>;
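// For example, -1 passes both i64immSExt8 and i64immSExt32, so it can use the
// short imm8 or imm32 encodings. 0x80000000 fails i64immSExt32 (a 32-bit
// field would sign-extend it to 0xFFFFFFFF80000000) but passes i64immZExt32,
// and 0x00000000FFFFFFFF is matched only by i64immZExt32 and i64immFFFFFFFF.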
def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
//===----------------------------------------------------------------------===//
// Instruction list...
//

//===----------------------------------------------------------------------===//
//  Call Instructions...
//

// All calls clobber the non-callee saved registers...
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS] in {
def CALL64pcrel32 : I<0xE8, RawFrm, (outs), (ins i64imm:$dst, variable_ops),
                      "call\t${dst:call}", []>;
def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                      "call\t{*}$dst", [(X86call GR64:$dst)]>;
def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                      "call\t{*}$dst", []>;
}
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset),
                     "#TC_RETURN $dst $offset",
                     []>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset),
                     "#TC_RETURN $dst $offset",
                     []>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst),
                   "jmp{q}\t{*}$dst  # TAILCALL", []>;

let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
               [(brind GR64:$dst)]>;
def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
               [(brind (loadi64 addr:$dst))]>;
}
//===----------------------------------------------------------------------===//
//  Miscellaneous Instructions...
//

let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64  : I<0xC9, RawFrm,
                 (outs), (ins), "leave", []>;

let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1 in {
let mayLoad = 1 in
def POP64r   : I<0x58, AddRegFrm,
                 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
let mayStore = 1 in
def PUSH64r  : I<0x50, AddRegFrm,
                 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
}

let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
def POPFQ    : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W;
let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
def PUSHFQ   : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;
def LEA64_32r : I<0x8D, MRMSrcMem,
                  (outs GR32:$dst), (ins lea64_32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

def LEA64r   : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
                  "lea{q}\t{$src|$dst}, {$dst|$src}",
                  [(set GR64:$dst, lea64addr:$src)]>;

let isTwoAddress = 1 in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;
// Bit scan instructions.
let Defs = [EFLAGS] in {
def BSF64rr  : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
def BSF64rm  : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
                   (implicit EFLAGS)]>, TB;

def BSR64rr  : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
def BSR64rm  : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
                   (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]
let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;
let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;
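// REP MOVSQ copies RCX quadwords from [RSI] to [RDI]; REP STOSQ stores RAX
// into RCX quadwords at [RDI]. Both leave RCX at zero and advance RSI/RDI,
// which is why those registers appear in the Defs/Uses lists above.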
//===----------------------------------------------------------------------===//
//  Move Instructions...
//

let neverHasSideEffects = 1 in
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

let isReMaterializable = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                    "movabs{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}
let isSimpleLoad = 1 in
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;
// Sign/Zero extenders

def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
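// Note that 32->64-bit sign extension uses opcode 0x63 (MOVSXD) rather than a
// 0F BE/BF form, and that the MOVZX group below stops at 16 bits: in 64-bit
// mode a plain 32-bit mov already zero-extends into bits 63:32 of the
// destination, so an ordinary 32-bit move serves as the 32->64 zero extension.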
def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movz{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movz{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
def MOVZX64rr16: RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movz{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movz{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
let neverHasSideEffects = 1 in {
let Defs = [RAX], Uses = [EAX] in
def CDQE : RI<0x98, RawFrm, (outs), (ins),
              "{cltq|cdqe}", []>;     // RAX = signext(EAX)

let Defs = [RAX,RDX], Uses = [RAX] in
def CQO  : RI<0x99, RawFrm, (outs), (ins),
              "{cqto|cqo}", []>;      // RDX:RAX = signext(RAX)
}
//===----------------------------------------------------------------------===//
//  Arithmetic Instructions...
//

let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
def ADD64rr  : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;

def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
} // isConvertibleToThreeAddress
def ADD64rm  : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
} // isTwoAddress

def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                 "add{q}\t{$src2, $dst|$dst, $src2}",
                 [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
def ADC64rr  : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

def ADC64rm  : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def ADC64mr  : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
let isTwoAddress = 1 in {
def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;

def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;

def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
def SBB64rr  : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def SBB64mr  : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
// Unsigned multiplication
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
                "mul{q}\t$src", []>;          // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
                "mul{q}\t$src", []>;          // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
                 "imul{q}\t$src", []>;        // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
                 "imul{q}\t$src", []>;        // RAX,RDX = RAX*[mem64]
}
let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;

def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
} // isTwoAddress

// Surprisingly enough, these are not two-address instructions!
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                    // GR64 = [mem64]*I32
                        (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                      // GR64 = [mem64]*I8
                      (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
} // Defs = [EFLAGS]
// Unsigned division / remainder
let neverHasSideEffects = 1 in {
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),    // RDX:RAX/r64 = RAX,RDX
                "div{q}\t$src", []>;
// Signed division / remainder
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),    // RDX:RAX/r64 = RAX,RDX
                "idiv{q}\t$src", []>;
let mayLoad = 1 in {
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),  // RDX:RAX/[mem64] = RAX,RDX
                "div{q}\t$src", []>;
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),  // RDX:RAX/[mem64] = RAX,RDX
                "idiv{q}\t$src", []>;
}
}
}
// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
let isTwoAddress = 1 in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
                [(set GR64:$dst, (ineg GR64:$src))]>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, 1))]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, -1))]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;
// In 64-bit mode, single byte INC and DEC cannot be encoded.
let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, 1))]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, 1))]>,
                Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, -1))]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, -1))]>,
                Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress
// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
let isTwoAddress = 0, CodeSize = 2 in {
def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
                Requires<[In64BitMode]>;
def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
                  [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
                  [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
                Requires<[In64BitMode]>;
}
} // Defs = [EFLAGS], CodeSize
let Defs = [EFLAGS] in {
// Shift instructions
let isTwoAddress = 1 in {
let Uses = [CL] in
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (shl GR64:$src, CL))]>;
let isConvertibleToThreeAddress = 1 in   // Can transform into LEA.
def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                    "shl{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
// NOTE: We don't use shifts of a register by one, because 'add reg,reg' is
// cheaper.
} // isTwoAddress

let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shl{q}\t{$src, $dst|$dst, $src}",
                   [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (srl GR64:$src, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "shr{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                 "shr{q}\t$dst",
                 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shr{q}\t{$src, $dst|$dst, $src}",
                   [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (sra GR64:$src, CL))]>;
def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "sar{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                 "sar{q}\t$dst",
                 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "sar{q}\t{$src, $dst|$dst, $src}",
                   [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Rotate instructions
let isTwoAddress = 1 in {
let Uses = [CL] in
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotl GR64:$src, CL))]>;
def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "rol{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                 "rol{q}\t$dst",
                 [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "rol{q}\t{$src, $dst|$dst, $src}",
                   [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                 "rol{q}\t$dst",
                 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotr GR64:$src, CL))]>;
def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "ror{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                 "ror{q}\t$dst",
                 [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "ror{q}\t{$src, $dst|$dst, $src}",
                   [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                 "ror{q}\t$dst",
                 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Double shift instructions (generalizations of rotate)
let isTwoAddress = 1 in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB;
}

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                        (i8 imm:$src3)))]>,
                 TB;
} // isCommutable
} // isTwoAddress

let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
}

def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
//  Logical Instructions...
//

let isTwoAddress = 1 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
def AND64rr  : RI<0x21, MRMDestReg,
                  (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
def AND64rm  : RI<0x23, MRMSrcMem,
                  (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
def AND64ri32 : RIi32<0x81, MRM4r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "and{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def AND64mr  : RI<0x21, MRMDestMem,
                  (outs), (ins i64mem:$dst, GR64:$src),
                  "and{q}\t{$src, $dst|$dst, $src}",
                  [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
def AND64mi32 : RIi32<0x81, MRM4m,
                      (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "and{q}\t{$src, $dst|$dst, $src}",
                      [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "and{q}\t{$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def OR64rr   : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
def OR64rm   : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
def OR64ri8  : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                "or{q}\t{$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                     "or{q}\t{$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def OR64mi8  : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
                    "or{q}\t{$src, $dst|$dst, $src}",
                    [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def XOR64rr  : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
def XOR64rm  : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "xor{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "xor{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress

def XOR64mr  : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "xor{q}\t{$src, $dst|$dst, $src}",
                  [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "xor{q}\t{$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "xor{q}\t{$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
//  Comparison Instructions...
//

// Integer comparison
let Defs = [EFLAGS] in {
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, GR64:$src2), 0),
                   (implicit EFLAGS)]>;
def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
                   (implicit EFLAGS)]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
                       (ins GR64:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
                       (ins i64mem:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;
def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, GR64:$src2),
                  (implicit EFLAGS)]>;
def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp (loadi64 addr:$src1), GR64:$src2),
                  (implicit EFLAGS)]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
                  (implicit EFLAGS)]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp GR64:$src1, i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
                      (ins i64mem:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp GR64:$src1, i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Conditional moves
let Uses = [EFLAGS], isTwoAddress = 1 in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "cmovnp\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                      X86_COND_NP, EFLAGS))]>, TB;
} // isCommutable = 1
def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                    "cmovnp\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                      X86_COND_NP, EFLAGS))]>, TB;
} // isTwoAddress
//===----------------------------------------------------------------------===//
//  Conversion Instructions...
//

// f64 -> signed i64
def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
                                             (load addr:$src)))]>;
def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64
                               (load addr:$src)))]>;
// Signed i64 -> f64
def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let isTwoAddress = 1 in {
def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              GR64:$src2))]>;
def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              (loadi64 addr:$src2)))]>;
} // isTwoAddress
// Signed i64 -> f32
def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let isTwoAddress = 1 in {
def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
                            (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               GR64:$src2))]>;
def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
                            (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               (loadi64 addr:$src2)))]>;
} // isTwoAddress
// f32 -> signed i64
def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse_cvtss2si64 VR128:$src))]>;
def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse_cvtss2si64
                                             (load addr:$src)))]>;
def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 VR128:$src))]>;
def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 (load addr:$src)))]>;
//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map movr0 to xor. Use xorl instead of xorq; it's
// equivalent due to implicit zero-extending, and it sometimes has a smaller
// encoding.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS], AddedComplexity = 1, isReMaterializable = 1 in
def MOV64r0 : RI<0x31, MRMInitReg, (outs GR64:$dst), (ins),
                 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
                 [(set GR64:$dst, 0)]>;
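// In 64-bit mode, writing a 32-bit register implicitly clears bits 63:32 of
// the full register, so "xorl %eax, %eax" zeroes all of RAX in two bytes,
// one byte shorter than the REX.W-prefixed "xorq" form.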
// Materialize i64 constant where top 32-bits are zero.
let AddedComplexity = 1, isReMaterializable = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                        [(set GR64:$dst, i64immZExt32:$src)]>;
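// The same zero-extension trick applies here: a plain "movl $imm32, %reg32"
// materializes any i64 whose top 32 bits are zero in five bytes (six with a
// REX prefix), versus the 10-byte "movabsq" encoding that MOV64ri would emit.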
//===----------------------------------------------------------------------===//
// Atomic Instructions
//===----------------------------------------------------------------------===//

// FIXME: The Pseudo format here is certainly wrong, but the opcode and
// prefixes should be correct.
let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def CMPXCHG64  : RI<0xB1, Pseudo, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "cmpxchgq $swap,$ptr", []>, TB;
def LCMPXCHG64 : RI<0xB1, Pseudo, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock cmpxchgq $swap,$ptr",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}

let Constraints = "$val = $dst", Defs = [EFLAGS] in {
def LXADD64 : RI<0xC1, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                 "lock xadd $val, $ptr",
                 [(set GR64:$dst, (atomic_las_64 addr:$ptr, GR64:$val))]>,
              TB, LOCK;
def XADD64  : RI<0xC1, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                 "xadd $val, $ptr", []>, TB;
def LXCHG64 : RI<0x87, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                 "lock xchg $val, $ptr",
                 [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>, LOCK;
def XCHG64  : RI<0x87, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                 "xchg $val, $ptr", []>;
}
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;

def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[SmallCode, IsStatic]>;
// Calls
// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall GR64:$dst),
          (CALL64r GR64:$dst)>;

def : Pat<(X86tailcall GR32:$dst),
          (TAILJMPr GR32:$dst)>;
def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (TAILJMPd tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (TAILJMPd texternalsym:$dst)>;
def : Pat<(X86tcret GR64:$dst, imm:$off),
          (TCRETURNri64 GR64:$dst, imm:$off)>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>;
// TEST R,R is smaller than CMP R,0
def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
          (TEST64rr GR64:$src1, GR64:$src1)>;
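// "test %rax, %rax" encodes in three bytes (REX.W + 0x85 + ModRM), while
// "cmp $0, %rax" needs at least four (REX.W + 0x83 /7 + ModRM + imm8), so
// the TEST form is preferred for comparisons against zero.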
def : Pat<(i64 (zext GR32:$src)), (INSERT_SUBREG tii_impl_val_zero,
                                   GR32:$src, x86_subreg_32bit)>;

// zextload bool -> zextload byte
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

def : Pat<(zextloadi64i32 addr:$src), (INSERT_SUBREG tii_impl_val_zero,
                                       (MOV32rm addr:$src), x86_subreg_32bit)>;

def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
def : Pat<(extloadi64i32 addr:$src), (INSERT_SUBREG tii_impl_val_undef,
                                      (MOV32rm addr:$src), x86_subreg_32bit)>;

def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8 :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
def : Pat<(i64 (anyext GR32:$src)), (INSERT_SUBREG tii_impl_val_undef,
                                     GR32:$src, x86_subreg_32bit)>;

def : Pat<(i64 (anyext (loadi8 addr:$src))),  (MOVZX64rm8  addr:$src)>;
def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
def : Pat<(i64 (anyext (loadi32 addr:$src))), (INSERT_SUBREG tii_impl_val_undef,
                                               (MOV32rm addr:$src),
                                               x86_subreg_32bit)>;
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// r & (2^32-1) ==> mov32 + implicit zext
def : Pat<(and GR64:$src, i64immFFFFFFFF),
          (INSERT_SUBREG tii_impl_val_zero,
           (MOV32rr (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)),
           x86_subreg_32bit)>;
// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

// (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
def : Pat<(or (srl GR64:$src1, CL:$amt),
              (shl GR64:$src2, (sub 64, CL:$amt))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
                     (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

// (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
def : Pat<(or (shl GR64:$src1, CL:$amt),
              (srl GR64:$src2, (sub 64, CL:$amt))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
                     (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;
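// These match the classic funnel-shift idiom: SHRD shifts $dst right by CL
// while shifting bits of $src2 in at the top, so (x >> c) | (y << (64 - c))
// collapses into a single instruction instead of two shifts and an OR.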
// X86 specific add which produces a flag.
def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, imm:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...

def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                          (iPTR 0)))]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
//===----------------------------------------------------------------------===//
// X86-64 SSE4.1 Instructions
//===----------------------------------------------------------------------===//

/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs GR64:$dst),
                   (ins VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR64:$dst,
                     (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                           addr:$dst)]>, OpSize, REX_W;
}

defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
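// The defm expands the multiclass, producing PEXTRQrr (register destination)
// and PEXTRQmr (memory destination) instruction definitions.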
let isTwoAddress = 1 in {
  multiclass SS41I_insert64<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                       (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
                     OpSize, REX_W;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                     (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                       (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                                         imm:$src3)))]>, OpSize, REX_W;
  }
}

defm PINSRQ : SS41I_insert64<0x22, "pinsrq">;