1 //====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86-64 instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // Operand Definitions.
// 64 bits, but only 32 bits are significant.
21 def i64i32imm : Operand<i64>;
// 64 bits, but only 8 bits are significant.
23 def i64i8imm : Operand<i64>;
25 def lea64mem : Operand<i64> {
26 let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
}
30 def lea64_32mem : Operand<i32> {
31 let PrintMethod = "printlea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}
35 //===----------------------------------------------------------------------===//
36 // Complex Pattern Definitions.
38 def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                               [add, mul, shl, or, frameindex, X86Wrapper],
                               []>;
//===----------------------------------------------------------------------===//
// Pattern Fragments.
//
46 def i64immSExt32 : PatLeaf<(i64 imm), [{
47 // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
48 // sign extended field.
  return (int64_t)N->getValue() == (int32_t)N->getValue();
}]>;
52 def i64immZExt32 : PatLeaf<(i64 imm), [{
53 // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // zero extended field.
  return (uint64_t)N->getValue() == (uint32_t)N->getValue();
}]>;
58 def i64immSExt8 : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
  // sign extended field.
  return (int64_t)N->getValue() == (int8_t)N->getValue();
}]>;
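
// Illustrative examples (not part of the build): 0xFFFFFFFF80000000 satisfies
// i64immSExt32 but not i64immZExt32, 0x00000000FFFFFFFF satisfies i64immZExt32
// but not i64immSExt32, and values in [-128, 127] satisfy i64immSExt8.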
64 def sextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (sextloadi1 node:$ptr))>;
65 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
66 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
67 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
69 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
70 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
71 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
72 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
74 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
75 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
76 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
77 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
79 //===----------------------------------------------------------------------===//
80 // Instruction list...
83 let isImplicitDef = 1 in
def IMPLICIT_DEF_GR64 : I<0, Pseudo, (outs GR64:$dst), (ins),
                          "#IMPLICIT_DEF $dst",
                          [(set GR64:$dst, (undef))]>;
88 //===----------------------------------------------------------------------===//
89 // Call Instructions...
let isCall = 1 in
  // All calls clobber the non-callee saved registers...
93 let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
94 FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
95 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
96 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
97 XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS] in {
98 def CALL64pcrel32 : I<0xE8, RawFrm, (outs), (ins i64imm:$dst, variable_ops),
99 "call\t${dst:call}", []>;
100 def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
101 "call\t{*}$dst", [(X86call GR64:$dst)]>;
102 def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
103 "call\t{*}$dst", []>;
108 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
109 def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset),
110 "#TC_RETURN $dst $offset",
113 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
114 def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset),
115 "#TC_RETURN $dst $offset",
119 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
  def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst # TAILCALL",
                   []>;
124 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
125 def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
126 [(brind GR64:$dst)]>;
127 def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                 [(brind (loadi64 addr:$dst))]>;
}
131 //===----------------------------------------------------------------------===//
132 // Miscellaneous Instructions...
134 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
135 def LEAVE64 : I<0xC9, RawFrm,
136 (outs), (ins), "leave", []>;
137 let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
139 def POP64r : I<0x58, AddRegFrm,
140 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
142 def PUSH64r : I<0x50, AddRegFrm,
                 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
}
146 let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
147 def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W;
148 let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
149 def PUSHFQ : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;
151 def LEA64_32r : I<0x8D, MRMSrcMem,
152 (outs GR32:$dst), (ins lea64_32mem:$src),
153 "lea{l}\t{$src|$dst}, {$dst|$src}",
154 [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
156 def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
157 "lea{q}\t{$src|$dst}, {$dst|$src}",
158 [(set GR64:$dst, lea64addr:$src)]>;
160 let isTwoAddress = 1 in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;
165 let neverHasSideEffects = 1 in {
166 def XCHG64rr : RI<0x87, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
167 "xchg{q}\t{$src2|$src1}, {$src1|$src2}", []>;
168 let mayLoad = 1, mayStore = 1 in {
169 def XCHG64mr : RI<0x87, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
170 "xchg{q}\t{$src2|$src1}, {$src1|$src2}", []>;
171 def XCHG64rm : RI<0x87, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
172 "xchg{q}\t{$src2|$src1}, {$src1|$src2}", []>;
176 // Bit scan instructions.
177 let Defs = [EFLAGS] in {
178 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
179 "bsf{q}\t{$src, $dst|$dst, $src}",
180 [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
181 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
182 "bsf{q}\t{$src, $dst|$dst, $src}",
183 [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
184 (implicit EFLAGS)]>, TB;
186 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
187 "bsr{q}\t{$src, $dst|$dst, $src}",
188 [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
189 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
190 "bsr{q}\t{$src, $dst|$dst, $src}",
191 [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
                    (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]
196 let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
197 def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
198 [(X86rep_movs i64)]>, REP;
199 let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
200 def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
201 [(X86rep_stos i64)]>, REP;
203 //===----------------------------------------------------------------------===//
204 // Move Instructions...
207 let neverHasSideEffects = 1 in
208 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
209 "mov{q}\t{$src, $dst|$dst, $src}", []>;
211 let isReMaterializable = 1 in {
212 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
213 "movabs{q}\t{$src, $dst|$dst, $src}",
214 [(set GR64:$dst, imm:$src)]>;
215 def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
216 "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}
220 let isSimpleLoad = 1 in
221 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
222 "mov{q}\t{$src, $dst|$dst, $src}",
223 [(set GR64:$dst, (load addr:$src))]>;
225 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
226 "mov{q}\t{$src, $dst|$dst, $src}",
227 [(store GR64:$src, addr:$dst)]>;
228 def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
229 "mov{q}\t{$src, $dst|$dst, $src}",
230 [(store i64immSExt32:$src, addr:$dst)]>;
232 // Sign/Zero extenders
234 def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
235 "movs{bq|x}\t{$src, $dst|$dst, $src}",
236 [(set GR64:$dst, (sext GR8:$src))]>, TB;
237 def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
238 "movs{bq|x}\t{$src, $dst|$dst, $src}",
239 [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
240 def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
241 "movs{wq|x}\t{$src, $dst|$dst, $src}",
242 [(set GR64:$dst, (sext GR16:$src))]>, TB;
243 def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
244 "movs{wq|x}\t{$src, $dst|$dst, $src}",
245 [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
246 def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
247 "movs{lq|xd}\t{$src, $dst|$dst, $src}",
248 [(set GR64:$dst, (sext GR32:$src))]>;
249 def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
250 "movs{lq|xd}\t{$src, $dst|$dst, $src}",
251 [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
253 def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
254 "movz{bq|x}\t{$src, $dst|$dst, $src}",
255 [(set GR64:$dst, (zext GR8:$src))]>, TB;
256 def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
257 "movz{bq|x}\t{$src, $dst|$dst, $src}",
258 [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
259 def MOVZX64rr16: RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
260 "movz{wq|x}\t{$src, $dst|$dst, $src}",
261 [(set GR64:$dst, (zext GR16:$src))]>, TB;
262 def MOVZX64rm16: RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
263 "movz{wq|x}\t{$src, $dst|$dst, $src}",
264 [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
266 let neverHasSideEffects = 1 in {
267 let Defs = [RAX], Uses = [EAX] in
268 def CDQE : RI<0x98, RawFrm, (outs), (ins),
269 "{cltq|cdqe}", []>; // RAX = signext(EAX)
271 let Defs = [RAX,RDX], Uses = [RAX] in
272 def CQO : RI<0x99, RawFrm, (outs), (ins),
273 "{cqto|cqo}", []>; // RDX:RAX = signext(RAX)
276 //===----------------------------------------------------------------------===//
277 // Arithmetic Instructions...
280 let Defs = [EFLAGS] in {
281 let isTwoAddress = 1 in {
282 let isConvertibleToThreeAddress = 1 in {
283 let isCommutable = 1 in
284 def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
285 "add{q}\t{$src2, $dst|$dst, $src2}",
286 [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;
288 def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
289 "add{q}\t{$src2, $dst|$dst, $src2}",
290 [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
291 def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
292 "add{q}\t{$src2, $dst|$dst, $src2}",
293 [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
294 } // isConvertibleToThreeAddress
296 def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
297 "add{q}\t{$src2, $dst|$dst, $src2}",
                [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
} // isTwoAddress
301 def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
302 "add{q}\t{$src2, $dst|$dst, $src2}",
303 [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
304 def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
305 "add{q}\t{$src2, $dst|$dst, $src2}",
306 [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
307 def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
308 "add{q}\t{$src2, $dst|$dst, $src2}",
309 [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
311 let Uses = [EFLAGS] in {
312 let isTwoAddress = 1 in {
313 let isCommutable = 1 in
314 def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
315 "adc{q}\t{$src2, $dst|$dst, $src2}",
316 [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;
318 def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
319 "adc{q}\t{$src2, $dst|$dst, $src2}",
320 [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;
322 def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
323 "adc{q}\t{$src2, $dst|$dst, $src2}",
324 [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
325 def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
326 "adc{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
330 def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
331 "adc{q}\t{$src2, $dst|$dst, $src2}",
332 [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
333 def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
334 "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
336 def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
337 "adc{q}\t{$src2, $dst|$dst, $src2}",
                     [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
341 let isTwoAddress = 1 in {
342 def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
343 "sub{q}\t{$src2, $dst|$dst, $src2}",
344 [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
346 def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
347 "sub{q}\t{$src2, $dst|$dst, $src2}",
348 [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;
350 def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
351 "sub{q}\t{$src2, $dst|$dst, $src2}",
352 [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
353 def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
354 "sub{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
358 def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
359 "sub{q}\t{$src2, $dst|$dst, $src2}",
360 [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
361 def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
362 "sub{q}\t{$src2, $dst|$dst, $src2}",
363 [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
364 def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
365 "sub{q}\t{$src2, $dst|$dst, $src2}",
366 [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
368 let Uses = [EFLAGS] in {
369 let isTwoAddress = 1 in {
370 def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
371 "sbb{q}\t{$src2, $dst|$dst, $src2}",
372 [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;
374 def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
375 "sbb{q}\t{$src2, $dst|$dst, $src2}",
376 [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
378 def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
379 "sbb{q}\t{$src2, $dst|$dst, $src2}",
380 [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
381 def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
382 "sbb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
386 def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
387 "sbb{q}\t{$src2, $dst|$dst, $src2}",
388 [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
389 def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
390 "sbb{q}\t{$src2, $dst|$dst, $src2}",
391 [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
392 def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
393 "sbb{q}\t{$src2, $dst|$dst, $src2}",
                     [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
} // Defs = [EFLAGS]
398 // Unsigned multiplication
399 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
400 def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
401 "mul{q}\t$src", []>; // RAX,RDX = RAX*GR64
403 def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
404 "mul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
406 // Signed multiplication
407 def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
408 "imul{q}\t$src", []>; // RAX,RDX = RAX*GR64
410 def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
411 "imul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
414 let Defs = [EFLAGS] in {
415 let isTwoAddress = 1 in {
416 let isCommutable = 1 in
417 def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
418 "imul{q}\t{$src2, $dst|$dst, $src2}",
419 [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;
421 def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
422 "imul{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
} // isTwoAddress
// Surprisingly enough, these are not two-address instructions!
427 def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
428 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
429 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
430 [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
431 def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
432 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
433 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
434 [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
435 def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
436 (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
437 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
438 [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
439 def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
440 (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
441 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
} // Defs = [EFLAGS]
445 // Unsigned division / remainder
446 let neverHasSideEffects = 1 in {
447 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),        // RDX:RAX/r64 = RAX,RDX
                "div{q}\t$src", []>;
450 // Signed division / remainder
451 def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX
452 "idiv{q}\t$src", []>;
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),      // RDX:RAX/[mem64] = RAX,RDX
                "div{q}\t$src", []>;
456 def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
457 "idiv{q}\t$src", []>;
462 // Unary instructions
463 let Defs = [EFLAGS], CodeSize = 2 in {
464 let isTwoAddress = 1 in
465 def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
466 [(set GR64:$dst, (ineg GR64:$src))]>;
467 def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
468 [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;
470 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
471 def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
472 [(set GR64:$dst, (add GR64:$src, 1))]>;
473 def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
474 [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;
476 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
477 def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
478 [(set GR64:$dst, (add GR64:$src, -1))]>;
479 def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
480 [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;
482 // In 64-bit mode, single byte INC and DEC cannot be encoded.
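// (The one-byte 0x40-0x4F INC/DEC opcodes are reused as REX prefixes in 64-bit mode.)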
483 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
484 // Can transform into LEA.
485 def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
486 [(set GR16:$dst, (add GR16:$src, 1))]>,
487 OpSize, Requires<[In64BitMode]>;
488 def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
489 [(set GR32:$dst, (add GR32:$src, 1))]>,
490 Requires<[In64BitMode]>;
491 def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
492 [(set GR16:$dst, (add GR16:$src, -1))]>,
493 OpSize, Requires<[In64BitMode]>;
494 def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
495 [(set GR32:$dst, (add GR32:$src, -1))]>,
496 Requires<[In64BitMode]>;
497 } // isConvertibleToThreeAddress
499 // These are duplicates of their 32-bit counterparts. Only needed so X86 knows
500 // how to unfold them.
501 let isTwoAddress = 0, CodeSize = 2 in {
502 def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
503 [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
504 OpSize, Requires<[In64BitMode]>;
505 def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
506 [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
507 Requires<[In64BitMode]>;
508 def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
509 [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
510 OpSize, Requires<[In64BitMode]>;
511 def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
512 [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
                    Requires<[In64BitMode]>;
}
515 } // Defs = [EFLAGS], CodeSize
518 let Defs = [EFLAGS] in {
519 // Shift instructions
520 let isTwoAddress = 1 in {
522 def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
523 "shl{q}\t{%cl, $dst|$dst, %CL}",
524 [(set GR64:$dst, (shl GR64:$src, CL))]>;
525 let isConvertibleToThreeAddress = 1 in // Can transform into LEA.
526 def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
527 "shl{q}\t{$src2, $dst|$dst, $src2}",
528 [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
529 let neverHasSideEffects = 1 in
def SHL64r1  : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                 "shl{q}\t$dst", []>;
} // isTwoAddress
535 def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
536 "shl{q}\t{%cl, $dst|$dst, %CL}",
537 [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
538 def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
539 "shl{q}\t{$src, $dst|$dst, $src}",
540 [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
541 def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
545 let isTwoAddress = 1 in {
547 def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
548 "shr{q}\t{%cl, $dst|$dst, %CL}",
549 [(set GR64:$dst, (srl GR64:$src, CL))]>;
550 def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
551 "shr{q}\t{$src2, $dst|$dst, $src2}",
552 [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
553 def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                 "shr{q}\t$dst",
                 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // isTwoAddress
559 def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
560 "shr{q}\t{%cl, $dst|$dst, %CL}",
561 [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
562 def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
563 "shr{q}\t{$src, $dst|$dst, $src}",
564 [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
565 def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
569 let isTwoAddress = 1 in {
571 def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
572 "sar{q}\t{%cl, $dst|$dst, %CL}",
573 [(set GR64:$dst, (sra GR64:$src, CL))]>;
574 def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
575 "sar{q}\t{$src2, $dst|$dst, $src2}",
576 [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
577 def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                 "sar{q}\t$dst",
                 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // isTwoAddress
583 def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
584 "sar{q}\t{%cl, $dst|$dst, %CL}",
585 [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
586 def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
587 "sar{q}\t{$src, $dst|$dst, $src}",
588 [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
589 def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
593 // Rotate instructions
594 let isTwoAddress = 1 in {
596 def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
597 "rol{q}\t{%cl, $dst|$dst, %CL}",
598 [(set GR64:$dst, (rotl GR64:$src, CL))]>;
599 def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
600 "rol{q}\t{$src2, $dst|$dst, $src2}",
601 [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
602 def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t$dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // isTwoAddress
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
609 "rol{q}\t{%cl, $dst|$dst, %CL}",
610 [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
611 def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
612 "rol{q}\t{$src, $dst|$dst, $src}",
613 [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
614 def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                "rol{q}\t$dst",
                [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
618 let isTwoAddress = 1 in {
620 def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
621 "ror{q}\t{%cl, $dst|$dst, %CL}",
622 [(set GR64:$dst, (rotr GR64:$src, CL))]>;
623 def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
624 "ror{q}\t{$src2, $dst|$dst, $src2}",
625 [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
626 def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t$dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // isTwoAddress
632 def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
633 "ror{q}\t{%cl, $dst|$dst, %CL}",
634 [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
635 def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
636 "ror{q}\t{$src, $dst|$dst, $src}",
637 [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
638 def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                "ror{q}\t$dst",
                [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
642 // Double shift instructions (generalizations of rotate)
643 let isTwoAddress = 1 in {
645 def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
646 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
647 [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB;
648 def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
649 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
650 [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB;
653 let isCommutable = 1 in { // FIXME: Update X86InstrInfo::commuteInstruction
654 def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
655 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
656 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
660 def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
661 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
662 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
} // isCommutable
} // isTwoAddress
670 def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
671 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
674 def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
675 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
679 def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
680 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
681 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
682 [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                      (i8 imm:$src3)), addr:$dst)]>,
                 TB;
685 def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
686 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
687 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
688 [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                      (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // Defs = [EFLAGS]
693 //===----------------------------------------------------------------------===//
694 // Logical Instructions...
697 let isTwoAddress = 1 in
698 def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
699 [(set GR64:$dst, (not GR64:$src))]>;
700 def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
701 [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
703 let Defs = [EFLAGS] in {
704 let isTwoAddress = 1 in {
705 let isCommutable = 1 in
706 def AND64rr : RI<0x21, MRMDestReg,
707 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
708 "and{q}\t{$src2, $dst|$dst, $src2}",
709 [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
710 def AND64rm : RI<0x23, MRMSrcMem,
711 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
712 "and{q}\t{$src2, $dst|$dst, $src2}",
713 [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
714 def AND64ri32 : RIi32<0x81, MRM4r,
715 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
716 "and{q}\t{$src2, $dst|$dst, $src2}",
717 [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
718 def AND64ri8 : RIi8<0x83, MRM4r,
719 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
720 "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
724 def AND64mr : RI<0x21, MRMDestMem,
725 (outs), (ins i64mem:$dst, GR64:$src),
726 "and{q}\t{$src, $dst|$dst, $src}",
727 [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
728 def AND64mi32 : RIi32<0x81, MRM4m,
729 (outs), (ins i64mem:$dst, i64i32imm:$src),
730 "and{q}\t{$src, $dst|$dst, $src}",
731 [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
732 def AND64mi8 : RIi8<0x83, MRM4m,
733 (outs), (ins i64mem:$dst, i64i8imm :$src),
734 "and{q}\t{$src, $dst|$dst, $src}",
735 [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
737 let isTwoAddress = 1 in {
738 let isCommutable = 1 in
739 def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
740 "or{q}\t{$src2, $dst|$dst, $src2}",
741 [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
742 def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
743 "or{q}\t{$src2, $dst|$dst, $src2}",
744 [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
745 def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
746 "or{q}\t{$src2, $dst|$dst, $src2}",
747 [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
748 def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
749 "or{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
753 def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
754 "or{q}\t{$src, $dst|$dst, $src}",
755 [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
756 def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
757 "or{q}\t{$src, $dst|$dst, $src}",
758 [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
759 def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
760 "or{q}\t{$src, $dst|$dst, $src}",
761 [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
763 let isTwoAddress = 1 in {
764 let isCommutable = 1 in
765 def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
766 "xor{q}\t{$src2, $dst|$dst, $src2}",
767 [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
768 def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
769 "xor{q}\t{$src2, $dst|$dst, $src2}",
770 [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
771 def XOR64ri32 : RIi32<0x81, MRM6r,
772 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
773 "xor{q}\t{$src2, $dst|$dst, $src2}",
774 [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
775 def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
776 "xor{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
780 def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
781 "xor{q}\t{$src, $dst|$dst, $src}",
782 [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
783 def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
784 "xor{q}\t{$src, $dst|$dst, $src}",
785 [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
786 def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
787 "xor{q}\t{$src, $dst|$dst, $src}",
                  [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
} // Defs = [EFLAGS]
791 //===----------------------------------------------------------------------===//
792 // Comparison Instructions...
795 // Integer comparison
796 let Defs = [EFLAGS] in {
797 let isCommutable = 1 in
798 def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
799 "test{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp (and GR64:$src1, GR64:$src2), 0),
                     (implicit EFLAGS)]>;
802 def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
803 "test{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
                     (implicit EFLAGS)]>;
806 def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
807 (ins GR64:$src1, i64i32imm:$src2),
808 "test{q}\t{$src2, $src1|$src1, $src2}",
                        [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
                         (implicit EFLAGS)]>;
811 def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
812 (ins i64mem:$src1, i64i32imm:$src2),
813 "test{q}\t{$src2, $src1|$src1, $src2}",
                        [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
                         (implicit EFLAGS)]>;
817 def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
818 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)]>;
821 def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
822 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp (loadi64 addr:$src1), GR64:$src2),
                    (implicit EFLAGS)]>;
825 def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
826 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)]>;
829 def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
830 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp GR64:$src1, i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
833 def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
834 (ins i64mem:$src1, i64i32imm:$src2),
835 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
838 def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
839 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                     [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
                      (implicit EFLAGS)]>;
842 def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
843 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                     [(X86cmp GR64:$src1, i64immSExt8:$src2),
                      (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
849 let Uses = [EFLAGS], isTwoAddress = 1 in {
850 let isCommutable = 1 in {
851 def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
852 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
853 "cmovb\t{$src2, $dst|$dst, $src2}",
854 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
855 X86_COND_B, EFLAGS))]>, TB;
856 def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64
857 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
858 "cmovae\t{$src2, $dst|$dst, $src2}",
859 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
860 X86_COND_AE, EFLAGS))]>, TB;
861 def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64
862 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
863 "cmove\t{$src2, $dst|$dst, $src2}",
864 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
865 X86_COND_E, EFLAGS))]>, TB;
866 def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64
867 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
868 "cmovne\t{$src2, $dst|$dst, $src2}",
869 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
870 X86_COND_NE, EFLAGS))]>, TB;
871 def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64
872 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
873 "cmovbe\t{$src2, $dst|$dst, $src2}",
874 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
875 X86_COND_BE, EFLAGS))]>, TB;
876 def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64
877 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
878 "cmova\t{$src2, $dst|$dst, $src2}",
879 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
880 X86_COND_A, EFLAGS))]>, TB;
881 def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64
882 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
883 "cmovl\t{$src2, $dst|$dst, $src2}",
884 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
885 X86_COND_L, EFLAGS))]>, TB;
886 def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64
887 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
888 "cmovge\t{$src2, $dst|$dst, $src2}",
889 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
890 X86_COND_GE, EFLAGS))]>, TB;
891 def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64
892 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
893 "cmovle\t{$src2, $dst|$dst, $src2}",
894 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
895 X86_COND_LE, EFLAGS))]>, TB;
896 def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64
897 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
898 "cmovg\t{$src2, $dst|$dst, $src2}",
899 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
900 X86_COND_G, EFLAGS))]>, TB;
901 def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64
902 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
903 "cmovs\t{$src2, $dst|$dst, $src2}",
904 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
905 X86_COND_S, EFLAGS))]>, TB;
906 def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64
907 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
908 "cmovns\t{$src2, $dst|$dst, $src2}",
909 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
910 X86_COND_NS, EFLAGS))]>, TB;
911 def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64
912 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
913 "cmovp\t{$src2, $dst|$dst, $src2}",
914 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
915 X86_COND_P, EFLAGS))]>, TB;
916 def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64
917 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
918 "cmovnp\t{$src2, $dst|$dst, $src2}",
919 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
920 X86_COND_NP, EFLAGS))]>, TB;
921 } // isCommutable = 1
923 def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
924 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
925 "cmovb\t{$src2, $dst|$dst, $src2}",
926 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
927 X86_COND_B, EFLAGS))]>, TB;
928 def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
929 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
930 "cmovae\t{$src2, $dst|$dst, $src2}",
931 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
932 X86_COND_AE, EFLAGS))]>, TB;
933 def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
934 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
935 "cmove\t{$src2, $dst|$dst, $src2}",
936 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
937 X86_COND_E, EFLAGS))]>, TB;
938 def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
939 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
940 "cmovne\t{$src2, $dst|$dst, $src2}",
941 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
942 X86_COND_NE, EFLAGS))]>, TB;
943 def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
944 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
945 "cmovbe\t{$src2, $dst|$dst, $src2}",
946 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
947 X86_COND_BE, EFLAGS))]>, TB;
948 def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
949 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
950 "cmova\t{$src2, $dst|$dst, $src2}",
951 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
952 X86_COND_A, EFLAGS))]>, TB;
953 def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64]
954 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
955 "cmovl\t{$src2, $dst|$dst, $src2}",
956 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
957 X86_COND_L, EFLAGS))]>, TB;
958 def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
959 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
960 "cmovge\t{$src2, $dst|$dst, $src2}",
961 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
962 X86_COND_GE, EFLAGS))]>, TB;
963 def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
964 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
965 "cmovle\t{$src2, $dst|$dst, $src2}",
966 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
967 X86_COND_LE, EFLAGS))]>, TB;
968 def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
969 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
970 "cmovg\t{$src2, $dst|$dst, $src2}",
971 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
972 X86_COND_G, EFLAGS))]>, TB;
973 def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
974 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
975 "cmovs\t{$src2, $dst|$dst, $src2}",
976 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
977 X86_COND_S, EFLAGS))]>, TB;
978 def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
979 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
980 "cmovns\t{$src2, $dst|$dst, $src2}",
981 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
982 X86_COND_NS, EFLAGS))]>, TB;
983 def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
984 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
985 "cmovp\t{$src2, $dst|$dst, $src2}",
986 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
987 X86_COND_P, EFLAGS))]>, TB;
988 def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64]
989 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
990 "cmovnp\t{$src2, $dst|$dst, $src2}",
991 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NP, EFLAGS))]>, TB;
} // Uses = [EFLAGS], isTwoAddress
995 //===----------------------------------------------------------------------===//
996 // Conversion Instructions...
// f64 -> signed i64
def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                        "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst,
                          (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
1004 def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
1005 "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1006 [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
1007 (load addr:$src)))]>;
1008 def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
1009 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1010 [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
1011 def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
1012 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1013 [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
1014 def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1015 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst,
                          (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
1018 def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
1019 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst,
                          (int_x86_sse2_cvttsd2si64
                           (load addr:$src)))]>;
1024 // Signed i64 -> f64
1025 def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1026 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1027 [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
1028 def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
1029 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1030 [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
1031 let isTwoAddress = 1 in {
1032 def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
1033 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1034 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse2_cvtsi642sd VR128:$src1,
                               GR64:$src2))]>;
1038 def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
1039 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1040 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse2_cvtsi642sd VR128:$src1,
                               (loadi64 addr:$src2)))]>;
}
1046 // Signed i64 -> f32
1047 def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
1048 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1049 [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
1050 def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
1051 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1052 [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
1053 let isTwoAddress = 1, neverHasSideEffects = 1 in {
1054 def Int_CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg,
1055 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1056 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
1057 []>; // TODO: add intrinsic
1059 def Int_CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem,
1060 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1061 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                             []>; // TODO: add intrinsic
}
1065 // f32 -> signed i64
1066 def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1067 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst,
                         (int_x86_sse_cvtss2si64 VR128:$src))]>;
1070 def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1071 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
1072 [(set GR64:$dst, (int_x86_sse_cvtss2si64
1073 (load addr:$src)))]>;
1074 def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
1075 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1076 [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
1077 def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1078 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1079 [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
1080 def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1081 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst,
                          (int_x86_sse_cvttss2si64 VR128:$src))]>;
1084 def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1085 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst,
                          (int_x86_sse_cvttss2si64 (load addr:$src)))]>;
1089 let isTwoAddress = 1 in {
1090 def Int_CVTSI642SSrr : RSSI<0x2A, MRMSrcReg,
1091 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1092 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse_cvtsi642ss VR128:$src1,
                              GR64:$src2))]>;
1096 def Int_CVTSI642SSrm : RSSI<0x2A, MRMSrcMem,
1097 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1098 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse_cvtsi642ss VR128:$src1,
                              (loadi64 addr:$src2)))]>;
}
1104 //===----------------------------------------------------------------------===//
1105 // Alias Instructions
1106 //===----------------------------------------------------------------------===//
1109 // TODO: Remove this after proper i32 -> i64 zext support.
1110 def PsMOVZX64rr32: I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
1111 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1112 [(set GR64:$dst, (zext GR32:$src))]>;
1113 def PsMOVZX64rm32: I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
1114 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1115 [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
1118 // Alias instructions that map movr0 to xor. Use xorl instead of xorq; it's
// equivalent due to implicit zero-extending, and it sometimes has a smaller
// encoding.
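// (In 64-bit mode, writing a 32-bit register zeroes bits 63:32 of the full
// 64-bit register, so "xorl %eax, %eax" clears all of RAX without a REX prefix.)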
1121 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
1122 // FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
1123 // when we have a better way to specify isel priority.
1124 let Defs = [EFLAGS], AddedComplexity = 1, isReMaterializable = 1 in
1125 def MOV64r0 : RI<0x31, MRMInitReg, (outs GR64:$dst), (ins),
1126 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
1127 [(set GR64:$dst, 0)]>;
1129 // Materialize i64 constant where top 32-bits are zero.
1130 let AddedComplexity = 1, isReMaterializable = 1 in
1131 def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
1132 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1133 [(set GR64:$dst, i64immZExt32:$src)]>;
1135 //===----------------------------------------------------------------------===//
1136 // Non-Instruction Patterns
1137 //===----------------------------------------------------------------------===//
1139 // ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
1140 def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
1141 (MOV64ri tconstpool :$dst)>, Requires<[NotSmallCode]>;
1142 def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
1143 (MOV64ri tjumptable :$dst)>, Requires<[NotSmallCode]>;
1144 def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
1145 (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
1146 def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
1147 (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
1149 def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
1150 (MOV64mi32 addr:$dst, tconstpool:$src)>,
1151 Requires<[SmallCode, HasLow4G, IsStatic]>;
1152 def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
1153 (MOV64mi32 addr:$dst, tjumptable:$src)>,
1154 Requires<[SmallCode, HasLow4G, IsStatic]>;
1155 def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
1156 (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
1157 Requires<[SmallCode, HasLow4G, IsStatic]>;
1158 def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
1159 (MOV64mi32 addr:$dst, texternalsym:$src)>,
1160 Requires<[SmallCode, HasLow4G, IsStatic]>;
1163 // Direct PC relative function call for small code model. 32-bit displacement
1164 // sign extended to 64-bit.
1165 def : Pat<(X86call (i64 tglobaladdr:$dst)),
1166 (CALL64pcrel32 tglobaladdr:$dst)>;
1167 def : Pat<(X86call (i64 texternalsym:$dst)),
1168 (CALL64pcrel32 texternalsym:$dst)>;
1170 def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
1171 (CALL64pcrel32 tglobaladdr:$dst)>;
1172 def : Pat<(X86tailcall (i64 texternalsym:$dst)),
1173 (CALL64pcrel32 texternalsym:$dst)>;
1175 def : Pat<(X86tailcall GR64:$dst),
1176 (CALL64r GR64:$dst)>;
1180 def : Pat<(X86tailcall GR32:$dst),
1182 def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
1184 def : Pat<(X86tailcall (i64 texternalsym:$dst)),
1187 def : Pat<(X86tcret GR64:$dst, imm:$off),
1188 (TCRETURNri64 GR64:$dst, imm:$off)>;
1190 def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;
1193 def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
1194 (TCRETURNdi64 texternalsym:$dst, imm:$off)>;
1198 // TEST R,R is smaller than CMP R,0
1199 def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
1200 (TEST64rr GR64:$src1, GR64:$src1)>;
1202 // {s|z}extload bool -> {s|z}extload byte
1203 def : Pat<(sextloadi64i1 addr:$src), (MOVSX64rm8 addr:$src)>;
1204 def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
1207 def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
1208 def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>;
1209 def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
1210 def : Pat<(extloadi64i32 addr:$src), (PsMOVZX64rm32 addr:$src)>;
1213 def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>;
1214 def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
1215 def : Pat<(i64 (anyext GR32:$src)), (PsMOVZX64rr32 GR32:$src)>;
1216 def : Pat<(i64 (anyext (loadi8 addr:$src))), (MOVZX64rm8 addr:$src)>;
1217 def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
1218 def : Pat<(i64 (anyext (loadi32 addr:$src))), (PsMOVZX64rm32 addr:$src)>;
//===----------------------------------------------------------------------===//
// Some peepholes.
1222 //===----------------------------------------------------------------------===//
1224 // (shl x, 1) ==> (add x, x)
1225 def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
1227 // (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
1228 def : Pat<(or (srl GR64:$src1, CL:$amt),
1229 (shl GR64:$src2, (sub 64, CL:$amt))),
1230 (SHRD64rrCL GR64:$src1, GR64:$src2)>;
1232 def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
1233 (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
1234 (SHRD64mrCL addr:$dst, GR64:$src2)>;
1236 // (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
1237 def : Pat<(or (shl GR64:$src1, CL:$amt),
1238 (srl GR64:$src2, (sub 64, CL:$amt))),
1239 (SHLD64rrCL GR64:$src1, GR64:$src2)>;
1241 def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
1242 (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
1243 (SHLD64mrCL addr:$dst, GR64:$src2)>;
1245 // X86 specific add which produces a flag.
1246 def : Pat<(addc GR64:$src1, GR64:$src2),
1247 (ADD64rr GR64:$src1, GR64:$src2)>;
1248 def : Pat<(addc GR64:$src1, (load addr:$src2)),
1249 (ADD64rm GR64:$src1, addr:$src2)>;
1250 def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
1251 (ADD64ri32 GR64:$src1, imm:$src2)>;
1252 def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
1253 (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
1255 def : Pat<(subc GR64:$src1, GR64:$src2),
1256 (SUB64rr GR64:$src1, GR64:$src2)>;
1257 def : Pat<(subc GR64:$src1, (load addr:$src2)),
1258 (SUB64rm GR64:$src1, addr:$src2)>;
1259 def : Pat<(subc GR64:$src1, imm:$src2),
1260 (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
1261 def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
1262 (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
1265 //===----------------------------------------------------------------------===//
1266 // X86-64 SSE Instructions
1267 //===----------------------------------------------------------------------===//
1269 // Move instructions...
1271 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
1272 "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
1275 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
1276 "mov{d|q}\t{$src, $dst|$dst, $src}",
                          [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                           (iPTR 0)))]>;
1280 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1281 "mov{d|q}\t{$src, $dst|$dst, $src}",
1282 [(set FR64:$dst, (bitconvert GR64:$src))]>;
1283 def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
1284 "mov{d|q}\t{$src, $dst|$dst, $src}",
1285 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
1287 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
1288 "mov{d|q}\t{$src, $dst|$dst, $src}",
1289 [(set GR64:$dst, (bitconvert FR64:$src))]>;
1290 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
1291 "mov{d|q}\t{$src, $dst|$dst, $src}",
1292 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;