1 //====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86-64 instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // Operand Definitions.
20 // 64 bits, but only 32 bits are significant.
21 def i64i32imm : Operand<i64>;
22 // 64 bits, but only 8 bits are significant.
23 def i64i8imm : Operand<i64>;
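// Note: other than MOV, no x86-64 instruction takes a full 64-bit immediate;
// imm8/imm32 fields are sign-extended to 64 bits by the processor, e.g.
//   addq $-4, %rsp        # encoded with an 8-bit immediate
// so these operand classes mark i64 operands that must fit the narrower field.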
25 def lea64mem : Operand<i64> {
26 let PrintMethod = "printi64mem";
27 let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
30 def lea64_32mem : Operand<i32> {
31 let PrintMethod = "printlea64_32mem";
32 let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
35 //===----------------------------------------------------------------------===//
36 // Complex Pattern Definitions.
38 def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
39 [add, mul, shl, or, frameindex, X86Wrapper],
42 //===----------------------------------------------------------------------===//
46 def i64immSExt32 : PatLeaf<(i64 imm), [{
47 // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
48 // sign extended field.
49 return (int64_t)N->getValue() == (int32_t)N->getValue();
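// For example, 0xFFFFFFFF80000000 (-2^31) satisfies this predicate, while
// 0x0000000080000000 (+2^31) does not: truncating it to 32 bits and
// sign-extending back changes its value.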
52 def i64immZExt32 : PatLeaf<(i64 imm), [{
53 // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
54 // zero extended field.
55 return (uint64_t)N->getValue() == (uint32_t)N->getValue();
58 def i64immSExt8 : PatLeaf<(i64 imm), [{
59 // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
60 // sign extended field.
61 return (int64_t)N->getValue() == (int8_t)N->getValue();
64 def i64immFFFFFFFF : PatLeaf<(i64 imm), [{
65 // i64immFFFFFFFF - True if this is a specific constant we can't write in
67 return N->getValue() == 0x00000000FFFFFFFFULL;
71 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
72 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
73 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
75 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
76 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
77 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
78 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
80 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
81 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
82 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
83 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
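// These PatFrags pin the result type of the generic extending loads to i64:
// sextload sign-extends, zextload zero-extends, and extload leaves the upper
// bits unspecified, so the 64-bit MOVSX/MOVZX patterns below can match them.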
85 //===----------------------------------------------------------------------===//
86 // Instruction list...
89 //===----------------------------------------------------------------------===//
90 // Call Instructions...
93 // All calls clobber the non-callee-saved registers...
94 let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
95 FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
96 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
97 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
98 XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS] in {
99 def CALL64pcrel32 : I<0xE8, RawFrm, (outs), (ins i64imm:$dst, variable_ops),
100 "call\t${dst:call}", []>;
101 def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
102 "call\t{*}$dst", [(X86call GR64:$dst)]>;
103 def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
104 "call\t{*}$dst", []>;
109 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
110 def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset, variable_ops),
111 "#TC_RETURN $dst $offset",
114 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
115 def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset, variable_ops),
116 "#TC_RETURN $dst $offset",
120 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
121 def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst # TAILCALL",
125 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
126 def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
127 [(brind GR64:$dst)]>;
128 def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
129 [(brind (loadi64 addr:$dst))]>;
132 //===----------------------------------------------------------------------===//
133 // Miscellaneous Instructions...
135 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
136 def LEAVE64 : I<0xC9, RawFrm,
137 (outs), (ins), "leave", []>;
138 let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
140 def POP64r : I<0x58, AddRegFrm,
141 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
143 def PUSH64r : I<0x50, AddRegFrm,
144 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
147 let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
148 def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W;
149 let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
150 def PUSHFQ : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;
152 def LEA64_32r : I<0x8D, MRMSrcMem,
153 (outs GR32:$dst), (ins lea64_32mem:$src),
154 "lea{l}\t{$src|$dst}, {$dst|$src}",
155 [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
157 let isReMaterializable = 1 in
158 def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
159 "lea{q}\t{$src|$dst}, {$dst|$src}",
160 [(set GR64:$dst, lea64addr:$src)]>;
162 let isTwoAddress = 1 in
163 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                      "bswap{q}\t$dst",
165 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
167 // Bit scan instructions.
168 let Defs = [EFLAGS] in {
169 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
170 "bsf{q}\t{$src, $dst|$dst, $src}",
171 [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
172 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
173 "bsf{q}\t{$src, $dst|$dst, $src}",
174 [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
175 (implicit EFLAGS)]>, TB;
177 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
178 "bsr{q}\t{$src, $dst|$dst, $src}",
179 [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
180 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
181 "bsr{q}\t{$src, $dst|$dst, $src}",
182 [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
183 (implicit EFLAGS)]>, TB;
187 let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
188 def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
189 [(X86rep_movs i64)]>, REP;
190 let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
191 def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
192 [(X86rep_stos i64)]>, REP;
194 //===----------------------------------------------------------------------===//
195 // Move Instructions...
198 let neverHasSideEffects = 1 in
199 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
200 "mov{q}\t{$src, $dst|$dst, $src}", []>;
202 let isReMaterializable = 1 in {
203 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
204 "movabs{q}\t{$src, $dst|$dst, $src}",
205 [(set GR64:$dst, imm:$src)]>;
206 def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
207 "mov{q}\t{$src, $dst|$dst, $src}",
208 [(set GR64:$dst, i64immSExt32:$src)]>;
211 let isSimpleLoad = 1 in
212 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
213 "mov{q}\t{$src, $dst|$dst, $src}",
214 [(set GR64:$dst, (load addr:$src))]>;
216 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
217 "mov{q}\t{$src, $dst|$dst, $src}",
218 [(store GR64:$src, addr:$dst)]>;
219 def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
220 "mov{q}\t{$src, $dst|$dst, $src}",
221 [(store i64immSExt32:$src, addr:$dst)]>;
223 // Sign/Zero extenders
225 def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
226 "movs{bq|x}\t{$src, $dst|$dst, $src}",
227 [(set GR64:$dst, (sext GR8:$src))]>, TB;
228 def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
229 "movs{bq|x}\t{$src, $dst|$dst, $src}",
230 [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
231 def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
232 "movs{wq|x}\t{$src, $dst|$dst, $src}",
233 [(set GR64:$dst, (sext GR16:$src))]>, TB;
234 def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
235 "movs{wq|x}\t{$src, $dst|$dst, $src}",
236 [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
237 def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
238 "movs{lq|xd}\t{$src, $dst|$dst, $src}",
239 [(set GR64:$dst, (sext GR32:$src))]>;
240 def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
241 "movs{lq|xd}\t{$src, $dst|$dst, $src}",
242 [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
244 def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
245 "movz{bq|x}\t{$src, $dst|$dst, $src}",
246 [(set GR64:$dst, (zext GR8:$src))]>, TB;
247 def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
248 "movz{bq|x}\t{$src, $dst|$dst, $src}",
249 [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
250 def MOVZX64rr16: RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
251 "movz{wq|x}\t{$src, $dst|$dst, $src}",
252 [(set GR64:$dst, (zext GR16:$src))]>, TB;
253 def MOVZX64rm16: RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
254 "movz{wq|x}\t{$src, $dst|$dst, $src}",
255 [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
257 let neverHasSideEffects = 1 in {
258 let Defs = [RAX], Uses = [EAX] in
259 def CDQE : RI<0x98, RawFrm, (outs), (ins),
260 "{cltq|cdqe}", []>; // RAX = signext(EAX)
262 let Defs = [RAX,RDX], Uses = [RAX] in
263 def CQO : RI<0x99, RawFrm, (outs), (ins),
264 "{cqto|cqo}", []>; // RDX:RAX = signext(RAX)
267 //===----------------------------------------------------------------------===//
268 // Arithmetic Instructions...
271 let Defs = [EFLAGS] in {
272 let isTwoAddress = 1 in {
273 let isConvertibleToThreeAddress = 1 in {
274 let isCommutable = 1 in
275 def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
276 "add{q}\t{$src2, $dst|$dst, $src2}",
277 [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;
279 def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
280 "add{q}\t{$src2, $dst|$dst, $src2}",
281 [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
282 def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
283 "add{q}\t{$src2, $dst|$dst, $src2}",
284 [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
285 } // isConvertibleToThreeAddress
287 def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
288 "add{q}\t{$src2, $dst|$dst, $src2}",
289 [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
292 def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
293 "add{q}\t{$src2, $dst|$dst, $src2}",
294 [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
295 def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
296 "add{q}\t{$src2, $dst|$dst, $src2}",
297 [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
298 def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
299 "add{q}\t{$src2, $dst|$dst, $src2}",
300 [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
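// The ri8/ri32 forms are purely an encoding choice: the processor sign-extends
// imm8/imm32 to 64 bits, so i64immSExt8/i64immSExt32 pick the smallest
// immediate field that can still represent the constant.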
302 let Uses = [EFLAGS] in {
303 let isTwoAddress = 1 in {
304 let isCommutable = 1 in
305 def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
306 "adc{q}\t{$src2, $dst|$dst, $src2}",
307 [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;
309 def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
310 "adc{q}\t{$src2, $dst|$dst, $src2}",
311 [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;
313 def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
314 "adc{q}\t{$src2, $dst|$dst, $src2}",
315 [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
316 def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
317 "adc{q}\t{$src2, $dst|$dst, $src2}",
318 [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
321 def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
322 "adc{q}\t{$src2, $dst|$dst, $src2}",
323 [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
324 def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
325 "adc{q}\t{$src2, $dst|$dst, $src2}",
326                       [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
327 def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
328 "adc{q}\t{$src2, $dst|$dst, $src2}",
329 [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
332 let isTwoAddress = 1 in {
333 def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
334 "sub{q}\t{$src2, $dst|$dst, $src2}",
335 [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
337 def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
338 "sub{q}\t{$src2, $dst|$dst, $src2}",
339 [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;
341 def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
342 "sub{q}\t{$src2, $dst|$dst, $src2}",
343 [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
344 def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
345 "sub{q}\t{$src2, $dst|$dst, $src2}",
346 [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
349 def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
350 "sub{q}\t{$src2, $dst|$dst, $src2}",
351 [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
352 def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
353 "sub{q}\t{$src2, $dst|$dst, $src2}",
354 [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
355 def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
356 "sub{q}\t{$src2, $dst|$dst, $src2}",
357 [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
359 let Uses = [EFLAGS] in {
360 let isTwoAddress = 1 in {
361 def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
362 "sbb{q}\t{$src2, $dst|$dst, $src2}",
363 [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;
365 def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
366 "sbb{q}\t{$src2, $dst|$dst, $src2}",
367 [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
369 def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
370 "sbb{q}\t{$src2, $dst|$dst, $src2}",
371 [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
372 def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
373 "sbb{q}\t{$src2, $dst|$dst, $src2}",
374 [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
377 def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
378 "sbb{q}\t{$src2, $dst|$dst, $src2}",
379 [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
380 def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
381 "sbb{q}\t{$src2, $dst|$dst, $src2}",
382 [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
383 def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
384 "sbb{q}\t{$src2, $dst|$dst, $src2}",
385 [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
389 // Unsigned multiplication
390 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
391 def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
392 "mul{q}\t$src", []>; // RAX,RDX = RAX*GR64
394 def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
395 "mul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
397 // Signed multiplication
398 def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
399 "imul{q}\t$src", []>; // RAX,RDX = RAX*GR64
401 def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
402 "imul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
405 let Defs = [EFLAGS] in {
406 let isTwoAddress = 1 in {
407 let isCommutable = 1 in
408 def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
409 "imul{q}\t{$src2, $dst|$dst, $src2}",
410 [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;
412 def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
413 "imul{q}\t{$src2, $dst|$dst, $src2}",
414 [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
417 // Surprisingly enough, these are not two-address instructions!
418 def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
419 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
420 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
421 [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
422 def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
423 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
424 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
425 [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
426 def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
427 (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
428 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
429 [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
430 def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
431 (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
432 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
433 [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
436 // Unsigned division / remainder
437 let neverHasSideEffects = 1 in {
438 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
439 def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX
                    "div{q}\t$src", []>;
441 // Signed division / remainder
442 def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX
443 "idiv{q}\t$src", []>;
445 def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
                    "div{q}\t$src", []>;
447 def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
448 "idiv{q}\t$src", []>;
453 // Unary instructions
454 let Defs = [EFLAGS], CodeSize = 2 in {
455 let isTwoAddress = 1 in
456 def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
457 [(set GR64:$dst, (ineg GR64:$src))]>;
458 def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
459 [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;
461 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
462 def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
463 [(set GR64:$dst, (add GR64:$src, 1))]>;
464 def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
465 [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;
467 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
468 def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
469 [(set GR64:$dst, (add GR64:$src, -1))]>;
470 def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
471 [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;
473 // In 64-bit mode, single byte INC and DEC cannot be encoded.
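// (The one-byte 0x40-0x4F inc/dec opcodes are reused as REX prefixes in 64-bit
// mode, so INC/DEC must fall back to the longer 0xFF /0 and /1 encodings.)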
474 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
475 // Can transform into LEA.
476 def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
477 [(set GR16:$dst, (add GR16:$src, 1))]>,
478 OpSize, Requires<[In64BitMode]>;
479 def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
480 [(set GR32:$dst, (add GR32:$src, 1))]>,
481 Requires<[In64BitMode]>;
482 def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
483 [(set GR16:$dst, (add GR16:$src, -1))]>,
484 OpSize, Requires<[In64BitMode]>;
485 def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
486 [(set GR32:$dst, (add GR32:$src, -1))]>,
487 Requires<[In64BitMode]>;
488 } // isConvertibleToThreeAddress
490 // These are duplicates of their 32-bit counterparts. Only needed so X86 knows
491 // how to unfold them.
492 let isTwoAddress = 0, CodeSize = 2 in {
493 def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
494 [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
495 OpSize, Requires<[In64BitMode]>;
496 def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
497 [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
498 Requires<[In64BitMode]>;
499 def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
500 [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
501 OpSize, Requires<[In64BitMode]>;
502 def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
503 [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
504 Requires<[In64BitMode]>;
506 } // Defs = [EFLAGS], CodeSize
509 let Defs = [EFLAGS] in {
510 // Shift instructions
511 let isTwoAddress = 1 in {
513 def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
514 "shl{q}\t{%cl, $dst|$dst, %CL}",
515 [(set GR64:$dst, (shl GR64:$src, CL))]>;
516 let isConvertibleToThreeAddress = 1 in // Can transform into LEA.
517 def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
518 "shl{q}\t{$src2, $dst|$dst, $src2}",
519 [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
520 // NOTE: We don't use shifts of a register by one, because 'add reg,reg' is cheaper.
525 def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
526 "shl{q}\t{%cl, $dst|$dst, %CL}",
527 [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
528 def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
529 "shl{q}\t{$src, $dst|$dst, $src}",
530 [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
531 def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                    "shl{q}\t$dst",
533 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
535 let isTwoAddress = 1 in {
537 def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
538 "shr{q}\t{%cl, $dst|$dst, %CL}",
539 [(set GR64:$dst, (srl GR64:$src, CL))]>;
540 def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
541 "shr{q}\t{$src2, $dst|$dst, $src2}",
542 [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
543 def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                    "shr{q}\t$dst",
545 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
549 def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
550 "shr{q}\t{%cl, $dst|$dst, %CL}",
551 [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
552 def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
553 "shr{q}\t{$src, $dst|$dst, $src}",
554 [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
555 def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                    "shr{q}\t$dst",
557 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
559 let isTwoAddress = 1 in {
561 def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
562 "sar{q}\t{%cl, $dst|$dst, %CL}",
563 [(set GR64:$dst, (sra GR64:$src, CL))]>;
564 def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
565 "sar{q}\t{$src2, $dst|$dst, $src2}",
566 [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
567 def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                    "sar{q}\t$dst",
569 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
573 def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
574 "sar{q}\t{%cl, $dst|$dst, %CL}",
575 [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
576 def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
577 "sar{q}\t{$src, $dst|$dst, $src}",
578 [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
579 def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                    "sar{q}\t$dst",
581 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
583 // Rotate instructions
584 let isTwoAddress = 1 in {
586 def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
587 "rol{q}\t{%cl, $dst|$dst, %CL}",
588 [(set GR64:$dst, (rotl GR64:$src, CL))]>;
589 def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
590 "rol{q}\t{$src2, $dst|$dst, $src2}",
591 [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
592 def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                    "rol{q}\t$dst",
594 [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
598 def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
599 "rol{q}\t{%cl, $dst|$dst, %CL}",
600 [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
601 def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
602 "rol{q}\t{$src, $dst|$dst, $src}",
603 [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
604 def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                    "rol{q}\t$dst",
606 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
608 let isTwoAddress = 1 in {
610 def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
611 "ror{q}\t{%cl, $dst|$dst, %CL}",
612 [(set GR64:$dst, (rotr GR64:$src, CL))]>;
613 def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
614 "ror{q}\t{$src2, $dst|$dst, $src2}",
615 [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
616 def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                    "ror{q}\t$dst",
618 [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
622 def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
623 "ror{q}\t{%cl, $dst|$dst, %CL}",
624 [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
625 def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
626 "ror{q}\t{$src, $dst|$dst, $src}",
627 [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
628 def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                    "ror{q}\t$dst",
630 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
632 // Double shift instructions (generalizations of rotate)
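// SHLD/SHRD shift across a register pair: SHLD dst, src2, cnt computes
// (dst << cnt) | (src2 >> (64 - cnt)), and SHRD is the mirror image; the
// X86shld/X86shrd nodes below carry exactly that semantics.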
633 let isTwoAddress = 1 in {
635 def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
636 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
637 [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB;
638 def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
639 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
640 [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB;
643 let isCommutable = 1 in { // FIXME: Update X86InstrInfo::commuteInstruction
644 def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
645 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
646 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
647 [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
650 def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
651 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
652 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
653 [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
660 def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
661 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
662 [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
664 def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
665 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
666 [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
669 def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
670 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
671 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
672 [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
673 (i8 imm:$src3)), addr:$dst)]>,
675 def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
676 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
677 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
678 [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
679 (i8 imm:$src3)), addr:$dst)]>,
683 //===----------------------------------------------------------------------===//
684 // Logical Instructions...
687 let isTwoAddress = 1 in
688 def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
689 [(set GR64:$dst, (not GR64:$src))]>;
690 def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
691 [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
693 let Defs = [EFLAGS] in {
694 let isTwoAddress = 1 in {
695 let isCommutable = 1 in
696 def AND64rr : RI<0x21, MRMDestReg,
697 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
698 "and{q}\t{$src2, $dst|$dst, $src2}",
699 [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
700 def AND64rm : RI<0x23, MRMSrcMem,
701 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
702 "and{q}\t{$src2, $dst|$dst, $src2}",
703 [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
704 def AND64ri32 : RIi32<0x81, MRM4r,
705 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
706 "and{q}\t{$src2, $dst|$dst, $src2}",
707 [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
708 def AND64ri8 : RIi8<0x83, MRM4r,
709 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
710 "and{q}\t{$src2, $dst|$dst, $src2}",
711 [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
714 def AND64mr : RI<0x21, MRMDestMem,
715 (outs), (ins i64mem:$dst, GR64:$src),
716 "and{q}\t{$src, $dst|$dst, $src}",
717 [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
718 def AND64mi32 : RIi32<0x81, MRM4m,
719 (outs), (ins i64mem:$dst, i64i32imm:$src),
720 "and{q}\t{$src, $dst|$dst, $src}",
721 [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
722 def AND64mi8 : RIi8<0x83, MRM4m,
723 (outs), (ins i64mem:$dst, i64i8imm :$src),
724 "and{q}\t{$src, $dst|$dst, $src}",
725 [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
727 let isTwoAddress = 1 in {
728 let isCommutable = 1 in
729 def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
730 "or{q}\t{$src2, $dst|$dst, $src2}",
731 [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
732 def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
733 "or{q}\t{$src2, $dst|$dst, $src2}",
734 [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
735 def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
736 "or{q}\t{$src2, $dst|$dst, $src2}",
737 [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
738 def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
739 "or{q}\t{$src2, $dst|$dst, $src2}",
740 [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
743 def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
744 "or{q}\t{$src, $dst|$dst, $src}",
745 [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
746 def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
747 "or{q}\t{$src, $dst|$dst, $src}",
748 [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
749 def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
750 "or{q}\t{$src, $dst|$dst, $src}",
751 [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
753 let isTwoAddress = 1 in {
754 let isCommutable = 1 in
755 def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
756 "xor{q}\t{$src2, $dst|$dst, $src2}",
757 [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
758 def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
759 "xor{q}\t{$src2, $dst|$dst, $src2}",
760 [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
761 def XOR64ri32 : RIi32<0x81, MRM6r,
762 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
763 "xor{q}\t{$src2, $dst|$dst, $src2}",
764 [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
765 def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
766 "xor{q}\t{$src2, $dst|$dst, $src2}",
767 [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
770 def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
771 "xor{q}\t{$src, $dst|$dst, $src}",
772 [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
773 def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
774 "xor{q}\t{$src, $dst|$dst, $src}",
775 [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
776 def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
777 "xor{q}\t{$src, $dst|$dst, $src}",
778 [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
781 //===----------------------------------------------------------------------===//
782 // Comparison Instructions...
785 // Integer comparison
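// TEST performs an AND that only updates EFLAGS and never writes a register,
// which is why its patterns below match (and x, y) compared against 0.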
786 let Defs = [EFLAGS] in {
787 let isCommutable = 1 in
788 def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
789 "test{q}\t{$src2, $src1|$src1, $src2}",
790 [(X86cmp (and GR64:$src1, GR64:$src2), 0),
792 def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
793 "test{q}\t{$src2, $src1|$src1, $src2}",
794 [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
796 def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
797 (ins GR64:$src1, i64i32imm:$src2),
798 "test{q}\t{$src2, $src1|$src1, $src2}",
799 [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
801 def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
802 (ins i64mem:$src1, i64i32imm:$src2),
803 "test{q}\t{$src2, $src1|$src1, $src2}",
804 [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
807 def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
808 "cmp{q}\t{$src2, $src1|$src1, $src2}",
809 [(X86cmp GR64:$src1, GR64:$src2),
811 def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
812 "cmp{q}\t{$src2, $src1|$src1, $src2}",
813 [(X86cmp (loadi64 addr:$src1), GR64:$src2),
815 def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
816 "cmp{q}\t{$src2, $src1|$src1, $src2}",
817 [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
819 def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
820 "cmp{q}\t{$src2, $src1|$src1, $src2}",
821 [(X86cmp GR64:$src1, i64immSExt32:$src2),
823 def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
824 (ins i64mem:$src1, i64i32imm:$src2),
825 "cmp{q}\t{$src2, $src1|$src1, $src2}",
826 [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
828 def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
829 "cmp{q}\t{$src2, $src1|$src1, $src2}",
830 [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
832 def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
833 "cmp{q}\t{$src2, $src1|$src1, $src2}",
834 [(X86cmp GR64:$src1, i64immSExt8:$src2),
839 let Uses = [EFLAGS], isTwoAddress = 1 in {
840 let isCommutable = 1 in {
841 def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
842 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
843 "cmovb\t{$src2, $dst|$dst, $src2}",
844 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
845 X86_COND_B, EFLAGS))]>, TB;
846 def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64
847 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
848 "cmovae\t{$src2, $dst|$dst, $src2}",
849 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
850 X86_COND_AE, EFLAGS))]>, TB;
851 def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64
852 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
853 "cmove\t{$src2, $dst|$dst, $src2}",
854 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
855 X86_COND_E, EFLAGS))]>, TB;
856 def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64
857 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
858 "cmovne\t{$src2, $dst|$dst, $src2}",
859 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
860 X86_COND_NE, EFLAGS))]>, TB;
861 def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64
862 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
863 "cmovbe\t{$src2, $dst|$dst, $src2}",
864 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
865 X86_COND_BE, EFLAGS))]>, TB;
866 def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64
867 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
868 "cmova\t{$src2, $dst|$dst, $src2}",
869 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
870 X86_COND_A, EFLAGS))]>, TB;
871 def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64
872 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
873 "cmovl\t{$src2, $dst|$dst, $src2}",
874 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
875 X86_COND_L, EFLAGS))]>, TB;
876 def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64
877 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
878 "cmovge\t{$src2, $dst|$dst, $src2}",
879 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
880 X86_COND_GE, EFLAGS))]>, TB;
881 def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64
882 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
883 "cmovle\t{$src2, $dst|$dst, $src2}",
884 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
885 X86_COND_LE, EFLAGS))]>, TB;
886 def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64
887 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
888 "cmovg\t{$src2, $dst|$dst, $src2}",
889 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
890 X86_COND_G, EFLAGS))]>, TB;
891 def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64
892 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
893 "cmovs\t{$src2, $dst|$dst, $src2}",
894 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
895 X86_COND_S, EFLAGS))]>, TB;
896 def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64
897 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
898 "cmovns\t{$src2, $dst|$dst, $src2}",
899 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
900 X86_COND_NS, EFLAGS))]>, TB;
901 def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64
902 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
903 "cmovp\t{$src2, $dst|$dst, $src2}",
904 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
905 X86_COND_P, EFLAGS))]>, TB;
906 def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64
907 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
908 "cmovnp\t{$src2, $dst|$dst, $src2}",
909 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
910 X86_COND_NP, EFLAGS))]>, TB;
911 } // isCommutable = 1
913 def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
914 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
915 "cmovb\t{$src2, $dst|$dst, $src2}",
916 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
917 X86_COND_B, EFLAGS))]>, TB;
918 def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
919 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
920 "cmovae\t{$src2, $dst|$dst, $src2}",
921 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
922 X86_COND_AE, EFLAGS))]>, TB;
923 def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
924 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
925 "cmove\t{$src2, $dst|$dst, $src2}",
926 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
927 X86_COND_E, EFLAGS))]>, TB;
928 def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
929 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
930 "cmovne\t{$src2, $dst|$dst, $src2}",
931 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
932 X86_COND_NE, EFLAGS))]>, TB;
933 def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
934 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
935 "cmovbe\t{$src2, $dst|$dst, $src2}",
936 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
937 X86_COND_BE, EFLAGS))]>, TB;
938 def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
939 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
940 "cmova\t{$src2, $dst|$dst, $src2}",
941 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
942 X86_COND_A, EFLAGS))]>, TB;
943 def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64]
944 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
945 "cmovl\t{$src2, $dst|$dst, $src2}",
946 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
947 X86_COND_L, EFLAGS))]>, TB;
948 def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
949 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
950 "cmovge\t{$src2, $dst|$dst, $src2}",
951 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
952 X86_COND_GE, EFLAGS))]>, TB;
953 def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
954 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
955 "cmovle\t{$src2, $dst|$dst, $src2}",
956 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
957 X86_COND_LE, EFLAGS))]>, TB;
958 def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
959 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
960 "cmovg\t{$src2, $dst|$dst, $src2}",
961 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
962 X86_COND_G, EFLAGS))]>, TB;
963 def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
964 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
965 "cmovs\t{$src2, $dst|$dst, $src2}",
966 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
967 X86_COND_S, EFLAGS))]>, TB;
968 def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
969 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
970 "cmovns\t{$src2, $dst|$dst, $src2}",
971 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
972 X86_COND_NS, EFLAGS))]>, TB;
973 def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
974 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
975 "cmovp\t{$src2, $dst|$dst, $src2}",
976 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
977 X86_COND_P, EFLAGS))]>, TB;
978 def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64]
979 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
980 "cmovnp\t{$src2, $dst|$dst, $src2}",
981 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
982 X86_COND_NP, EFLAGS))]>, TB;
985 //===----------------------------------------------------------------------===//
986 // Conversion Instructions...
990 def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
991 "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
993 (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
994 def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
995 "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
996 [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
997 (load addr:$src)))]>;
998 def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
999 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1000 [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
1001 def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
1002 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1003 [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
1004 def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1005 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1007 (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
1008 def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
1009 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1011 (int_x86_sse2_cvttsd2si64
1012 (load addr:$src)))]>;
1014 // Signed i64 -> f64
1015 def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1016 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1017 [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
1018 def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
1019 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1020 [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
1022 let isTwoAddress = 1 in {
1023 def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
1024 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1025 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
1027 (int_x86_sse2_cvtsi642sd VR128:$src1,
1029 def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
1030 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1031 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
1033 (int_x86_sse2_cvtsi642sd VR128:$src1,
1034 (loadi64 addr:$src2)))]>;
1037 // Signed i64 -> f32
1038 def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
1039 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1040 [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
1041 def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
1042 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1043 [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
1045 let isTwoAddress = 1 in {
1046 def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
1047 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1048 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
1050 (int_x86_sse_cvtsi642ss VR128:$src1,
1052 def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
1053 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1054 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
1056 (int_x86_sse_cvtsi642ss VR128:$src1,
1057 (loadi64 addr:$src2)))]>;
1060 // f32 -> signed i64
1061 def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1062 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
1064 (int_x86_sse_cvtss2si64 VR128:$src))]>;
1065 def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1066 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
1067 [(set GR64:$dst, (int_x86_sse_cvtss2si64
1068 (load addr:$src)))]>;
1069 def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
1070 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1071 [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
1072 def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1073 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1074 [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
1075 def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1076 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1078 (int_x86_sse_cvttss2si64 VR128:$src))]>;
1079 def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1080 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1082 (int_x86_sse_cvttss2si64 (load addr:$src)))]>;
1084 //===----------------------------------------------------------------------===//
1085 // Alias Instructions
1086 //===----------------------------------------------------------------------===//
1088 // Alias instructions that map movr0 to xor. Use xorl instead of xorq; it's
1089 // equivalent due to implicit zero-extending, and it sometimes has a smaller encoding.
1091 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
1092 // FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
1093 // when we have a better way to specify isel priority.
1094 let Defs = [EFLAGS], AddedComplexity = 1, isReMaterializable = 1 in
1095 def MOV64r0 : RI<0x31, MRMInitReg, (outs GR64:$dst), (ins),
1096 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
1097 [(set GR64:$dst, 0)]>;
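// Writing a 32-bit register implicitly clears bits 63:32, so xorl zeroes the
// full 64-bit register with a shorter encoding (no REX.W prefix required).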
1099 // Materialize i64 constant where top 32-bits are zero.
1100 let AddedComplexity = 1, isReMaterializable = 1 in
1101 def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
1102 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1103 [(set GR64:$dst, i64immZExt32:$src)]>;
1105 //===----------------------------------------------------------------------===//
1106 // Thread Local Storage Instructions
1107 //===----------------------------------------------------------------------===//
1109 def TLS_addr64 : I<0, Pseudo, (outs GR64:$dst), (ins i64imm:$sym),
1110 ".byte\t0x66; leaq\t${sym:mem}(%rip), $dst; .word\t0x6666; rex64",
1111 [(set GR64:$dst, (X86tlsaddr tglobaltlsaddr:$sym))]>;
1113 //===----------------------------------------------------------------------===//
1114 // Atomic Instructions
1115 //===----------------------------------------------------------------------===//
1117 let Defs = [RAX, EFLAGS], Uses = [RAX] in {
1118 def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
1119 "lock cmpxchgq $swap,$ptr",
1120 [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
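// CMPXCHG compares RAX with the memory operand and stores $swap on a match;
// RAX always receives the old memory value, hence RAX in both Uses and Defs.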
1123 let Constraints = "$val = $dst", Defs = [EFLAGS] in {
1124 def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
1125 "lock xadd $val, $ptr",
1126 [(set GR64:$dst, (atomic_las_64 addr:$ptr, GR64:$val))]>,
1128 def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
1130 [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
1134 //===----------------------------------------------------------------------===//
1135 // Non-Instruction Patterns
1136 //===----------------------------------------------------------------------===//
1138 // ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
1139 def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
1140 (MOV64ri tconstpool :$dst)>, Requires<[NotSmallCode]>;
1141 def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
1142 (MOV64ri tjumptable :$dst)>, Requires<[NotSmallCode]>;
1143 def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
1144 (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
1145 def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
1146 (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
1148 def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
1149 (MOV64mi32 addr:$dst, tconstpool:$src)>,
1150 Requires<[SmallCode, IsStatic]>;
1151 def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
1152 (MOV64mi32 addr:$dst, tjumptable:$src)>,
1153 Requires<[SmallCode, IsStatic]>;
1154 def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
1155 (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
1156 Requires<[SmallCode, IsStatic]>;
1157 def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
1158 (MOV64mi32 addr:$dst, texternalsym:$src)>,
1159 Requires<[SmallCode, IsStatic]>;
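// In the small static code model a symbol's address fits in 32 bits, so it can
// be stored directly as a sign-extended imm32 (MOV64mi32); otherwise the full
// 64-bit address must first be materialized with movabsq (MOV64ri) above.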
1162 // Direct PC relative function call for small code model. 32-bit displacement
1163 // sign extended to 64-bit.
1164 def : Pat<(X86call (i64 tglobaladdr:$dst)),
1165 (CALL64pcrel32 tglobaladdr:$dst)>;
1166 def : Pat<(X86call (i64 texternalsym:$dst)),
1167 (CALL64pcrel32 texternalsym:$dst)>;
1169 def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
1170 (CALL64pcrel32 tglobaladdr:$dst)>;
1171 def : Pat<(X86tailcall (i64 texternalsym:$dst)),
1172 (CALL64pcrel32 texternalsym:$dst)>;
1174 def : Pat<(X86tailcall GR64:$dst),
1175 (CALL64r GR64:$dst)>;
1179 def : Pat<(X86tailcall GR32:$dst),
1181 def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
1183 def : Pat<(X86tailcall (i64 texternalsym:$dst)),
1186 def : Pat<(X86tcret GR64:$dst, imm:$off),
1187 (TCRETURNri64 GR64:$dst, imm:$off)>;
1189 def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
1190           (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;
1192 def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
1193 (TCRETURNdi64 texternalsym:$dst, imm:$off)>;
1197 // TEST R,R is smaller than CMP R,0
1198 def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
1199 (TEST64rr GR64:$src1, GR64:$src1)>;
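// test r,r produces the same ZF/SF/CF/OF as cmp r,0 but needs no immediate
// byte, so it is never the larger encoding.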
1204 def : Pat<(i64 (zext GR32:$src)),
1205 (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>;
1207 // zextload bool -> zextload byte
1208 def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
1210 def : Pat<(zextloadi64i32 addr:$src),
1211 (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), x86_subreg_32bit)>;
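// A 32-bit load already zeroes bits 63:32 of the destination register, so the
// i64 zext is free; SUBREG_TO_REG merely records that the upper half is zero.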
1214 def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
1215 def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>;
1216 def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
1217 def : Pat<(extloadi64i32 addr:$src),
1218 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
1222 def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>;
1223 def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>;
1224 def : Pat<(i64 (anyext GR32:$src)),
1225 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, x86_subreg_32bit)>;
1227 def : Pat<(i64 (anyext (loadi8 addr:$src))), (MOVZX64rm8 addr:$src)>;
1228 def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>;
1229 def : Pat<(i64 (anyext (loadi32 addr:$src))),
1230 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
1233 //===----------------------------------------------------------------------===//
1235 //===----------------------------------------------------------------------===//
1237 // r & (2^32-1) ==> mov32 + implicit zext
1238 def : Pat<(and GR64:$src, i64immFFFFFFFF),
1239 (SUBREG_TO_REG (i64 0),
1240 (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)),
1243 // (shl x, 1) ==> (add x, x)
1244 def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
1246 // (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
1247 def : Pat<(or (srl GR64:$src1, CL:$amt),
1248 (shl GR64:$src2, (sub 64, CL:$amt))),
1249 (SHRD64rrCL GR64:$src1, GR64:$src2)>;
1251 def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
1252 (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
1253 (SHRD64mrCL addr:$dst, GR64:$src2)>;
1255 // (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
1256 def : Pat<(or (shl GR64:$src1, CL:$amt),
1257 (srl GR64:$src2, (sub 64, CL:$amt))),
1258 (SHLD64rrCL GR64:$src1, GR64:$src2)>;
1260 def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
1261 (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
1262 (SHLD64mrCL addr:$dst, GR64:$src2)>;
1264 // X86 specific add which produces a flag.
1265 def : Pat<(addc GR64:$src1, GR64:$src2),
1266 (ADD64rr GR64:$src1, GR64:$src2)>;
1267 def : Pat<(addc GR64:$src1, (load addr:$src2)),
1268 (ADD64rm GR64:$src1, addr:$src2)>;
1269 def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
1270 (ADD64ri32 GR64:$src1, imm:$src2)>;
1271 def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
1272 (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
1274 def : Pat<(subc GR64:$src1, GR64:$src2),
1275 (SUB64rr GR64:$src1, GR64:$src2)>;
1276 def : Pat<(subc GR64:$src1, (load addr:$src2)),
1277 (SUB64rm GR64:$src1, addr:$src2)>;
1278 def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
1279 (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
1280 def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
1281 (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
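// addc/subc produce the carry consumed by the adde/sube (ADC/SBB) patterns
// above, so they simply select the flag-setting ADD/SUB forms here.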
1284 //===----------------------------------------------------------------------===//
1285 // X86-64 SSE Instructions
1286 //===----------------------------------------------------------------------===//
1288 // Move instructions...
1290 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
1291 "mov{d|q}\t{$src, $dst|$dst, $src}",
1293 (v2i64 (scalar_to_vector GR64:$src)))]>;
1294 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
1295 "mov{d|q}\t{$src, $dst|$dst, $src}",
1296 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
1299 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1300 "mov{d|q}\t{$src, $dst|$dst, $src}",
1301 [(set FR64:$dst, (bitconvert GR64:$src))]>;
1302 def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
1303 "mov{d|q}\t{$src, $dst|$dst, $src}",
1304 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
1306 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
1307 "mov{d|q}\t{$src, $dst|$dst, $src}",
1308 [(set GR64:$dst, (bitconvert FR64:$src))]>;
1309 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
1310 "mov{d|q}\t{$src, $dst|$dst, $src}",
1311 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
1313 //===----------------------------------------------------------------------===//
1314 // X86-64 SSE4.1 Instructions
1315 //===----------------------------------------------------------------------===//
1317 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
1318 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
1319 def rr : SS4AIi8<opc, MRMSrcReg, (outs GR64:$dst),
1320 (ins VR128:$src1, i32i8imm:$src2),
1321 !strconcat(OpcodeStr,
1322 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1324 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
1325 def mr : SS4AIi8<opc, MRMDestMem, (outs),
1326 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
1327 !strconcat(OpcodeStr,
1328 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1329 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
1330 addr:$dst)]>, OpSize, REX_W;
1333 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
1335 let isTwoAddress = 1 in {
1336 multiclass SS41I_insert64<bits<8> opc, string OpcodeStr> {
1337 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
1338 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
1339 !strconcat(OpcodeStr,
1340 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
1342 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
1344 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
1345 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
1346 !strconcat(OpcodeStr,
1347 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
1349 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
1350 imm:$src3)))]>, OpSize, REX_W;
1354 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">;