1 //====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86-64 instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // Operand Definitions.
// 64 bits, but only 32 bits are significant.
def i64i32imm  : Operand<i64>;
// 64 bits, but only 8 bits are significant.
def i64i8imm   : Operand<i64>;
def lea64mem : Operand<i64> {
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
}
def lea64_32mem : Operand<i32> {
  let PrintMethod = "printlea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}
35 //===----------------------------------------------------------------------===//
36 // Complex Pattern Definitions.
def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                               [add, mul, shl, or, frameindex, X86Wrapper],
                               []>;
42 //===----------------------------------------------------------------------===//
def i64immSExt8  : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
  // sign extended field.
  return (int64_t)N->getZExtValue() == (int8_t)N->getZExtValue();
}]>;
def i64immSExt32 : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
  return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
}]>;
def i64immZExt32 : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // zero extended field.
  return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
}]>;
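
// Worked example (illustrative): -128 (0x...FF80) satisfies i64immSExt8,
// since sign-extending its low byte reproduces the value, while +128 does
// not. Similarly, 0xFFFFFFFF satisfies i64immZExt32 but not i64immSExt32,
// because sign-extending the low 32 bits would instead yield -1.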
64 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
65 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
66 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
68 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
69 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
70 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
71 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
73 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
74 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
75 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
76 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
78 //===----------------------------------------------------------------------===//
79 // Instruction list...
82 // ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
83 // a stack adjustment and the codegen must know that they may modify the stack
84 // pointer before prolog-epilog rewriting occurs.
85 // Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
86 // sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                         "#ADJCALLSTACKUP $amt1 $amt2",
                         [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                        Requires<[In64BitMode]>;
}
98 //===----------------------------------------------------------------------===//
99 // Call Instructions...
let isCall = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
103 // a use to prevent stack-pointer assignments that appear immediately
104 // before calls from potentially appearing dead. Uses for argument
105 // registers are added manually.
106 let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
107 FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
108 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
109 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [RSP] in {
112 def CALL64pcrel32 : I<0xE8, RawFrm, (outs), (ins i64imm:$dst, variable_ops),
113 "call\t${dst:call}", []>;
114 def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
115 "call\t{*}$dst", [(X86call GR64:$dst)]>;
116 def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
117 "call\t{*}$dst", [(X86call (loadi64 addr:$dst))]>;
122 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset,
                                         variable_ops),
                     "#TC_RETURN $dst $offset",
                     []>;
128 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset,
                                         variable_ops),
                     "#TC_RETURN $dst $offset",
                     []>;
135 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst),
                   "jmp{q}\t{*}$dst # TAILCALL",
                   []>;
141 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
142 def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
143 [(brind GR64:$dst)]>;
144 def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                 [(brind (loadi64 addr:$dst))]>;
}
148 //===----------------------------------------------------------------------===//
149 // EH Pseudo Instructions
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1 in {
153 def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
154 "ret\t#eh_return, addr: $addr",
                      [(X86ehret GR64:$addr)]>;
}
159 //===----------------------------------------------------------------------===//
160 // Miscellaneous Instructions...
162 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
163 def LEAVE64 : I<0xC9, RawFrm,
164 (outs), (ins), "leave", []>;
let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1 in {
let mayLoad = 1 in
def POP64r   : I<0x58, AddRegFrm,
                 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
let mayStore = 1 in
def PUSH64r  : I<0x50, AddRegFrm,
                 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
}
174 let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
175 def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W;
176 let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
177 def PUSHFQ : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;
179 def LEA64_32r : I<0x8D, MRMSrcMem,
180 (outs GR32:$dst), (ins lea64_32mem:$src),
181 "lea{l}\t{$src|$dst}, {$dst|$src}",
182 [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
184 let isReMaterializable = 1 in
185 def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
186 "lea{q}\t{$src|$dst}, {$dst|$src}",
187 [(set GR64:$dst, lea64addr:$src)]>;
189 let isTwoAddress = 1 in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;
194 // Bit scan instructions.
195 let Defs = [EFLAGS] in {
196 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
197 "bsf{q}\t{$src, $dst|$dst, $src}",
198 [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
199 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
200 "bsf{q}\t{$src, $dst|$dst, $src}",
201 [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
202 (implicit EFLAGS)]>, TB;
204 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
205 "bsr{q}\t{$src, $dst|$dst, $src}",
206 [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
207 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
208 "bsr{q}\t{$src, $dst|$dst, $src}",
209 [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
                    (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]
214 let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
215 def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
216 [(X86rep_movs i64)]>, REP;
217 let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
218 def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
219 [(X86rep_stos i64)]>, REP;
221 //===----------------------------------------------------------------------===//
222 // Move Instructions...
225 let neverHasSideEffects = 1 in
226 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
227 "mov{q}\t{$src, $dst|$dst, $src}", []>;
229 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
230 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
231 "movabs{q}\t{$src, $dst|$dst, $src}",
232 [(set GR64:$dst, imm:$src)]>;
233 def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
234 "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}
238 let canFoldAsLoad = 1 in
239 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
240 "mov{q}\t{$src, $dst|$dst, $src}",
241 [(set GR64:$dst, (load addr:$src))]>;
243 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
244 "mov{q}\t{$src, $dst|$dst, $src}",
245 [(store GR64:$src, addr:$dst)]>;
246 def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
247 "mov{q}\t{$src, $dst|$dst, $src}",
248 [(store i64immSExt32:$src, addr:$dst)]>;
250 // Sign/Zero extenders
252 def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
253 "movs{bq|x}\t{$src, $dst|$dst, $src}",
254 [(set GR64:$dst, (sext GR8:$src))]>, TB;
255 def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
256 "movs{bq|x}\t{$src, $dst|$dst, $src}",
257 [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
258 def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
259 "movs{wq|x}\t{$src, $dst|$dst, $src}",
260 [(set GR64:$dst, (sext GR16:$src))]>, TB;
261 def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
262 "movs{wq|x}\t{$src, $dst|$dst, $src}",
263 [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
264 def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
265 "movs{lq|xd}\t{$src, $dst|$dst, $src}",
266 [(set GR64:$dst, (sext GR32:$src))]>;
267 def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
268 "movs{lq|xd}\t{$src, $dst|$dst, $src}",
269 [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
271 // Use movzbl instead of movzbq when the destination is a register; it's
272 // equivalent due to implicit zero-extending, and it has a smaller encoding.
273 def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
274 "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
275 [(set GR64:$dst, (zext GR8:$src))]>, TB;
276 def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
277 "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
278 [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
279 // Use movzwl instead of movzwq when the destination is a register; it's
280 // equivalent due to implicit zero-extending, and it has a smaller encoding.
281 def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
282 "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
283 [(set GR64:$dst, (zext GR16:$src))]>, TB;
284 def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
285 "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
286 [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
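
// (Encoding note, illustrative: "movzbl %cl, %eax" needs no REX.W prefix,
// yet still defines all of RAX, because every write to a 32-bit register
// implicitly zeroes bits 63:32.)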
288 // There's no movzlq instruction, but movl can be used for this purpose, using
289 // implicit zero-extension. We need this because the seeming alternative for
290 // implementing zext from 32 to 64, an EXTRACT_SUBREG/SUBREG_TO_REG pair, isn't
291 // safe because both instructions could be optimized away in the
292 // register-to-register case, leaving nothing behind to do the zero extension.
293 def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
294 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
295 [(set GR64:$dst, (zext GR32:$src))]>;
296 def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
297 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
298 [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
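
// Illustrative example: a plain "movl %edi, %eax" already implements
// (zext i32 -> i64), since the 32-bit write clears bits 63:32 of RAX. A
// coalesced EXTRACT_SUBREG/SUBREG_TO_REG pair, by contrast, could be
// deleted entirely, leaving no instruction to perform the zeroing.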
300 let neverHasSideEffects = 1 in {
301 let Defs = [RAX], Uses = [EAX] in
302 def CDQE : RI<0x98, RawFrm, (outs), (ins),
303 "{cltq|cdqe}", []>; // RAX = signext(EAX)
305 let Defs = [RAX,RDX], Uses = [RAX] in
306 def CQO : RI<0x99, RawFrm, (outs), (ins),
307 "{cqto|cqo}", []>; // RDX:RAX = signext(RAX)
310 //===----------------------------------------------------------------------===//
311 // Arithmetic Instructions...
314 let Defs = [EFLAGS] in {
315 let isTwoAddress = 1 in {
316 let isConvertibleToThreeAddress = 1 in {
317 let isCommutable = 1 in
318 // Register-Register Addition
def ADD64rr  : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;

// Register-Integer Addition
def ADD64ri8  : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)),
                      (implicit EFLAGS)]>;
def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)),
                       (implicit EFLAGS)]>;
333 } // isConvertibleToThreeAddress
335 // Register-Memory Addition
def ADD64rm  : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;
} // isTwoAddress
342 // Memory-Register Addition
def ADD64mr   : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   "add{q}\t{$src2, $dst|$dst, $src2}",
                   [(store (add (load addr:$dst), GR64:$src2), addr:$dst),
                    (implicit EFLAGS)]>;
def ADD64mi8  : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
                      (implicit EFLAGS)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
                       (implicit EFLAGS)]>;
356 let Uses = [EFLAGS] in {
357 let isTwoAddress = 1 in {
358 let isCommutable = 1 in
359 def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
360 "adc{q}\t{$src2, $dst|$dst, $src2}",
361 [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;
363 def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
364 "adc{q}\t{$src2, $dst|$dst, $src2}",
365 [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;
367 def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
368 "adc{q}\t{$src2, $dst|$dst, $src2}",
369 [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
370 def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
371 "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
} // isTwoAddress
375 def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
376 "adc{q}\t{$src2, $dst|$dst, $src2}",
377 [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
378 def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
379 "adc{q}\t{$src2, $dst|$dst, $src2}",
380 [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
381 def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
382 "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
386 let isTwoAddress = 1 in {
387 // Register-Register Subtraction
388 def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
389 "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;
393 // Register-Memory Subtraction
394 def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
395 "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;
399 // Register-Integer Subtraction
400 def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
401 (ins GR64:$src1, i64i8imm:$src2),
402 "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2)),
                     (implicit EFLAGS)]>;
405 def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
406 (ins GR64:$src1, i64i32imm:$src2),
407 "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2)),
                       (implicit EFLAGS)]>;
} // isTwoAddress
412 // Memory-Register Subtraction
413 def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
414 "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;
418 // Memory-Integer Subtraction
419 def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
420 "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2),
                            addr:$dst),
                     (implicit EFLAGS)]>;
424 def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
425 "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2),
                              addr:$dst),
                       (implicit EFLAGS)]>;
430 let Uses = [EFLAGS] in {
431 let isTwoAddress = 1 in {
432 def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
433 "sbb{q}\t{$src2, $dst|$dst, $src2}",
434 [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;
436 def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
437 "sbb{q}\t{$src2, $dst|$dst, $src2}",
438 [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
440 def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
441 "sbb{q}\t{$src2, $dst|$dst, $src2}",
442 [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
443 def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
444 "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
} // isTwoAddress
448 def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
449 "sbb{q}\t{$src2, $dst|$dst, $src2}",
450 [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
451 def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
452 "sbb{q}\t{$src2, $dst|$dst, $src2}",
453 [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
454 def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
455 "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
} // Defs = [EFLAGS]
460 // Unsigned multiplication
461 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
462 def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
463 "mul{q}\t$src", []>; // RAX,RDX = RAX*GR64
465 def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
466 "mul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
468 // Signed multiplication
469 def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
470 "imul{q}\t$src", []>; // RAX,RDX = RAX*GR64
472 def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
473 "imul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
476 let Defs = [EFLAGS] in {
477 let isTwoAddress = 1 in {
478 let isCommutable = 1 in
479 // Register-Register Signed Integer Multiplication
480 def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
481 (ins GR64:$src1, GR64:$src2),
482 "imul{q}\t{$src2, $dst|$dst, $src2}",
483 [(set GR64:$dst, (mul GR64:$src1, GR64:$src2)),
484 (implicit EFLAGS)]>, TB;
486 // Register-Memory Signed Integer Multiplication
487 def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
488 (ins GR64:$src1, i64mem:$src2),
489 "imul{q}\t{$src2, $dst|$dst, $src2}",
490 [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2))),
                    (implicit EFLAGS)]>, TB;
} // isTwoAddress
// Surprisingly enough, these are not two-address instructions!
496 // Register-Integer Signed Integer Multiplication
497 def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
498 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
499 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2)),
                        (implicit EFLAGS)]>;
502 def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
503 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
504 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2)),
                         (implicit EFLAGS)]>;
508 // Memory-Integer Signed Integer Multiplication
509 def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
510 (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
511 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set GR64:$dst, (mul (load addr:$src1),
                                             i64immSExt8:$src2)),
                        (implicit EFLAGS)]>;
515 def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
516 (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
517 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
518 [(set GR64:$dst, (mul (load addr:$src1),
                                              i64immSExt32:$src2)),
                         (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
523 // Unsigned division / remainder
524 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX
                "div{q}\t$src", []>;
527 // Signed division / remainder
528 def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX
529 "idiv{q}\t$src", []>;
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
                "div{q}\t$src", []>;
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
                "idiv{q}\t$src", []>;
}
538 // Unary instructions
539 let Defs = [EFLAGS], CodeSize = 2 in {
540 let isTwoAddress = 1 in
541 def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
                [(set GR64:$dst, (ineg GR64:$src)),
                 (implicit EFLAGS)]>;
544 def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst),
                 (implicit EFLAGS)]>;
548 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
549 def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, 1)),
                 (implicit EFLAGS)]>;
552 def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst),
                 (implicit EFLAGS)]>;
556 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
557 def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, -1)),
                 (implicit EFLAGS)]>;
560 def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst),
                 (implicit EFLAGS)]>;
564 // In 64-bit mode, single byte INC and DEC cannot be encoded.
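// (Encoding note, illustrative: the one-byte 0x40-0x4F inc/dec opcodes were
// repurposed as REX prefixes in 64-bit mode, so e.g. "incq %rax" must use
// the two-byte FF /0 form, encoding as 48 FF C0.)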
565 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
566 // Can transform into LEA.
567 def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, 1)),
                   (implicit EFLAGS)]>,
570 OpSize, Requires<[In64BitMode]>;
571 def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, 1)),
                   (implicit EFLAGS)]>,
574 Requires<[In64BitMode]>;
575 def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, -1)),
                   (implicit EFLAGS)]>,
578 OpSize, Requires<[In64BitMode]>;
579 def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, -1)),
                   (implicit EFLAGS)]>,
582 Requires<[In64BitMode]>;
583 } // isConvertibleToThreeAddress
585 // These are duplicates of their 32-bit counterparts. Only needed so X86 knows
586 // how to unfold them.
587 let isTwoAddress = 0, CodeSize = 2 in {
588 def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
                    [(store (add (loadi16 addr:$dst), 1), addr:$dst),
                     (implicit EFLAGS)]>,
591 OpSize, Requires<[In64BitMode]>;
592 def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
                    [(store (add (loadi32 addr:$dst), 1), addr:$dst),
                     (implicit EFLAGS)]>,
595 Requires<[In64BitMode]>;
596 def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
                    [(store (add (loadi16 addr:$dst), -1), addr:$dst),
                     (implicit EFLAGS)]>,
599 OpSize, Requires<[In64BitMode]>;
600 def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
                    [(store (add (loadi32 addr:$dst), -1), addr:$dst),
                     (implicit EFLAGS)]>,
                    Requires<[In64BitMode]>;
}
605 } // Defs = [EFLAGS], CodeSize
608 let Defs = [EFLAGS] in {
609 // Shift instructions
let isTwoAddress = 1 in {
let Uses = [CL] in
612 def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
613 "shl{q}\t{%cl, $dst|$dst, %CL}",
614 [(set GR64:$dst, (shl GR64:$src, CL))]>;
615 let isConvertibleToThreeAddress = 1 in // Can transform into LEA.
616 def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
617 "shl{q}\t{$src2, $dst|$dst, $src2}",
618 [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
// NOTE: We don't use shifts of a register by one, because 'add reg,reg' is
// cheaper.
} // isTwoAddress
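// (Illustrative: "addq %rax, %rax" has the same arithmetic effect as
// "shlq $1, %rax" and is generally at least as cheap to execute and encode.)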
let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
625 "shl{q}\t{%cl, $dst|$dst, %CL}",
626 [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
627 def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
628 "shl{q}\t{$src, $dst|$dst, $src}",
629 [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
636 def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
637 "shr{q}\t{%cl, $dst|$dst, %CL}",
638 [(set GR64:$dst, (srl GR64:$src, CL))]>;
639 def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
640 "shr{q}\t{$src2, $dst|$dst, $src2}",
641 [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1  : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t$dst",
                  [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
648 def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
649 "shr{q}\t{%cl, $dst|$dst, %CL}",
650 [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
651 def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
652 "shr{q}\t{$src, $dst|$dst, $src}",
653 [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
660 def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
661 "sar{q}\t{%cl, $dst|$dst, %CL}",
662 [(set GR64:$dst, (sra GR64:$src, CL))]>;
663 def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
664 "sar{q}\t{$src2, $dst|$dst, $src2}",
665 [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1  : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t$dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
672 def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
673 "sar{q}\t{%cl, $dst|$dst, %CL}",
674 [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
675 def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
676 "sar{q}\t{$src, $dst|$dst, $src}",
677 [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
682 // Rotate instructions
let isTwoAddress = 1 in {
let Uses = [CL] in
685 def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
686 "rol{q}\t{%cl, $dst|$dst, %CL}",
687 [(set GR64:$dst, (rotl GR64:$src, CL))]>;
688 def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
689 "rol{q}\t{$src2, $dst|$dst, $src2}",
690 [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t$dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
698 "rol{q}\t{%cl, $dst|$dst, %CL}",
699 [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
700 def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
701 "rol{q}\t{$src, $dst|$dst, $src}",
702 [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                 "rol{q}\t$dst",
                 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
let isTwoAddress = 1 in {
let Uses = [CL] in
709 def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
710 "ror{q}\t{%cl, $dst|$dst, %CL}",
711 [(set GR64:$dst, (rotr GR64:$src, CL))]>;
712 def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
713 "ror{q}\t{$src2, $dst|$dst, $src2}",
714 [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1  : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t$dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
721 def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
722 "ror{q}\t{%cl, $dst|$dst, %CL}",
723 [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
724 def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
725 "ror{q}\t{$src, $dst|$dst, $src}",
726 [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                 "ror{q}\t$dst",
                 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
731 // Double shift instructions (generalizations of rotate)
let isTwoAddress = 1 in {
let Uses = [CL] in
734 def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
735 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
736 [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB;
737 def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
738 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
739 [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB;
742 let isCommutable = 1 in { // FIXME: Update X86InstrInfo::commuteInstruction
743 def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
744 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
745 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
749 def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
750 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
751 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
} // isCommutable
} // isTwoAddress
759 def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
760 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
763 def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
764 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
768 def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
769 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
770 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
771 [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                     (i8 imm:$src3)), addr:$dst)]>,
                 TB;
774 def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
775 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
776 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
777 [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                     (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // Defs = [EFLAGS]
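
// Semantics note (illustrative): "shldq $8, %rbx, %rax" shifts RAX left by
// eight bits and fills the vacated low bits from the high bits of RBX; a
// rotate is the special case where both operands are the same register.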
782 //===----------------------------------------------------------------------===//
783 // Logical Instructions...
let isTwoAddress = 1, AddedComplexity = 15 in
787 def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
788 [(set GR64:$dst, (not GR64:$src))]>;
789 def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
790 [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
792 let Defs = [EFLAGS] in {
793 let isTwoAddress = 1 in {
794 let isCommutable = 1 in
795 def AND64rr : RI<0x21, MRMDestReg,
796 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
797 "and{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (and GR64:$src1, GR64:$src2)),
                  (implicit EFLAGS)]>;
800 def AND64rm : RI<0x23, MRMSrcMem,
801 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
802 "and{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (and GR64:$src1, (load addr:$src2))),
                  (implicit EFLAGS)]>;
805 def AND64ri8 : RIi8<0x83, MRM4r,
806 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
807 "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2)),
                     (implicit EFLAGS)]>;
810 def AND64ri32 : RIi32<0x81, MRM4r,
811 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
812 "and{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2)),
                       (implicit EFLAGS)]>;
} // isTwoAddress
817 def AND64mr : RI<0x21, MRMDestMem,
818 (outs), (ins i64mem:$dst, GR64:$src),
819 "and{q}\t{$src, $dst|$dst, $src}",
                [(store (and (load addr:$dst), GR64:$src), addr:$dst),
                 (implicit EFLAGS)]>;
822 def AND64mi8 : RIi8<0x83, MRM4m,
823 (outs), (ins i64mem:$dst, i64i8imm :$src),
824 "and{q}\t{$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
827 def AND64mi32 : RIi32<0x81, MRM4m,
828 (outs), (ins i64mem:$dst, i64i32imm:$src),
829 "and{q}\t{$src, $dst|$dst, $src}",
                      [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                       (implicit EFLAGS)]>;
833 let isTwoAddress = 1 in {
834 let isCommutable = 1 in
835 def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
836 "or{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (or GR64:$src1, GR64:$src2)),
                  (implicit EFLAGS)]>;
839 def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
840 "or{q}\t{$src2, $dst|$dst, $src2}",
                 [(set GR64:$dst, (or GR64:$src1, (load addr:$src2))),
                  (implicit EFLAGS)]>;
843 def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
844 "or{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2)),
                    (implicit EFLAGS)]>;
847 def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
848 "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2)),
                      (implicit EFLAGS)]>;
} // isTwoAddress
853 def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
854 "or{q}\t{$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst),
                 (implicit EFLAGS)]>;
857 def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
858 "or{q}\t{$src, $dst|$dst, $src}",
                   [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst),
                    (implicit EFLAGS)]>;
861 def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
862 "or{q}\t{$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                      (implicit EFLAGS)]>;
866 let isTwoAddress = 1 in {
867 let isCommutable = 1 in
868 def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
869 "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;
872 def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
873 "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;
876 def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
877 "xor{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2)),
                     (implicit EFLAGS)]>;
880 def XOR64ri32 : RIi32<0x81, MRM6r,
881 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
882 "xor{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2)),
                       (implicit EFLAGS)]>;
} // isTwoAddress
887 def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
888 "xor{q}\t{$src, $dst|$dst, $src}",
                [(store (xor (load addr:$dst), GR64:$src), addr:$dst),
                 (implicit EFLAGS)]>;
891 def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
892 "xor{q}\t{$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
895 def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
896 "xor{q}\t{$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
901 //===----------------------------------------------------------------------===//
902 // Comparison Instructions...
905 // Integer comparison
906 let Defs = [EFLAGS] in {
907 let isCommutable = 1 in
908 def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
909 "test{q}\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp (and GR64:$src1, GR64:$src2), 0),
                    (implicit EFLAGS)]>;
912 def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
913 "test{q}\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
                    (implicit EFLAGS)]>;
916 def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
917 (ins GR64:$src1, i64i32imm:$src2),
918 "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;
921 def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
922 (ins i64mem:$src1, i64i32imm:$src2),
923 "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;
927 def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
928 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, GR64:$src2),
                  (implicit EFLAGS)]>;
931 def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
932 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp (loadi64 addr:$src1), GR64:$src2),
                  (implicit EFLAGS)]>;
935 def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
936 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
                  (implicit EFLAGS)]>;
939 def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
940 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)]>;
943 def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
944 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                     [(X86cmp GR64:$src1, i64immSExt32:$src2),
                      (implicit EFLAGS)]>;
947 def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
948 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
                    (implicit EFLAGS)]>;
951 def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
952 (ins i64mem:$src1, i64i32imm:$src2),
953 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
959 // TODO: BTC, BTR, and BTS
960 let Defs = [EFLAGS] in {
961 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
962 "bt{q}\t{$src2, $src1|$src1, $src2}",
963 [(X86bt GR64:$src1, GR64:$src2),
964 (implicit EFLAGS)]>, TB;
966 // Unlike with the register+register form, the memory+register form of the
967 // bt instruction does not ignore the high bits of the index. From ISel's
968 // perspective, this is pretty bizarre. Disable these instructions for now.
969 //def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
970 // "bt{q}\t{$src2, $src1|$src1, $src2}",
971 // [(X86bt (loadi64 addr:$src1), GR64:$src2),
972 // (implicit EFLAGS)]>, TB;
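
// For example (illustrative): "bt %rax, (%rdi)" with RAX = 100 tests bit 36
// of the quadword at 8(%rdi), i.e. memory beyond the addressed operand,
// because the full 64-bit index selects a bit within a bit string based at
// the effective address.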
974 def BT64ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
975 "bt{q}\t{$src2, $src1|$src1, $src2}",
976 [(X86bt GR64:$src1, i64immSExt8:$src2),
977 (implicit EFLAGS)]>, TB;
978 // Note that these instructions don't need FastBTMem because that
979 // only applies when the other operand is in a register. When it's
980 // an immediate, bt is still fast.
981 def BT64mi8 : Ii8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
982 "bt{q}\t{$src2, $src1|$src1, $src2}",
983 [(X86bt (loadi64 addr:$src1), i64immSExt8:$src2),
                (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]
988 let Uses = [EFLAGS], isTwoAddress = 1 in {
989 let isCommutable = 1 in {
990 def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
991 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
992 "cmovb\t{$src2, $dst|$dst, $src2}",
993 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
994 X86_COND_B, EFLAGS))]>, TB;
995 def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64
996 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
997 "cmovae\t{$src2, $dst|$dst, $src2}",
998 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
999 X86_COND_AE, EFLAGS))]>, TB;
1000 def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64
1001 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1002 "cmove\t{$src2, $dst|$dst, $src2}",
1003 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1004 X86_COND_E, EFLAGS))]>, TB;
1005 def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64
1006 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1007 "cmovne\t{$src2, $dst|$dst, $src2}",
1008 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1009 X86_COND_NE, EFLAGS))]>, TB;
1010 def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64
1011 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1012 "cmovbe\t{$src2, $dst|$dst, $src2}",
1013 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1014 X86_COND_BE, EFLAGS))]>, TB;
1015 def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64
1016 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1017 "cmova\t{$src2, $dst|$dst, $src2}",
1018 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1019 X86_COND_A, EFLAGS))]>, TB;
1020 def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64
1021 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1022 "cmovl\t{$src2, $dst|$dst, $src2}",
1023 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1024 X86_COND_L, EFLAGS))]>, TB;
1025 def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64
1026 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1027 "cmovge\t{$src2, $dst|$dst, $src2}",
1028 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1029 X86_COND_GE, EFLAGS))]>, TB;
1030 def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64
1031 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1032 "cmovle\t{$src2, $dst|$dst, $src2}",
1033 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1034 X86_COND_LE, EFLAGS))]>, TB;
1035 def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64
1036 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1037 "cmovg\t{$src2, $dst|$dst, $src2}",
1038 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1039 X86_COND_G, EFLAGS))]>, TB;
1040 def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64
1041 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1042 "cmovs\t{$src2, $dst|$dst, $src2}",
1043 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1044 X86_COND_S, EFLAGS))]>, TB;
1045 def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64
1046 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1047 "cmovns\t{$src2, $dst|$dst, $src2}",
1048 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1049 X86_COND_NS, EFLAGS))]>, TB;
1050 def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64
1051 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1052 "cmovp\t{$src2, $dst|$dst, $src2}",
1053 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1054 X86_COND_P, EFLAGS))]>, TB;
1055 def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64
1056 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1057 "cmovnp\t{$src2, $dst|$dst, $src2}",
1058 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1059 X86_COND_NP, EFLAGS))]>, TB;
1060 def CMOVO64rr : RI<0x40, MRMSrcReg, // if overflow, GR64 = GR64
1061 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1062 "cmovo\t{$src2, $dst|$dst, $src2}",
1063 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1064 X86_COND_O, EFLAGS))]>, TB;
1065 def CMOVNO64rr : RI<0x41, MRMSrcReg, // if !overflow, GR64 = GR64
1066 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1067 "cmovno\t{$src2, $dst|$dst, $src2}",
1068 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1069 X86_COND_NO, EFLAGS))]>, TB;
1070 } // isCommutable = 1
1072 def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
1073 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1074 "cmovb\t{$src2, $dst|$dst, $src2}",
1075 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1076 X86_COND_B, EFLAGS))]>, TB;
1077 def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
1078 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1079 "cmovae\t{$src2, $dst|$dst, $src2}",
1080 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1081 X86_COND_AE, EFLAGS))]>, TB;
1082 def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
1083 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1084 "cmove\t{$src2, $dst|$dst, $src2}",
1085 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1086 X86_COND_E, EFLAGS))]>, TB;
1087 def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
1088 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1089 "cmovne\t{$src2, $dst|$dst, $src2}",
1090 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1091 X86_COND_NE, EFLAGS))]>, TB;
1092 def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
1093 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1094 "cmovbe\t{$src2, $dst|$dst, $src2}",
1095 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1096 X86_COND_BE, EFLAGS))]>, TB;
1097 def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
1098 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1099 "cmova\t{$src2, $dst|$dst, $src2}",
1100 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1101 X86_COND_A, EFLAGS))]>, TB;
1102 def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64]
1103 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1104 "cmovl\t{$src2, $dst|$dst, $src2}",
1105 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1106 X86_COND_L, EFLAGS))]>, TB;
1107 def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
1108 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1109 "cmovge\t{$src2, $dst|$dst, $src2}",
1110 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1111 X86_COND_GE, EFLAGS))]>, TB;
1112 def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
1113 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1114 "cmovle\t{$src2, $dst|$dst, $src2}",
1115 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1116 X86_COND_LE, EFLAGS))]>, TB;
1117 def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
1118 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1119 "cmovg\t{$src2, $dst|$dst, $src2}",
1120 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1121 X86_COND_G, EFLAGS))]>, TB;
1122 def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
1123 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1124 "cmovs\t{$src2, $dst|$dst, $src2}",
1125 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1126 X86_COND_S, EFLAGS))]>, TB;
1127 def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
1128 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1129 "cmovns\t{$src2, $dst|$dst, $src2}",
1130 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1131 X86_COND_NS, EFLAGS))]>, TB;
1132 def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
1133 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1134 "cmovp\t{$src2, $dst|$dst, $src2}",
1135 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1136 X86_COND_P, EFLAGS))]>, TB;
1137 def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64]
1138 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1139 "cmovnp\t{$src2, $dst|$dst, $src2}",
1140 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1141 X86_COND_NP, EFLAGS))]>, TB;
1142 def CMOVO64rm : RI<0x40, MRMSrcMem, // if overflow, GR64 = [mem64]
1143 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1144 "cmovo\t{$src2, $dst|$dst, $src2}",
1145 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1146 X86_COND_O, EFLAGS))]>, TB;
1147 def CMOVNO64rm : RI<0x41, MRMSrcMem, // if !overflow, GR64 = [mem64]
1148 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1149 "cmovno\t{$src2, $dst|$dst, $src2}",
1150 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NO, EFLAGS))]>, TB;
} // Uses = [EFLAGS], isTwoAddress
1154 //===----------------------------------------------------------------------===//
1155 // Conversion Instructions...
1158 // f64 -> signed i64
1159 def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1160 "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1162 (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
1163 def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
1164 "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1165 [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
1166 (load addr:$src)))]>;
1167 def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
1168 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1169 [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
1170 def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
1171 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1172 [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
1173 def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1174 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1176 (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
1177 def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
1178 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1180 (int_x86_sse2_cvttsd2si64
1181 (load addr:$src)))]>;
1183 // Signed i64 -> f64
1184 def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1185 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1186 [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
1187 def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
1188 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1189 [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
1191 let isTwoAddress = 1 in {
1192 def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
1193 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1194 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
1196 (int_x86_sse2_cvtsi642sd VR128:$src1,
1198 def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
1199 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1200 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
1202 (int_x86_sse2_cvtsi642sd VR128:$src1,
1203 (loadi64 addr:$src2)))]>;
1206 // Signed i64 -> f32
1207 def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
1208 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1209 [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
1210 def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
1211 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1212 [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
1214 let isTwoAddress = 1 in {
1215 def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
1216 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1217 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
1219 (int_x86_sse_cvtsi642ss VR128:$src1,
1221 def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
1222 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1223 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
1225 (int_x86_sse_cvtsi642ss VR128:$src1,
1226 (loadi64 addr:$src2)))]>;
1229 // f32 -> signed i64
1230 def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1231 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
1233 (int_x86_sse_cvtss2si64 VR128:$src))]>;
1234 def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1235 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
1236 [(set GR64:$dst, (int_x86_sse_cvtss2si64
1237 (load addr:$src)))]>;
1238 def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
1239 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1240 [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
1241 def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1242 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1243 [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
1244 def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1245 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1247 (int_x86_sse_cvttss2si64 VR128:$src))]>;
1248 def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1249 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1251 (int_x86_sse_cvttss2si64 (load addr:$src)))]>;
1253 //===----------------------------------------------------------------------===//
1254 // Alias Instructions
1255 //===----------------------------------------------------------------------===//
1257 // Alias instructions that map movr0 to xor. Use xorl instead of xorq; it's
// equivalent due to implicit zero-extending, and it sometimes has a smaller
// encoding.
1260 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
1261 // FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
1262 // when we have a better way to specify isel priority.
1263 let Defs = [EFLAGS], AddedComplexity = 1,
1264 isReMaterializable = 1, isAsCheapAsAMove = 1 in
1265 def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins),
1266 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
1267 [(set GR64:$dst, 0)]>;
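
// (Encoding note, illustrative: "xorl %eax, %eax" is two bytes and still
// clears all 64 bits of RAX via implicit zero-extension, whereas
// "movq $0, %rax" takes seven bytes.)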
1269 // Materialize i64 constant where top 32-bits are zero.
1270 let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
1271 def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
1272 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1273 [(set GR64:$dst, i64immZExt32:$src)]>;
1275 //===----------------------------------------------------------------------===//
1276 // Thread Local Storage Instructions
1277 //===----------------------------------------------------------------------===//
1279 def TLS_addr64 : I<0, Pseudo, (outs GR64:$dst), (ins i64imm:$sym),
1280 ".byte\t0x66; leaq\t${sym:mem}(%rip), $dst; .word\t0x6666; rex64",
1281 [(set GR64:$dst, (X86tlsaddr tglobaltlsaddr:$sym))]>;
1283 let AddedComplexity = 5 in
1284 def MOV64GSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1285 "movq\t%gs:$src, $dst",
1286 [(set GR64:$dst, (gsload addr:$src))]>, SegGS;
1288 //===----------------------------------------------------------------------===//
1289 // Atomic Instructions
1290 //===----------------------------------------------------------------------===//
1292 let Defs = [RAX, EFLAGS], Uses = [RAX] in {
1293 def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
1294 "lock\n\tcmpxchgq\t$swap,$ptr",
                   [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
1298 let Constraints = "$val = $dst" in {
1299 let Defs = [EFLAGS] in
1300 def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
1301 "lock\n\txadd\t$val, $ptr",
               [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                TB, LOCK;
def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                  "xchg\t$val, $ptr",
                  [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
}
1309 // Atomic exchange, and, or, xor
1310 let Constraints = "$val = $dst", Defs = [EFLAGS],
1311 usesCustomDAGSchedInserter = 1 in {
1312 def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1313 "#ATOMAND64 PSEUDO!",
1314 [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
1315 def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1316 "#ATOMOR64 PSEUDO!",
1317 [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
1318 def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1319 "#ATOMXOR64 PSEUDO!",
1320 [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
1321 def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1322 "#ATOMNAND64 PSEUDO!",
1323 [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
1324 def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
1325 "#ATOMMIN64 PSEUDO!",
1326 [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
1327 def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1328 "#ATOMMAX64 PSEUDO!",
1329 [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
1330 def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1331 "#ATOMUMIN64 PSEUDO!",
1332 [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
1333 def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1334 "#ATOMUMAX64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}
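
// Illustrative expansion (sketch): the custom inserter lowers each of these
// pseudos to a compare-and-swap loop, roughly
//   .retry: movq (%ptr), %rax
//           movq %rax, %tmp ; <op>q %val, %tmp
//           lock cmpxchgq %tmp, (%ptr)
//           jne .retry
// (the min/max variants compute %tmp with a compare and cmov instead).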
1338 //===----------------------------------------------------------------------===//
1339 // Non-Instruction Patterns
1340 //===----------------------------------------------------------------------===//
1342 // ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
1343 def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
1344 (MOV64ri tconstpool :$dst)>, Requires<[NotSmallCode]>;
1345 def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
1346 (MOV64ri tjumptable :$dst)>, Requires<[NotSmallCode]>;
1347 def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
1348 (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
1349 def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
1350 (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
1352 def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
1353 (MOV64mi32 addr:$dst, tconstpool:$src)>,
1354 Requires<[SmallCode, IsStatic]>;
1355 def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
1356 (MOV64mi32 addr:$dst, tjumptable:$src)>,
1357 Requires<[SmallCode, IsStatic]>;
1358 def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
1359 (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
1360 Requires<[SmallCode, IsStatic]>;
1361 def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
1362 (MOV64mi32 addr:$dst, texternalsym:$src)>,
1363 Requires<[SmallCode, IsStatic]>;
// Calls
// Direct PC-relative function call for small code model. 32-bit displacement
// sign-extended to 64 bits.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall GR64:$dst),
          (CALL64r GR64:$dst)>;
// Tailcall stuff.
def : Pat<(X86tailcall GR32:$dst),
          (TAILJMPr GR32:$dst)>;
def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (TAILJMPd tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (TAILJMPd texternalsym:$dst)>;
def : Pat<(X86tcret GR64:$dst, imm:$off),
          (TCRETURNri64 GR64:$dst, imm:$off)>;
def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;
def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>;
// TEST R,R is smaller than CMP R,0
def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
          (TEST64rr GR64:$src1, GR64:$src1)>;
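// e.g. "testq %rax, %rax" is 48 85 C0 (3 bytes), while even the imm8 form
// "cmpq $0, %rax" needs 48 83 F8 00 (4 bytes).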
// Conditional moves with folded loads, with operands swapped and conditions
// inverted.
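// A CMOVcc can only fold a load as its second (r/m) operand, so when the
// load appears first in the select, the operands trade places and the
// condition code is inverted to compensate (B <-> AE, E <-> NE, etc.).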
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_B, EFLAGS),
          (CMOVAE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_AE, EFLAGS),
          (CMOVB64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_E, EFLAGS),
          (CMOVNE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NE, EFLAGS),
          (CMOVE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_BE, EFLAGS),
          (CMOVA64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_A, EFLAGS),
          (CMOVBE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_L, EFLAGS),
          (CMOVGE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_GE, EFLAGS),
          (CMOVL64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_LE, EFLAGS),
          (CMOVG64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_G, EFLAGS),
          (CMOVLE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_P, EFLAGS),
          (CMOVNP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NP, EFLAGS),
          (CMOVP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_S, EFLAGS),
          (CMOVNS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NS, EFLAGS),
          (CMOVS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_O, EFLAGS),
          (CMOVNO64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS),
          (CMOVO64rm GR64:$src2, addr:$src1)>;
def : Pat<(i64 (zext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>;
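// No instruction is needed here: writing a 32-bit register implicitly zeroes
// bits 63-32 of the full register, so SUBREG_TO_REG just records that the
// zero-extended value already lives in the enclosing 64-bit register.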
// zextload bool -> zextload byte
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.
def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
                         x86_subreg_32bit)>;
def : Pat<(extloadi16i1 addr:$src),
          (INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
                         x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(extloadi16i8 addr:$src),
          (INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
                         x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
// anyext
def : Pat<(i64 (anyext GR8:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR16:$src, x86_subreg_16bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, x86_subreg_32bit)>;
def : Pat<(i16 (anyext GR8:$src)),
          (INSERT_SUBREG (i16 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext GR8:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
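// e.g. "addq $128, %rax" needs a 32-bit immediate (48 05 80 00 00 00, 6
// bytes), while "subq $-128, %rax" fits the imm8 form (48 83 E8 80, 4 bytes).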
// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)))>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR32:$src1, x86_subreg_8bit)))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, x86_subreg_8bit)))>,
      Requires<[In64BitMode]>;
// sext_inreg patterns
def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (i8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit)))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)))>,
      Requires<[In64BitMode]>;
// trunc patterns
def : Pat<(i32 (trunc GR64:$src)),
          (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
def : Pat<(i16 (trunc GR64:$src)),
          (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
def : Pat<(i8 (trunc GR64:$src)),
          (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit))>;
def : Pat<(i8 (trunc GR32:$src)),
          (i8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit))>,
      Requires<[In64BitMode]>;
// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
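// The add form is the same size (3 bytes) but typically issues on more ALU
// ports, and a register-register add can later be turned into an LEA by the
// two-address pass, which a shift cannot.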
// (shl x (and y, 63)) ==> (shl x, y)
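// In 64-bit mode the hardware masks the shift count to 6 bits anyway, so an
// explicit "and $63" on the amount in CL is redundant and can be dropped.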
def : Pat<(shl GR64:$src1, (and CL:$amt, 63)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL:$amt, 63)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL:$amt, 63)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;
// (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
def : Pat<(or (srl GR64:$src1, CL:$amt),
              (shl GR64:$src2, (sub 64, CL:$amt))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
                     (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

def : Pat<(or (srl GR64:$src1, (i8 (trunc RCX:$amt))),
              (shl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (srl (loadi64 addr:$dst), (i8 (trunc RCX:$amt))),
                     (shl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
                 addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

def : Pat<(shrd GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)),
          (SHRD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;

def : Pat<(store (shrd (loadi64 addr:$dst), (i8 imm:$amt1),
                       GR64:$src2, (i8 imm:$amt2)), addr:$dst),
          (SHRD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>;
// (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
def : Pat<(or (shl GR64:$src1, CL:$amt),
              (srl GR64:$src2, (sub 64, CL:$amt))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
                     (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;

def : Pat<(or (shl GR64:$src1, (i8 (trunc RCX:$amt))),
              (srl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (shl (loadi64 addr:$dst), (i8 (trunc RCX:$amt))),
                     (srl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
                 addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;

def : Pat<(shld GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)),
          (SHLD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;

def : Pat<(store (shld (loadi64 addr:$dst), (i8 imm:$amt1),
                       GR64:$src2, (i8 imm:$amt2)), addr:$dst),
          (SHLD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>;
// X86-specific add which produces a flag.
def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
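// addc/subc (paired with adde/sube) show up when legalization expands integer
// arithmetic wider than 64 bits, e.g. an i128 addition.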
//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
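// Each source dag below pairs the arithmetic node with (implicit EFLAGS) so
// that, when both the value and the flags are live, the selector picks the
// flag-setting form of the instruction instead of a separate test/cmp.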
// Register-Register Addition with EFLAGS result
def : Pat<(parallel (X86add_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (ADD64rr GR64:$src1, GR64:$src2)>;

// Register-Integer Addition with EFLAGS result
def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Register-Memory Addition with EFLAGS result
def : Pat<(parallel (X86add_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// Memory-Register Addition with EFLAGS result
def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), GR64:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (ADD64mr addr:$dst, GR64:$src2)>;
def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt8:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (ADD64mi8 addr:$dst, i64immSExt8:$src2)>;
def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt32:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (ADD64mi32 addr:$dst, i64immSExt32:$src2)>;

// Register-Register Subtraction with EFLAGS result
def : Pat<(parallel (X86sub_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (SUB64rr GR64:$src1, GR64:$src2)>;

// Register-Memory Subtraction with EFLAGS result
def : Pat<(parallel (X86sub_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (SUB64rm GR64:$src1, addr:$src2)>;

// Register-Integer Subtraction with EFLAGS result
def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Memory-Register Subtraction with EFLAGS result
def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), GR64:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (SUB64mr addr:$dst, GR64:$src2)>;

// Memory-Integer Subtraction with EFLAGS result
def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt8:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (SUB64mi8 addr:$dst, i64immSExt8:$src2)>;
def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt32:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (SUB64mi32 addr:$dst, i64immSExt32:$src2)>;

// Register-Register Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (IMUL64rr GR64:$src1, GR64:$src2)>;

// Register-Memory Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (IMUL64rm GR64:$src1, addr:$src2)>;

// Register-Integer Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;

// Memory-Integer Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// INC and DEC with EFLAGS result. Note that these do not set CF.
def : Pat<(parallel (X86inc_flag GR64:$src), (implicit EFLAGS)),
          (INC64r GR64:$src)>;
def : Pat<(parallel (store (i64 (X86inc_flag (loadi64 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (INC64m addr:$dst)>;
def : Pat<(parallel (X86dec_flag GR64:$src), (implicit EFLAGS)),
          (DEC64r GR64:$src)>;
def : Pat<(parallel (store (i64 (X86dec_flag (loadi64 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (DEC64m addr:$dst)>;
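// Because inc/dec leave CF untouched, they cannot stand in for an add/sub of
// 1 when the carry result is consumed (e.g. by the addc/subc patterns above).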
//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//

// Move instructions...
def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                         (iPTR 0)))]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
//===----------------------------------------------------------------------===//
// X86-64 SSE4.1 Instructions
//===----------------------------------------------------------------------===//
/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                   (ins VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR64:$dst,
                     (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                           addr:$dst)]>, OpSize, REX_W;
}

defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
let isTwoAddress = 1 in {
  multiclass SS41I_insert64<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                       (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
                     OpSize, REX_W;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                     (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                       (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                                         imm:$src3)))]>, OpSize, REX_W;
  }
}

defm PINSRQ : SS41I_insert64<0x22, "pinsrq">;