//====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86-64 instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand Definitions.
//===----------------------------------------------------------------------===//

// 64-bits but only 32 bits are significant.
def i64i32imm  : Operand<i64>;

// 64-bits but only 32 bits are significant, and those bits are treated as being
// pc relative.
def i64i32imm_pcrel : Operand<i64> {
  let PrintMethod = "print_pcrel_imm";
}

// 64-bits but only 8 bits are significant.
def i64i8imm   : Operand<i64>;

def lea64mem : Operand<i64> {
  let PrintMethod = "printlea64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
}

def lea64_32mem : Operand<i32> {
  let PrintMethod = "printlea64_32mem";
  let AsmOperandLowerMethod = "lower_lea64_32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}

//===----------------------------------------------------------------------===//
// Complex Pattern Definitions.
//===----------------------------------------------------------------------===//
def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
                        [add, mul, X86mul_imm, shl, or, frameindex, X86Wrapper,
                         X86WrapperRIP], []>;

def tls64addr : ComplexPattern<i64, 4, "SelectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

//===----------------------------------------------------------------------===//
// Pattern fragments.
//===----------------------------------------------------------------------===//

def i64immSExt8  : PatLeaf<(i64 imm), [{
  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
  // sign extended field.
  return (int64_t)N->getZExtValue() == (int8_t)N->getZExtValue();
}]>;

def i64immSExt32  : PatLeaf<(i64 imm), [{
  // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // sign extended field.
  return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
}]>;

def i64immZExt32  : PatLeaf<(i64 imm), [{
  // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
  // zero extended field.
  return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
}]>;
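
// For example, -1 (0xFFFFFFFFFFFFFFFF) satisfies i64immSExt8 and
// i64immSExt32 but not i64immZExt32, while 0x00000000FFFFFFFF satisfies
// only i64immZExt32.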
def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
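
// These fragments let the 64-bit extending-load instructions below match
// directly; for example, (i64 (sextloadi8 node:$ptr)) is covered by
// sextloadi64i8 and selected as MOVSX64rm8 further down in this file.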

//===----------------------------------------------------------------------===//
// Instruction list...
//

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In64BitMode]>;
}

//===----------------------------------------------------------------------===//
//  Call Instructions...
//
let isCall = 1 in
  // All calls clobber the non-callee saved registers. RSP is marked as
  // a use to prevent stack-pointer assignments that appear immediately
  // before calls from potentially appearing dead. Uses for argument
  // registers are added manually.
  let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
              FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
              MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
              XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [RSP] in {

    // NOTE: this pattern doesn't match "X86call imm", because we do not know
    // that the offset between an arbitrary immediate and the call will fit in
    // the 32-bit pcrel field that we have.
    def CALL64pcrel32 : Ii32<0xE8, RawFrm,
                          (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
                          "call\t$dst", []>,
                        Requires<[In64BitMode]>;
    def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                          "call\t{*}$dst", [(X86call GR64:$dst)]>;
    def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                          "call\t{*}$dst", [(X86call (loadi64 addr:$dst))]>;
  }

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
  def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset,
                                           variable_ops),
                       "#TC_RETURN $dst $offset",
                       []>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
  def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset,
                                           variable_ops),
                       "#TC_RETURN $dst $offset",
                       []>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
  def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst),
                     "jmp{q}\t{*}$dst  # TAILCALL",
                     []>;

// Branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
  def JMP64r     : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
                     [(brind GR64:$dst)]>;
  def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                     [(brind (loadi64 addr:$dst))]>;
}

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                     "ret\t#eh_return, addr: $addr",
                     [(X86ehret GR64:$addr)]>;
}

//===----------------------------------------------------------------------===//
//  Miscellaneous Instructions...
//
let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64  : I<0xC9, RawFrm,
                 (outs), (ins), "leave", []>;
let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
let mayLoad = 1 in
def POP64r   : I<0x58, AddRegFrm,
                 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
let mayStore = 1 in
def PUSH64r  : I<0x50, AddRegFrm,
                 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
}

let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in {
def PUSH64i8   : Ii8<0x6a, RawFrm, (outs), (ins i8imm:$imm),
                     "push{q}\t$imm", []>;
def PUSH64i16  : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
                      "push{q}\t$imm", []>;
def PUSH64i32  : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
                      "push{q}\t$imm", []>;
}

let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
def POPFQ    : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W;
let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
def PUSHFQ   : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;

def LEA64_32r : I<0x8D, MRMSrcMem,
                  (outs GR32:$dst), (ins lea64_32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

let isReMaterializable = 1 in
def LEA64r   : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
                  "lea{q}\t{$src|$dst}, {$dst|$src}",
                  [(set GR64:$dst, lea64addr:$src)]>;

let isTwoAddress = 1 in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "bswap{q}\t$dst",
                  [(set GR64:$dst, (bswap GR64:$src))]>, TB;

// Bit scan instructions.
let Defs = [EFLAGS] in {
def BSF64rr  : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
def BSF64rm  : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsf{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
                   (implicit EFLAGS)]>, TB;

def BSR64rr  : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
def BSR64rm  : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                  "bsr{q}\t{$src, $dst|$dst, $src}",
                  [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
                   (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;
let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;

//===----------------------------------------------------------------------===//
//  Move Instructions...
//

let neverHasSideEffects = 1 in
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                    "movabs{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;
def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, i64immSExt32:$src)]>;
}

let canFoldAsLoad = 1 in
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "mov{q}\t{$src, $dst|$dst, $src}",
                      [(store i64immSExt32:$src, addr:$dst)]>;

// Sign/Zero extenders

// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
// operand, which makes it a rare instruction with an 8-bit register
// operand that can never access an h register. If support for h registers
// were generalized, this would require a special register class.
def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;

// Use movzbl instead of movzbq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                   "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                   [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                   "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                   [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
// Use movzwl instead of movzwq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                   "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                   [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                   "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                   [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
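
// For example, "movzbl %al, %eax" carries no REX.W prefix yet still clears
// bits 63:32 of RAX, because every write to a 32-bit register zero-extends
// to the full 64 bits.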

// There's no movzlq instruction, but movl can be used for this purpose, using
// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit
// zero-extension, however this isn't possible when the 32-bit value is
// defined by a truncate or is copied from something where the high bits aren't
// necessarily all zero. In such cases, we fall back to these explicit zext
// instructions.
def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
                    "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                    [(set GR64:$dst, (zext GR32:$src))]>;
def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                    [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;

// Any instruction that defines a 32-bit result zeroes the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG, and CopyFromReg may
// be copying from a truncate, but any other 32-bit operation will zero-extend
// the high bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetInstrInfo::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>;
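
// For example, (i64 (zext (add GR32:$a, GR32:$b))) can be selected as a
// 32-bit ADD32rr wrapped in SUBREG_TO_REG, with no separate zero-extend,
// since the 32-bit add already cleared bits 63:32 of the result register.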

let neverHasSideEffects = 1 in {
  let Defs = [RAX], Uses = [EAX] in
  def CDQE : RI<0x98, RawFrm, (outs), (ins),
                "{cltq|cdqe}", []>;     // RAX = signext(EAX)

  let Defs = [RAX,RDX], Uses = [RAX] in
  def CQO  : RI<0x99, RawFrm, (outs), (ins),
                "{cqto|cqo}", []>;      // RDX:RAX = signext(RAX)
}

//===----------------------------------------------------------------------===//
//  Arithmetic Instructions...
//

let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
// Register-Register Addition
def ADD64rr  : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;

// Register-Integer Addition
def ADD64ri8  : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                     "add{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)),
                      (implicit EFLAGS)]>;
def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)),
                       (implicit EFLAGS)]>;
} // isConvertibleToThreeAddress

// Register-Memory Addition
def ADD64rm  : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;
} // isTwoAddress

// Memory-Register Addition
def ADD64mr  : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "add{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (add (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "add{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
                     (implicit EFLAGS)]>;
def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
                      "add{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
                       (implicit EFLAGS)]>;

let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
def ADC64rr  : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;

def ADC64rm  : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;

def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
} // isTwoAddress

def ADC64mr  : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "adc{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "adc{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "adc{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]

let isTwoAddress = 1 in {
// Register-Register Subtraction
def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;

// Register-Memory Subtraction
def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;

// Register-Integer Subtraction
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
                    (ins GR64:$src1, i64i8imm:$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2)),
                     (implicit EFLAGS)]>;
def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
                      (ins GR64:$src1, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2)),
                       (implicit EFLAGS)]>;
} // isTwoAddress

// Memory-Register Subtraction
def SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sub{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
                   (implicit EFLAGS)]>;

// Memory-Integer Subtraction
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sub{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sub (load addr:$dst), i64immSExt8:$src2),
                            addr:$dst),
                     (implicit EFLAGS)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sub{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sub (load addr:$dst), i64immSExt32:$src2),
                              addr:$dst),
                       (implicit EFLAGS)]>;

let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
def SBB64rr    : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;

def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;

def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
} // isTwoAddress

def SBB64mr  : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                  "sbb{q}\t{$src2, $dst|$dst, $src2}",
                  [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                    "sbb{q}\t{$src2, $dst|$dst, $src2}",
                    [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
} // Defs = [EFLAGS]

// Unsigned multiplication
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
                "mul{q}\t$src", []>;         // RAX,RDX = RAX*[mem64]

// Signed multiplication
def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*GR64
let mayLoad = 1 in
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
                 "imul{q}\t$src", []>;       // RAX,RDX = RAX*[mem64]
}

let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
// Register-Register Signed Integer Multiplication
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
                                   (ins GR64:$src1, GR64:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>, TB;

// Register-Memory Signed Integer Multiplication
def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
                                   (ins GR64:$src1, i64mem:$src2),
                  "imul{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>, TB;
} // isTwoAddress

// Surprisingly enough, these are not two address instructions!

// Register-Integer Signed Integer Multiplication
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg,                      // GR64 = GR64*I8
                      (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2)),
                       (implicit EFLAGS)]>;
def IMUL64rri32 : RIi32<0x69, MRMSrcReg,                    // GR64 = GR64*I32
                        (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2)),
                         (implicit EFLAGS)]>;

// Memory-Integer Signed Integer Multiplication
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem,                      // GR64 = [mem64]*I8
                      (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
                      "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR64:$dst, (mul (load addr:$src1),
                                            i64immSExt8:$src2)),
                       (implicit EFLAGS)]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem,                    // GR64 = [mem64]*I32
                        (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
                        "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set GR64:$dst, (mul (load addr:$src1),
                                              i64immSExt32:$src2)),
                         (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Unsigned division / remainder
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),   // RDX:RAX/r64 = RAX,RDX
                "div{q}\t$src", []>;
// Signed division / remainder
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),   // RDX:RAX/r64 = RAX,RDX
                "idiv{q}\t$src", []>;
let mayLoad = 1 in {
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
                "div{q}\t$src", []>;
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
                "idiv{q}\t$src", []>;
}
}

// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
let isTwoAddress = 1 in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
                [(set GR64:$dst, (ineg GR64:$src)),
                 (implicit EFLAGS)]>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
                [(store (ineg (loadi64 addr:$dst)), addr:$dst),
                 (implicit EFLAGS)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, 1)),
                 (implicit EFLAGS)]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
                [(store (add (loadi64 addr:$dst), 1), addr:$dst),
                 (implicit EFLAGS)]>;

let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
                [(set GR64:$dst, (add GR64:$src, -1)),
                 (implicit EFLAGS)]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
                [(store (add (loadi64 addr:$dst), -1), addr:$dst),
                 (implicit EFLAGS)]>;

// In 64-bit mode, single byte INC and DEC cannot be encoded.
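// (For example, the one-byte inc/dec encodings 0x40-0x4F are reused as REX
// prefixes in 64-bit mode, so the two-byte 0xFF /0 and 0xFF /1 forms must
// be used instead.)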
let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, 1)),
                   (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, 1)),
                   (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
                  [(set GR16:$dst, (add GR16:$src, -1)),
                   (implicit EFLAGS)]>,
                OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
                  [(set GR32:$dst, (add GR32:$src, -1)),
                   (implicit EFLAGS)]>,
                Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress

// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
let isTwoAddress = 0, CodeSize = 2 in {
  def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
                    [(store (add (loadi16 addr:$dst), 1), addr:$dst),
                     (implicit EFLAGS)]>,
                  OpSize, Requires<[In64BitMode]>;
  def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
                    [(store (add (loadi32 addr:$dst), 1), addr:$dst),
                     (implicit EFLAGS)]>,
                  Requires<[In64BitMode]>;
  def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
                    [(store (add (loadi16 addr:$dst), -1), addr:$dst),
                     (implicit EFLAGS)]>,
                  OpSize, Requires<[In64BitMode]>;
  def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
                    [(store (add (loadi32 addr:$dst), -1), addr:$dst),
                     (implicit EFLAGS)]>,
                  Requires<[In64BitMode]>;
}
} // Defs = [EFLAGS], CodeSize

let Defs = [EFLAGS] in {
// Shift instructions
let isTwoAddress = 1 in {
let Uses = [CL] in
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (shl GR64:$src, CL))]>;
let isConvertibleToThreeAddress = 1 in   // Can transform into LEA.
def SHL64ri  : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                    "shl{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
// NOTE: We don't use shifts of a register by one, because 'add reg,reg' is
// cheaper.
} // isTwoAddress

let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shl{q}\t{$src, $dst|$dst, $src}",
                   [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
let Uses = [CL] in
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (srl GR64:$src, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "shr{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1  : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t$dst",
                  [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "shr{q}\t{$src, $dst|$dst, $src}",
                   [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
let Uses = [CL] in
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (sra GR64:$src, CL))]>;
def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                   "sar{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
def SAR64r1  : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t$dst",
                  [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
                   "sar{q}\t{$src, $dst|$dst, $src}",
                   [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Rotate instructions
let isTwoAddress = 1 in {
let Uses = [CL] in
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotl GR64:$src, CL))]>;
def ROL64ri  : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                    "rol{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
def ROL64r1  : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t$dst",
                  [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROL64mi  : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "rol{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROL64m1  : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t$dst",
                  [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

let isTwoAddress = 1 in {
let Uses = [CL] in
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(set GR64:$dst, (rotr GR64:$src, CL))]>;
def ROR64ri  : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
                    "ror{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
def ROR64r1  : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t$dst",
                  [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // isTwoAddress

let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, %CL}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
def ROR64mi  : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
                    "ror{q}\t{$src, $dst|$dst, $src}",
                    [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR64m1  : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t$dst",
                  [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;

// Double shift instructions (generalizations of rotate)
let isTwoAddress = 1 in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB;
}

let isCommutable = 1 in {  // FIXME: Update X86InstrInfo::commuteInstruction
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
                                       (i8 imm:$src3)))]>,
                 TB;
} // isCommutable
} // isTwoAddress

let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
                    [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
                      addr:$dst)]>, TB;
}
def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
                                       (i8 imm:$src3)), addr:$dst)]>,
                 TB;
} // Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
//  Logical Instructions...
//

let isTwoAddress = 1, AddedComplexity = 15 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
                [(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
                [(store (not (loadi64 addr:$dst)), addr:$dst)]>;

let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
def AND64rr  : RI<0x21, MRMDestReg,
                  (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;
def AND64rm  : RI<0x23, MRMSrcMem,
                  (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "and{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (and GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;
def AND64ri8 : RIi8<0x83, MRM4r,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "and{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2)),
                     (implicit EFLAGS)]>;
def AND64ri32  : RIi32<0x81, MRM4r,
                       (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                       "and{q}\t{$src2, $dst|$dst, $src2}",
                       [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2)),
                        (implicit EFLAGS)]>;
} // isTwoAddress

def AND64mr  : RI<0x21, MRMDestMem,
                  (outs), (ins i64mem:$dst, GR64:$src),
                  "and{q}\t{$src, $dst|$dst, $src}",
                  [(store (and (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
                    (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "and{q}\t{$src, $dst|$dst, $src}",
                    [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def AND64mi32  : RIi32<0x81, MRM4m,
                       (outs), (ins i64mem:$dst, i64i32imm:$src),
                       "and{q}\t{$src, $dst|$dst, $src}",
                       [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                        (implicit EFLAGS)]>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def OR64rr   : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;
def OR64rm   : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "or{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (or GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;
def OR64ri8  : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "or{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2)),
                     (implicit EFLAGS)]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "or{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2)),
                      (implicit EFLAGS)]>;
} // isTwoAddress

def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                "or{q}\t{$src, $dst|$dst, $src}",
                [(store (or (load addr:$dst), GR64:$src), addr:$dst),
                 (implicit EFLAGS)]>;
def OR64mi8  : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
                    "or{q}\t{$src, $dst|$dst, $src}",
                    [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                     "or{q}\t{$src, $dst|$dst, $src}",
                     [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                      (implicit EFLAGS)]>;

let isTwoAddress = 1 in {
let isCommutable = 1 in
def XOR64rr  : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, GR64:$src2)),
                   (implicit EFLAGS)]>;
def XOR64rm  : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                  "xor{q}\t{$src2, $dst|$dst, $src2}",
                  [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2))),
                   (implicit EFLAGS)]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "xor{q}\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2)),
                     (implicit EFLAGS)]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
                      (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                      "xor{q}\t{$src2, $dst|$dst, $src2}",
                      [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2)),
                       (implicit EFLAGS)]>;
} // isTwoAddress

def XOR64mr  : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "xor{q}\t{$src, $dst|$dst, $src}",
                  [(store (xor (load addr:$dst), GR64:$src), addr:$dst),
                   (implicit EFLAGS)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
                    "xor{q}\t{$src, $dst|$dst, $src}",
                    [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst),
                     (implicit EFLAGS)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "xor{q}\t{$src, $dst|$dst, $src}",
                      [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
//  Comparison Instructions...
//

// Integer comparison
let Defs = [EFLAGS] in {
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, GR64:$src2), 0),
                   (implicit EFLAGS)]>;
def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                  "test{q}\t{$src2, $src1|$src1, $src2}",
                  [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
                   (implicit EFLAGS)]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
                       (ins GR64:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
                       (ins i64mem:$src1, i64i32imm:$src2),
                       "test{q}\t{$src2, $src1|$src1, $src2}",
                       [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
                        (implicit EFLAGS)]>;

def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, GR64:$src2),
                  (implicit EFLAGS)]>;
def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp (loadi64 addr:$src1), GR64:$src2),
                  (implicit EFLAGS)]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
                 "cmp{q}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
                  (implicit EFLAGS)]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp GR64:$src1, i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp GR64:$src1, i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                    "cmp{q}\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
                     (implicit EFLAGS)]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
                      (ins i64mem:$src1, i64i32imm:$src2),
                      "cmp{q}\t{$src2, $src1|$src1, $src2}",
                      [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Bit tests.
// TODO: BTC, BTR, and BTS
let Defs = [EFLAGS] in {
def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                "bt{q}\t{$src2, $src1|$src1, $src2}",
                [(X86bt GR64:$src1, GR64:$src2),
                 (implicit EFLAGS)]>, TB;

// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
// perspective, this is pretty bizarre. Disable these instructions for now.
//def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
//               "bt{q}\t{$src2, $src1|$src1, $src2}",
//               [(X86bt (loadi64 addr:$src1), GR64:$src2),
//                (implicit EFLAGS)]>, TB;
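// For example, "btq %rax, (%rdx)" with a bit index of 100 in %rax tests
// bit 36 of the qword at (%rdx)+8, whereas the register+register form
// truncates the index to 100 mod 64 = 36 within the register operand.
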
def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
                   "bt{q}\t{$src2, $src1|$src1, $src2}",
                   [(X86bt GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)]>, TB;
// Note that these instructions don't need FastBTMem because that
// only applies when the other operand is in a register. When it's
// an immediate, bt is still fast.
def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
                   "bt{q}\t{$src2, $src1|$src1, $src2}",
                   [(X86bt (loadi64 addr:$src1), i64immSExt8:$src2),
                    (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]

// Conditional moves
let Uses = [EFLAGS], isTwoAddress = 1 in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg,       // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg,       // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg,       // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg,       // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg,       // if <=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg,       // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg,       // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg,       // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg,       // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg,       // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg,       // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg,       // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg,       // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg,      // if !parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovnp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rr : RI<0x40, MRMSrcReg,       // if overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovo\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rr : RI<0x41, MRMSrcReg,      // if !overflow, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovno\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NO, EFLAGS))]>, TB;
} // isCommutable = 1

def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem,       // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem,       // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem,       // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem,       // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem,       // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem,       // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem,       // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem,       // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem,       // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem,       // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem,       // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem,       // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem,      // if !parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovnp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NP, EFLAGS))]>, TB;
def CMOVO64rm : RI<0x40, MRMSrcMem,       // if overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovo\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_O, EFLAGS))]>, TB;
def CMOVNO64rm : RI<0x41, MRMSrcMem,      // if !overflow, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovno\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NO, EFLAGS))]>, TB;
} // isTwoAddress

//===----------------------------------------------------------------------===//
//  Conversion Instructions...
//

// f64 -> signed i64
def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
                           "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
                                             (load addr:$src)))]>;
def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
                        "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
                            "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse2_cvttsd2si64
                               (load addr:$src)))]>;

// Signed i64 -> f64
def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let isTwoAddress = 1 in {
def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              GR64:$src2))]>;
def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst,
                             (int_x86_sse2_cvtsi642sd VR128:$src1,
                              (loadi64 addr:$src2)))]>;
} // isTwoAddress

// Signed i64 -> f32
def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;

let isTwoAddress = 1 in {
def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
                            (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               GR64:$src2))]>;
def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
                            (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                            "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
                            [(set VR128:$dst,
                              (int_x86_sse_cvtsi642ss VR128:$src1,
                               (loadi64 addr:$src2)))]>;
} // isTwoAddress

// f32 -> signed i64
def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst,
                             (int_x86_sse_cvtss2si64 VR128:$src))]>;
def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                           "cvtss2si{q}\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (int_x86_sse_cvtss2si64
                                             (load addr:$src)))]>;
def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                        "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 VR128:$src))]>;
def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
                            "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                            [(set GR64:$dst,
                              (int_x86_sse_cvttss2si64 (load addr:$src)))]>;

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map movr0 to xor. Use xorl instead of xorq; it's
// equivalent due to implicit zero-extending, and it sometimes has a smaller
// encoding.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS], AddedComplexity = 1,
    isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0  : I<0x31, MRMInitReg,  (outs GR64:$dst), (ins),
                 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
                 [(set GR64:$dst, 0)]>;
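// For example, zeroing RAX emits "xorl %eax, %eax"; the 32-bit write
// implicitly clears bits 63:32, so no REX.W prefix is needed.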

// Materialize i64 constant where top 32-bits are zero.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                        [(set GR64:$dst, i64immZExt32:$src)]>;

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins lea64mem:$sym),
                   ".byte\t0x66; "
                   "leaq\t$sym(%rip), %rdi; "
                   ".word\t0x6666; "
                   "rex64; "
                   "call\t__tls_get_addr@PLT",
                  [(X86tlsaddr tls64addr:$sym)]>,
                  Requires<[In64BitMode]>;

let AddedComplexity = 5 in
def MOV64GSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                   "movq\t%gs:$src, $dst",
                   [(set GR64:$dst, (gsload addr:$src))]>, SegGS;

let AddedComplexity = 5 in
def MOV64FSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                   "movq\t%fs:$src, $dst",
                   [(set GR64:$dst, (fsload addr:$src))]>, SegFS;

//===----------------------------------------------------------------------===//
// Atomic Instructions
//===----------------------------------------------------------------------===//

let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchgq\t$swap,$ptr",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}

let Constraints = "$val = $dst" in {
let Defs = [EFLAGS] in
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                 "lock\n\t"
                 "xadd\t$val, $ptr",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                TB, LOCK;
def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                  "xchg\t$val, $ptr",
                  [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
}

// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
                  usesCustomDAGSchedInserter = 1 in {
def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMXOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMNAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMUMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMUMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//
// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
// If we have the small code model and are in -static mode, it is safe to
// store global addresses directly as immediates. FIXME: This is really a
// hack, the 'imm' predicate should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[SmallCode, IsStatic]>;
// The same holds for RIP-relative (X86WrapperRIP) addresses.
def : Pat<(store (i64 (X86WrapperRIP tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86WrapperRIP tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86WrapperRIP tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86WrapperRIP texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[SmallCode, IsStatic]>;
// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86tailcall GR64:$dst),
          (CALL64r GR64:$dst)>;
def : Pat<(X86tailcall GR32:$dst),
          (TAILJMPr GR32:$dst)>;
def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
          (TAILJMPd tglobaladdr:$dst)>;
def : Pat<(X86tailcall (i64 texternalsym:$dst)),
          (TAILJMPd texternalsym:$dst)>;
def : Pat<(X86tcret GR64:$dst, imm:$off),
          (TCRETURNri64 GR64:$dst, imm:$off)>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>;
// TEST R,R is smaller than CMP R,0
def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
          (TEST64rr GR64:$src1, GR64:$src1)>;
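// For instance (encodings taken from the Intel manuals, not this file):
// testq %rax, %rax is 48 85 C0 (3 bytes), while cmpq $0, %rax needs an
// immediate byte, 48 83 F8 00 (4 bytes), and both set the flags identically
// for a compare against zero.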
// Conditional moves with folded loads with operands swapped and conditions
// inverted.
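// CMOVcc can only fold a load in its second (conditionally loaded) operand,
// so when the load appears on the other side, swapping the operands and
// inverting the condition (B <-> AE, E <-> NE, etc.) gives an equivalent
// form whose load is foldable; that is what the patterns below do.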
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_B, EFLAGS),
          (CMOVAE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_AE, EFLAGS),
          (CMOVB64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_E, EFLAGS),
          (CMOVNE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NE, EFLAGS),
          (CMOVE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_BE, EFLAGS),
          (CMOVA64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_A, EFLAGS),
          (CMOVBE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_L, EFLAGS),
          (CMOVGE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_GE, EFLAGS),
          (CMOVL64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_LE, EFLAGS),
          (CMOVG64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_G, EFLAGS),
          (CMOVLE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_P, EFLAGS),
          (CMOVNP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NP, EFLAGS),
          (CMOVP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_S, EFLAGS),
          (CMOVNS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NS, EFLAGS),
          (CMOVS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_O, EFLAGS),
          (CMOVNO64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS),
          (CMOVO64rm GR64:$src2, addr:$src1)>;
// zextload bool -> zextload byte
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.
def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register are
// undefined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
                         x86_subreg_32bit)>;
def : Pat<(extloadi16i1 addr:$src),
          (INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
                         x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(extloadi16i8 addr:$src),
          (INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
                         x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (anyext GR8:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR16:$src, x86_subreg_16bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, x86_subreg_32bit)>;
def : Pat<(i16 (anyext GR8:$src)),
          (INSERT_SUBREG (i16 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext GR8:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
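// To illustrate (encodings taken from the Intel manuals, not this file):
// addq $128, %rcx must use the imm32 form, 48 81 C1 80 00 00 00 (7 bytes),
// whereas the equivalent subq $-128, %rcx fits the imm8 form,
// 48 83 E9 80 (4 bytes).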
// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
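// The movz form is also the only practical one here: andq takes just a
// sign-extended imm32, so 0xFFFFFFFF is not encodable as an immediate, while
// a 32-bit register-to-register mov (e.g. movl %ecx, %ecx) implicitly zeroes
// the upper half and performs the same masking.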
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, x86_subreg_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, x86_subreg_8bit)))>,
      Requires<[In64BitMode]>;
// sext_inreg patterns
def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)>,
      Requires<[In64BitMode]>;
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.
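// Background (an x86-64 encoding fact, not specific to this file): any
// instruction carrying a REX prefix loses the ability to address AH/BH/CH/DH,
// which is why the *_NOREX instructions and the GR*_ABCD register classes
// below keep h-register operations REX-free.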
// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (COPY_TO_REGCLASS GR64:$src, GR64_ABCD),
                              x86_subreg_8bit_hi)),
            x86_subreg_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_ABCD),
                            x86_subreg_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl_su GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD),
                              x86_subreg_8bit_hi)),
            x86_subreg_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD),
                            x86_subreg_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD),
                              x86_subreg_8bit_hi)),
            x86_subreg_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (COPY_TO_REGCLASS GR64:$src, GR64_ABCD),
                            x86_subreg_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_ABCD),
                            x86_subreg_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD),
                            x86_subreg_8bit_hi))>,
      Requires<[In64BitMode]>;
// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
// (shl x (and y, 63)) ==> (shl x, y)
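// (The hardware already masks a 64-bit variable shift count to its low 6
// bits, so the explicit 'and' with 63 is redundant and can be dropped.)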
def : Pat<(shl GR64:$src1, (and CL:$amt, 63)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL:$amt, 63)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL:$amt, 63)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;
// (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
def : Pat<(or (srl GR64:$src1, CL:$amt),
              (shl GR64:$src2, (sub 64, CL:$amt))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
                     (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

def : Pat<(or (srl GR64:$src1, (i8 (trunc RCX:$amt))),
              (shl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (srl (loadi64 addr:$dst), (i8 (trunc RCX:$amt))),
                     (shl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
                 addr:$dst),
          (SHRD64mrCL addr:$dst, GR64:$src2)>;

def : Pat<(shrd GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)),
          (SHRD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;

def : Pat<(store (shrd (loadi64 addr:$dst), (i8 imm:$amt1),
                       GR64:$src2, (i8 imm:$amt2)), addr:$dst),
          (SHRD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>;
// (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
def : Pat<(or (shl GR64:$src1, CL:$amt),
              (srl GR64:$src2, (sub 64, CL:$amt))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
                     (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;

def : Pat<(or (shl GR64:$src1, (i8 (trunc RCX:$amt))),
              (srl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;

def : Pat<(store (or (shl (loadi64 addr:$dst), (i8 (trunc RCX:$amt))),
                     (srl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
                 addr:$dst),
          (SHLD64mrCL addr:$dst, GR64:$src2)>;

def : Pat<(shld GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)),
          (SHLD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;

def : Pat<(store (shld (loadi64 addr:$dst), (i8 imm:$amt1),
                       GR64:$src2, (i8 imm:$amt2)), addr:$dst),
          (SHLD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>;
// X86-specific add and sub which produce a flag.
def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
// Register-Register Addition with EFLAGS result
def : Pat<(parallel (X86add_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (ADD64rr GR64:$src1, GR64:$src2)>;

// Register-Integer Addition with EFLAGS result
def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Register-Memory Addition with EFLAGS result
def : Pat<(parallel (X86add_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// Memory-Register Addition with EFLAGS result
def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), GR64:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (ADD64mr addr:$dst, GR64:$src2)>;
def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt8:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (ADD64mi8 addr:$dst, i64immSExt8:$src2)>;
def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt32:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (ADD64mi32 addr:$dst, i64immSExt32:$src2)>;
// Register-Register Subtraction with EFLAGS result
def : Pat<(parallel (X86sub_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (SUB64rr GR64:$src1, GR64:$src2)>;

// Register-Memory Subtraction with EFLAGS result
def : Pat<(parallel (X86sub_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (SUB64rm GR64:$src1, addr:$src2)>;

// Register-Integer Subtraction with EFLAGS result
def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Memory-Register Subtraction with EFLAGS result
def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), GR64:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (SUB64mr addr:$dst, GR64:$src2)>;

// Memory-Integer Subtraction with EFLAGS result
def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt8:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (SUB64mi8 addr:$dst, i64immSExt8:$src2)>;
def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt32:$src2),
                           addr:$dst),
                    (implicit EFLAGS)),
          (SUB64mi32 addr:$dst, i64immSExt32:$src2)>;
// Register-Register Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag GR64:$src1, GR64:$src2),
                    (implicit EFLAGS)),
          (IMUL64rr GR64:$src1, GR64:$src2)>;

// Register-Memory Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag GR64:$src1, (loadi64 addr:$src2)),
                    (implicit EFLAGS)),
          (IMUL64rm GR64:$src1, addr:$src2)>;

// Register-Integer Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;

// Memory-Integer Signed Integer Multiplication with EFLAGS result
def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt8:$src2),
                    (implicit EFLAGS)),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt32:$src2),
                    (implicit EFLAGS)),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
// INC and DEC with EFLAGS result. Note that these do not set CF.
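// (The INC64_16r/INC64_32r style forms exist because in 64-bit mode the
// one-byte 0x40-0x4F INC/DEC opcodes are repurposed as REX prefixes, so
// these must be emitted with the two-byte FF /0 and FF /1 encodings instead.)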
def : Pat<(parallel (X86inc_flag GR16:$src), (implicit EFLAGS)),
          (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(parallel (store (i16 (X86inc_flag (loadi16 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (INC64_16m addr:$dst)>, Requires<[In64BitMode]>;
def : Pat<(parallel (X86dec_flag GR16:$src), (implicit EFLAGS)),
          (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(parallel (store (i16 (X86dec_flag (loadi16 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (DEC64_16m addr:$dst)>, Requires<[In64BitMode]>;

def : Pat<(parallel (X86inc_flag GR32:$src), (implicit EFLAGS)),
          (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(parallel (store (i32 (X86inc_flag (loadi32 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (INC64_32m addr:$dst)>, Requires<[In64BitMode]>;
def : Pat<(parallel (X86dec_flag GR32:$src), (implicit EFLAGS)),
          (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(parallel (store (i32 (X86dec_flag (loadi32 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (DEC64_32m addr:$dst)>, Requires<[In64BitMode]>;

def : Pat<(parallel (X86inc_flag GR64:$src), (implicit EFLAGS)),
          (INC64r GR64:$src)>;
def : Pat<(parallel (store (i64 (X86inc_flag (loadi64 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (INC64m addr:$dst)>;
def : Pat<(parallel (X86dec_flag GR64:$src), (implicit EFLAGS)),
          (DEC64r GR64:$src)>;
def : Pat<(parallel (store (i64 (X86dec_flag (loadi64 addr:$dst))), addr:$dst),
                    (implicit EFLAGS)),
          (DEC64m addr:$dst)>;
//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//
// Move instructions...

def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                         (iPTR 0)))]>;

def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
//===----------------------------------------------------------------------===//
// X86-64 SSE4.1 Instructions
//===----------------------------------------------------------------------===//
/// SS41I_extract64 - SSE 4.1 extract 64 bits to an int reg or memory
/// destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR64:$dst,
                  (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize, REX_W;
}

defm PEXTRQ      : SS41I_extract64<0x16, "pextrq">;
let isTwoAddress = 1 in {
  multiclass SS41I_insert64<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
                   OpSize, REX_W;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                                       imm:$src3)))]>, OpSize, REX_W;
  }
}

defm PINSRQ      : SS41I_insert64<0x22, "pinsrq">;