1 //====- X86Instr64bit.td - Describe X86-64 Instructions ----*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86-64 instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // Operand Definitions.
20 // 64-bits but only 32 bits are significant.
21 def i64i32imm : Operand<i64>;
22 // 64-bits but only 8 bits are significant.
23 def i64i8imm : Operand<i64>;
25 def lea64mem : Operand<i64> {
26 let PrintMethod = "printi64mem";
27 let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
30 def lea64_32mem : Operand<i32> {
31 let PrintMethod = "printlea64_32mem";
32 let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
35 //===----------------------------------------------------------------------===//
36 // Complex Pattern Definitions.
38 def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
39 [add, mul, shl, or, frameindex, X86Wrapper],
42 //===----------------------------------------------------------------------===//
46 def i64immSExt32 : PatLeaf<(i64 imm), [{
47 // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
48 // sign extended field.
49 return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
52 def i64immZExt32 : PatLeaf<(i64 imm), [{
53 // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
54 // zero extended field.
55 return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
58 def i64immSExt8 : PatLeaf<(i64 imm), [{
59 // i64immSExt8 predicate - True if the 64-bit immediate fits in a 8-bit
60 // sign extended field.
61 return (int64_t)N->getZExtValue() == (int8_t)N->getZExtValue();
// Pattern fragments that fold an extending load from a narrower memory type
// directly into a node producing i64.
// Sign-extending loads: i8/i16/i32 -> i64.
64 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
65 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
66 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
// Zero-extending loads: i1/i8/i16/i32 -> i64.
68 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
69 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
70 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
71 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
// Any-extending loads (value of the high bits unspecified): i1/i8/i16/i32 -> i64.
73 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
74 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
75 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
76 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
78 //===----------------------------------------------------------------------===//
79 // Instruction list...
82 // ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
83 // a stack adjustment and the codegen must know that they may modify the stack
84 // pointer before prolog-epilog rewriting occurs.
85 // Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
86 // sub / add which can clobber EFLAGS.
87 let Defs = [RSP, EFLAGS], Uses = [RSP] in {
88 def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
90 [(X86callseq_start timm:$amt)]>,
91 Requires<[In64BitMode]>;
92 def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
94 [(X86callseq_end timm:$amt1, timm:$amt2)]>,
95 Requires<[In64BitMode]>;
98 //===----------------------------------------------------------------------===//
99 // Call Instructions...
102 // All calls clobber the non-callee saved registers. RSP is marked as
103 // a use to prevent stack-pointer assignments that appear immediately
104 // before calls from potentially appearing dead. Uses for argument
105 // registers are added manually.
106 let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
107 FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
108 MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
109 XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
110 XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
112 def CALL64pcrel32 : I<0xE8, RawFrm, (outs), (ins i64imm:$dst, variable_ops),
113 "call\t${dst:call}", []>;
114 def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
115 "call\t{*}$dst", [(X86call GR64:$dst)]>;
116 def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
117 "call\t{*}$dst", [(X86call (loadi64 addr:$dst))]>;
122 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
123 def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset, variable_ops),
124 "#TC_RETURN $dst $offset",
127 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
128 def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset, variable_ops),
129 "#TC_RETURN $dst $offset",
133 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
134 def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst # TAILCALL",
138 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
139 def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
140 [(brind GR64:$dst)]>;
141 def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
142 [(brind (loadi64 addr:$dst))]>;
145 //===----------------------------------------------------------------------===//
146 // EH Pseudo Instructions
148 let isTerminator = 1, isReturn = 1, isBarrier = 1,
150 def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
151 "ret\t#eh_return, addr: $addr",
152 [(X86ehret GR64:$addr)]>;
156 //===----------------------------------------------------------------------===//
157 // Miscellaneous Instructions...
159 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
160 def LEAVE64 : I<0xC9, RawFrm,
161 (outs), (ins), "leave", []>;
162 let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
164 def POP64r : I<0x58, AddRegFrm,
165 (outs GR64:$reg), (ins), "pop{q}\t$reg", []>;
167 def PUSH64r : I<0x50, AddRegFrm,
168 (outs), (ins GR64:$reg), "push{q}\t$reg", []>;
171 let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
172 def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W;
173 let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
174 def PUSHFQ : I<0x9C, RawFrm, (outs), (ins), "pushf", []>;
176 def LEA64_32r : I<0x8D, MRMSrcMem,
177 (outs GR32:$dst), (ins lea64_32mem:$src),
178 "lea{l}\t{$src|$dst}, {$dst|$src}",
179 [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
181 let isReMaterializable = 1 in
182 def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
183 "lea{q}\t{$src|$dst}, {$dst|$src}",
184 [(set GR64:$dst, lea64addr:$src)]>;
186 let isTwoAddress = 1 in
187 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
189 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
191 // Bit scan instructions.
192 let Defs = [EFLAGS] in {
193 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
194 "bsf{q}\t{$src, $dst|$dst, $src}",
195 [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
196 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
197 "bsf{q}\t{$src, $dst|$dst, $src}",
198 [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
199 (implicit EFLAGS)]>, TB;
201 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
202 "bsr{q}\t{$src, $dst|$dst, $src}",
203 [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
204 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
205 "bsr{q}\t{$src, $dst|$dst, $src}",
206 [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
207 (implicit EFLAGS)]>, TB;
211 let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
212 def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
213 [(X86rep_movs i64)]>, REP;
214 let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
215 def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
216 [(X86rep_stos i64)]>, REP;
218 //===----------------------------------------------------------------------===//
219 // Move Instructions...
222 let neverHasSideEffects = 1 in
223 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
224 "mov{q}\t{$src, $dst|$dst, $src}", []>;
226 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
227 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
228 "movabs{q}\t{$src, $dst|$dst, $src}",
229 [(set GR64:$dst, imm:$src)]>;
230 def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
231 "mov{q}\t{$src, $dst|$dst, $src}",
232 [(set GR64:$dst, i64immSExt32:$src)]>;
235 let canFoldAsLoad = 1 in
236 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
237 "mov{q}\t{$src, $dst|$dst, $src}",
238 [(set GR64:$dst, (load addr:$src))]>;
// Memory-destination 64-bit stores: from a register, or from an immediate.
// The immediate form takes i64i32imm and matches i64immSExt32 because x86-64
// has no mov m64, imm64 encoding; the 32-bit immediate is sign-extended.
240 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
241 "mov{q}\t{$src, $dst|$dst, $src}",
242 [(store GR64:$src, addr:$dst)]>;
243 def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
244 "mov{q}\t{$src, $dst|$dst, $src}",
245 [(store i64immSExt32:$src, addr:$dst)]>;
247 // Sign/Zero extenders
// Sign-extending moves into a 64-bit register, from register or memory.
// movs{bq} / movs{wq}: 0F BE / 0F BF with REX.W (hence RI + TB).
249 def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
250 "movs{bq|x}\t{$src, $dst|$dst, $src}",
251 [(set GR64:$dst, (sext GR8:$src))]>, TB;
252 def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
253 "movs{bq|x}\t{$src, $dst|$dst, $src}",
254 [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
255 def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
256 "movs{wq|x}\t{$src, $dst|$dst, $src}",
257 [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
258 def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
259 "movs{wq|x}\t{$src, $dst|$dst, $src}",
260 [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
// movslq (MOVSXD) uses single-byte opcode 0x63, so no TB (0F) prefix here.
261 def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
262 "movs{lq|xd}\t{$src, $dst|$dst, $src}",
263 [(set GR64:$dst, (sext GR32:$src))]>;
264 def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
265 "movs{lq|xd}\t{$src, $dst|$dst, $src}",
266 [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
268 // Use movzbl instead of movzbq when the destination is a register; it's
269 // equivalent due to implicit zero-extending, and it has a smaller encoding.
270 def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
271 "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
272 [(set GR64:$dst, (zext GR8:$src))]>, TB;
273 def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
274 "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
275 [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
276 // Use movzwl instead of movzwq when the destination is a register; it's
277 // equivalent due to implicit zero-extending, and it has a smaller encoding.
278 def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
279 "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
280 [(set GR64:$dst, (zext GR16:$src))]>, TB;
281 def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
282 "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
283 [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
285 // There's no movzlq instruction, but movl can be used for this purpose, using
286 // implicit zero-extension. We need this because the seeming alternative for
287 // implementing zext from 32 to 64, an EXTRACT_SUBREG/SUBREG_TO_REG pair, isn't
288 // safe because both instructions could be optimized away in the
289 // register-to-register case, leaving nothing behind to do the zero extension.
290 def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
291 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
292 [(set GR64:$dst, (zext GR32:$src))]>;
293 def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
294 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
295 [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
297 let neverHasSideEffects = 1 in {
298 let Defs = [RAX], Uses = [EAX] in
299 def CDQE : RI<0x98, RawFrm, (outs), (ins),
300 "{cltq|cdqe}", []>; // RAX = signext(EAX)
302 let Defs = [RAX,RDX], Uses = [RAX] in
303 def CQO : RI<0x99, RawFrm, (outs), (ins),
304 "{cqto|cqo}", []>; // RDX:RAX = signext(RAX)
307 //===----------------------------------------------------------------------===//
308 // Arithmetic Instructions...
311 let Defs = [EFLAGS] in {
312 let isTwoAddress = 1 in {
313 let isConvertibleToThreeAddress = 1 in {
314 let isCommutable = 1 in
315 // Register-Register Addition
316 def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
317 "add{q}\t{$src2, $dst|$dst, $src2}",
318 [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>;
320 // Register-Register Addition with Overflow
321 def ADDOvf64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
322 "add{q}\t{$src2, $dst|$dst, $src2}",
323 [(set GR64:$dst, (X86add_ovf GR64:$src1, GR64:$src2)),
326 // Register-Integer Addition
327 def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
328 "add{q}\t{$src2, $dst|$dst, $src2}",
329 [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>;
330 def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
331 "add{q}\t{$src2, $dst|$dst, $src2}",
332 [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>;
334 // Register-Integer Addition with Overflow
335 def ADDOvf64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
336 "add{q}\t{$src2, $dst|$dst, $src2}",
337 [(set GR64:$dst, (X86add_ovf GR64:$src1, i64immSExt32:$src2)),
339 def ADDOvf64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
340 "add{q}\t{$src2, $dst|$dst, $src2}",
341 [(set GR64:$dst, (X86add_ovf GR64:$src1, i64immSExt8:$src2)),
343 } // isConvertibleToThreeAddress
345 // Register-Memory Addition
346 def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
347 "add{q}\t{$src2, $dst|$dst, $src2}",
348 [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>;
350 // Register-Memory Addition with Overflow
351 def ADDOvf64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
352 "add{q}\t{$src2, $dst|$dst, $src2}",
353 [(set GR64:$dst, (X86add_ovf GR64:$src1, (load addr:$src2))),
357 // Memory-Register Addition
358 def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
359 "add{q}\t{$src2, $dst|$dst, $src2}",
360 [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>;
361 def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
362 "add{q}\t{$src2, $dst|$dst, $src2}",
363 [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
364 def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
365 "add{q}\t{$src2, $dst|$dst, $src2}",
366 [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
368 // Memory-Register Addition with Overflow
369 def ADDOvf64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
370 "add{q}\t{$src2, $dst|$dst, $src2}",
371 [(store (X86add_ovf (load addr:$dst), GR64:$src2),
374 def ADDOvf64mi32 : RIi32<0x81, MRM0m, (outs),(ins i64mem:$dst, i64i32imm:$src2),
375 "add{q}\t{$src2, $dst|$dst, $src2}",
376 [(store (X86add_ovf (load addr:$dst),
380 def ADDOvf64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
381 "add{q}\t{$src2, $dst|$dst, $src2}",
382 [(store (X86add_ovf (load addr:$dst), i64immSExt8:$src2),
386 let Uses = [EFLAGS] in {
387 let isTwoAddress = 1 in {
388 let isCommutable = 1 in
389 def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
390 "adc{q}\t{$src2, $dst|$dst, $src2}",
391 [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;
393 def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
394 "adc{q}\t{$src2, $dst|$dst, $src2}",
395 [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;
397 def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
398 "adc{q}\t{$src2, $dst|$dst, $src2}",
399 [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
400 def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
401 "adc{q}\t{$src2, $dst|$dst, $src2}",
402 [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
405 def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
406 "adc{q}\t{$src2, $dst|$dst, $src2}",
407 [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
// Memory-destination ADC with a sign-extended 32-bit immediate.
// NOTE: the operand is i64i32imm, so the pattern must match with the 32-bit
// sign-extended predicate; it previously (incorrectly) used i64immSExt8,
// which would only ever match 8-bit-representable immediates.
408 def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
409 "adc{q}\t{$src2, $dst|$dst, $src2}",
410 [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
411 def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
412 "adc{q}\t{$src2, $dst|$dst, $src2}",
413 [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
416 let isTwoAddress = 1 in {
417 // Register-Register Subtraction
418 def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
419 "sub{q}\t{$src2, $dst|$dst, $src2}",
420 [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
422 // Register-Register Subtraction with Overflow
423 def SUBOvf64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
424 "sub{q}\t{$src2, $dst|$dst, $src2}",
425 [(set GR64:$dst, (X86sub_ovf GR64:$src1, GR64:$src2)),
428 // Register-Memory Subtraction
429 def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
430 "sub{q}\t{$src2, $dst|$dst, $src2}",
431 [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;
433 // Register-Memory Subtraction with Overflow
434 def SUBOvf64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
435 "sub{q}\t{$src2, $dst|$dst, $src2}",
436 [(set GR64:$dst, (X86sub_ovf GR64:$src1, (load addr:$src2))),
439 // Register-Integer Subtraction
440 def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
441 (ins GR64:$src1, i64i32imm:$src2),
442 "sub{q}\t{$src2, $dst|$dst, $src2}",
443 [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
444 def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
445 (ins GR64:$src1, i64i8imm:$src2),
446 "sub{q}\t{$src2, $dst|$dst, $src2}",
447 [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
449 // Register-Integer Subtraction with Overflow
450 def SUBOvf64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
451 (ins GR64:$src1, i64i32imm:$src2),
452 "sub{q}\t{$src2, $dst|$dst, $src2}",
453 [(set GR64:$dst, (X86sub_ovf GR64:$src1,
454 i64immSExt32:$src2)),
456 def SUBOvf64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
457 (ins GR64:$src1, i64i8imm:$src2),
458 "sub{q}\t{$src2, $dst|$dst, $src2}",
459 [(set GR64:$dst, (X86sub_ovf GR64:$src1,
464 // Memory-Register Subtraction
465 def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
466 "sub{q}\t{$src2, $dst|$dst, $src2}",
467 [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
469 // Memory-Register Subtraction with Overflow
470 def SUBOvf64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
471 "sub{q}\t{$src2, $dst|$dst, $src2}",
472 [(store (X86sub_ovf (load addr:$dst), GR64:$src2),
476 // Memory-Integer Subtraction
477 def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
478 "sub{q}\t{$src2, $dst|$dst, $src2}",
479 [(store (sub (load addr:$dst), i64immSExt32:$src2),
481 def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
482 "sub{q}\t{$src2, $dst|$dst, $src2}",
483 [(store (sub (load addr:$dst), i64immSExt8:$src2),
486 // Memory-Integer Subtraction with Overflow
487 def SUBOvf64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst,i64i32imm:$src2),
488 "sub{q}\t{$src2, $dst|$dst, $src2}",
489 [(store (X86sub_ovf (load addr:$dst),
490 i64immSExt32:$src2), addr:$dst),
492 def SUBOvf64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
493 "sub{q}\t{$src2, $dst|$dst, $src2}",
494 [(store (X86sub_ovf (load addr:$dst), i64immSExt8:$src2),
498 let Uses = [EFLAGS] in {
499 let isTwoAddress = 1 in {
500 def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
501 "sbb{q}\t{$src2, $dst|$dst, $src2}",
502 [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;
504 def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
505 "sbb{q}\t{$src2, $dst|$dst, $src2}",
506 [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
508 def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
509 "sbb{q}\t{$src2, $dst|$dst, $src2}",
510 [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
511 def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
512 "sbb{q}\t{$src2, $dst|$dst, $src2}",
513 [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
516 def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
517 "sbb{q}\t{$src2, $dst|$dst, $src2}",
518 [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
519 def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
520 "sbb{q}\t{$src2, $dst|$dst, $src2}",
521 [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
522 def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
523 "sbb{q}\t{$src2, $dst|$dst, $src2}",
524 [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
528 // Unsigned multiplication
529 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in {
530 def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
531 "mul{q}\t$src", []>; // RAX,RDX = RAX*GR64
533 def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
534 "mul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
536 // Signed multiplication
537 def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src),
538 "imul{q}\t$src", []>; // RAX,RDX = RAX*GR64
540 def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
541 "imul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
544 let Defs = [EFLAGS] in {
545 let isTwoAddress = 1 in {
546 let isCommutable = 1 in
547 // Register-Register Integer Multiplication
548 def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
549 (ins GR64:$src1, GR64:$src2),
550 "imul{q}\t{$src2, $dst|$dst, $src2}",
551 [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;
553 // Register-Register Multiplication with Overflow
554 def IMULOvf64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
555 (ins GR64:$src1, GR64:$src2),
556 "imul{q}\t{$src2, $dst|$dst, $src2}",
557 [(set GR64:$dst, (X86mul_ovf GR64:$src1, GR64:$src2)),
558 (implicit EFLAGS)]>, TB;
560 // Register-Memory Integer Multiplication
561 def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
562 (ins GR64:$src1, i64mem:$src2),
563 "imul{q}\t{$src2, $dst|$dst, $src2}",
564 [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
566 // Register-Memory Integer Multiplication with Overflow
567 def IMULOvf64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
568 (ins GR64:$src1, i64mem:$src2),
569 "imul{q}\t{$src2, $dst|$dst, $src2}",
570 [(set GR64:$dst, (X86mul_ovf GR64:$src1,
572 (implicit EFLAGS)]>, TB;
575 // Suprisingly enough, these are not two address instructions!
577 // Register-Integer Integer Multiplication
578 def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
579 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
580 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
581 [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
582 def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
583 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
584 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
585 [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
587 // Register-Integer Integer Multiplication with Overflow
588 def IMULOvf64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
589 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
590 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
591 [(set GR64:$dst, (X86mul_ovf GR64:$src1,
592 i64immSExt32:$src2)),
594 def IMULOvf64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
595 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
596 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
597 [(set GR64:$dst, (X86mul_ovf GR64:$src1,
601 // Memory-Integer Integer Multiplication
602 def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
603 (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
604 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
605 [(set GR64:$dst, (mul (load addr:$src1),
606 i64immSExt32:$src2))]>;
607 def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
608 (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
609 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
610 [(set GR64:$dst, (mul (load addr:$src1),
611 i64immSExt8:$src2))]>;
613 // Memory-Integer Integer Multiplication with Overflow
614 def IMULOvf64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
615 (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
616 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
617 [(set GR64:$dst, (X86mul_ovf (load addr:$src1),
618 i64immSExt32:$src2)),
620 def IMULOvf64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
621 (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
622 "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
623 [(set GR64:$dst, (X86mul_ovf (load addr:$src1),
628 // Unsigned division / remainder
629 let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in {
630 def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX
632 // Signed division / remainder
633 def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX
634 "idiv{q}\t$src", []>;
636 def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
638 def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX
639 "idiv{q}\t$src", []>;
643 // Unary instructions
644 let Defs = [EFLAGS], CodeSize = 2 in {
645 let isTwoAddress = 1 in
646 def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
647 [(set GR64:$dst, (ineg GR64:$src))]>;
648 def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
649 [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;
651 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
652 def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
653 [(set GR64:$dst, (add GR64:$src, 1))]>;
654 def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
655 [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;
657 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
658 def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
659 [(set GR64:$dst, (add GR64:$src, -1))]>;
660 def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
661 [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;
663 // In 64-bit mode, single byte INC and DEC cannot be encoded.
664 let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
665 // Can transform into LEA.
666 def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
667 [(set GR16:$dst, (add GR16:$src, 1))]>,
668 OpSize, Requires<[In64BitMode]>;
669 def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
670 [(set GR32:$dst, (add GR32:$src, 1))]>,
671 Requires<[In64BitMode]>;
672 def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
673 [(set GR16:$dst, (add GR16:$src, -1))]>,
674 OpSize, Requires<[In64BitMode]>;
675 def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
676 [(set GR32:$dst, (add GR32:$src, -1))]>,
677 Requires<[In64BitMode]>;
678 } // isConvertibleToThreeAddress
680 // These are duplicates of their 32-bit counterparts. Only needed so X86 knows
681 // how to unfold them.
682 let isTwoAddress = 0, CodeSize = 2 in {
683 def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
684 [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
685 OpSize, Requires<[In64BitMode]>;
686 def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
687 [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
688 Requires<[In64BitMode]>;
689 def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
690 [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
691 OpSize, Requires<[In64BitMode]>;
692 def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
693 [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
694 Requires<[In64BitMode]>;
696 } // Defs = [EFLAGS], CodeSize
699 let Defs = [EFLAGS] in {
700 // Shift instructions
701 let isTwoAddress = 1 in {
703 def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
704 "shl{q}\t{%cl, $dst|$dst, %CL}",
705 [(set GR64:$dst, (shl GR64:$src, CL))]>;
706 let isConvertibleToThreeAddress = 1 in // Can transform into LEA.
707 def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
708 "shl{q}\t{$src2, $dst|$dst, $src2}",
709 [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
710 // NOTE: We don't use shifts of a register by one, because 'add reg,reg' is
715 def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
716 "shl{q}\t{%cl, $dst|$dst, %CL}",
717 [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
718 def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
719 "shl{q}\t{$src, $dst|$dst, $src}",
720 [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
721 def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
723 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// 64-bit shift-right instructions: logical (SHR, opcode extension MRM5) and
// arithmetic (SAR, MRM7), each in by-CL, by-imm8, and by-1 forms, with both
// register (isTwoAddress) and memory-destination variants; followed by the
// register forms of rotate-left (ROL, MRM0).
// NOTE(review): this extraction is missing lines — SHR64r1, SHR64m1, SAR64r1,
// SAR64m1 and ROL64r1 lack their assembly-string line, and the closing braces
// of the `let isTwoAddress = 1 in {` scopes are not visible. Confirm against
// the original file before treating this chunk as complete.
725 let isTwoAddress = 1 in {
727 def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
728 "shr{q}\t{%cl, $dst|$dst, %CL}",
729 [(set GR64:$dst, (srl GR64:$src, CL))]>;
730 def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
731 "shr{q}\t{$src2, $dst|$dst, $src2}",
732 [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
733 def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
735 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
// Memory-destination forms: load, shift, store back to the same address.
739 def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
740 "shr{q}\t{%cl, $dst|$dst, %CL}",
741 [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
742 def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
743 "shr{q}\t{$src, $dst|$dst, $src}",
744 [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
745 def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
747 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
749 let isTwoAddress = 1 in {
751 def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
752 "sar{q}\t{%cl, $dst|$dst, %CL}",
753 [(set GR64:$dst, (sra GR64:$src, CL))]>;
754 def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
755 "sar{q}\t{$src2, $dst|$dst, $src2}",
756 [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
757 def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
759 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
763 def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
764 "sar{q}\t{%cl, $dst|$dst, %CL}",
765 [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
766 def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
767 "sar{q}\t{$src, $dst|$dst, $src}",
768 [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
769 def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
771 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
773 // Rotate instructions
774 let isTwoAddress = 1 in {
776 def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
777 "rol{q}\t{%cl, $dst|$dst, %CL}",
778 [(set GR64:$dst, (rotl GR64:$src, CL))]>;
779 def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
780 "rol{q}\t{$src2, $dst|$dst, $src2}",
781 [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
782 def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
784 [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
// Rotate-left of a 64-bit memory operand by CL.
// FIX: this def used the plain `I` instruction class, but every other 64-bit
// memory shift/rotate here (SHR64mCL, SAR64mCL, ROR64mCL, ...) uses `RI`,
// the REX.W-prefixed class. Without REX.W, 0xD3 /0 encodes a 32-bit rol, so
// the instruction would rotate only the low 32 bits of the memory operand.
788 def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
789 "rol{q}\t{%cl, $dst|$dst, %CL}",
790 [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
// Remaining rotate-left memory forms, rotate-right (ROR, MRM1) in register
// and memory forms, and the double-shift instructions SHLD/SHRD (0F A4/A5,
// 0F AC/AD) which shift $dst while shifting in bits from $src2.
// NOTE(review): several defs below are missing lines in this extraction
// (ROL64m1/ROR64r1/ROR64m1 lack their asm-string line; the SHLD/SHRD imm8
// pattern operands and some `, TB` suffix lines are cut). Byte-preserved.
791 def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src),
792 "rol{q}\t{$src, $dst|$dst, $src}",
793 [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
794 def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
796 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
798 let isTwoAddress = 1 in {
800 def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
801 "ror{q}\t{%cl, $dst|$dst, %CL}",
802 [(set GR64:$dst, (rotr GR64:$src, CL))]>;
803 def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
804 "ror{q}\t{$src2, $dst|$dst, $src2}",
805 [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
806 def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
808 [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
812 def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
813 "ror{q}\t{%cl, $dst|$dst, %CL}",
814 [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
815 def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
816 "ror{q}\t{$src, $dst|$dst, $src}",
817 [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
818 def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
820 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
822 // Double shift instructions (generalizations of rotate)
823 let isTwoAddress = 1 in {
825 def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
826 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
827 [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB;
828 def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
829 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
830 [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB;
833 let isCommutable = 1 in { // FIXME: Update X86InstrInfo::commuteInstruction
834 def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
835 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
836 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
837 [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
840 def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
841 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3),
842 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
843 [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
// Memory-destination double shifts: load $dst, shift bits in from $src2,
// store the result back to $dst.
850 def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
851 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
852 [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
854 def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
855 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
856 [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
859 def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
860 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
861 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
862 [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
863 (i8 imm:$src3)), addr:$dst)]>,
865 def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
866 (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
867 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
868 [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
869 (i8 imm:$src3)), addr:$dst)]>,
873 //===----------------------------------------------------------------------===//
874 // Logical Instructions...
877 let isTwoAddress = 1 in
878 def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
879 [(set GR64:$dst, (not GR64:$src))]>;
880 def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
881 [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
883 let Defs = [EFLAGS] in {
884 let isTwoAddress = 1 in {
885 let isCommutable = 1 in
886 def AND64rr : RI<0x21, MRMDestReg,
887 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
888 "and{q}\t{$src2, $dst|$dst, $src2}",
889 [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
890 def AND64rm : RI<0x23, MRMSrcMem,
891 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
892 "and{q}\t{$src2, $dst|$dst, $src2}",
893 [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
894 def AND64ri32 : RIi32<0x81, MRM4r,
895 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
896 "and{q}\t{$src2, $dst|$dst, $src2}",
897 [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
898 def AND64ri8 : RIi8<0x83, MRM4r,
899 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
900 "and{q}\t{$src2, $dst|$dst, $src2}",
901 [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
904 def AND64mr : RI<0x21, MRMDestMem,
905 (outs), (ins i64mem:$dst, GR64:$src),
906 "and{q}\t{$src, $dst|$dst, $src}",
907 [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
908 def AND64mi32 : RIi32<0x81, MRM4m,
909 (outs), (ins i64mem:$dst, i64i32imm:$src),
910 "and{q}\t{$src, $dst|$dst, $src}",
911 [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
912 def AND64mi8 : RIi8<0x83, MRM4m,
913 (outs), (ins i64mem:$dst, i64i8imm :$src),
914 "and{q}\t{$src, $dst|$dst, $src}",
915 [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
917 let isTwoAddress = 1 in {
918 let isCommutable = 1 in
919 def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
920 "or{q}\t{$src2, $dst|$dst, $src2}",
921 [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
922 def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
923 "or{q}\t{$src2, $dst|$dst, $src2}",
924 [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
925 def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
926 "or{q}\t{$src2, $dst|$dst, $src2}",
927 [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
928 def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
929 "or{q}\t{$src2, $dst|$dst, $src2}",
930 [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
933 def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
934 "or{q}\t{$src, $dst|$dst, $src}",
935 [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
936 def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
937 "or{q}\t{$src, $dst|$dst, $src}",
938 [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
939 def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
940 "or{q}\t{$src, $dst|$dst, $src}",
941 [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
943 let isTwoAddress = 1 in {
944 let isCommutable = 1 in
945 def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
946 "xor{q}\t{$src2, $dst|$dst, $src2}",
947 [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
948 def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
949 "xor{q}\t{$src2, $dst|$dst, $src2}",
950 [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
951 def XOR64ri32 : RIi32<0x81, MRM6r,
952 (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
953 "xor{q}\t{$src2, $dst|$dst, $src2}",
954 [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
955 def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
956 "xor{q}\t{$src2, $dst|$dst, $src2}",
957 [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
960 def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
961 "xor{q}\t{$src, $dst|$dst, $src}",
962 [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
963 def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
964 "xor{q}\t{$src, $dst|$dst, $src}",
965 [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
966 def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
967 "xor{q}\t{$src, $dst|$dst, $src}",
968 [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
971 //===----------------------------------------------------------------------===//
972 // Comparison Instructions...
975 // Integer comparison
976 let Defs = [EFLAGS] in {
977 let isCommutable = 1 in
978 def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
979 "test{q}\t{$src2, $src1|$src1, $src2}",
980 [(X86cmp (and GR64:$src1, GR64:$src2), 0),
982 def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
983 "test{q}\t{$src2, $src1|$src1, $src2}",
984 [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
986 def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
987 (ins GR64:$src1, i64i32imm:$src2),
988 "test{q}\t{$src2, $src1|$src1, $src2}",
989 [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
991 def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
992 (ins i64mem:$src1, i64i32imm:$src2),
993 "test{q}\t{$src2, $src1|$src1, $src2}",
994 [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
997 def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
998 "cmp{q}\t{$src2, $src1|$src1, $src2}",
999 [(X86cmp GR64:$src1, GR64:$src2),
1000 (implicit EFLAGS)]>;
1001 def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1002 "cmp{q}\t{$src2, $src1|$src1, $src2}",
1003 [(X86cmp (loadi64 addr:$src1), GR64:$src2),
1004 (implicit EFLAGS)]>;
1005 def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
1006 "cmp{q}\t{$src2, $src1|$src1, $src2}",
1007 [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
1008 (implicit EFLAGS)]>;
1009 def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
1010 "cmp{q}\t{$src2, $src1|$src1, $src2}",
1011 [(X86cmp GR64:$src1, i64immSExt32:$src2),
1012 (implicit EFLAGS)]>;
1013 def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
1014 (ins i64mem:$src1, i64i32imm:$src2),
1015 "cmp{q}\t{$src2, $src1|$src1, $src2}",
1016 [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
1017 (implicit EFLAGS)]>;
1018 def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1019 "cmp{q}\t{$src2, $src1|$src1, $src2}",
1020 [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
1021 (implicit EFLAGS)]>;
1022 def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
1023 "cmp{q}\t{$src2, $src1|$src1, $src2}",
1024 [(X86cmp GR64:$src1, i64immSExt8:$src2),
1025 (implicit EFLAGS)]>;
1026 } // Defs = [EFLAGS]
1028 // Conditional moves
// CMOVcc: copy $src2 into $dst when the condition (encoded in the opcode,
// 0F 42..0F 4F) holds, otherwise keep $src1. All read EFLAGS; register-
// register forms are commutable (swapping operands inverts the condition,
// handled by the FIXME'd commuteInstruction machinery elsewhere). The rm
// forms load the second operand from memory.
1029 let Uses = [EFLAGS], isTwoAddress = 1 in {
1030 let isCommutable = 1 in {
1031 def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
1032 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1033 "cmovb\t{$src2, $dst|$dst, $src2}",
1034 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1035 X86_COND_B, EFLAGS))]>, TB;
1036 def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64
1037 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1038 "cmovae\t{$src2, $dst|$dst, $src2}",
1039 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1040 X86_COND_AE, EFLAGS))]>, TB;
1041 def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64
1042 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1043 "cmove\t{$src2, $dst|$dst, $src2}",
1044 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1045 X86_COND_E, EFLAGS))]>, TB;
1046 def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64
1047 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1048 "cmovne\t{$src2, $dst|$dst, $src2}",
1049 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1050 X86_COND_NE, EFLAGS))]>, TB;
1051 def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64
1052 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1053 "cmovbe\t{$src2, $dst|$dst, $src2}",
1054 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1055 X86_COND_BE, EFLAGS))]>, TB;
1056 def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64
1057 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1058 "cmova\t{$src2, $dst|$dst, $src2}",
1059 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1060 X86_COND_A, EFLAGS))]>, TB;
1061 def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64
1062 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1063 "cmovl\t{$src2, $dst|$dst, $src2}",
1064 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1065 X86_COND_L, EFLAGS))]>, TB;
1066 def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64
1067 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1068 "cmovge\t{$src2, $dst|$dst, $src2}",
1069 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1070 X86_COND_GE, EFLAGS))]>, TB;
1071 def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64
1072 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1073 "cmovle\t{$src2, $dst|$dst, $src2}",
1074 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1075 X86_COND_LE, EFLAGS))]>, TB;
1076 def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64
1077 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1078 "cmovg\t{$src2, $dst|$dst, $src2}",
1079 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1080 X86_COND_G, EFLAGS))]>, TB;
1081 def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64
1082 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1083 "cmovs\t{$src2, $dst|$dst, $src2}",
1084 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1085 X86_COND_S, EFLAGS))]>, TB;
1086 def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64
1087 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1088 "cmovns\t{$src2, $dst|$dst, $src2}",
1089 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1090 X86_COND_NS, EFLAGS))]>, TB;
1091 def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64
1092 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1093 "cmovp\t{$src2, $dst|$dst, $src2}",
1094 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1095 X86_COND_P, EFLAGS))]>, TB;
1096 def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64
1097 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1098 "cmovnp\t{$src2, $dst|$dst, $src2}",
1099 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1100 X86_COND_NP, EFLAGS))]>, TB;
1101 } // isCommutable = 1
// Memory-operand forms: same conditions, second operand loaded from memory.
1103 def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
1104 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1105 "cmovb\t{$src2, $dst|$dst, $src2}",
1106 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1107 X86_COND_B, EFLAGS))]>, TB;
1108 def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
1109 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1110 "cmovae\t{$src2, $dst|$dst, $src2}",
1111 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1112 X86_COND_AE, EFLAGS))]>, TB;
1113 def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
1114 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1115 "cmove\t{$src2, $dst|$dst, $src2}",
1116 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1117 X86_COND_E, EFLAGS))]>, TB;
1118 def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
1119 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1120 "cmovne\t{$src2, $dst|$dst, $src2}",
1121 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1122 X86_COND_NE, EFLAGS))]>, TB;
1123 def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
1124 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1125 "cmovbe\t{$src2, $dst|$dst, $src2}",
1126 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1127 X86_COND_BE, EFLAGS))]>, TB;
1128 def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
1129 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1130 "cmova\t{$src2, $dst|$dst, $src2}",
1131 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1132 X86_COND_A, EFLAGS))]>, TB;
1133 def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64]
1134 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1135 "cmovl\t{$src2, $dst|$dst, $src2}",
1136 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1137 X86_COND_L, EFLAGS))]>, TB;
1138 def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
1139 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1140 "cmovge\t{$src2, $dst|$dst, $src2}",
1141 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1142 X86_COND_GE, EFLAGS))]>, TB;
1143 def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
1144 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1145 "cmovle\t{$src2, $dst|$dst, $src2}",
1146 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1147 X86_COND_LE, EFLAGS))]>, TB;
1148 def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
1149 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1150 "cmovg\t{$src2, $dst|$dst, $src2}",
1151 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1152 X86_COND_G, EFLAGS))]>, TB;
1153 def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
1154 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1155 "cmovs\t{$src2, $dst|$dst, $src2}",
1156 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1157 X86_COND_S, EFLAGS))]>, TB;
1158 def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
1159 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1160 "cmovns\t{$src2, $dst|$dst, $src2}",
1161 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1162 X86_COND_NS, EFLAGS))]>, TB;
1163 def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
1164 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1165 "cmovp\t{$src2, $dst|$dst, $src2}",
1166 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1167 X86_COND_P, EFLAGS))]>, TB;
1168 def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64]
1169 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1170 "cmovnp\t{$src2, $dst|$dst, $src2}",
1171 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1172 X86_COND_NP, EFLAGS))]>, TB;
1175 //===----------------------------------------------------------------------===//
1176 // Conversion Instructions...
1179 // f64 -> signed i64
1180 def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1181 "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1183 (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
1184 def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
1185 "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1186 [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
1187 (load addr:$src)))]>;
1188 def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
1189 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1190 [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
1191 def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
1192 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1193 [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
1194 def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1195 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1197 (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
1198 def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src),
1199 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1201 (int_x86_sse2_cvttsd2si64
1202 (load addr:$src)))]>;
1204 // Signed i64 -> f64
1205 def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1206 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1207 [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
1208 def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
1209 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1210 [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
1212 let isTwoAddress = 1 in {
1213 def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
1214 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1215 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
1217 (int_x86_sse2_cvtsi642sd VR128:$src1,
1219 def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
1220 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1221 "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
1223 (int_x86_sse2_cvtsi642sd VR128:$src1,
1224 (loadi64 addr:$src2)))]>;
1227 // Signed i64 -> f32
1228 def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
1229 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1230 [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
1231 def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
1232 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1233 [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
1235 let isTwoAddress = 1 in {
1236 def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
1237 (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
1238 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
1240 (int_x86_sse_cvtsi642ss VR128:$src1,
1242 def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
1243 (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
1244 "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
1246 (int_x86_sse_cvtsi642ss VR128:$src1,
1247 (loadi64 addr:$src2)))]>;
1250 // f32 -> signed i64
1251 def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1252 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
1254 (int_x86_sse_cvtss2si64 VR128:$src))]>;
1255 def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1256 "cvtss2si{q}\t{$src, $dst|$dst, $src}",
1257 [(set GR64:$dst, (int_x86_sse_cvtss2si64
1258 (load addr:$src)))]>;
1259 def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
1260 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1261 [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
1262 def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1263 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1264 [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
1265 def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1266 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1268 (int_x86_sse_cvttss2si64 VR128:$src))]>;
1269 def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
1270 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1272 (int_x86_sse_cvttss2si64 (load addr:$src)))]>;
1274 //===----------------------------------------------------------------------===//
1275 // Alias Instructions
1276 //===----------------------------------------------------------------------===//
1278 // Alias instructions that map movr0 to xor. Use xorl instead of xorq; it's
1279 // equivalent due to implicit zero-extending, and it sometimes has a smaller
1281 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
1282 // FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
1283 // when we have a better way to specify isel priority.
1284 let Defs = [EFLAGS], AddedComplexity = 1,
1285 isReMaterializable = 1, isAsCheapAsAMove = 1 in
1286 def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins),
1287 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
1288 [(set GR64:$dst, 0)]>;
1290 // Materialize i64 constant where top 32-bits are zero.
1291 let AddedComplexity = 1, isReMaterializable = 1 in
1292 def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
1293 "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
1294 [(set GR64:$dst, i64immZExt32:$src)]>;
1296 //===----------------------------------------------------------------------===//
1297 // Thread Local Storage Instructions
1298 //===----------------------------------------------------------------------===//
1300 def TLS_addr64 : I<0, Pseudo, (outs GR64:$dst), (ins i64imm:$sym),
1301 ".byte\t0x66; leaq\t${sym:mem}(%rip), $dst; .word\t0x6666; rex64",
1302 [(set GR64:$dst, (X86tlsaddr tglobaltlsaddr:$sym))]>;
1304 //===----------------------------------------------------------------------===//
1305 // Atomic Instructions
1306 //===----------------------------------------------------------------------===//
1308 let Defs = [RAX, EFLAGS], Uses = [RAX] in {
1309 def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
1310 "lock\n\tcmpxchgq\t$swap,$ptr",
1311 [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
1314 let Constraints = "$val = $dst" in {
1315 let Defs = [EFLAGS] in
1316 def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
1317 "lock\n\txadd\t$val, $ptr",
1318 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
1320 def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
1322 [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
1325 // Atomic exchange, and, or, xor
1326 let Constraints = "$val = $dst", Defs = [EFLAGS],
1327 usesCustomDAGSchedInserter = 1 in {
1328 def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1329 "#ATOMAND64 PSEUDO!",
1330 [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
1331 def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1332 "#ATOMOR64 PSEUDO!",
1333 [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
1334 def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1335 "#ATOMXOR64 PSEUDO!",
1336 [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
1337 def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1338 "#ATOMNAND64 PSEUDO!",
1339 [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
1340 def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
1341 "#ATOMMIN64 PSEUDO!",
1342 [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
1343 def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1344 "#ATOMMAX64 PSEUDO!",
1345 [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
1346 def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1347 "#ATOMUMIN64 PSEUDO!",
1348 [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
1349 def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
1350 "#ATOMUMAX64 PSEUDO!",
1351 [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
1354 //===----------------------------------------------------------------------===//
1355 // Non-Instruction Patterns
1356 //===----------------------------------------------------------------------===//
1358 // ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
1359 def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
1360 (MOV64ri tconstpool :$dst)>, Requires<[NotSmallCode]>;
1361 def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
1362 (MOV64ri tjumptable :$dst)>, Requires<[NotSmallCode]>;
1363 def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
1364 (MOV64ri tglobaladdr :$dst)>, Requires<[NotSmallCode]>;
1365 def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
1366 (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
1368 def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
1369 (MOV64mi32 addr:$dst, tconstpool:$src)>,
1370 Requires<[SmallCode, IsStatic]>;
1371 def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
1372 (MOV64mi32 addr:$dst, tjumptable:$src)>,
1373 Requires<[SmallCode, IsStatic]>;
1374 def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
1375 (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
1376 Requires<[SmallCode, IsStatic]>;
1377 def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
1378 (MOV64mi32 addr:$dst, texternalsym:$src)>,
1379 Requires<[SmallCode, IsStatic]>;
1382 // Direct PC relative function call for small code model. 32-bit displacement
1383 // sign extended to 64-bit.
1384 def : Pat<(X86call (i64 tglobaladdr:$dst)),
1385 (CALL64pcrel32 tglobaladdr:$dst)>;
1386 def : Pat<(X86call (i64 texternalsym:$dst)),
1387 (CALL64pcrel32 texternalsym:$dst)>;
1389 def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
1390 (CALL64pcrel32 tglobaladdr:$dst)>;
1391 def : Pat<(X86tailcall (i64 texternalsym:$dst)),
1392 (CALL64pcrel32 texternalsym:$dst)>;
1394 def : Pat<(X86tailcall GR64:$dst),
1395 (CALL64r GR64:$dst)>;
1399 def : Pat<(X86tailcall GR32:$dst),
1401 def : Pat<(X86tailcall (i64 tglobaladdr:$dst)),
1403 def : Pat<(X86tailcall (i64 texternalsym:$dst)),
1406 def : Pat<(X86tcret GR64:$dst, imm:$off),
1407 (TCRETURNri64 GR64:$dst, imm:$off)>;
// Tail-call return through a direct global-address target.
// FIX: the pattern matches tglobaladdr but the result previously emitted
// texternalsym:$dst — the selected instruction must carry the operand that
// was actually matched, otherwise the global-address target is dropped.
// (Compare the texternalsym pattern immediately below, which correctly
// emits texternalsym.)
1409 def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
1410 (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;
// Tail-call return through an external symbol, TEST-vs-CMP peephole,
// zero/any-extension load patterns, and anyext register patterns.
1412 def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
1413 (TCRETURNdi64 texternalsym:$dst, imm:$off)>;
1417 // TEST R,R is smaller than CMP R,0
1418 def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
1419 (TEST64rr GR64:$src1, GR64:$src1)>;
// 32-bit ops implicitly zero the upper 32 bits, so zext i32 -> i64 is just
// a subregister insertion into a zeroed 64-bit register.
1424 def : Pat<(i64 (zext GR32:$src)),
1425 (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>;
1427 // zextload bool -> zextload byte
1428 def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
1431 // When extloading from 16-bit and smaller memory locations into 64-bit registers,
1432 // use zero-extending loads so that the entire 64-bit register is defined, avoiding
1433 // partial-register updates.
1434 def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
1435 def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>;
1436 def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
1437 // For other extloads, use subregs, since the high contents of the register are
1438 // defined after an extload.
// NOTE(review): the subregister-index closing lines of the next three
// INSERT_SUBREG patterns are missing in this extraction — byte-preserved.
1439 def : Pat<(extloadi64i32 addr:$src),
1440 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
1442 def : Pat<(extloadi16i1 addr:$src),
1443 (INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
1445 Requires<[In64BitMode]>;
1446 def : Pat<(extloadi16i8 addr:$src),
1447 (INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
1449 Requires<[In64BitMode]>;
// anyext: the upper bits may stay undefined, so an INSERT_SUBREG into an
// IMPLICIT_DEF register suffices (no zeroing move needed).
1452 def : Pat<(i64 (anyext GR8:$src)),
1453 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>;
1454 def : Pat<(i64 (anyext GR16:$src)),
1455 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR16:$src, x86_subreg_16bit)>;
1456 def : Pat<(i64 (anyext GR32:$src)),
1457 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, x86_subreg_32bit)>;
1458 def : Pat<(i16 (anyext GR8:$src)),
1459 (INSERT_SUBREG (i16 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>,
1460 Requires<[In64BitMode]>;
1461 def : Pat<(i32 (anyext GR8:$src)),
1462 (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>,
1463 Requires<[In64BitMode]>;
1465 //===----------------------------------------------------------------------===//
1467 //===----------------------------------------------------------------------===//
1469 // Odd encoding trick: -128 fits into an 8-bit immediate field while
1470 // +128 doesn't, so in this special case use a sub instead of an add.
1471 def : Pat<(add GR64:$src1, 128),
1472 (SUB64ri8 GR64:$src1, -128)>;
1473 def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
1474 (SUB64mi8 addr:$dst, -128)>;
1476 // The same trick applies for 32-bit immediate fields in 64-bit
// operations: +0x80000000 does not fit a sign-extended imm32, but
// -0x80000000 does, so select a sub of the negated constant instead.
1478 def : Pat<(add GR64:$src1, 0x0000000080000000),
1479 (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
// FIX: the store-form constant previously read 0x00000000800000000 — one
// hex digit too many (17 digits = 0x800000000, i.e. 2^35), so the pattern
// could never match the intended +2^31 case. Corrected to 16 digits to
// mirror the register form above.
1480 def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
1481 (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
1483 // r & (2^32-1) ==> movz
1484 def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
1485 (MOVZX64rr32 (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)))>;
1486 // r & (2^16-1) ==> movz
1487 def : Pat<(and GR64:$src, 0xffff),
1488 (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
1489 // r & (2^8-1) ==> movz
1490 def : Pat<(and GR64:$src, 0xff),
1491 (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
1492 // r & (2^8-1) ==> movz
1493 def : Pat<(and GR32:$src1, 0xff),
1494 (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR32:$src1, x86_subreg_8bit)))>,
1495 Requires<[In64BitMode]>;
1496 // r & (2^8-1) ==> movz
1497 def : Pat<(and GR16:$src1, 0xff),
1498 (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, x86_subreg_8bit)))>,
1499 Requires<[In64BitMode]>;
1501 // sext_inreg patterns
// sign_extend_inreg from width N is lowered as a MOVSX from the N-bit
// subregister back into the full register.
1502 def : Pat<(sext_inreg GR64:$src, i32),
1503 (MOVSX64rr32 (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)))>;
1504 def : Pat<(sext_inreg GR64:$src, i16),
1505 (MOVSX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
1506 def : Pat<(sext_inreg GR64:$src, i8),
1507 (MOVSX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
// NOTE(review): as with the movz patterns above, the GR32/GR16 forms are
// restricted to 64-bit mode — likely a REX-prefix requirement for addressing
// the 8-bit subreg of an arbitrary register; confirm.
1508 def : Pat<(sext_inreg GR32:$src, i8),
1509 (MOVSX32rr8 (i8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit)))>,
1510 Requires<[In64BitMode]>;
1511 def : Pat<(sext_inreg GR16:$src, i8),
1512 (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)))>,
1513 Requires<[In64BitMode]>;
// trunc patterns: truncation is free — just reference the subregister of
// the appropriate width; no instruction is emitted.
1516 def : Pat<(i32 (trunc GR64:$src)),
1517 (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
1518 def : Pat<(i16 (trunc GR64:$src)),
1519 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
1520 def : Pat<(i8 (trunc GR64:$src)),
1521 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit))>;
// NOTE(review): the GR32/GR16 -> i8 forms require 64-bit mode, matching the
// restriction on the movz/movsx subreg patterns above — confirm (REX).
1522 def : Pat<(i8 (trunc GR32:$src)),
1523 (i8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit))>,
1524 Requires<[In64BitMode]>;
1525 def : Pat<(i8 (trunc GR16:$src)),
1526 (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit))>,
1527 Requires<[In64BitMode]>;
1529 // (shl x, 1) ==> (add x, x)
// add reg,reg is preferred over shl reg,1 (shorter/cheaper encoding).
1530 def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
1532 // (shl x (and y, 63)) ==> (shl x, y)
// The explicit (and CL, 63) mask is redundant: hardware 64-bit shifts only
// consume the low 6 bits of CL anyway, so the mask node can be dropped and
// the CL-count shift instruction used directly.
1533 def : Pat<(shl GR64:$src1, (and CL:$amt, 63)),
1534 (SHL64rCL GR64:$src1)>;
1535 def : Pat<(store (shl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
1536 (SHL64mCL addr:$dst)>;
// Same for logical right shift...
1538 def : Pat<(srl GR64:$src1, (and CL:$amt, 63)),
1539 (SHR64rCL GR64:$src1)>;
1540 def : Pat<(store (srl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
1541 (SHR64mCL addr:$dst)>;
// ...and arithmetic right shift.
1543 def : Pat<(sra GR64:$src1, (and CL:$amt, 63)),
1544 (SAR64rCL GR64:$src1)>;
1545 def : Pat<(store (sra (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst),
1546 (SAR64mCL addr:$dst)>;
1548 // (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c)
1549 def : Pat<(or (srl GR64:$src1, CL:$amt),
1550 (shl GR64:$src2, (sub 64, CL:$amt))),
1551 (SHRD64rrCL GR64:$src1, GR64:$src2)>;
// Read-modify-write form folding the left operand from memory.
1553 def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt),
1554 (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
1555 (SHRD64mrCL addr:$dst, GR64:$src2)>;
// Same patterns with the shift amount expressed as a truncation of RCX.
1557 def : Pat<(or (srl GR64:$src1, (i8 (trunc RCX:$amt))),
1558 (shl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
1559 (SHRD64rrCL GR64:$src1, GR64:$src2)>;
// FIX: restore the store-address operand (addr:$dst) that had been dropped;
// without it the store node was missing its address and the Pat malformed.
1561 def : Pat<(store (or (srl (loadi64 addr:$dst), (i8 (trunc RCX:$amt))),
1562 (shl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
1563 addr:$dst),
1564 (SHRD64mrCL addr:$dst, GR64:$src2)>;
// Immediate-count double shift; the second count is matched but unused
// because the hardware takes a single count operand.
1566 def : Pat<(shrd GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)),
1567 (SHRD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;
1569 def : Pat<(store (shrd (loadi64 addr:$dst), (i8 imm:$amt1),
1570 GR64:$src2, (i8 imm:$amt2)), addr:$dst),
1571 (SHRD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>;
1573 // (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
1574 def : Pat<(or (shl GR64:$src1, CL:$amt),
1575 (srl GR64:$src2, (sub 64, CL:$amt))),
1576 (SHLD64rrCL GR64:$src1, GR64:$src2)>;
// Read-modify-write form folding the left operand from memory.
1578 def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt),
1579 (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst),
1580 (SHLD64mrCL addr:$dst, GR64:$src2)>;
// Same patterns with the shift amount expressed as a truncation of RCX.
1582 def : Pat<(or (shl GR64:$src1, (i8 (trunc RCX:$amt))),
1583 (srl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
1584 (SHLD64rrCL GR64:$src1, GR64:$src2)>;
// FIX: restore the store-address operand (addr:$dst) that had been dropped;
// without it the store node was missing its address and the Pat malformed.
1586 def : Pat<(store (or (shl (loadi64 addr:$dst), (i8 (trunc RCX:$amt))),
1587 (srl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))),
1588 addr:$dst),
1589 (SHLD64mrCL addr:$dst, GR64:$src2)>;
// Immediate-count double shift; only the first count reaches the encoding.
1591 def : Pat<(shld GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)),
1592 (SHLD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;
1594 def : Pat<(store (shld (loadi64 addr:$dst), (i8 imm:$amt1),
1595 GR64:$src2, (i8 imm:$amt2)), addr:$dst),
1596 (SHLD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>;
1598 // X86 specific add which produces a flag.
1599 def : Pat<(addc GR64:$src1, GR64:$src2),
1600 (ADD64rr GR64:$src1, GR64:$src2)>;
1601 def : Pat<(addc GR64:$src1, (load addr:$src2)),
1602 (ADD64rm GR64:$src1, addr:$src2)>;
// Immediate forms are guarded by the PatLeafs so they only match values
// that fit the sign-extended imm32/imm8 encodings.
1603 def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
1604 (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
1605 def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
1606 (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
// Flag-producing subtract.
1608 def : Pat<(subc GR64:$src1, GR64:$src2),
1609 (SUB64rr GR64:$src1, GR64:$src2)>;
1610 def : Pat<(subc GR64:$src1, (load addr:$src2)),
1611 (SUB64rm GR64:$src1, addr:$src2)>;
// FIX: match i64immSExt32, not a bare imm — a bare imm would let this
// pattern claim 64-bit immediates that cannot be encoded in SUB64ri32's
// sign-extended 32-bit field (consistent with the addc pattern above).
1612 def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
1613 (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
1614 def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
1615 (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
1618 //===----------------------------------------------------------------------===//
1619 // X86-64 SSE Instructions
1620 //===----------------------------------------------------------------------===//
1622 // Move instructions...
// GPR64 -> XMM scalar move (movd/movq r64 to low element, upper zeroed).
// FIX: restore the dropped "[(set VR128:$dst," line — the pattern list was
// left unbalanced without it.
1624 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
1625 "mov{d|q}\t{$src, $dst|$dst, $src}",
1626 [(set VR128:$dst,
1627 (v2i64 (scalar_to_vector GR64:$src)))]>;
// XMM low element -> GPR64 move (extract element 0 of the v2i64).
// FIX: restore the dropped "(iPTR 0)))]>;" line that terminates the
// vector_extract pattern and the def itself.
1628 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
1629 "mov{d|q}\t{$src, $dst|$dst, $src}",
1630 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
1631 (iPTR 0)))]>;
// Bitcast moves between 64-bit GPRs and scalar double (FR64) registers,
// plus the corresponding 64-bit loads/stores. No value change, just a
// domain crossing.
1633 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
1634 "mov{d|q}\t{$src, $dst|$dst, $src}",
1635 [(set FR64:$dst, (bitconvert GR64:$src))]>;
1636 def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
1637 "movq\t{$src, $dst|$dst, $src}",
1638 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
// Reverse direction: FR64 -> GPR64 / memory.
1640 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
1641 "mov{d|q}\t{$src, $dst|$dst, $src}",
1642 [(set GR64:$dst, (bitconvert FR64:$src))]>;
1643 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
1644 "movq\t{$src, $dst|$dst, $src}",
1645 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
1647 //===----------------------------------------------------------------------===//
1648 // X86-64 SSE4.1 Instructions
1649 //===----------------------------------------------------------------------===//
1651 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
1652 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
// FIX: restore the dropped "[(set GR64:$dst," line and the multiclass's
// closing brace; the rr pattern and the multiclass body were unbalanced.
1653 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
1654 (ins VR128:$src1, i32i8imm:$src2),
1655 !strconcat(OpcodeStr,
1656 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1657 [(set GR64:$dst,
1658 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
1659 def mr : SS4AIi8<opc, MRMDestMem, (outs),
1660 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
1661 !strconcat(OpcodeStr,
1662 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1663 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
1664 addr:$dst)]>, OpSize, REX_W;
1665 }

1667 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
// SS41I_insert64 - SSE 4.1 insert 64 bits from int reg or memory into an XMM.
// FIX: restore the dropped "[(set VR128:$dst," lines in both patterns, the
// trailing "OpSize, REX_W;" of the rr form, and the closing braces of the
// multiclass and the `let isTwoAddress` scope — all were missing.
1669 let isTwoAddress = 1 in {
1670 multiclass SS41I_insert64<bits<8> opc, string OpcodeStr> {
1671 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
1672 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
1673 !strconcat(OpcodeStr,
1674 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
1675 [(set VR128:$dst,
1676 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
1677 OpSize, REX_W;
1678 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
1679 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
1680 !strconcat(OpcodeStr,
1681 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
1682 [(set VR128:$dst,
1683 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
1684 imm:$src3)))]>, OpSize, REX_W;
1685 }
1686 }

1688 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">;