//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;
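
// As a usage sketch (see the 64-bit AND patterns later in this file), these
// transforms are applied to an immediate on the output side of a Pat<>, e.g.
// (AND32ri ..., (i32 (GetLo32XForm imm:$imm))) truncates a matched 64-bit
// immediate to its low 32 bits at selection time.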

//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In32BitMode]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In32BitMode]>;
}

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In64BitMode]>;
}
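
// A sketch of the eventual expansion (assuming a 16-byte call frame): the
// pair brackets a call sequence and is rewritten during prolog/epilog
// insertion into something like
//     subl $16, %esp    # ADJCALLSTACKDOWN
//     ...call sequence...
//     addl $16, %esp    # ADJCALLSTACKUP
// which is why ESP/RSP and EFLAGS are modeled as defs above.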

// x86-64 va_start lowering magic.
let usesCustomInserter = 1 in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                               i64imm:$regsavefi, i64imm:$offset,
                               variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR32:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR32:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;
}

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets. These calls are needed to probe the stack when allocating more than
// 4k bytes in one go. Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects it has (compared to an ordinary call), such as changing the stack
// pointer.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                     "# dynamic stack allocation",
                     [(X86WinAlloca)]>;

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[In32BitMode]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

// The MSVC runtime contains an _ftol2 routine for converting floating-point
// to integer values. It has a strange calling convention: the input is
// popped from the x87 stack, and the return value is given in EDX:EAX. No
// other registers (aside from flags) are touched.
// Microsoft toolchains do not support 80-bit precision, so a WIN_FTOL_80
// variant is unnecessary.

let Defs = [EAX, EDX, EFLAGS], FPForm = SpecialFP in {
def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src),
                    "# win32 fptoui", [(X86WinFTOL RFP32:$src)]>,
                  Requires<[In32BitMode]>;

def WIN_FTOL_64 : I<0, Pseudo, (outs), (ins RFP64:$src),
                    "# win32 fptoui", [(X86WinFTOL RFP64:$src)]>,
                  Requires<[In32BitMode]>;
}
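
// A sketch of the expected use (assuming AT&T syntax): the operand is left
// on the x87 stack and the runtime routine is called directly, e.g.
//     fldl  <mem>        # push the value to convert
//     calll _ftol2       # pops st(0); result returned in EDX:EAX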

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)], IIC_RET>;

}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)], IIC_RET>;

}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower. We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1 in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
                     "", []>;

// This instruction is lowered to a RET followed by a MOV. The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
                                  "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: Set encoding to pseudo.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in {
def MOV8r0   : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
                 [(set GR8:$dst, 0)], IIC_ALU_NONMEM>;

// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller
// encoding and avoids a partial-register update sometimes, but doing so
// at isel time interferes with rematerialization in the current register
// allocator. For now, this is rewritten when the instruction is lowered
// to an MCInst.
def MOV16r0  : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
                 "",
                 [(set GR16:$dst, 0)], IIC_ALU_NONMEM>, OpSize;

// FIXME: Set encoding to pseudo.
def MOV32r0  : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)], IIC_ALU_NONMEM>;
}

// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
// smaller encoding, but doing so at isel time interferes with rematerialization
// in the current register allocator. For now, this is rewritten when the
// instruction is lowered to an MCInst.
// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS], isCodeGenOnly=1,
    AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0  : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
                 [(set GR64:$dst, 0)], IIC_ALU_NONMEM>;
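
// For example, MOV32r0 is ultimately emitted as "xorl %reg, %reg" (2 bytes)
// rather than "movl $0, %reg" (5 bytes); on most microarchitectures the xor
// form is also recognized as a dependency-breaking zeroing idiom.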

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "", [(set GR64:$dst, i64immZExt32:$src)],
                        IIC_ALU_NONMEM>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
// X86CodeEmitter.
def SETB_C8r  : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
                 IIC_ALU_NONMEM>;
def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
                 [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
                 IIC_ALU_NONMEM>,
                OpSize;
def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
                 IIC_ALU_NONMEM>;
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
                 [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
                 IIC_ALU_NONMEM>;
} // isCodeGenOnly


def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" on the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type. When
// this happens, it is great. However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;
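
// As a concrete sketch: when CF is set, "sbb %reg, %reg" produces all ones
// (and 0 otherwise), so "(and (sbb x,x), 1)" reconstructs setb's 0/1 result
// while keeping the all-ones form available at wider types.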

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0)
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                    [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[In32BitMode]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                    [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize,
                   Requires<[In32BitMode]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                    [(X86rep_movs i32)], IIC_REP_MOVS>, REP,
                   Requires<[In32BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                    [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                    [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize,
                   Requires<[In64BitMode]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                    [(X86rep_movs i32)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                    [(X86rep_movs i64)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                      [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[In32BitMode]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                      [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize,
                     Requires<[In32BitMode]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                      [(X86rep_stos i32)], IIC_REP_STOS>, REP,
                     Requires<[In32BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                      [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                      [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize,
                     Requires<[In64BitMode]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                      [(X86rep_stos i32)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                      [(X86rep_stos i64)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;
}

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_addr32",
                  [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[In32BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_base_addr32",
                  [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                  Requires<[In32BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                  [(X86tlsaddr tls64addr:$sym)]>,
                  Requires<[In64BitMode]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_base_addr64",
                  [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                  Requires<[In64BitMode]>;
}

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax. %ecx is trashed during the function
// call. All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
    Uses = [ESP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                "# TLSCall_32",
                [(X86TLSCall addr:$sym)]>,
              Requires<[In32BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, on return
// the address of the variable is in %rax. All other registers are preserved.
let Defs = [RAX, EFLAGS],
    Uses = [RSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                  "# TLSCall_64",
                  [(X86TLSCall addr:$sym)]>,
                Requires<[In64BitMode]>;

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// X86 doesn't have 8-bit conditional moves. Use a customInserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// however that requires promoting the operands, and can induce additional
// i8 register pressure.
let usesCustomInserter = 1, Uses = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;

let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                    "#CMOV_GR32* PSEUDO!",
                    [(set GR32:$dst,
                      (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                    "#CMOV_GR16* PSEUDO!",
                    [(set GR16:$dst,
                      (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
                    (outs RFP32:$dst),
                    (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                    "#CMOV_RFP32 PSEUDO!",
                    [(set RFP32:$dst,
                      (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                                                  EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
                    (outs RFP64:$dst),
                    (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                    "#CMOV_RFP64 PSEUDO!",
                    [(set RFP64:$dst,
                      (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                                                  EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                    (outs RFP80:$dst),
                    (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                    "#CMOV_RFP80 PSEUDO!",
                    [(set RFP80:$dst,
                      (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                                                  EFLAGS))]>;
} // Predicates = [NoCMov]
} // UsesCustomInserter = 1, Uses = [EFLAGS]
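
// A rough sketch of what the custom inserter emits for these pseudos: a
// conditional branch on $cond around a block that provides one of the two
// operands, with the result merged by a PHI in the join block:
//     jCC  .LBB_true            # branch on EFLAGS
//     ...                       # fall through supplies $src2
// .LBB_join:
//     $dst = PHI [$src1, true-block], [$src2, fallthrough-block]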

//===----------------------------------------------------------------------===//
// Atomic Instruction Pseudo Instructions
//===----------------------------------------------------------------------===//

// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
                  usesCustomInserter = 1 in {

def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMAND8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMOR8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMXOR8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMNAND8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;

def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMAND16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMOR16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMXOR16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMNAND16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
               "#ATOMMIN16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMMAX16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMUMIN16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMUMAX16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;

def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMAND32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMOR32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMXOR32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMNAND32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
               "#ATOMMIN32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMMAX32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMUMIN32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMUMAX32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;

def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMXOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMNAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMUMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMUMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}

let Constraints = "$val1 = $dst1, $val2 = $dst2",
                  Defs = [EFLAGS, EAX, EBX, ECX, EDX],
                  Uses = [EAX, EBX, ECX, EDX],
                  mayLoad = 1, mayStore = 1,
                  usesCustomInserter = 1 in {
def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                               (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMAND6432 PSEUDO!", []>;
def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                              (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                   "#ATOMOR6432 PSEUDO!", []>;
def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                               (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMXOR6432 PSEUDO!", []>;
def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                                (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMNAND6432 PSEUDO!", []>;
def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                               (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMADD6432 PSEUDO!", []>;
def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                               (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMSUB6432 PSEUDO!", []>;
def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                                (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMSWAP6432 PSEUDO!", []>;
}

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "lock\n\t"
                      "or{l}\t{$zero, $dst|$dst, $zero}",
                      [], IIC_ALU_MEM>, Requires<[In32BitMode]>, LOCK;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>;
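
// The locked "or" above is usable as a memory fence on targets without
// MFENCE: any locked read-modify-write is fully serializing, and or-ing a
// zero register into a stack slot leaves memory contents unchanged.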

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {

def #NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                  MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                  !strconcat("lock\n\t", mnemonic, "{b}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, LOCK;
def #NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   !strconcat("lock\n\t", mnemonic, "{w}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, OpSize, LOCK;
def #NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   !strconcat("lock\n\t", mnemonic, "{l}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, LOCK;
def #NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                     RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                    MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    !strconcat("lock\n\t", mnemonic, "{q}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    [], IIC_ALU_NONMEM>, LOCK;

def #NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                     ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                    ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                    !strconcat("lock\n\t", mnemonic, "{b}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    [], IIC_ALU_MEM>, LOCK;

def #NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                       ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;

def #NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                       ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;

def #NAME#64mi32 : RIi32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat("lock\n\t", mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [], IIC_ALU_MEM>, LOCK;

def #NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;
def #NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;
def #NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                        ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                       ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                       !strconcat("lock\n\t", mnemonic, "{q}\t",
                                  "{$src2, $dst|$dst, $src2}"),
                       [], IIC_ALU_MEM>, LOCK;

}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
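
// Each defm above instantiates the whole family; e.g. LOCK_ADD produces
// LOCK_ADD8mr through LOCK_ADD64mi8, all of which emit "lock" followed by
// the corresponding add form.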

// Optimized codegen when the non-memory output is not used.
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {

def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "inc{b}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "inc{w}\t$dst", [], IIC_UNARY_MEM>, OpSize, LOCK;
def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "inc{l}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "inc{q}\t$dst", [], IIC_UNARY_MEM>, LOCK;

def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "dec{b}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "dec{w}\t$dst", [], IIC_UNARY_MEM>, OpSize, LOCK;
def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "dec{l}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "dec{q}\t$dst", [], IIC_UNARY_MEM>, LOCK;
}

// Atomic compare and swap.
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    isCodeGenOnly = 1 in
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
               "lock\n\t"
               "cmpxchg8b\t$ptr",
               [(X86cas8 addr:$ptr)], IIC_CMPX_LOCK_8B>, TB, LOCK;

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    isCodeGenOnly = 1 in
def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
                     "lock\n\t"
                     "cmpxchg16b\t$ptr",
                     [(X86cas16 addr:$ptr)], IIC_CMPX_LOCK_16B>, TB, LOCK,
                     Requires<[HasCmpxchg16b]>;

let Defs = [AL, EFLAGS], Uses = [AL], isCodeGenOnly = 1 in {
def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
                  "lock\n\t"
                  "cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
                  [(X86cas addr:$ptr, GR8:$swap, 1)], IIC_CMPX_LOCK_8>, TB, LOCK;
}

let Defs = [AX, EFLAGS], Uses = [AX], isCodeGenOnly = 1 in {
def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
                   "lock\n\t"
                   "cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR16:$swap, 2)], IIC_CMPX_LOCK>, TB, OpSize, LOCK;
}

let Defs = [EAX, EFLAGS], Uses = [EAX], isCodeGenOnly = 1 in {
def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
                   "lock\n\t"
                   "cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR32:$swap, 4)], IIC_CMPX_LOCK>, TB, LOCK;
}

let Defs = [RAX, EFLAGS], Uses = [RAX], isCodeGenOnly = 1 in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchg{q}\t{$swap, $ptr|$ptr, $swap}",
                    [(X86cas addr:$ptr, GR64:$swap, 8)], IIC_CMPX_LOCK>, TB, LOCK;
}
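
// Reminder of the cmpxchg contract these patterns rely on: the accumulator
// (AL/AX/EAX/RAX) holds the expected value; if it matches memory, $swap is
// stored and ZF is set, otherwise the current memory value is loaded back
// into the accumulator and ZF is cleared.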

// Atomic exchange and add
let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in {
def LXADD8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
                "lock\n\t"
                "xadd{b}\t{$val, $ptr|$ptr, $val}",
                [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))],
                IIC_XADD_LOCK_MEM8>, LOCK;
def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr),
                "lock\n\t"
                "xadd{w}\t{$val, $ptr|$ptr, $val}",
                [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))],
                IIC_XADD_LOCK_MEM>, OpSize, LOCK;
def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr),
                "lock\n\t"
                "xadd{l}\t{$val, $ptr|$ptr, $val}",
                [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))],
                IIC_XADD_LOCK_MEM>, LOCK;
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
                 "lock\n\t"
                 "xadd{q}\t{$val, $ptr|$ptr, $val}",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))],
                 IIC_XADD_LOCK_MEM>, LOCK;
}

def ACQUIRE_MOV8rm  : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR8:$dst,  (atomic_load_8  addr:$src))]>;
def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR64:$dst, (atomic_load_64 addr:$src))]>;

def RELEASE_MOV8mr  : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_8  addr:$dst, GR8 :$src)]>;
def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_64 addr:$dst, GR64:$src)]>;

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions.
//===----------------------------------------------------------------------===//


// CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
// instruction selection into a branch sequence.
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                                  EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                                  EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V8F32 : I<0, Pseudo,
                    (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                    "#CMOV_V8F32 PSEUDO!",
                    [(set VR256:$dst,
                      (v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V4F64 : I<0, Pseudo,
                    (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                    "#CMOV_V4F64 PSEUDO!",
                    [(set VR256:$dst,
                      (v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V4I64 : I<0, Pseudo,
                    (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                    "#CMOV_V4I64 PSEUDO!",
                    [(set VR256:$dst,
                      (v4i64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                          EFLAGS)))]>;
}

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable, when not in the
// small code model, should use 'movabs'.  FIXME: This is really a hack, the
// 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In static codegen with small code model, we can get the address of a label
// into a register with 'movl'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates.  FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsStatic]>;

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
// This corresponds to mov foo@tpoff(%rbx), %eax
def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
          (MOV64rm tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// tailcall stuff
def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[In32BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
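
// For example, "testl %eax, %eax" encodes in 2 bytes while "cmpl $0, %eax"
// needs at least 3, and the two set the relevant flags identically when
// comparing a register against zero.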

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
            (Inst16 GR16:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
            (Inst32 GR32:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
            (Inst64 GR64:$src2, addr:$src1)>;
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
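
// For example, "movzbl (%rdi), %eax" defines all 64 bits of RAX (writing
// EAX implicitly zeroes bits 63:32), so later users of RAX do not inherit a
// false dependence on its previous contents the way a plain "movb" load
// would.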

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                      (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8  :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

// Any instruction that defines a 32-bit result leaves the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. And x86's cmov doesn't do anything if the
// condition is false. But any other 32-bit operation will zero-extend
// up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
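
// For example, a plain 32-bit load qualifies as def32: "movl (%rdi), %eax"
// already zeroes bits 63:32 of RAX, so the zext above costs nothing beyond
// the SUBREG_TO_REG re-interpretation.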

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read. To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  APInt KnownZero0, KnownOne0;
  CurDAG->ComputeMaskedBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->ComputeMaskedBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
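
// For example, in (or (shl x, 8), 255) the two operands share no set bits,
// so the or is equivalent to an add; selecting it as ADD32rr_DB keeps the
// option of 3-addressifying it into an LEA later.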

// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before selecting to OR

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
}
} // AddedComplexity


//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
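
// For example, "addl $128, %reg" must use the imm32 form (4 immediate
// bytes), while the equivalent "subl $-128, %reg" fits the sign-extended
// imm8 form (1 immediate byte).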

// The same trick applies for 32-bit immediate fields in 64-bit
// operations.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.

def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;

// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
           (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
            (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
             sub_16bit)>,
      Requires<[In32BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
           (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
           (EXTRACT_SUBREG (MOVZX32rr8 (i8
            (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
      Requires<[In64BitMode]>;
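
// The COPY_TO_REGCLASS to GR*_ABCD above is needed in 32-bit mode because
// only EAX/EBX/ECX/EDX have an addressable low-byte subregister there; in
// 64-bit mode every GPR has one, hence the simpler patterns.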

// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
            (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
            sub_16bit)>,
      Requires<[In32BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8
            (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
1388 // h-register tricks
1389 def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
1390 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
1392 Requires<[In32BitMode]>;
1393 def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
1394 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
1396 Requires<[In32BitMode]>;
1397 def : Pat<(srl GR16:$src, (i8 8)),
1400 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
1403 Requires<[In32BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
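
// For example (a sketch): with the patterns above, "(x >> 8) & 0xff" on an
// i32 collapses to a single
//   movzbl %ah, %eax
// reading bits 15:8 through the high-byte subregister instead of emitting
// a shift followed by a mask.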

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.
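
// A sketch of the encoding constraint: once a REX prefix is present, the
// encodings for AH/BH/CH/DH are reinterpreted as SPL/BPL/SIL/DIL, so e.g.
//   movzbl %ah, %r8d
// cannot be encoded (%r8d requires REX). The _NOREX instruction variants
// used below confine the other operand to registers that need no REX.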

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;

// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
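
// For example (a sketch): "x << 1" is emitted as
//   addl %eax, %eax
// rather than "shll $1, %eax"; on typical x86 cores the add form executes
// on more ports and is generally at least as fast.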

// Helper imms that check if a mask doesn't change significant shift bits.
def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 5; }]>;
def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 6; }]>;
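
// A sketch of why five (resp. six) trailing ones suffice: the hardware
// masks the count in CL to 5 bits for 8/16/32-bit shifts and to 6 bits for
// 64-bit shifts, so any mask that keeps at least that many low bits intact
// (31, 63, 127, 255, ...) cannot change the shift result.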

// (shl x (and y, 31)) ==> (shl x, y)
def : Pat<(shl GR8:$src1, (and CL, immShift32)),
          (SHL8rCL GR8:$src1)>;
def : Pat<(shl GR16:$src1, (and CL, immShift32)),
          (SHL16rCL GR16:$src1)>;
def : Pat<(shl GR32:$src1, (and CL, immShift32)),
          (SHL32rCL GR32:$src1)>;
def : Pat<(store (shl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL8mCL addr:$dst)>;
def : Pat<(store (shl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL16mCL addr:$dst)>;
def : Pat<(store (shl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL32mCL addr:$dst)>;

def : Pat<(srl GR8:$src1, (and CL, immShift32)),
          (SHR8rCL GR8:$src1)>;
def : Pat<(srl GR16:$src1, (and CL, immShift32)),
          (SHR16rCL GR16:$src1)>;
def : Pat<(srl GR32:$src1, (and CL, immShift32)),
          (SHR32rCL GR32:$src1)>;
def : Pat<(store (srl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR8mCL addr:$dst)>;
def : Pat<(store (srl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR16mCL addr:$dst)>;
def : Pat<(store (srl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR32mCL addr:$dst)>;

def : Pat<(sra GR8:$src1, (and CL, immShift32)),
          (SAR8rCL GR8:$src1)>;
def : Pat<(sra GR16:$src1, (and CL, immShift32)),
          (SAR16rCL GR16:$src1)>;
def : Pat<(sra GR32:$src1, (and CL, immShift32)),
          (SAR32rCL GR32:$src1)>;
def : Pat<(store (sra (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR8mCL addr:$dst)>;
def : Pat<(store (sra (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR16mCL addr:$dst)>;
def : Pat<(store (sra (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR32mCL addr:$dst)>;
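
// For example (a sketch): source like "x >> (n & 31)" selects a bare
//   shrl %cl, %eax
// with no preceding "andl $31, %ecx"; the mask is already implied by the
// instruction's count semantics, so only SHR32rCL is emitted.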

// (shl x (and y, 63)) ==> (shl x, y)
def : Pat<(shl GR64:$src1, (and CL, immShift64)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL, immShift64)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL, immShift64)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
          (SAR64mCL addr:$dst)>;

// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
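
// A sketch of why the extension is free: SETB_C32r expands to a subtract-
// with-borrow of a register with itself, e.g.
//   sbbl %eax, %eax
// which materializes 0 or -1 from the carry flag. The value is already
// replicated across the whole register, so a wider anyext can reuse it
// directly at the wider width.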

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
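
// A sketch of the i32immSExt8 split: immediates that fit in a sign-extended
// byte get the short encoding, e.g. "addl $8, %ecx" is
//   83 c1 08             (3 bytes, ADD32ri8)
// while "addl $1000, %ecx" needs the full-width immediate:
//   81 c1 e8 03 00 00    (6 bytes, ADD32ri)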

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
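
// A sketch: when the flag result of "0 - x" is used, a single
//   negl %eax
// is selected; NEG sets EFLAGS exactly as the corresponding subtract from
// zero would, without needing a zeroed register for the left operand.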

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
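
// For example (a sketch): the rmi forms are the three-operand IMUL, so a
// load and a constant multiply can fold into one instruction:
//   imull $10, (%ecx), %eax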

// Patterns for nodes that do not produce flags, for instructions that do.

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment reg.
def : Pat<(add GR8 :$src, 1), (INC8r     GR8 :$src)>;
def : Pat<(add GR16:$src, 1), (INC16r    GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC32r    GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1), (INC64r    GR64:$src)>;

// Decrement reg.
def : Pat<(add GR8 :$src, -1), (DEC8r     GR8 :$src)>;
def : Pat<(add GR16:$src, -1), (DEC16r    GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC32r    GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, -1), (DEC64r    GR64:$src)>;
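
// A sketch of why the INC64_16r/INC64_32r variants exist: in 64-bit mode
// the one-byte encodings 40-4F are repurposed as REX prefixes, so inc/dec
// must use the longer FF /0 and FF /1 forms there, e.g.
//   incl %eax   ; encoded 40 in 32-bit mode, ff c0 in 64-bit mode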

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
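
// A sketch of why only the zero-undef form maps directly to BSF: hardware
// BSF leaves its destination undefined when the source is zero, which
// matches llvm.cttz with is_zero_undef set. A plain cttz of a possibly-zero
// value needs an extra test and select around
//   bsfl %ecx, %eax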