//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;
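
// For illustration: an SDNodeXForm runs on the matched immediate node at
// select time, so a pattern can hand the narrowed constant straight to the
// selected instruction. A pattern later in this file does exactly that:
//   def : Pat<(and GR64:$src, i64immZExt32:$imm),
//             ... (i32 (GetLo32XForm imm:$imm)) ...>;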
//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[Not64BitMode]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[Not64BitMode]>;
}
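
// For example, a call with 16 bytes of outgoing arguments ends up bracketed
// roughly as follows once the call frame pseudos are eliminated (a sketch;
// frame lowering may fold or merge the adjustments):
//   subl $16, %esp        ; ADJCALLSTACKDOWN32 16
//   ...the call itself...
//   addl $16, %esp        ; ADJCALLSTACKUP32 16, 0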

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In64BitMode]>;
}

// x86-64 va_start lowering magic.
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;
}

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more
// than 4k bytes in one go. Touching the stack at 4K increments is necessary
// to ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to an ordinary call) like the stack pointer change.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                     "# dynamic stack allocation",
                     [(X86WinAlloca)]>;
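
// A probed allocation is expected to look roughly like this (a sketch for
// 32-bit Windows; the exact helper and sequence are CRT-specific):
//   movl $12288, %eax     ; size to allocate
//   calll __chkstk        ; touches each 4K page and adjusts %esp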

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[Not64BitMode]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

// The MSVC runtime contains an _ftol2 routine for converting floating-point
// to integer values. It has a strange calling convention: the input is
// popped from the x87 stack, and the return value is given in EDX:EAX. ECX is
// used as a temporary register. No other registers (aside from flags) are
// touched.
// Microsoft toolchains do not support 80-bit precision, so a WIN_FTOL_80
// variant is unnecessary.

let Defs = [EAX, EDX, ECX, EFLAGS], FPForm = SpecialFP in {
def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src),
                    "# win32 fptoui",
                    [(X86WinFTOL RFP32:$src)]>,
                  Requires<[Not64BitMode]>;

def WIN_FTOL_64 : I<0, Pseudo, (outs), (ins RFP64:$src),
                    "# win32 fptoui",
                    [(X86WinFTOL RFP64:$src)]>,
                  Requires<[Not64BitMode]>;
}
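
// The emitted call sequence therefore looks roughly like (a sketch):
//   fldl  <src>           ; push the input onto the x87 stack
//   calll __ftol2         ; pops ST(0); 64-bit result lands in EDX:EAX,
//                         ; ECX is clobbered as a scratch register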

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower.  We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1 in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
                     "", []>;

// This instruction is lowered to a RET followed by a MOV.  The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
                                  "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1 in
def MOV32r0  : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
  let AddedComplexity = 20;
}

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1, neverHasSideEffects = 1 in
def MOV32ri64 : Ii32<0xb8, AddRegFrm, (outs GR32:$dst), (ins i64i32imm:$src),
                     "", [], IIC_ALU_NONMEM>, Sched<[WriteALU]>;

// This 64-bit pseudo-move can be used for both a 64-bit constant that is
// actually the zero-extension of a 32-bit constant, and for labels in the
// x86-64 small code model.
def mov64imm32 : ComplexPattern<i64, 1, "SelectMOV64Imm32", [imm, X86Wrapper]>;

let AddedComplexity = 1 in
def : Pat<(i64 mov64imm32:$src),
          (SUBREG_TO_REG (i64 0), (MOV32ri64 mov64imm32:$src), sub_32bit)>;
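
// E.g. the constant (i64 0x12345678) selects to
//   (SUBREG_TO_REG (i64 0), (MOV32ri64 0x12345678), sub_32bit)
// a plain 32-bit movl, whose result implicitly zeroes bits 63:32.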

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C8r : I<0, Pseudo, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "",
                  [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                  [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "",
                  [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
}

def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" on the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type.  When
// this happens, it is great.  However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;
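
// Why this works: "sbb %reg, %reg" computes reg - reg - CF = -CF, i.e.
// all-ones when the carry flag is set and zero otherwise; masking with 1
// therefore recovers exactly CF, which is what setb produces directly.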

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0)
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;
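
// The last identity holds because SETCC_CARRY is -CF (all-ones or zero), so
// OP - (-CF) = OP + CF, which is exactly "adc OP, 0".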

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                     [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                    Requires<[Not64BitMode]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                     [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                    Requires<[Not64BitMode]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                     [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                    Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                     [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                    Requires<[In64BitMode]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                     [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                    Requires<[In64BitMode]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                     [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                    Requires<[In64BitMode]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                      [(X86rep_movs i64)], IIC_REP_MOVS>, REP,
                     Requires<[In64BitMode]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                       [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                      Requires<[Not64BitMode]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                       [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                      Requires<[Not64BitMode]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                       [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                      Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                       [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                      Requires<[In64BitMode]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                       [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                      Requires<[In64BitMode]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                       [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                      Requires<[In64BitMode]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                        [(X86rep_stos i64)], IIC_REP_STOS>, REP,
                       Requires<[In64BitMode]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//

// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                        "# TLS_base_addr32",
                        [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                       Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                  Requires<[In64BitMode]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                        "# TLS_base_addr64",
                        [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                       Requires<[In64BitMode]>;
}

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
    Uses = [ESP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, on return
// the address of the variable is in %rax.  All other registers are preserved.
let Defs = [RAX, EFLAGS],
    Uses = [RSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// X86 doesn't have 8-bit conditional moves. Use a customInserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// however that requires promoting the operands, and can induce additional
// i8 register pressure.
let usesCustomInserter = 1, Uses = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;
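
// The custom inserter expands each of these into a branch diamond, roughly:
//   thisMBB:  jCC sinkMBB            ; branch on the EFLAGS condition
//   copy0MBB: (fall through)
//   sinkMBB:  $dst = phi [...one source operand per predecessor...]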

let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                  "#CMOV_GR32* PSEUDO!",
                  [(set GR32:$dst,
                    (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                  "#CMOV_GR16* PSEUDO!",
                  [(set GR16:$dst,
                    (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
} // Predicates = [NoCMov]

// fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
// SSE1.
let Predicates = [FPStackf32] in
  def CMOV_RFP32 : I<0, Pseudo,
                     (outs RFP32:$dst),
                     (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                     "#CMOV_RFP32 PSEUDO!",
                     [(set RFP32:$dst,
                       (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                                EFLAGS))]>;

// fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
// SSE2.
let Predicates = [FPStackf64] in
  def CMOV_RFP64 : I<0, Pseudo,
                     (outs RFP64:$dst),
                     (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                     "#CMOV_RFP64 PSEUDO!",
                     [(set RFP64:$dst,
                       (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                                EFLAGS))]>;

def CMOV_RFP80 : I<0, Pseudo,
                   (outs RFP80:$dst),
                   (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                   "#CMOV_RFP80 PSEUDO!",
                   [(set RFP80:$dst,
                     (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                              EFLAGS))]>;

} // UsesCustomInserter = 1, Uses = [EFLAGS]

//===----------------------------------------------------------------------===//
// Atomic Instruction Pseudo Instructions
//===----------------------------------------------------------------------===//

// Pseudo atomic instructions

multiclass PSEUDO_ATOMIC_LOAD_BINOP<string mnemonic> {
  let usesCustomInserter = 1, mayLoad = 1, mayStore = 1 in {
    let Defs = [EFLAGS, AL] in
    def NAME#8  : I<0, Pseudo, (outs GR8:$dst),
                    (ins i8mem:$ptr, GR8:$val),
                    !strconcat(mnemonic, "8 PSEUDO!"), []>;
    let Defs = [EFLAGS, AX] in
    def NAME#16 : I<0, Pseudo,(outs GR16:$dst),
                    (ins i16mem:$ptr, GR16:$val),
                    !strconcat(mnemonic, "16 PSEUDO!"), []>;
    let Defs = [EFLAGS, EAX] in
    def NAME#32 : I<0, Pseudo, (outs GR32:$dst),
                    (ins i32mem:$ptr, GR32:$val),
                    !strconcat(mnemonic, "32 PSEUDO!"), []>;
    let Defs = [EFLAGS, RAX] in
    def NAME#64 : I<0, Pseudo, (outs GR64:$dst),
                    (ins i64mem:$ptr, GR64:$val),
                    !strconcat(mnemonic, "64 PSEUDO!"), []>;
  }
}

multiclass PSEUDO_ATOMIC_LOAD_BINOP_PATS<string name, string frag> {
  def : Pat<(!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val),
            (!cast<Instruction>(name # "8") addr:$ptr, GR8:$val)>;
  def : Pat<(!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val),
            (!cast<Instruction>(name # "16") addr:$ptr, GR16:$val)>;
  def : Pat<(!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val),
            (!cast<Instruction>(name # "32") addr:$ptr, GR32:$val)>;
  def : Pat<(!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val),
            (!cast<Instruction>(name # "64") addr:$ptr, GR64:$val)>;
}

// Atomic exchange, and, or, xor
defm ATOMAND  : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMAND">;
defm ATOMOR   : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMOR">;
defm ATOMXOR  : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMXOR">;
defm ATOMNAND : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMNAND">;
defm ATOMMAX  : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMMAX">;
defm ATOMMIN  : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMMIN">;
defm ATOMUMAX : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMUMAX">;
defm ATOMUMIN : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMUMIN">;

defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMAND",  "atomic_load_and">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMOR",   "atomic_load_or">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMXOR",  "atomic_load_xor">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMNAND", "atomic_load_nand">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMMAX",  "atomic_load_max">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMMIN",  "atomic_load_min">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMUMAX", "atomic_load_umax">;
defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMUMIN", "atomic_load_umin">;
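
// For instance, the ATOMAND expansion above is equivalent to writing:
//   def : Pat<(atomic_load_and_32 addr:$ptr, GR32:$val),
//             (ATOMAND32 addr:$ptr, GR32:$val)>;
// for each of the four widths.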

multiclass PSEUDO_ATOMIC_LOAD_BINOP6432<string mnemonic> {
  let usesCustomInserter = 1, Defs = [EFLAGS, EAX, EDX],
      mayLoad = 1, mayStore = 1, hasSideEffects = 0 in
    def NAME#6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                      (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                      !strconcat(mnemonic, "6432 PSEUDO!"), []>;
}

defm ATOMAND  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMAND">;
defm ATOMOR   : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMOR">;
defm ATOMXOR  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMXOR">;
defm ATOMNAND : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMNAND">;
defm ATOMADD  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMADD">;
defm ATOMSUB  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSUB">;
defm ATOMMAX  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMMAX">;
defm ATOMMIN  : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMMIN">;
defm ATOMUMAX : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMUMAX">;
defm ATOMUMIN : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMUMIN">;
defm ATOMSWAP : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSWAP">;

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "or{l}\t{$zero, $dst|$dst, $zero}",
                      [], IIC_ALU_MEM>, Requires<[Not64BitMode]>, LOCK,
                     Sched<[WriteALULd, WriteRMW]>;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                 !strconcat(mnemonic, "{b}\t",
                            "{$src2, $dst|$dst, $src2}"),
                 [], IIC_ALU_NONMEM>, LOCK;
def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  !strconcat(mnemonic, "{w}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, OpSize16, LOCK;
def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  !strconcat(mnemonic, "{l}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, OpSize32, LOCK;
def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   !strconcat(mnemonic, "{q}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                   ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                   !strconcat(mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_MEM>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize32, LOCK;

def NAME#64mi32 : RIi32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                         ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                        ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                        !strconcat(mnemonic, "{q}\t",
                                   "{$src2, $dst|$dst, $src2}"),
                        [], IIC_ALU_MEM>, LOCK;

def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize16, LOCK;
def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize32, LOCK;
def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                      !strconcat(mnemonic, "{q}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;

}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
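
// As an example, the 32-bit register form produced for LOCK_ADD above is,
// modulo the surrounding let flags, equivalent to hand-writing:
//   def LOCK_ADD32mr : I<0x01, MRMDestMem, (outs),
//                        (ins i32mem:$dst, GR32:$src2),
//                        "add{l}\t{$src2, $dst|$dst, $src2}",
//                        [], IIC_ALU_NONMEM>, OpSize32, LOCK;
// i.e. the ordinary add opcode with a LOCK prefix and no selection pattern.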

// Optimized codegen when the non-memory output is not used.
multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8m  : I<Opc8, Form, (outs), (ins i8mem :$dst),
                 !strconcat(mnemonic, "{b}\t$dst"),
                 [], IIC_UNARY_MEM>, LOCK;
def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
                 !strconcat(mnemonic, "{w}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize16, LOCK;
def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
                 !strconcat(mnemonic, "{l}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize32, LOCK;
def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
                  !strconcat(mnemonic, "{q}\t$dst"),
                  [], IIC_UNARY_MEM>, LOCK;
}
}

defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;
defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;

// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
                         SDPatternOperator frag, X86MemOperand x86memop,
                         InstrItinClass itin> {
let isCodeGenOnly = 1 in {
  def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),
               !strconcat(mnemonic, "\t$ptr"),
               [(frag addr:$ptr)], itin>, TB, LOCK;
}
}

multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag,
                          InstrItinClass itin8, InstrItinClass itin> {
let isCodeGenOnly = 1, SchedRW = [WriteALULd, WriteRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)], itin8>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)], itin>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
                                X86cas8, i64mem,
                                IIC_CMPX_LOCK_8B>;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
                                 X86cas16, i128mem,
                                 IIC_CMPX_LOCK_16B>, REX_W;
}

defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",
                               X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag,
                             InstrItinClass itin8, InstrItinClass itin> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALULd, WriteRMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],
                    itin8>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR16:$dst,
                          (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
                    itin>, OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR32:$dst,
                          (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
                    itin>, OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set GR64:$dst,
                           (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],
                     itin>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",
                               IIC_XADD_LOCK_MEM8, IIC_XADD_LOCK_MEM>,
             TB, LOCK;
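
// So an i32 "atomicrmw add" selects to LXADD32, which prints as
// "lock xaddl %reg, (mem)" and leaves the previous memory value in $dst.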

def ACQUIRE_MOV8rm  : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR8:$dst,  (atomic_load_8  addr:$src))]>;
def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR64:$dst, (atomic_load_64 addr:$src))]>;

def RELEASE_MOV8mr  : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_8  addr:$dst, GR8 :$src)]>;
def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_64 addr:$dst, GR64:$src)]>;

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions.
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V8F32 : I<0, Pseudo,
                    (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                    "#CMOV_V8F32 PSEUDO!",
                    [(set VR256:$dst,
                      (v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V4F64 : I<0, Pseudo,
                    (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                    "#CMOV_V4F64 PSEUDO!",
                    [(set VR256:$dst,
                      (v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V4I64 : I<0, Pseudo,
                    (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                    "#CMOV_V4I64 PSEUDO!",
                    [(set VR256:$dst,
                      (v4i64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V8I64 : I<0, Pseudo,
                    (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
                    "#CMOV_V8I64 PSEUDO!",
                    [(set VR512:$dst,
                      (v8i64 (X86cmov VR512:$t, VR512:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V8F64 : I<0, Pseudo,
                    (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
                    "#CMOV_V8F64 PSEUDO!",
                    [(set VR512:$dst,
                      (v8f64 (X86cmov VR512:$t, VR512:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V16F32 : I<0, Pseudo,
                    (outs VR512:$dst), (ins VR512:$t, VR512:$f, i8imm:$cond),
                    "#CMOV_V16F32 PSEUDO!",
                    [(set VR512:$dst,
                      (v16f32 (X86cmov VR512:$t, VR512:$f, imm:$cond,
                                       EFLAGS)))]>;
} // Uses = [EFLAGS], usesCustomInserter = 1

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable when not in small
// code model mode, should use 'movabs'.  FIXME: This is really a hack, the
// 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates.  FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsStatic]>;

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[Not64BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[Not64BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[Not64BitMode]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[Not64BitMode]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
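
// (For example, "testl %eax, %eax" encodes in 2 bytes, while the short form
// of "cmpl $0, %eax" still needs 3.)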

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  let Predicates = [HasCMov] in {
    def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
              (Inst16 GR16:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
              (Inst32 GR32:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
              (Inst64 GR64:$src2, addr:$src1)>;
  }
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8  GR8  :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;


// Any instruction that defines a 32-bit result implicitly zero-extends into
// the high half of the 64-bit register, with a few exceptions: Truncate can
// be lowered to EXTRACT_SUBREG. CopyFromReg may be copying from a truncate.
// And x86's cmov doesn't do anything if the condition is false. But any
// other 32-bit operation will zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
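
// E.g. for (i64 (zext (add GR32:$a, GR32:$b))) no movl is needed: the 32-bit
// add already cleared bits 63:32, so SUBREG_TO_REG merely retypes the value.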

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies.  However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read.  To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  APInt KnownZero0, KnownOne0;
  CurDAG->ComputeMaskedBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->ComputeMaskedBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
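
// Example: in (or (shl GR32:$x, 4), 7) the shifted operand has its low four
// bits known zero and the constant only occupies those bits, so the node is
// selected as ADD32ri8_DB below and may later be 3-addressified into an LEA.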

// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
// Try this before selecting to OR.
let AddedComplexity = 5, SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
}
} // AddedComplexity, SchedRW

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
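
// (Concretely: "addl $128, %eax" needs a 4-byte immediate, while
// "subl $-128, %eax" encodes the immediate in a single byte.)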

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.

def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;

// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
            (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
            sub_16bit)>,
      Requires<[Not64BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src,
                                                           sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src,
                                                         sub_8bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (i8
            (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
            (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
            sub_16bit)>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8
            (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.
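//
// For example (an encoding-level sketch): "movzbl %ah, %ecx" is encodable,
// but "movzbl %ah, %r8d" is not -- an instruction carrying a REX prefix
// cannot address AH/BH/CH/DH. The *_NOREX instruction forms used below
// constrain allocation so that illegal combination can never be formed.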

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;

// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
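
// Illustrative mapping (a sketch): for a 32-bit value in %eax,
//
//   shll $1, %eax   ==>   addl %eax, %eax
//
// and the add form can later be converted to an LEA by the two-address pass
// when that helps register allocation.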

// Helper imms that check if a mask doesn't change significant shift bits.
def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 5; }]>;
def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 6; }]>;

// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (and CL, immShift64)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
defm : MaskedShiftAmountPats<rotl, "ROL">;
defm : MaskedShiftAmountPats<rotr, "ROR">;
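
// Illustrative mapping (a sketch): the hardware masks the CL shift count to
// 5 bits for 8/16/32-bit operations (6 bits for 64-bit), so a source-level
// "x >> (n & 31)" needs no explicit AND:
//
//   andb $31, %cl ; shrl %cl, %eax   ==>   shrl %cl, %eax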

// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
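
// Illustrative note (a sketch): X86setcc_c with COND_B is materialized as a
// "sbb %reg, %reg", which already produces 0 or -1 across the whole register,
// so anyext of the narrow form can simply use the wider SETB_C pseudo.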

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8:$src1 , imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;

// Patterns for nodes that do not produce flags, for instructions that do.
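// For instance (a sketch of the reasoning): a plain (add x, y) node carries
// no flag result, yet ADD64rr implicitly defines EFLAGS. The patterns below
// are still safe because the extra EFLAGS def is simply dead whenever
// nothing reads it.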

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment reg.
def : Pat<(add GR8 :$src, 1), (INC8r GR8 :$src)>;
def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>, Requires<[Not64BitMode]>;
def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>, Requires<[Not64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;

// Decrement reg.
def : Pat<(add GR8 :$src, -1), (DEC8r GR8 :$src)>;
def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>, Requires<[Not64BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>, Requires<[Not64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
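
// Illustrative note (a sketch): cttz_zero_undef promises the input is
// non-zero (otherwise the result may be undef), which matches BSF exactly,
// since BSF leaves its destination undefined for a zero source:
//
//   cttz_zero_undef(%eax)   ==>   bsfl %eax, %eax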

// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
  def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
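  // Illustrative mapping (a sketch): byte-swapping a 16-bit value is the
  // same as rotating it by 8 bits, so the pattern above selects to:
  //
  //   rolw $8, %ax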