//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;
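
// Illustrative example (not part of the original source): applied to an i64
// immediate such as 0x00000000FFFF0001, GetLo32XForm yields the i32 constant
// 0xFFFF0001 and GetLo8XForm yields the i8 constant 0x01. The AND patterns
// near the end of this file use these transforms to shrink 64-bit masks.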

//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKDOWN",
                           []>,
                         Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                         Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1),
          (ADJCALLSTACKDOWN32 i32imm:$amt1, 0)>, Requires<[NotLP64]>;

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKDOWN",
                           []>,
                         Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                         Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1),
          (ADJCALLSTACKDOWN64 i32imm:$amt1, 0)>, Requires<[IsLP64]>;

// x86-64 va_start lowering magic.
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;
}

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more
// than 4k bytes in one go.  Touching the stack at 4K increments is necessary
// to ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects it has (compared to an ordinary call), such as changing the stack
// pointer.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                   "# dynamic stack allocation",
                   [(X86WinAlloca)]>;

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

// The MSVC runtime contains an _ftol2 routine for converting floating-point
// to integer values. It has a strange calling convention: the input is
// popped from the x87 stack, and the return value is given in EDX:EAX. ECX
// is used as a temporary register. No other registers (aside from flags) are
// touched.
// Microsoft toolchains do not support 80-bit precision, so a WIN_FTOL_80
// variant is unnecessary.

let Defs = [EAX, EDX, ECX, EFLAGS], FPForm = SpecialFP in {
def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src),
                    "# win32 fptoui",
                    [(X86WinFTOL RFP32:$src)]>,
                  Requires<[Not64BitMode]>;

def WIN_FTOL_64 : I<0, Pseudo, (outs), (ins RFP64:$src),
                    "# win32 fptoui",
                    [(X86WinFTOL RFP64:$src)]>,
                  Requires<[Not64BitMode]>;
}
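
// Illustrative use of the convention described above (a sketch, not emitted
// verbatim from this file): converting an x87 value to an integer via the
// MSVC runtime looks roughly like
//     fldl x           ; push the input onto the x87 stack
//     calll __ftol2    ; pops the input; result is returned in EDX:EAX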

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//===----------------------------------------------------------------------===//
let isPseudo = 1 in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                      "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                         "#SEH_StackAlloc $size", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                       "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                        "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                          "#SEH_EndPrologue", []>;
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                       "#SEH_Epilogue", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//===----------------------------------------------------------------------===//

// This is lowered into a RET instruction by MCInstLower.  We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1 in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
                     "", []>;

// This instruction is lowered to a RET followed by a MOV.  The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
                                  "", []>;
}
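
// Roughly (a sketch of the MC lowering mentioned above): MORESTACK_RET
// becomes a bare "ret", and MORESTACK_RET_RESTORE_R10 becomes the pair
//     ret
//     movq %rax, %r10
// emitted as one unsplittable unit so the verifier never sees them apart.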

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1 in
def MOV32r0  : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
  let AddedComplexity = 20;
}

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1, hasSideEffects = 0 in
def MOV32ri64 : Ii32<0xb8, AddRegFrm, (outs GR32:$dst), (ins i64i32imm:$src),
                     "", [], IIC_ALU_NONMEM>, Sched<[WriteALU]>;

// This 64-bit pseudo-move can be used for both a 64-bit constant that is
// actually the zero-extension of a 32-bit constant, and for labels in the
// x86-64 small code model.
def mov64imm32 : ComplexPattern<i64, 1, "SelectMOV64Imm32", [imm, X86Wrapper]>;

let AddedComplexity = 1 in
def : Pat<(i64 mov64imm32:$src),
          (SUBREG_TO_REG (i64 0), (MOV32ri64 mov64imm32:$src), sub_32bit)>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C8r : I<0, Pseudo, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "",
                  [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                  [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "",
                  [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
} // Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU]

def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" on the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type.  When
// this happens, it is great.  However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0)
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;
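
// Why the SETCC_CARRY subtractions above become ADC: X86setcc_c materializes
// the carry flag as all-ones (an sbb reg,reg yields 0 or -1), so with CF in
// {0,1} we have x - (-CF) == x + CF, which is exactly "adc x, 0".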

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//===----------------------------------------------------------------------===//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                     [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[Not64BitMode]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                     [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                   Requires<[Not64BitMode]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                     [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                   Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                     [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                     [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                   Requires<[In64BitMode]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                     [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                   Requires<[In64BitMode]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                      [(X86rep_movs i64)], IIC_REP_MOVS>, REP,
                    Requires<[In64BitMode]>;
}
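
// As a reminder of the underlying semantics: "rep movs" copies ECX/RCX
// elements from [ESI/RSI] to [EDI/RDI], advancing both pointers as it goes,
// which is why the count and both pointer registers appear in both Uses and
// Defs of the definitions above.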

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                       [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[Not64BitMode]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                       [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                     Requires<[Not64BitMode]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                       [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                     Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                       [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                       [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                     Requires<[In64BitMode]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                       [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                     Requires<[In64BitMode]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                        [(X86rep_stos i64)], IIC_REP_STOS>, REP,
                      Requires<[In64BitMode]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//

// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                 Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                        "# TLS_base_addr32",
                        [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                      Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                        "# TLS_base_addr64",
                        [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                      Requires<[In64BitMode]>;
}
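
// Illustrative expansion (a sketch only; the exact sequence, including any
// padding prefixes required by the linker, is produced during lowering): in
// the general-dynamic model TLS_addr64 becomes roughly
//     leaq sym@TLSGD(%rip), %rdi
//     callq __tls_get_addr@PLT
// leaving the address of the TLS variable in %rax.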

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, on return
// the address of the variable is in %rax.  All other registers are preserved.
let Defs = [RAX, EFLAGS],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, imm:$cond,
                                                EFLAGS)))]>;
}
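
// A sketch of the post-isel expansion (the custom inserter builds real
// control flow, joining the two values with a PHI rather than two moves):
//     jCC  .Ltrue          ; branch on $cond
//     ...                  ; fall through: $dst gets $f
//     jmp  .Ldone
//   .Ltrue:                ; taken: $dst gets $t
//   .Ldone: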

let usesCustomInserter = 1, Uses = [EFLAGS] in {
// X86 doesn't have 8-bit conditional moves. Use a custom inserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// however that requires promoting the operands, and can induce additional
// i8 register pressure.
defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

let Predicates = [NoCMov] in {
defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
} // Predicates = [NoCMov]

// fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
// cmov.
let Predicates = [FPStackf32] in
defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;

let Predicates = [FPStackf64] in
defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;

defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;

defm _FR32   : CMOVrr_PSEUDO<FR32, f32>;
defm _FR64   : CMOVrr_PSEUDO<FR64, f64>;
defm _V4F32  : CMOVrr_PSEUDO<VR128, v4f32>;
defm _V2F64  : CMOVrr_PSEUDO<VR128, v2f64>;
defm _V2I64  : CMOVrr_PSEUDO<VR128, v2i64>;
defm _V8F32  : CMOVrr_PSEUDO<VR256, v8f32>;
defm _V4F64  : CMOVrr_PSEUDO<VR256, v4f64>;
defm _V4I64  : CMOVrr_PSEUDO<VR256, v4i64>;
defm _V8I64  : CMOVrr_PSEUDO<VR512, v8i64>;
defm _V8F64  : CMOVrr_PSEUDO<VR512, v8f64>;
defm _V16F32 : CMOVrr_PSEUDO<VR512, v16f32>;
} // usesCustomInserter = 1, Uses = [EFLAGS]

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "or{l}\t{$zero, $dst|$dst, $zero}",
                      [], IIC_ALU_MEM>, Requires<[Not64BitMode]>, LOCK,
                      Sched<[WriteALULd, WriteRMW]>;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                 !strconcat(mnemonic, "{b}\t",
                            "{$src2, $dst|$dst, $src2}"),
                 [], IIC_ALU_NONMEM>, LOCK;
def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  !strconcat(mnemonic, "{w}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, OpSize16, LOCK;
def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  !strconcat(mnemonic, "{l}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, OpSize32, LOCK;
def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   !strconcat(mnemonic, "{q}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                   ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                   !strconcat(mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_MEM>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat(mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [], IIC_ALU_MEM>, LOCK;

def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize16, LOCK;
def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize32, LOCK;
def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                      !strconcat(mnemonic, "{q}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;

}
}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
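
// For example, the "add" line above instantiates LOCK_ADD8mr, LOCK_ADD32mi8,
// and so on, printing as "lock add{b,w,l,q} ...". Their patterns are
// intentionally empty: selection from atomic IR happens in C++, roughly when
// the non-memory result of the operation is known to be unused.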

// Optimized codegen when the non-memory output is not used.
multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8m  : I<Opc8, Form, (outs), (ins i8mem :$dst),
                 !strconcat(mnemonic, "{b}\t$dst"),
                 [], IIC_UNARY_MEM>, LOCK;
def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
                 !strconcat(mnemonic, "{w}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize16, LOCK;
def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
                 !strconcat(mnemonic, "{l}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize32, LOCK;
def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
                  !strconcat(mnemonic, "{q}\t$dst"),
                  [], IIC_UNARY_MEM>, LOCK;
}
}

defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;
defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;

// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
                         SDPatternOperator frag, X86MemOperand x86memop,
                         InstrItinClass itin> {
let isCodeGenOnly = 1 in {
  def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),
               !strconcat(mnemonic, "\t$ptr"),
               [(frag addr:$ptr)], itin>, TB, LOCK;
}
}

multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag,
                          InstrItinClass itin8, InstrItinClass itin> {
let isCodeGenOnly = 1, SchedRW = [WriteALULd, WriteRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)], itin8>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)], itin>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
                                X86cas8, i64mem,
                                IIC_CMPX_LOCK_8B>;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
                                 X86cas16, i128mem,
                                 IIC_CMPX_LOCK_16B>, REX_W;
}

defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",
                               X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag,
                             InstrItinClass itin8, InstrItinClass itin> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALULd, WriteRMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],
                    itin8>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR16:$dst,
                          (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
                    itin>, OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR32:$dst,
                          (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
                    itin>, OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set GR64:$dst,
                           (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],
                     itin>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",
                               IIC_XADD_LOCK_MEM8, IIC_XADD_LOCK_MEM>,
             TB, LOCK;

/* The following multiclass tries to make sure that in code like
 *  x.store (immediate op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV
 * instructions extremely late, to prevent them from being accidentally
 * reordered in the backend (see below the RELEASE_MOV* / ACQUIRE_MOV*
 * pseudo-instructions).
 */
multiclass RELEASE_BINOP_MI<string op> {
    def NAME#8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
        "#RELEASE_BINOP PSEUDO!",
        [(atomic_store_8 addr:$dst, (!cast<PatFrag>(op)
            (atomic_load_8 addr:$dst), (i8 imm:$src)))]>;
    // NAME#16 is not generated as 16-bit arithmetic instructions are considered
    // costly and avoided as far as possible by this backend anyway
    def NAME#32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
        "#RELEASE_BINOP PSEUDO!",
        [(atomic_store_32 addr:$dst, (!cast<PatFrag>(op)
            (atomic_load_32 addr:$dst), (i32 imm:$src)))]>;
    def NAME#64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
        "#RELEASE_BINOP PSEUDO!",
        [(atomic_store_64 addr:$dst, (!cast<PatFrag>(op)
            (atomic_load_64 addr:$dst), (i64immSExt32:$src)))]>;
}
defm RELEASE_ADD : RELEASE_BINOP_MI<"add">;
defm RELEASE_AND : RELEASE_BINOP_MI<"and">;
defm RELEASE_OR  : RELEASE_BINOP_MI<"or">;
defm RELEASE_XOR : RELEASE_BINOP_MI<"xor">;
// Note: we don't deal with sub, because subtractions of constants are
// optimized into additions before this code can run.
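
// For example, with RELEASE_ADD a sequence like
//     x.store(x.load(acquire) + 5, release)
// can be selected into a single memory-destination "addl $5, (mem)". No LOCK
// prefix is needed because a release store on x86 is just a plain store.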

multiclass RELEASE_UNOP<dag dag8, dag dag16, dag dag32, dag dag64> {
    def NAME#8m : I<0, Pseudo, (outs), (ins i8mem:$dst),
        "#RELEASE_UNOP PSEUDO!",
        [(atomic_store_8 addr:$dst, dag8)]>;
    def NAME#16m : I<0, Pseudo, (outs), (ins i16mem:$dst),
        "#RELEASE_UNOP PSEUDO!",
        [(atomic_store_16 addr:$dst, dag16)]>;
    def NAME#32m : I<0, Pseudo, (outs), (ins i32mem:$dst),
        "#RELEASE_UNOP PSEUDO!",
        [(atomic_store_32 addr:$dst, dag32)]>;
    def NAME#64m : I<0, Pseudo, (outs), (ins i64mem:$dst),
        "#RELEASE_UNOP PSEUDO!",
        [(atomic_store_64 addr:$dst, dag64)]>;
}

defm RELEASE_INC : RELEASE_UNOP<
    (add (atomic_load_8  addr:$dst), (i8 1)),
    (add (atomic_load_16 addr:$dst), (i16 1)),
    (add (atomic_load_32 addr:$dst), (i32 1)),
    (add (atomic_load_64 addr:$dst), (i64 1))>, Requires<[NotSlowIncDec]>;
defm RELEASE_DEC : RELEASE_UNOP<
    (add (atomic_load_8  addr:$dst), (i8 -1)),
    (add (atomic_load_16 addr:$dst), (i16 -1)),
    (add (atomic_load_32 addr:$dst), (i32 -1)),
    (add (atomic_load_64 addr:$dst), (i64 -1))>, Requires<[NotSlowIncDec]>;

/*
TODO: These don't work because the type inference of TableGen fails.
TODO: find a way to fix it.
defm RELEASE_NEG : RELEASE_UNOP<
    (ineg (atomic_load_8  addr:$dst)),
    (ineg (atomic_load_16 addr:$dst)),
    (ineg (atomic_load_32 addr:$dst)),
    (ineg (atomic_load_64 addr:$dst))>;
defm RELEASE_NOT : RELEASE_UNOP<
    (not (atomic_load_8  addr:$dst)),
    (not (atomic_load_16 addr:$dst)),
    (not (atomic_load_32 addr:$dst)),
    (not (atomic_load_64 addr:$dst))>;
*/

def RELEASE_MOV8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
                       "#RELEASE_MOV PSEUDO !",
                       [(atomic_store_8 addr:$dst, (i8 imm:$src))]>;
def RELEASE_MOV16mi : I<0, Pseudo, (outs), (ins i16mem:$dst, i16imm:$src),
                        "#RELEASE_MOV PSEUDO !",
                        [(atomic_store_16 addr:$dst, (i16 imm:$src))]>;
def RELEASE_MOV32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
                        "#RELEASE_MOV PSEUDO !",
                        [(atomic_store_32 addr:$dst, (i32 imm:$src))]>;
def RELEASE_MOV64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
                          "#RELEASE_MOV PSEUDO !",
                          [(atomic_store_64 addr:$dst, i64immSExt32:$src)]>;

def RELEASE_MOV8mr  : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_8  addr:$dst, GR8 :$src)]>;
def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_64 addr:$dst, GR64:$src)]>;

def ACQUIRE_MOV8rm  : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR8:$dst,  (atomic_load_8  addr:$src))]>;
def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR64:$dst, (atomic_load_64 addr:$src))]>;

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable when not in small
// code model mode, should use 'movabs'.  FIXME: This is really a hack, the
// 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates.  FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
      Requires<[NearData, IsStatic]>;

def : Pat<(i32 (X86RecoverFrameAlloc texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc texternalsym:$dst)), (MOV64ri texternalsym:$dst)>;

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
      Requires<[Not64BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
      Requires<[Not64BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
      Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
      Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
      Requires<[IsLP64]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
      Requires<[IsLP64]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  let Predicates = [HasCMov] in {
    def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
              (Inst16 GR16:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
              (Inst32 GR32:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
              (Inst64 GR64:$src2, addr:$src1)>;
  }
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8  GR8  :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

// Any instruction that defines a 32-bit result leaves the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. And x86's cmov doesn't do anything if the
// condition is false. But any other 32-bit operation will zero-extend
// up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies.  However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read.  To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  APInt KnownZero0, KnownOne0;
  CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
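
// For example, in (or (shl %x, 4), 7) the two operands have provably
// disjoint known bits (the shift clears the low four bits), so or_is_add
// matches and the node can be selected as an ADD, which is LEA-foldable; an
// OR of two arbitrary registers is not.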

// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
// Try this before selecting to an OR.
let AddedComplexity = 5, SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress
} // AddedComplexity, SchedRW

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
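
// For example, "addl $128, %eax" must use a full 32-bit immediate because
// +128 does not fit in a sign-extended imm8, while the equivalent
// "subl $-128, %eax" uses the short imm8 form, saving encoding bytes.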

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of a 8 bit constant, use that.

def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;

// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
            (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
                          sub_16bit)>,
      Requires<[Not64BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src,
                                                           sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src,
                                                         sub_8bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (i8
            (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
            (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
                          sub_16bit)>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8
            (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;

// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
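
// Illustrative only (an assumed typical selection, not mandated by the
// table): for
//   unsigned twice(unsigned x) { return x << 1; }
// the patterns above pick "addl %edi, %edi" over "shll $1, %edi"; the
// register-register add is never slower than the shift-by-one form and
// commonly runs on more execution ports.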

// Helper immediates that check whether a mask leaves the significant shift
// bits unchanged.
def immShift32 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 5;
}]>;
def immShift64 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 6;
}]>;
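
// Illustrative only: any mask whose low five bits are all ones qualifies as
// immShift32 -- 31 (0b11111), 63, 255, ... -- since ANDing the count with
// such a mask cannot change the five bits a 32-bit shift actually uses; the
// hardware already masks the count to 5 (or, for 64-bit shifts, 6) bits.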

// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (and CL, immShift64)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
defm : MaskedShiftAmountPats<rotl, "ROL">;
defm : MaskedShiftAmountPats<rotr, "ROR">;
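
// Illustrative only (standalone C showing the idiom these patterns fold):
//   unsigned rotl(unsigned x, unsigned n) {
//     return (x << (n & 31)) | (x >> ((32 - n) & 31));
//   }
// The explicit "& 31" matches (and CL, immShift32) and is dropped, so each
// shift or rotate selects to a single instruction taking its count in CL.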

// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
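
// Illustrative only: SETB_C32r materializes the carry flag as 0 or -1 with a
// "sbb %reg, %reg"-style instruction that already writes all 32 bits, so an
// anyext of the narrower forms needs no extra instruction -- the patterns
// simply re-select the wider setcc_carry.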

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
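
// Illustrative only: (X86sub_flag 0, x) is "0 - x with the flags observed",
// and NEG computes exactly that, setting the same flags as the two-operand
// SUB would, without having to materialize the zero in a register.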

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;

// Patterns for nodes that do not produce flags, for instructions that do.

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment/Decrement reg.
// Do not select INC/DEC on targets where they are slow.
let Predicates = [NotSlowIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
}
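
// Illustrative only: INC/DEC update all arithmetic flags except CF, which
// forces a partial-flag merge on some microarchitectures; the NotSlowIncDec
// predicate keeps such targets on ADD/SUB with an immediate of 1 instead.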

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
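
// Illustrative only: cttz_zero_undef is what e.g. __builtin_ctz(x) lowers
// to, since its result is undefined for x == 0. That matches BSF exactly --
// its destination is undefined when the source is zero -- so the raw
// instruction needs no guarding zero-check.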

// When HasMOVBE is enabled, it is possible for a non-legalized
// register-register 16-bit bswap to reach instruction selection. Map it to a
// ROL instruction.
let Predicates = [HasMOVBE] in {
 def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
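
// Illustrative only: byte-swapping a 16-bit value merely exchanges its two
// bytes, and a rotate by 8 performs that exchange (0xABCD -> 0xCDAB), so the
// register-register case needs neither MOVBE nor a 32-bit BSWAP.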