//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue(), SDLoc(N));
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue(), SDLoc(N));
}]>;
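
// For example, applied to the i64 immediate 0x00000000deadbeef, GetLo32XForm
// yields the i32 immediate 0xdeadbeef. The AND patterns near the end of this
// file use these transforms to shrink 64-bit masks to 32- or 8-bit immediates.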

//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction. This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKDOWN",
                           []>,
                          Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1),
          (ADJCALLSTACKDOWN32 i32imm:$amt1, 0)>, Requires<[NotLP64]>;

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKDOWN",
                           []>,
                          Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1),
          (ADJCALLSTACKDOWN64 i32imm:$amt1, 0)>, Requires<[IsLP64]>;

// x86-64 va_start lowering magic.
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets. These calls are needed to probe the stack when allocating more than
// 4k bytes in one go. Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to ordinary calls), like the stack pointer change.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                     "# dynamic stack allocation",
                     [(X86WinAlloca)]>;
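
// For illustration only (the exact sequence is target- and CRT-specific), on
// 32-bit Windows the expansion looks roughly like:
//     movl %size, %eax
//     calll __chkstk     ; probe the new stack area one 4K page at a time
// with the final stack-pointer adjustment done by the helper or the expansion.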

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;
}
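
// As a sketch of that expansion (see X86TargetLowering::EmitLoweredSegAlloca
// for the authoritative version): the requested size is compared against the
// stacklet limit kept in thread-local storage; on the fast path the stack
// pointer is simply bumped, otherwise the runtime helper
// __morestack_allocate_stack_space is called to obtain heap-backed space.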

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//===----------------------------------------------------------------------===//

let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                  "ret\t#eh_return, addr: $addr",
                  [(X86ehret GR32:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1, isCodeGenOnly = 1,
    isReturn = 1 in {
def CATCHRET : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                 "ret{l}\t# CATCHRET",
                 [(X86catchret GR32:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
def CATCHRET64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                   "ret{q}\t# CATCHRET",
                   [(X86catchret GR64:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
def CLEANUPRET : I<0xC3, RawFrm, (outs), (ins),
                   "ret{l}\t# CLEANUPRET",
                   [(X86cleanupret)], IIC_RET>, Sched<[WriteJumpLd]>,
                 Requires<[Not64BitMode]>;
def CLEANUPRET64 : I<0xC3, RawFrm, (outs), (ins),
                     "ret{q}\t# CLEANUPRET",
                     [(X86cleanupret)], IIC_RET>, Sched<[WriteJumpLd]>,
                   Requires<[In64BitMode]>;
}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                           "#EH_SJLJ_SETJMP32",
                           [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                         Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64 : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                           "#EH_SJLJ_SETJMP64",
                           [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                         Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}
} // SchedRW

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//
let isPseudo = 1 in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                      "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                         "#SEH_StackAlloc $size", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                       "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                        "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                          "#SEH_EndPrologue", []>;
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                       "#SEH_Epilogue", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower. We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1 in {
def MORESTACK_RET : I<0, Pseudo, (outs), (ins),
                      "", []>;

// This instruction is lowered to a RET followed by a MOV. The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
                                  "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1 in
def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                [(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;
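
// The xor form is preferred because "xorl %eax, %eax" is a 2-byte encoding
// (vs. 5 bytes for "movl $0, %eax") and is recognized by hardware as a zeroing
// idiom that breaks any dependency on the old register value.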

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
  let AddedComplexity = 20;
}

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1, hasSideEffects = 0 in
def MOV32ri64 : Ii32<0xb8, AddRegFrm, (outs GR32:$dst), (ins i64i32imm:$src),
                     "", [], IIC_ALU_NONMEM>, Sched<[WriteALU]>;

// This 64-bit pseudo-move can be used for both a 64-bit constant that is
// actually the zero-extension of a 32-bit constant, and for labels in the
// x86-64 small code model.
def mov64imm32 : ComplexPattern<i64, 1, "SelectMOV64Imm32", [imm, X86Wrapper]>;

let AddedComplexity = 1 in
def : Pat<(i64 mov64imm32:$src),
          (SUBREG_TO_REG (i64 0), (MOV32ri64 mov64imm32:$src), sub_32bit)>;
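
// For example, "movl $0x12345678, %eax" implicitly zeroes the upper half of
// %rax, so such constants need only the 5-byte MOV32ri encoding instead of the
// 10-byte movabsq.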

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C8r : I<0, Pseudo, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "",
                  [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                  [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "",
                  [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
} // Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW
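
// For example, "sbb %eax, %eax" computes %eax - %eax - CF, leaving 0 when the
// carry flag is clear and all-ones (-1) when it is set, which materializes CF
// as a full-width mask.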

def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" on the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type. When
// this happens, it is great. However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0)
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;
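
// For example, "adc $0, %eax" adds just the carry flag into %eax, so
// (add x, (setb)) folds into one instruction with no setcc/movzx pair. In the
// SETCC_CARRY forms the value is 0 or -1 (i.e. -CF), and subtracting -CF is
// the same as adding CF, hence the adc in the last three patterns.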

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                     [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                     Requires<[Not64BitMode]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                     [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                     Requires<[Not64BitMode]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                     [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                     Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                     [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                     Requires<[In64BitMode]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                     [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                     Requires<[In64BitMode]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                     [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                     Requires<[In64BitMode]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                      [(X86rep_movs i64)], IIC_REP_MOVS>, REP,
                      Requires<[In64BitMode]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                       [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                       Requires<[Not64BitMode]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                       [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                       Requires<[Not64BitMode]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                       [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                       Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                       [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                       Requires<[In64BitMode]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                       [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                       Requires<[In64BitMode]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                       [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                       Requires<[In64BitMode]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                        [(X86rep_stos i64)], IIC_REP_STOS>, REP,
                        Requires<[In64BitMode]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                 Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                        "# TLS_base_addr32",
                        [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                      Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                        "# TLS_base_addr64",
                        [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                      Requires<[In64BitMode]>;
}

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax. %ecx is trashed during the function
// call. All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
    Uses = [ESP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, on return
// the address of the variable is in %rax. All other registers are preserved.
let Defs = [RAX, EFLAGS],
    Uses = [RSP, RDI],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// CMOV* - Used to implement the SELECT DAG operation. Expanded after
// instruction selection into a branch sequence.
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, imm:$cond,
                                                EFLAGS)))]>;
}

let usesCustomInserter = 1, Uses = [EFLAGS] in {
  // X86 doesn't have 8-bit conditional moves. Use a customInserter to
  // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
  // however that requires promoting the operands, and can induce additional
  // i8 register pressure.
  defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

  let Predicates = [NoCMov] in {
    defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
    defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
  } // Predicates = [NoCMov]

  // fcmov doesn't handle all possible EFLAGS, provide a fallback if there is
  // no support for them.
  let Predicates = [FPStackf32] in
    defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;

  let Predicates = [FPStackf64] in
    defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;

  defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;

  defm _FR32   : CMOVrr_PSEUDO<FR32, f32>;
  defm _FR64   : CMOVrr_PSEUDO<FR64, f64>;
  defm _V4F32  : CMOVrr_PSEUDO<VR128, v4f32>;
  defm _V2F64  : CMOVrr_PSEUDO<VR128, v2f64>;
  defm _V2I64  : CMOVrr_PSEUDO<VR128, v2i64>;
  defm _V8F32  : CMOVrr_PSEUDO<VR256, v8f32>;
  defm _V4F64  : CMOVrr_PSEUDO<VR256, v4f64>;
  defm _V4I64  : CMOVrr_PSEUDO<VR256, v4i64>;
  defm _V8I64  : CMOVrr_PSEUDO<VR512, v8i64>;
  defm _V8F64  : CMOVrr_PSEUDO<VR512, v8f64>;
  defm _V16F32 : CMOVrr_PSEUDO<VR512, v16f32>;
  defm _V8I1   : CMOVrr_PSEUDO<VK8,  v8i1>;
  defm _V16I1  : CMOVrr_PSEUDO<VK16, v16i1>;
  defm _V32I1  : CMOVrr_PSEUDO<VK32, v32i1>;
  defm _V64I1  : CMOVrr_PSEUDO<VK64, v64i1>;
} // usesCustomInserter = 1, Uses = [EFLAGS]

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                     "or{l}\t{$zero, $dst|$dst, $zero}",
                     [], IIC_ALU_MEM>, Requires<[Not64BitMode]>, LOCK,
                     Sched<[WriteALULd, WriteRMW]>;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                 !strconcat(mnemonic, "{b}\t",
                            "{$src2, $dst|$dst, $src2}"),
                 [], IIC_ALU_NONMEM>, LOCK;
def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  !strconcat(mnemonic, "{w}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, OpSize16, LOCK;
def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  !strconcat(mnemonic, "{l}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [], IIC_ALU_NONMEM>, OpSize32, LOCK;
def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   !strconcat(mnemonic, "{q}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_NONMEM>, LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                   ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                   !strconcat(mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [], IIC_ALU_MEM>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat(mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [], IIC_ALU_MEM>, LOCK;

def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize16, LOCK;
def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [], IIC_ALU_MEM>, OpSize32, LOCK;
def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                      !strconcat(mnemonic, "{q}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [], IIC_ALU_MEM>, LOCK;
}
}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;

// Optimized codegen when the non-memory output is not used.
multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8m  : I<Opc8, Form, (outs), (ins i8mem :$dst),
                 !strconcat(mnemonic, "{b}\t$dst"),
                 [], IIC_UNARY_MEM>, LOCK;
def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
                 !strconcat(mnemonic, "{w}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize16, LOCK;
def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
                 !strconcat(mnemonic, "{l}\t$dst"),
                 [], IIC_UNARY_MEM>, OpSize32, LOCK;
def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
                  !strconcat(mnemonic, "{q}\t$dst"),
                  [], IIC_UNARY_MEM>, LOCK;
}
}

defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;
defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;

// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
                         SDPatternOperator frag, X86MemOperand x86memop,
                         InstrItinClass itin> {
let isCodeGenOnly = 1 in {
  def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),
               !strconcat(mnemonic, "\t$ptr"),
               [(frag addr:$ptr)], itin>, TB, LOCK;
}
}

multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag,
                          InstrItinClass itin8, InstrItinClass itin> {
let isCodeGenOnly = 1, SchedRW = [WriteALULd, WriteRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)], itin8>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)], itin>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
                                X86cas8, i64mem,
                                IIC_CMPX_LOCK_8B>;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
                                 X86cas16, i128mem,
                                 IIC_CMPX_LOCK_16B>, REX_W;
}

defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",
                               X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag,
                             InstrItinClass itin8, InstrItinClass itin> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALULd, WriteRMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],
                    itin8>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR16:$dst,
                          (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
                    itin>, OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR32:$dst,
                          (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
                    itin>, OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set GR64:$dst,
                           (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],
                     itin>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",
                               IIC_XADD_LOCK_MEM8, IIC_XADD_LOCK_MEM>,
             TB, LOCK;
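
// For example, "lock xadd %eax, (mem)" atomically writes mem+%eax back to
// memory and leaves the old memory value in %eax, which is exactly the
// fetch-and-add semantics of atomic_load_add.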

/* The following multiclass tries to make sure that in code like
 *  x.store (immediate op x.load(acquire), release)
 * and
 *  x.store (register op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV instructions
 * extremely late to prevent them from being accidentally reordered in the
 * backend (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions).
 */
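
// For example, with std::atomic<int> x, a release store of
// "x.load(std::memory_order_acquire) + 5" should select to a single
// "addl $5, (mem)" instead of a load/add/store through a scratch register.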
multiclass RELEASE_BINOP_MI<SDNode op> {
    def NAME#8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
        "#BINOP "#NAME#"8mi PSEUDO!",
        [(atomic_store_8 addr:$dst, (op
            (atomic_load_8 addr:$dst), (i8 imm:$src)))]>;
    def NAME#8mr : I<0, Pseudo, (outs), (ins i8mem:$dst, GR8:$src),
        "#BINOP "#NAME#"8mr PSEUDO!",
        [(atomic_store_8 addr:$dst, (op
            (atomic_load_8 addr:$dst), GR8:$src))]>;
    // NAME#16 is not generated as 16-bit arithmetic instructions are
    // considered costly and avoided as far as possible by this backend anyway.
    def NAME#32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
        "#BINOP "#NAME#"32mi PSEUDO!",
        [(atomic_store_32 addr:$dst, (op
            (atomic_load_32 addr:$dst), (i32 imm:$src)))]>;
    def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
        "#BINOP "#NAME#"32mr PSEUDO!",
        [(atomic_store_32 addr:$dst, (op
            (atomic_load_32 addr:$dst), GR32:$src))]>;
    def NAME#64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
        "#BINOP "#NAME#"64mi32 PSEUDO!",
        [(atomic_store_64 addr:$dst, (op
            (atomic_load_64 addr:$dst), (i64immSExt32:$src)))]>;
    def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
        "#BINOP "#NAME#"64mr PSEUDO!",
        [(atomic_store_64 addr:$dst, (op
            (atomic_load_64 addr:$dst), GR64:$src))]>;
}
defm RELEASE_ADD : RELEASE_BINOP_MI<add>;
defm RELEASE_AND : RELEASE_BINOP_MI<and>;
defm RELEASE_OR  : RELEASE_BINOP_MI<or>;
defm RELEASE_XOR : RELEASE_BINOP_MI<xor>;
// Note: we don't deal with sub, because subtractions of constants are
// optimized into additions before this code can run.

// Same as above, but for floating-point.
// FIXME: imm version.
// FIXME: Version that doesn't clobber $src, using AVX's VADDSS.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
let usesCustomInserter = 1 in {
multiclass RELEASE_FP_BINOP_MI<SDNode op> {
    def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, FR32:$src),
        "#BINOP "#NAME#"32mr PSEUDO!",
        [(atomic_store_32 addr:$dst,
           (i32 (bitconvert (op
              (f32 (bitconvert (i32 (atomic_load_32 addr:$dst)))),
              FR32:$src))))]>, Requires<[HasSSE1]>;
    def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, FR64:$src),
        "#BINOP "#NAME#"64mr PSEUDO!",
        [(atomic_store_64 addr:$dst,
           (i64 (bitconvert (op
              (f64 (bitconvert (i64 (atomic_load_64 addr:$dst)))),
              FR64:$src))))]>, Requires<[HasSSE2]>;
}
defm RELEASE_FADD : RELEASE_FP_BINOP_MI<fadd>;
// FIXME: Add fsub, fmul, fdiv, ...
}

multiclass RELEASE_UNOP<dag dag8, dag dag16, dag dag32, dag dag64> {
    def NAME#8m  : I<0, Pseudo, (outs), (ins i8mem:$dst),
        "#UNOP "#NAME#"8m PSEUDO!",
        [(atomic_store_8 addr:$dst, dag8)]>;
    def NAME#16m : I<0, Pseudo, (outs), (ins i16mem:$dst),
        "#UNOP "#NAME#"16m PSEUDO!",
        [(atomic_store_16 addr:$dst, dag16)]>;
    def NAME#32m : I<0, Pseudo, (outs), (ins i32mem:$dst),
        "#UNOP "#NAME#"32m PSEUDO!",
        [(atomic_store_32 addr:$dst, dag32)]>;
    def NAME#64m : I<0, Pseudo, (outs), (ins i64mem:$dst),
        "#UNOP "#NAME#"64m PSEUDO!",
        [(atomic_store_64 addr:$dst, dag64)]>;
}

defm RELEASE_INC : RELEASE_UNOP<
    (add (atomic_load_8  addr:$dst), (i8 1)),
    (add (atomic_load_16 addr:$dst), (i16 1)),
    (add (atomic_load_32 addr:$dst), (i32 1)),
    (add (atomic_load_64 addr:$dst), (i64 1))>, Requires<[NotSlowIncDec]>;
defm RELEASE_DEC : RELEASE_UNOP<
    (add (atomic_load_8  addr:$dst), (i8 -1)),
    (add (atomic_load_16 addr:$dst), (i16 -1)),
    (add (atomic_load_32 addr:$dst), (i32 -1)),
    (add (atomic_load_64 addr:$dst), (i64 -1))>, Requires<[NotSlowIncDec]>;
/*
TODO: These don't work because the type inference of TableGen fails.
TODO: find a way to fix it.
defm RELEASE_NEG : RELEASE_UNOP<
    (ineg (atomic_load_8  addr:$dst)),
    (ineg (atomic_load_16 addr:$dst)),
    (ineg (atomic_load_32 addr:$dst)),
    (ineg (atomic_load_64 addr:$dst))>;
defm RELEASE_NOT : RELEASE_UNOP<
    (not (atomic_load_8  addr:$dst)),
    (not (atomic_load_16 addr:$dst)),
    (not (atomic_load_32 addr:$dst)),
    (not (atomic_load_64 addr:$dst))>;
*/

def RELEASE_MOV8mi  : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
                        "#RELEASE_MOV8mi PSEUDO!",
                        [(atomic_store_8 addr:$dst, (i8 imm:$src))]>;
def RELEASE_MOV16mi : I<0, Pseudo, (outs), (ins i16mem:$dst, i16imm:$src),
                        "#RELEASE_MOV16mi PSEUDO!",
                        [(atomic_store_16 addr:$dst, (i16 imm:$src))]>;
def RELEASE_MOV32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
                        "#RELEASE_MOV32mi PSEUDO!",
                        [(atomic_store_32 addr:$dst, (i32 imm:$src))]>;
def RELEASE_MOV64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
                          "#RELEASE_MOV64mi32 PSEUDO!",
                          [(atomic_store_64 addr:$dst, i64immSExt32:$src)]>;

def RELEASE_MOV8mr  : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
                        "#RELEASE_MOV8mr PSEUDO!",
                        [(atomic_store_8 addr:$dst, GR8 :$src)]>;
def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
                        "#RELEASE_MOV16mr PSEUDO!",
                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                        "#RELEASE_MOV32mr PSEUDO!",
                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                        "#RELEASE_MOV64mr PSEUDO!",
                        [(atomic_store_64 addr:$dst, GR64:$src)]>;

def ACQUIRE_MOV8rm  : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
                        "#ACQUIRE_MOV8rm PSEUDO!",
                        [(set GR8:$dst, (atomic_load_8 addr:$src))]>;
def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
                        "#ACQUIRE_MOV16rm PSEUDO!",
                        [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
                        "#ACQUIRE_MOV32rm PSEUDO!",
                        [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
                        "#ACQUIRE_MOV64rm PSEUDO!",
                        [(set GR64:$dst, (atomic_load_64 addr:$src))]>;

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper mcsym:$src2)),
          (ADD32ri GR32:$src1, mcsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, mcsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable when not in small
// code model mode, should use 'movabs'. FIXME: This is really a hack, the
// 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
          (MOV64ri tconstpool :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
          (MOV64ri tjumptable :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri mcsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
          (MOV64ri32 tconstpool :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
          (MOV64ri32 tjumptable :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates. FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, mcsym:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
      Requires<[NearData, IsStatic]>;

def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;

// Calls

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
      Requires<[Not64BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
      Requires<[Not64BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
      Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
      Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
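
// For example, "testl %eax, %eax" encodes in 2 bytes while "cmpl $0, %eax"
// needs 3; both leave ZF/SF describing a comparison of the value against zero.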

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  let Predicates = [HasCMov] in {
    def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
              (Inst16 GR16:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
              (Inst32 GR32:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
              (Inst64 GR64:$src2, addr:$src1)>;
  }
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (AND8ri (MOV8rm addr:$src), (i8 1))>;
def : Pat<(zextloadi16i1 addr:$src), (AND16ri8 (MOVZX16rm8 addr:$src), (i16 1))>;
def : Pat<(zextloadi32i1 addr:$src), (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1))>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0),
           (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1)), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                       (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8  GR8  :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

// Any instruction that defines a 32-bit result zeroes the high half of the
// 64-bit register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. And x86's cmov doesn't do anything if the
// condition is false. But any other 32-bit operation will zero-extend
// up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read. To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  APInt KnownZero0, KnownOne0;
  CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
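
// For example, (or (shl x, 8), 0xff) qualifies: the shifted operand has its
// low 8 bits known zero and the constant has all other bits zero, so the OR
// behaves exactly like an ADD and may later be folded into an LEA.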

// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
// Try this before selecting to OR.
let AddedComplexity = 5, SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "", // orw/addw REG, REG
                   [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "", // orl/addl REG, REG
                   [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "", // orq/addq REG, REG
                   [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress
} // AddedComplexity, SchedRW

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
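
// For example, "addl $128, %eax" must use the full imm32 form because 128 is
// not representable as a sign-extended imm8, while the equivalent
// "subl $-128, %eax" fits the imm8 form and is several bytes shorter.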

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
} // AddedComplexity = 1

// AddedComplexity is needed due to the increased complexity on the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in the DAGIsel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
            (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
            sub_16bit)>,
      Requires<[Not64BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
let AddedComplexity = 1 in // Give priority over i64immZExt32.
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src,
                                                           sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (i8
            (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
      Requires<[In64BitMode]>;
} // AddedComplexity = 1

// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
            (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
            sub_16bit)>,
      Requires<[Not64BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8
            (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
      Requires<[In64BitMode]>;

// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
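
// Note: the i16 results above are produced with the 32-bit MOVSX32/MOVZX32
// forms and then truncated with EXTRACT_SUBREG. The 16-bit forms would need
// an operand-size prefix and would write only the low 16 bits of the
// destination, leaving a partial-register dependency.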

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
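
// Note: the COPY_TO_REGCLASS to GR32_ABCD/GR16_ABCD in the Not64BitMode
// patterns above is required because outside 64-bit mode only AL/BL/CL/DL
// exist as 8-bit subregisters; in 64-bit mode every GR16/GR32 register has
// a low-8 subregister, so the plain EXTRACT_SUBREG forms suffice.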

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[Not64BitMode]>;
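
// Illustrative example: "(x >> 8) & 0xff" on an i32 value in %eax becomes a
// single "movzbl %ah, %eax" through the patterns above, replacing a
// shift-and-mask sequence with one h-register read.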

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if
// the value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX
// prefix from being allocated in the same instruction as the h register, as
// there's currently no way to describe this requirement to the register
// allocator.
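
// Encoding background: an instruction cannot combine a REX prefix with
// AH/BH/CH/DH; "movzbl %ah, %eax" is encodable but "movzbl %ah, %r8d" is
// not, which is why the _NOREX instruction forms below constrain what the
// register allocator may choose.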

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
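
// Illustrative example: storing "(char)(x >> 8)" selects
// "movb %ah, (%rdi)" via MOV8mr_NOREX, whose operands are restricted to
// registers encodable without a REX prefix so that %ah remains legal as
// the source.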

// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
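
// Illustrative note: "x << 1" therefore compiles to e.g. "addl %eax, %eax",
// which is the same length as "shll $1, %eax" but can issue on more ALU
// ports on typical x86 microarchitectures.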

// Helper imms that check if a mask doesn't change significant shift bits.
def immShift32 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 5;
}]>;
def immShift64 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 6;
}]>;
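
// For instance, 31 (0b11111) has five trailing ones and 63 (0b111111) has
// six, so immShift32 accepts any mask that preserves all five significant
// bits of a 32-bit shift amount, and immShift64 any mask that preserves all
// six bits of a 64-bit one.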

// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (and CL, immShift64)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
defm : MaskedShiftAmountPats<rotl, "ROL">;
defm : MaskedShiftAmountPats<rotr, "ROR">;
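
// Illustrative example: for a uint32_t, "x << (n & 31)" selects
// "shll %cl, %eax" directly; the hardware already masks the CL shift count
// to 5 bits (6 bits for 64-bit operands), so the explicit AND is redundant.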

// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
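
// Background note: SETB_C16r/SETB_C32r are pseudos that expand to
// "sbb %reg, %reg", materializing 0 or -1 from the carry flag, so widening
// an anyext of the carry simply selects the wider SBB form.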

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
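
// The arithmetic instructions themselves are defined against the
// flag-producing X86ISD nodes (X86add_flag and friends); the patterns below
// map the plain target-independent nodes onto the same instructions.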

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri GR8:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
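
// Encoding note: the ri8 forms use the sign-extended 8-bit immediate
// encoding (opcode 0x83), saving three bytes over the 32-bit immediate form
// (one byte over the 16-bit form) whenever the constant fits in a signed
// byte.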

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
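
// Note: there are no GR8 entries here; x86 has no two- or three-operand
// 8-bit multiply, so i8 multiplies are handled by the AL-implicit
// MUL8/IMUL8 forms instead.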

// Patterns for nodes that do not produce flags, for instructions that do.

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment/Decrement reg.
// Do not make INC/DEC if it is slow.
let Predicates = [NotSlowIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
}
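
// Background note: INC/DEC update all arithmetic flags except CF. On
// microarchitectures that do not rename CF separately from the other flags,
// this partial EFLAGS update can stall, so such targets set SlowIncDec and
// use ADD/SUB with +/-1 instead.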

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8:$src1, imm:$src2),  (OR8ri GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
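
// Background note: BSF leaves its destination undefined when the source is
// zero, so only cttz_zero_undef (whose result is already undefined for a
// zero input) may map to it; a plain cttz needs an explicit zero check or
// TZCNT.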

// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
  def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
}
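
// Rotating a 16-bit register by 8 swaps its two bytes, which is exactly a
// 16-bit byte swap; MOVBE itself has only load/store forms, hence the ROL
// for the register-register case.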