//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;
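// These transforms feed the 64-bit AND patterns later in this file: e.g.
// GetLo8XForm lets an i64 immediate such as 0x00000000000000f0 be re-emitted
// as the short immediate 0xf0 of a 32-bit AND.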
//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;
// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In32BitMode]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In32BitMode]>;
}
// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In64BitMode]>;
}
// x86-64 va_start lowering magic.
let usesCustomInserter = 1 in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;
// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;
}
// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more
// than 4k bytes in one go.  Touching the stack at 4K increments is necessary
// to ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to an ordinary call) such as the stack pointer change.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                     "# dynamic stack allocation",
                     [(X86WinAlloca)]>;
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>;
}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)]>;
}
//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: Set encoding to pseudo.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in {
def MOV8r0   : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
                 [(set GR8:$dst, 0)]>;

// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller
// encoding and avoids a partial-register update sometimes, but doing so
// at isel time interferes with rematerialization in the current register
// allocator. For now, this is rewritten when the instruction is lowered
// to an MCInst.
def MOV16r0  : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
                 "",
                 [(set GR16:$dst, 0)]>, OpSize;

// FIXME: Set encoding to pseudo.
def MOV32r0  : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)]>;
}
// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
// smaller encoding, but doing so at isel time interferes with rematerialization
// in the current register allocator. For now, this is rewritten when the
// instruction is lowered to an MCInst.
// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS], isCodeGenOnly = 1,
    AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
                [(set GR64:$dst, 0)]>;
// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "", [(set GR64:$dst, i64immZExt32:$src)]>;
// Use sbb to materialize carry bit.
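// The expansion is "sbb reg, reg": subtracting a register from itself with
// borrow leaves 0 if CF was clear and all-ones (-1) if CF was set.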
let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
// X86CodeEmitter.
def SETB_C8r  : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
                  [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
                  [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>,
                OpSize;
def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
                  [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
                   [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
} // isCodeGenOnly

def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;
//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                  [(X86rep_movs i8)]>, REP;
def REP_MOVSW : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                  [(X86rep_movs i16)]>, REP, OpSize;
def REP_MOVSD : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                  [(X86rep_movs i32)]>, REP;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;
// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], Uses = [AL,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSB : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                  [(X86rep_stos i8)]>, REP;
let Defs = [ECX,EDI], Uses = [AX,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSW : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                  [(X86rep_stos i16)]>, REP, OpSize;
let Defs = [ECX,EDI], Uses = [EAX,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSD : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                  [(X86rep_stos i32)]>, REP;

let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;
//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "leal\t$sym, %eax; "
                   "call\t___tls_get_addr@PLT",
                   [(X86tlsaddr tls32addr:$sym)]>,
                   Requires<[In32BitMode]>;
// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   ".byte\t0x66; "
                   "leaq\t$sym(%rip), %rdi; "
                   ".word\t0x6666; "
                   "rex64; "
                   "call\t__tls_get_addr@PLT",
                   [(X86tlsaddr tls64addr:$sym)]>,
                   Requires<[In64BitMode]>;
// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX],
    Uses = [ESP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                   Requires<[In32BitMode]>;
// For x86_64, the address of the thunk is passed in %rdi, on return
// the address of the variable is in %rax.  All other registers are preserved.
let Defs = [RAX, EFLAGS],
    Uses = [RSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                   Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

let Constraints = "$src1 = $dst" in {

let Uses = [EFLAGS] in {

// X86 doesn't have 8-bit conditional moves. Use a customInserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// however that requires promoting the operands, and can induce additional
// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
// clobber EFLAGS, because if one of the operands is zero, the expansion
// could involve an xor.
let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;

let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                    "#CMOV_GR32* PSEUDO!",
                    [(set GR32:$dst,
                      (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                    "#CMOV_GR16* PSEUDO!",
                    [(set GR16:$dst,
                      (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
                    (outs RFP32:$dst),
                    (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                    "#CMOV_RFP32 PSEUDO!",
                    [(set RFP32:$dst,
                      (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                                                  EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
                    (outs RFP64:$dst),
                    (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                    "#CMOV_RFP64 PSEUDO!",
                    [(set RFP64:$dst,
                      (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                                                  EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                    (outs RFP80:$dst),
                    (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                    "#CMOV_RFP80 PSEUDO!",
                    [(set RFP80:$dst,
                      (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                                                  EFLAGS))]>;
} // Predicates = [NoCMov]
} // UsesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
} // Uses = [EFLAGS]

} // Constraints = "$src1 = $dst" in
//===----------------------------------------------------------------------===//
// Atomic Instruction Pseudo Instructions
//===----------------------------------------------------------------------===//

// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
                  usesCustomInserter = 1 in {

def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMAND8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMOR8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMXOR8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMNAND8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;
def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMAND16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMOR16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMXOR16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMNAND16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
               "#ATOMMIN16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMMAX16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMUMIN16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMUMAX16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;
def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMAND32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMOR32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMXOR32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMNAND32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
               "#ATOMMIN32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMMAX32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMUMIN32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMUMAX32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;
def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMXOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMNAND64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMUMIN64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMUMAX64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}
let Constraints = "$val1 = $dst1, $val2 = $dst2",
    Defs = [EFLAGS, EAX, EBX, ECX, EDX],
    Uses = [EAX, EBX, ECX, EDX],
    mayLoad = 1, mayStore = 1,
    usesCustomInserter = 1 in {
def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMAND6432 PSEUDO!", []>;
def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMOR6432 PSEUDO!", []>;
def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMXOR6432 PSEUDO!", []>;
def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMNAND6432 PSEUDO!", []>;
def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMADD6432 PSEUDO!", []>;
def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMSUB6432 PSEUDO!", []>;
def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMSWAP6432 PSEUDO!", []>;
}
//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "lock\n\t"
                      "or{l}\t{$zero, $dst|$dst, $zero}",
                      []>, Requires<[In32BitMode]>, LOCK;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Requires<[HasSSE2]>;
// TODO: Get this to fold the constant into the instruction.
let hasSideEffects = 1, Defs = [ESP] in
def Int_MemBarrierNoSSE64 : RI<0x09, MRM1r, (outs), (ins GR64:$zero),
                               "lock\n\t"
                               "or{q}\t{$zero, (%rsp)|(%rsp), $zero}",
                               [(X86MemBarrierNoSSE GR64:$zero)]>,
                               Requires<[In64BitMode]>, LOCK;
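// A locked read-modify-write of the stack top acts as a full memory barrier
// on x86, so "lock orq $0, (%rsp)" can stand in for mfence when SSE2 is
// unavailable.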
// Optimized codegen when the non-memory output is not used.
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1 in {

def LOCK_ADD8mr  : I<0x00, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                    "lock\n\t"
                    "add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                    "lock\n\t"
                    "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_ADD32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                    "lock\n\t"
                    "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_ADD8mi   : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
                        "lock\n\t"
                        "add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD16mi  : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
                         "lock\n\t"
                         "add{w}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD32mi  : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
                         "lock\n\t"
                         "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi32 : RIi32<0x81, MRM0m, (outs),
                           (ins i64mem:$dst, i64i32imm :$src2),
                           "lock\n\t"
                           "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_ADD16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
                        "lock\n\t"
                        "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
                        "lock\n\t"
                        "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi8 : RIi8<0x83, MRM0m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_SUB8mr  : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
                    "lock\n\t"
                    "sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                    "lock\n\t"
                    "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                    "lock\n\t"
                    "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_SUB8mi   : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
                        "lock\n\t"
                        "sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB16mi  : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
                         "lock\n\t"
                         "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mi  : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
                         "lock\n\t"
                         "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi32 : RIi32<0x81, MRM5m, (outs),
                           (ins i64mem:$dst, i64i32imm:$src2),
                           "lock\n\t"
                           "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
                        "lock\n\t"
                        "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
                        "lock\n\t"
                        "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi8 : RIi8<0x83, MRM5m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "inc{b}\t$dst", []>, LOCK;
def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "inc{w}\t$dst", []>, OpSize, LOCK;
def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "inc{l}\t$dst", []>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "inc{q}\t$dst", []>, LOCK;

def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "dec{b}\t$dst", []>, LOCK;
def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "dec{w}\t$dst", []>, OpSize, LOCK;
def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "dec{l}\t$dst", []>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "dec{q}\t$dst", []>, LOCK;
}
// Atomic compare and swap.
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in {
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
                   "lock\n\t"
                   "cmpxchg8b\t$ptr",
                   [(X86cas8 addr:$ptr)]>, TB, LOCK;
}

let Defs = [AL, EFLAGS], Uses = [AL] in {
def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
                  "lock\n\t"
                  "cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
                  [(X86cas addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
}

let Defs = [AX, EFLAGS], Uses = [AX] in {
def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
                   "lock\n\t"
                   "cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR16:$swap, 2)]>, TB, OpSize, LOCK;
}

let Defs = [EAX, EFLAGS], Uses = [EAX] in {
def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
                   "lock\n\t"
                   "cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR32:$swap, 4)]>, TB, LOCK;
}

let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchgq\t$swap,$ptr",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
// Atomic exchange and add
let Constraints = "$val = $dst", Defs = [EFLAGS] in {
def LXADD8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
                "lock\n\t"
                "xadd{b}\t{$val, $ptr|$ptr, $val}",
                [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
                TB, LOCK;
def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr),
                "lock\n\t"
                "xadd{w}\t{$val, $ptr|$ptr, $val}",
                [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
                TB, OpSize, LOCK;
def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr),
                "lock\n\t"
                "xadd{l}\t{$val, $ptr|$ptr, $val}",
                [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
                TB, LOCK;
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
                 "lock\n\t"
                 "xadd{q}\t{$val, $ptr|$ptr, $val}",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;
}
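// "lock xadd" atomically exchanges the register with the memory operand and
// then adds them, so after the instruction the register holds the value the
// memory word had before the update; that old value is what the patterns
// above bind to $dst.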
//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions.
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                                  EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                                  EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                          EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                          EFLAGS)))]>;
}
//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;
// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable, when not in small
// code model, should use 'movabs'.  FIXME: This is really a hack, the 'movabs'
// predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;
// In static codegen with small code model, we can get the address of a label
// into a register with 'movl'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;
// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;
// In the small code model with -static, it is safe to store global addresses
// directly as immediates.  FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
      Requires<[NearData, IsStatic]>;
// TLS addressing needs a few special-case patterns; each one below notes the
// instruction it corresponds to.
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
// This corresponds to mov foo@tpoff(%rbx), %eax
def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
          (MOV64rm tglobaltlsaddr :$dst)>;
// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;

def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
          (TCRETURNri GR32_TC:$dst, imm:$off)>,
      Requires<[In32BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
      Requires<[In32BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
      Requires<[In32BitMode]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
      Requires<[In32BitMode]>;

def : Pat<(X86tcret GR64_TC:$dst, imm:$off),
          (TCRETURNri64 GR64_TC:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
      Requires<[In64BitMode]>;
// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;
// X86 specific add which produces a flag.
def : Pat<(addc GR32:$src1, GR32:$src2),
          (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(addc GR32:$src1, (load addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(addc GR32:$src1, imm:$src2),
          (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(addc GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, imm:$src2)>;
def : Pat<(subc GR32:$src1, GR32:$src2),
          (SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(subc GR32:$src1, (load addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(subc GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(subc GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
// TEST R,R is smaller than CMP R,0
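// e.g. "testl %eax, %eax" is a 2-byte encoding, while "cmpl $0, %eax" needs
// at least one immediate byte on top of that; both set ZF/SF identically for
// a compare against zero.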
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
            (Inst16 GR16:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
            (Inst32 GR32:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
            (Inst64 GR64:$src2, addr:$src1)>;
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;
// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.
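// e.g. "movzbl (%rdi), %eax" defines all 64 bits of %rax, whereas a plain
// "movb (%rdi), %al" merges into the register's old value and so creates a
// false dependence on it.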
def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         sub_32bit)>;
// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8 GR8 :$src)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8  :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
// Any instruction that defines a 32-bit result zeroes the high half of the
// 64-bit register. The exceptions: Truncate can be lowered to EXTRACT_SUBREG,
// CopyFromReg may be copying from a truncate, and x86's cmov doesn't do
// anything if the condition is false. But any other 32-bit operation will
// zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;
// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
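// e.g. zero-extending the result of a 32-bit add needs no explicit
// "movl %eax, %eax": the add already cleared bits 63:32, so the zext is just
// a subregister re-labeling.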
//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies.  However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read.  To do this, we select
// into "disjoint bits" pseudo ops.
// Treat an 'or' node as an 'add' node if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero0, KnownOne0;
  CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
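// For example, (or (shl x, 8), (and y, 255)) qualifies: the two operands have
// no possibly-set bit positions in common, so no addition could carry, and
// or and add therefore compute the same value. The add form can then fold
// into an LEA.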
// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before the selecting to OR

let isCommutable = 1, isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
} // isCommutable, isConvertibleToThreeAddress
} // AddedComplexity
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
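// e.g. "addl $128, %eax" needs the 32-bit-immediate form (opcode 0x81), but
// "subl $-128, %eax" encodes -128 in the sign-extended imm8 form (opcode
// 0x83), saving three bytes of immediate.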
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.

def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
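// e.g. "andq $0x7fffffff, %rax" needs a REX.W prefix, while
// "andl $0x7fffffff, %eax" computes the same value because the 32-bit and
// already zeroes bits 63:32.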
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src1,
                                                             GR16_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>,
      Requires<[In64BitMode]>;
// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>,
      Requires<[In64BitMode]>;
// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX
// prefix from being allocated in the same instruction as the h register, as
// there's currently no way to describe this requirement to the register
// allocator.
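// An h-register extract like "movzbl %ah, %eax" can only name AH/BH/CH/DH,
// and an instruction carrying a REX prefix cannot encode those registers at
// all; hence the copies into the *_ABCD classes and the _NOREX instruction
// forms below.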
// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
// (shl x, 1) ==> (add x, x)
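// e.g. "shll $1, %eax" is selected as "addl %eax, %eax"; the add form is
// commutable and can later be converted to a three-address LEA when the
// two-address constraint would otherwise force a copy.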
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
// (shl x (and y, 31)) ==> (shl x, y)
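// The hardware already masks the CL shift count to 5 bits for 8/16/32-bit
// shifts (6 bits for the 64-bit forms below), so an explicit and of the count
// with 31 (or 63) is redundant and can be dropped.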
def : Pat<(shl GR8:$src1, (and CL, 31)),
          (SHL8rCL GR8:$src1)>;
def : Pat<(shl GR16:$src1, (and CL, 31)),
          (SHL16rCL GR16:$src1)>;
def : Pat<(shl GR32:$src1, (and CL, 31)),
          (SHL32rCL GR32:$src1)>;
def : Pat<(store (shl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
          (SHL8mCL addr:$dst)>;
def : Pat<(store (shl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
          (SHL16mCL addr:$dst)>;
def : Pat<(store (shl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
          (SHL32mCL addr:$dst)>;

def : Pat<(srl GR8:$src1, (and CL, 31)),
          (SHR8rCL GR8:$src1)>;
def : Pat<(srl GR16:$src1, (and CL, 31)),
          (SHR16rCL GR16:$src1)>;
def : Pat<(srl GR32:$src1, (and CL, 31)),
          (SHR32rCL GR32:$src1)>;
def : Pat<(store (srl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
          (SHR8mCL addr:$dst)>;
def : Pat<(store (srl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
          (SHR16mCL addr:$dst)>;
def : Pat<(store (srl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
          (SHR32mCL addr:$dst)>;

def : Pat<(sra GR8:$src1, (and CL, 31)),
          (SAR8rCL GR8:$src1)>;
def : Pat<(sra GR16:$src1, (and CL, 31)),
          (SAR16rCL GR16:$src1)>;
def : Pat<(sra GR32:$src1, (and CL, 31)),
          (SAR32rCL GR32:$src1)>;
def : Pat<(store (sra (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
          (SAR8mCL addr:$dst)>;
def : Pat<(store (sra (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
          (SAR16mCL addr:$dst)>;
def : Pat<(store (sra (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
          (SAR32mCL addr:$dst)>;

// (shl x (and y, 63)) ==> (shl x, y)
def : Pat<(shl GR64:$src1, (and CL, 63)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL, 63)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL, 63)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;
// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8:$src1 , imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
// Optimize multiply by 2 with EFLAGS result.
let AddedComplexity = 2 in {
def : Pat<(X86smul_flag GR16:$src1, 2), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86smul_flag GR32:$src1, 2), (ADD32rr GR32:$src1, GR32:$src1)>;
}
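
// For example, an overflow-checked doubling (an illustrative C sketch using
// the Clang/GCC builtin, not code from this file):
//   bool overflows(int x) { int r; return __builtin_mul_overflow(x, 2, &r); }
// can be selected as an "add" of the register with itself: x + x computes
// the same value as x * 2 and sets OF on signed overflow exactly when the
// multiply would, so the flag consumer is still satisfied. The
// AddedComplexity bump makes these preferred over the generic IMUL patterns.
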
// Patterns for nodes that do not produce flags, for instructions that do.
// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
// Increment reg.
def : Pat<(add GR8 :$src, 1), (INC8r     GR8 :$src)>;
def : Pat<(add GR16:$src, 1), (INC16r    GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC32r    GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1), (INC64r    GR64:$src)>;

// Decrement reg.
def : Pat<(add GR8 :$src, -1), (DEC8r     GR8 :$src)>;
def : Pat<(add GR16:$src, -1), (DEC16r    GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC32r    GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, -1), (DEC64r    GR64:$src)>;
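
// The INC64_16r/INC64_32r and DEC64_16r/DEC64_32r forms exist because the
// short one-byte inc/dec encodings (0x40-0x4F) are reinterpreted as REX
// prefixes in 64-bit mode; there, incrementing or decrementing a 16- or
// 32-bit register must use the two-byte 0xFF /0 and 0xFF /1 encodings, which
// is what those instructions emit.
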
// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;