def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
+def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
+ SDTCisVT<1, iPTR>,
+ SDTCisVT<2, iPTR>]>;
+
def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;
def SDTX86RdTsc : SDTypeProfile<0, 0, []>;
def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
[SDNPHasChain, SDNPOptInFlag]>;
+def X86vastart_save_xmm_regs :
+ SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
+ SDT_X86VASTART_SAVE_XMM_REGS,
+ [SDNPHasChain]>;
+
def X86callseq_start :
SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
[SDNPHasChain, SDNPOutFlag]>;
let ParserMatchClass = X86MemAsmOperand;
}
+def opaque32mem : X86MemOperand<"printopaquemem">;
+def opaque48mem : X86MemOperand<"printopaquemem">;
+def opaque80mem : X86MemOperand<"printopaquemem">;
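+// Opaque memory references are ones whose size and interpretation are
+// implied by the instruction that uses them, e.g. the 16:16, 16:32, and
+// 16:64 segment:offset far pointers taken by ljmp/lcall.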
+
def i8mem : X86MemOperand<"printi8mem">;
def i16mem : X86MemOperand<"printi16mem">;
def i32mem : X86MemOperand<"printi32mem">;
Requires<[In32BitMode]>;
}
+// x86-64 va_start lowering magic.
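+// $al holds the number of XMM argument registers used by the call (the
+// value the SysV x86-64 ABI passes in %al for varargs functions),
+// $regsavefi is the frame index of the register save area, and $offset is
+// the byte offset within it where XMM spilling begins. The custom inserter
+// is expected to test $al and store only the XMM registers that are live.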
+let usesCustomDAGSchedInserter = 1 in
+def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
+ (outs),
+ (ins GR8:$al,
+ i64imm:$regsavefi, i64imm:$offset,
+ variable_ops),
+ "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
+ [(X86vastart_save_xmm_regs GR8:$al,
+ imm:$regsavefi,
+ imm:$offset)]>;
+
// Nop
let neverHasSideEffects = 1 in {
  def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
}

// Return instructions.
let isTerminator = 1, isReturn = 1, isBarrier = 1, hasCtrlDep = 1 in {
def RET    : I<0xC3, RawFrm, (outs), (ins variable_ops), "ret",
               [(X86retflag 0)]>;
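+// timm matches an ISD::TargetConstant leaf, whereas imm matches a plain
+// ISD::Constant; using timm below assumes call lowering materializes the
+// popped-byte count as a TargetConstant.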
def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
"ret\t$amt",
- [(X86retflag imm:$amt)]>;
+ [(X86retflag timm:$amt)]>;
}
// All branches are RawFrm, Void, Branch, and Terminators
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
  def JMP32r : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
                 [(brind GR32:$dst)]>;
def JMP32m : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
[(brind (loadi32 addr:$dst))]>;
+ def FARJMP16 : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst),
+ "ljmp{w}\t{*}$dst", []>, OpSize;
+ def FARJMP32 : I<0xFF, MRM5m, (outs), (ins opaque48mem:$dst),
+ "ljmp{l}\t{*}$dst", []>;
}
// Conditional branches
"call\t{*}$dst", [(X86call GR32:$dst)]>;
def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
"call\t{*}$dst", [(X86call (loadi32 addr:$dst))]>;
+
+ def FARCALL16 : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst),
+ "lcall{w}\t{*}$dst", []>, OpSize;
+ def FARCALL32 : I<0xFF, MRM3m, (outs), (ins opaque48mem:$dst),
+ "lcall{l}\t{*}$dst", []>;
}
// Tail call stuff.
def TRAP : I<0x0B, RawFrm, (outs), (ins), "ud2", [(trap)]>, TB;
}
+def SYSCALL : I<0x05, RawFrm,
+ (outs), (ins), "syscall", []>, TB;
+def SYSRET : I<0x07, RawFrm,
+ (outs), (ins), "sysret", []>, TB;
+def SYSENTER : I<0x34, RawFrm,
+ (outs), (ins), "sysenter", []>, TB;
+def SYSEXIT : I<0x35, RawFrm,
+ (outs), (ins), "sysexit", []>, TB;
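+// Fast system-call and return instructions. None of these carry selection
+// patterns; they are reachable only from inline assembly.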
+
//===----------------------------------------------------------------------===//
// Input/Output Instructions...
//
// Conditional moves
let Uses = [EFLAGS] in {
+
+// X86 doesn't have 8-bit conditional moves. Use a customDAGSchedInserter to
+// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
+// however that requires promoting the operands, and can induce additional
+// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
+// clobber EFLAGS, because if one of the operands is zero, the expansion
+// could involve an xor.
+let usesCustomDAGSchedInserter = 1, isTwoAddress = 0, Defs = [EFLAGS] in
+def CMOV_GR8 : I<0, Pseudo,
+ (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
+ "#CMOV_GR8 PSEUDO!",
+ [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
+ imm:$cond, EFLAGS))]>;
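+// A sketch of the expansion the custom inserter produces (three basic
+// blocks; not the literal MachineInstr sequence):
+//   thisMBB:   jCC sinkMBB            ; $cond selects the condition code
+//   copy0MBB:  (fall through)
+//   sinkMBB:   $dst = phi [$src2, thisMBB], [$src1, copy0MBB]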
+
let isCommutable = 1 in {
  def CMOVB16rr : I<0x42, MRMSrcReg,       // if <u, GR16 = GR16
                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "cmovb{w}\t{$src2, $dst|$dst, $src2}",
                    [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                     X86_COND_B, EFLAGS))]>,
                    TB, OpSize;
  def AND32mi8 : Ii8<0x83, MRM4m, (outs), (ins i32mem:$dst, i32i8imm:$src),
                     "and{l}\t{$src, $dst|$dst, $src}",
                     [(store (and (load addr:$dst), i32immSExt8:$src), addr:$dst),
                      (implicit EFLAGS)]>;
+
+ def AND8i8 : Ii8<0x24, RawFrm, (outs), (ins i8imm:$src),
+ "and{b}\t{$src, %al|%al, $src}", []>;
+ def AND16i16 : Ii16<0x25, RawFrm, (outs), (ins i16imm:$src),
+ "and{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
+ def AND32i32 : Ii32<0x25, RawFrm, (outs), (ins i32imm:$src),
+ "and{l}\t{$src, %eax|%eax, $src}", []>;
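+  // The *i8/*i16/*i32 defs above (and the ADD/TEST/CMP forms below) are the
+  // short accumulator-immediate encodings operating on AL/AX/EAX. Their
+  // patterns are empty, so they serve the assembler and disassembler rather
+  // than instruction selection.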
+
}
  def ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm:$src2),
                     "add{l}\t{$src2, $dst|$dst, $src2}",
                     [(store (add (load addr:$dst), i32immSExt8:$src2),
                             addr:$dst),
                      (implicit EFLAGS)]>;
+
+ // addition to rAX
+ def ADD8i8 : Ii8<0x04, RawFrm, (outs), (ins i8imm:$src),
+ "add{b}\t{$src, %al|%al, $src}", []>;
+ def ADD16i16 : Ii16<0x05, RawFrm, (outs), (ins i16imm:$src),
+ "add{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
+ def ADD32i32 : Ii32<0x05, RawFrm, (outs), (ins i32imm:$src),
+ "add{l}\t{$src, %eax|%eax, $src}", []>;
}
let Uses = [EFLAGS] in {
(implicit EFLAGS)]>;
}
+def TEST8i8 : Ii8<0xA8, RawFrm, (outs), (ins i8imm:$src),
+ "test{b}\t{$src, %al|%al, $src}", []>;
+def TEST16i16 : Ii16<0xA9, RawFrm, (outs), (ins i16imm:$src),
+ "test{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
+def TEST32i32 : Ii32<0xA9, RawFrm, (outs), (ins i32imm:$src),
+ "test{l}\t{$src, %eax|%eax, $src}", []>;
+
def TEST8rm : I<0x84, MRMSrcMem, (outs), (ins GR8 :$src1, i8mem :$src2),
"test{b}\t{$src2, $src1|$src1, $src2}",
                 [(X86cmp (and GR8:$src1, (loadi8 addr:$src2)), 0),
                  (implicit EFLAGS)]>;
// Integer comparisons
let Defs = [EFLAGS] in {
+def CMP8i8 : Ii8<0x3C, RawFrm, (outs), (ins i8imm:$src),
+ "cmp{b}\t{$src, %al|%al, $src}", []>;
+def CMP16i16 : Ii16<0x3D, RawFrm, (outs), (ins i16imm:$src),
+ "cmp{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
+def CMP32i32 : Ii32<0x3D, RawFrm, (outs), (ins i32imm:$src),
+ "cmp{l}\t{$src, %eax|%eax, $src}", []>;
+
def CMP8rr : I<0x38, MRMDestReg,
(outs), (ins GR8 :$src1, GR8 :$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}",
// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
-let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1 in {
+let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
+ isCodeGenOnly = 1 in {
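+// isCodeGenOnly keeps these out of the assembler and disassembler tables,
+// since they reuse the encodings of the corresponding XOR instructions.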
def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins),
"xor{b}\t$dst, $dst",
[(set GR8:$dst, 0)]>;
[(X86tlsaddr tls32addr:$sym)]>,
Requires<[In32BitMode]>;
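+// AddedComplexity gives the two segment-override loads below priority over
+// the plain MOV32rm pattern when the address matches a gsload/fsload.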
-let AddedComplexity = 5 in
+let AddedComplexity = 5, isCodeGenOnly = 1 in
def GS_MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movl\t%gs:$src, $dst",
[(set GR32:$dst, (gsload addr:$src))]>, SegGS;
-let AddedComplexity = 5 in
+let AddedComplexity = 5, isCodeGenOnly = 1 in
def FS_MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movl\t%fs:$src, $dst",
[(set GR32:$dst, (fsload addr:$src))]>, SegFS;
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
- hasCtrlDep = 1 in {
+ hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
"ret\t#eh_return, addr: $addr",
[(X86ehret GR32:$addr)]>;
// extload bool -> extload byte
def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
-def : Pat<(extloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>,
- Requires<[In32BitMode]>;
+def : Pat<(extloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
-def : Pat<(extloadi16i8 addr:$src), (MOVZX16rm8 addr:$src)>,
- Requires<[In32BitMode]>;
+def : Pat<(extloadi16i8 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
-// anyext
-def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8 GR8 :$src)>,
- Requires<[In32BitMode]>;
-def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>,
- Requires<[In32BitMode]>;
-def : Pat<(i32 (anyext GR16:$src)),
- (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, x86_subreg_16bit)>;
+// anyext. Define these to do an explicit zero-extend to
+// avoid partial-register updates.
+def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8 GR8 :$src)>;
+def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;
+def : Pat<(i32 (anyext GR16:$src)), (MOVZX32rr16 GR16:$src)>;
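+// e.g. (i32 (anyext GR8:$src)) becomes "movzbl", not a byte move into the
+// low 8 bits of the destination, which would leave the upper bits of the
+// register dependent on its prior contents.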
// (and (i32 load), 255) -> (zextload i8)
def : Pat<(i32 (and (nvloadi32 addr:$src), (i32 255))),
          (MOVZX32rm8 addr:$src)>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD),
                                      x86_subreg_8bit_hi))>,
      Requires<[In32BitMode]>;
+def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
+ (MOVZX32rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD),
+ x86_subreg_8bit_hi))>,
+ Requires<[In32BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_ABCD),
                                      x86_subreg_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(parallel (store (add (loadi32 addr:$dst), -1), addr:$dst),
                    (implicit EFLAGS)),
          (DEC32m addr:$dst)>, Requires<[In32BitMode]>;
+// -disable-16bit support.
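+// (With 16-bit registers disabled, i16 is not a legal type and 16-bit
+// arithmetic is promoted to i32; the patterns below handle the i16 loads
+// and stores that remain at memory boundaries.)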
+def : Pat<(truncstorei16 (i32 imm:$src), addr:$dst),
+ (MOV16mi addr:$dst, imm:$src)>;
+def : Pat<(truncstorei16 GR32:$src, addr:$dst),
+ (MOV16mr addr:$dst, (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit))>;
+def : Pat<(i32 (sextloadi16 addr:$dst)),
+ (MOVSX32rm16 addr:$dst)>;
+def : Pat<(i32 (zextloadi16 addr:$dst)),
+ (MOVZX32rm16 addr:$dst)>;
+def : Pat<(i32 (extloadi16 addr:$dst)),
+ (MOVZX32rm16 addr:$dst)>;
+
//===----------------------------------------------------------------------===//
// Floating Point Stack Support
//===----------------------------------------------------------------------===//