diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index 730f3930bf4..472ba4c4628 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -19,16 +19,25 @@
 // 64-bits but only 32 bits are significant.
 def i64i32imm  : Operand<i64>;
+
+// 64-bits but only 32 bits are significant, and those bits are treated as being
+// pc relative.
+def i64i32imm_pcrel : Operand<i64> {
+  let PrintMethod = "print_pcrel_imm";
+}
+
+
 // 64-bits but only 8 bits are significant.
 def i64i8imm   : Operand<i64>;
 
 def lea64mem : Operand<i64> {
-  let PrintMethod = "printi64mem";
+  let PrintMethod = "printlea64mem";
   let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
 }
 
 def lea64_32mem : Operand<i32> {
   let PrintMethod = "printlea64_32mem";
+  let AsmOperandLowerMethod = "lower_lea64_32mem";
   let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
 }
 
@@ -36,38 +45,35 @@ def lea64_32mem : Operand<i32> {
 // Complex Pattern Definitions.
 //
 def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
+                               [add, mul, X86mul_imm, shl, or, frameindex, X86Wrapper,
+                                X86WrapperRIP],
+                               []>;
+
+def tls64addr : ComplexPattern<i64, 4, "SelectTLSADDRAddr",
+                               [tglobaltlsaddr], []>;
 
 //===----------------------------------------------------------------------===//
 // Pattern fragments.
 //
 
+def i64immSExt8  : PatLeaf<(i64 imm), [{
+  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
+  // sign extended field.
+  return (int64_t)N->getZExtValue() == (int8_t)N->getZExtValue();
+}]>;
+
 def i64immSExt32  : PatLeaf<(i64 imm), [{
   // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
   // sign extended field.
-  return (int64_t)N->getValue() == (int32_t)N->getValue();
+  return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
 }]>;
 
 def i64immZExt32  : PatLeaf<(i64 imm), [{
   // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
   // zero extended field.
-  return (uint64_t)N->getValue() == (uint32_t)N->getValue();
-}]>;
-
-def i64immSExt8  : PatLeaf<(i64 imm), [{
-  // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
-  // sign extended field.
-  return (int64_t)N->getValue() == (int8_t)N->getValue();
+  return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
 }]>;
 
-def i64immFFFFFFFF : PatLeaf<(i64 imm), [{
-  // i64immFFFFFFFF - True if this is a specific constant we can't write in
-  // tblgen files.
-  return N->getValue() == 0x00000000FFFFFFFFULL;
-}]>;
-
-
 def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
 
@@ -86,40 +92,69 @@ def extloadi64i32  : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
 // Instruction list...
 //
 
+// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
+// a stack adjustment and the codegen must know that they may modify the stack
+// pointer before prolog-epilog rewriting occurs.
+// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
+// sub / add which can clobber EFLAGS.
+let Defs = [RSP, EFLAGS], Uses = [RSP] in { +def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt), + "#ADJCALLSTACKDOWN", + [(X86callseq_start timm:$amt)]>, + Requires<[In64BitMode]>; +def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2), + "#ADJCALLSTACKUP", + [(X86callseq_end timm:$amt1, timm:$amt2)]>, + Requires<[In64BitMode]>; +} + //===----------------------------------------------------------------------===// // Call Instructions... // let isCall = 1 in - // All calls clobber the non-callee saved registers... + // All calls clobber the non-callee saved registers. RSP is marked as + // a use to prevent stack-pointer assignments that appear immediately + // before calls from potentially appearing dead. Uses for argument + // registers are added manually. let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1, MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7, XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, - XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS] in { - def CALL64pcrel32 : I<0xE8, RawFrm, (outs), (ins i64imm:$dst, variable_ops), - "call\t${dst:call}", []>; + XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS], + Uses = [RSP] in { + + // NOTE: this pattern doesn't match "X86call imm", because we do not know + // that the offset between an arbitrary immediate and the call will fit in + // the 32-bit pcrel field that we have. + def CALL64pcrel32 : Ii32<0xE8, RawFrm, + (outs), (ins i64i32imm_pcrel:$dst, variable_ops), + "call\t$dst", []>, + Requires<[In64BitMode]>; def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops), "call\t{*}$dst", [(X86call GR64:$dst)]>; def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops), - "call\t{*}$dst", []>; + "call\t{*}$dst", [(X86call (loadi64 addr:$dst))]>; } let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in -def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset), +def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset, + variable_ops), "#TC_RETURN $dst $offset", []>; let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in -def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset), +def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset, + variable_ops), "#TC_RETURN $dst $offset", []>; let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in - def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst # TAILCALL", - []>; + def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst), + "jmp{q}\t{*}$dst # TAILCALL", + []>; // Branches let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { @@ -129,6 +164,17 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { [(brind (loadi64 addr:$dst))]>; } +//===----------------------------------------------------------------------===// +// EH Pseudo Instructions +// +let isTerminator = 1, isReturn = 1, isBarrier = 1, + hasCtrlDep = 1 in { +def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr), + "ret\t#eh_return, addr: $addr", + [(X86ehret GR64:$addr)]>; + +} + //===----------------------------------------------------------------------===// // Miscellaneous Instructions... 
// @@ -144,6 +190,15 @@ def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", []>; } +let Defs = [RSP], Uses = [RSP], neverHasSideEffects = 1, mayStore = 1 in { +def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i8imm:$imm), + "push{q}\t$imm", []>; +def PUSH64i16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm), + "push{q}\t$imm", []>; +def PUSH64i32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm), + "push{q}\t$imm", []>; +} + let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W; let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in @@ -154,6 +209,7 @@ def LEA64_32r : I<0x8D, MRMSrcMem, "lea{l}\t{$src|$dst}, {$dst|$src}", [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>; +let isReMaterializable = 1 in def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src), "lea{q}\t{$src|$dst}, {$dst|$src}", [(set GR64:$dst, lea64addr:$src)]>; @@ -198,7 +254,7 @@ let neverHasSideEffects = 1 in def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), "mov{q}\t{$src, $dst|$dst, $src}", []>; -let isReMaterializable = 1 in { +let isReMaterializable = 1, isAsCheapAsAMove = 1 in { def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src), "movabs{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, imm:$src)]>; @@ -207,7 +263,7 @@ def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src), [(set GR64:$dst, i64immSExt32:$src)]>; } -let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "mov{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (load addr:$src))]>; @@ -221,6 +277,10 @@ def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src), // Sign/Zero extenders +// MOVSX64rr8 always has a REX prefix and it has an 8-bit register +// operand, which makes it a rare instruction with an 8-bit register +// operand that can never access an h register. If support for h registers +// were generalized, this would require a special register class. def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src), "movs{bq|x}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (sext GR8:$src))]>, TB; @@ -240,18 +300,51 @@ def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src), "movs{lq|xd}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (sextloadi64i32 addr:$src))]>; -def MOVZX64rr8 : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src), - "movz{bq|x}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (zext GR8:$src))]>, TB; -def MOVZX64rm8 : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src), - "movz{bq|x}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB; -def MOVZX64rr16: RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src), - "movz{wq|x}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (zext GR16:$src))]>, TB; -def MOVZX64rm16: RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), - "movz{wq|x}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB; +// Use movzbl instead of movzbq when the destination is a register; it's +// equivalent due to implicit zero-extending, and it has a smaller encoding. 
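For illustration, a minimal C sketch of the implicit-zeroing rule the comment
above relies on (the helper name is hypothetical): on x86-64, writing a 32-bit
register also clears bits 63:32 of the full register, so a 32-bit movzbl both
zero-extends to 64 bits and, for the low registers, avoids the REX.W prefix byte.

    #include <stdint.h>

    /* Writing the 32-bit subregister zeroes the upper half of the 64-bit
       register, so an 8-to-64-bit zero extension needs only a movzbl. */
    uint64_t zext8to64(uint8_t b) {
      uint32_t w = b;   /* typically lowers to: movzbl %dil, %eax */
      return w;         /* upper 32 bits are already zero: no extra work */
    }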
+def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
+                   "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+                   [(set GR64:$dst, (zext GR8:$src))]>, TB;
+def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
+                   "movz{bl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+                   [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
+// Use movzwl instead of movzwq when the destination is a register; it's
+// equivalent due to implicit zero-extending, and it has a smaller encoding.
+def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
+                   "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+                   [(set GR64:$dst, (zext GR16:$src))]>, TB;
+def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
+                   "movz{wl|x}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+                   [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
+
+// There's no movzlq instruction, but movl can be used for this purpose, using
+// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
+// extension on x86-64 is to use a SUBREG_TO_REG to utilize implicit
+// zero-extension; however, this isn't possible when the 32-bit value is
+// defined by a truncate or is copied from something where the high bits aren't
+// necessarily all zero. In such cases, we fall back to these explicit zext
+// instructions.
+def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
+                    "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+                    [(set GR64:$dst, (zext GR32:$src))]>;
+def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
+                    "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
+                    [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
+
+// Any instruction that defines a 32-bit result zeroes the high half of the
+// register. Truncate can be lowered to EXTRACT_SUBREG, and CopyFromReg may
+// be copying from a truncate, but any other 32-bit operation will zero-extend
+// up to 64 bits.
+def def32 : PatLeaf<(i32 GR32:$src), [{
+  return N->getOpcode() != ISD::TRUNCATE &&
+         N->getOpcode() != TargetInstrInfo::EXTRACT_SUBREG &&
+         N->getOpcode() != ISD::CopyFromReg;
+}]>;
+
+// In the case of a 32-bit def that is known to implicitly zero-extend,
+// we can use a SUBREG_TO_REG.
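A small C sketch of why truncates are excluded from def32 (the values are
arbitrary): an in-register truncate usually costs no instruction, so the high
half of the wide register can still hold stale bits, and pretending otherwise
with SUBREG_TO_REG would be wrong.

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      uint64_t v = 0xAAAABBBB00000001ULL;
      uint32_t t = (uint32_t)v;  /* "truncate": typically no instruction */
      /* If codegen assumed t's containing 64-bit register were already
         zero-extended, it could observe 0xAAAABBBB00000001 instead. */
      assert((uint64_t)t == 1);
      return 0;
    }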
+def : Pat<(i64 (zext def32:$src)), + (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>; let neverHasSideEffects = 1 in { let Defs = [RAX], Uses = [EAX] in @@ -271,32 +364,43 @@ let Defs = [EFLAGS] in { let isTwoAddress = 1 in { let isConvertibleToThreeAddress = 1 in { let isCommutable = 1 in -def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "add{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (add GR64:$src1, GR64:$src2))]>; +// Register-Register Addition +def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (add GR64:$src1, GR64:$src2)), + (implicit EFLAGS)]>; +// Register-Integer Addition +def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)), + (implicit EFLAGS)]>; def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2))]>; -def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), - "add{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2))]>; + [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)), + (implicit EFLAGS)]>; } // isConvertibleToThreeAddress -def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "add{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (add GR64:$src1, (load addr:$src2)))]>; +// Register-Memory Addition +def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))), + (implicit EFLAGS)]>; } // isTwoAddress +// Memory-Register Addition def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", - [(store (add (load addr:$dst), GR64:$src2), addr:$dst)]>; -def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2), - "add{q}\t{$src2, $dst|$dst, $src2}", - [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>; + [(store (add (load addr:$dst), GR64:$src2), addr:$dst), + (implicit EFLAGS)]>; def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2), "add{q}\t{$src2, $dst|$dst, $src2}", - [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>; + [(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst), + (implicit EFLAGS)]>; +def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2), + "add{q}\t{$src2, $dst|$dst, $src2}", + [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst), + (implicit EFLAGS)]>; let Uses = [EFLAGS] in { let isTwoAddress = 1 in { @@ -309,51 +413,68 @@ def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$s "adc{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>; -def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), - "adc{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>; def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>; +def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), + "adc{q}\t{$src2, $dst|$dst, $src2}", + [(set 
GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
 } // isTwoAddress
 
 def ADC64mr  : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   "adc{q}\t{$src2, $dst|$dst, $src2}",
                   [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
-def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
-                      "adc{q}\t{$src2, $dst|$dst, $src2}",
-                      [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
 def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                     "adc{q}\t{$src2, $dst|$dst, $src2}",
-                    [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
+                    [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
+def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
+                      "adc{q}\t{$src2, $dst|$dst, $src2}",
+                      [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
 } // Uses = [EFLAGS]
 
 let isTwoAddress = 1 in {
+// Register-Register Subtraction
 def SUB64rr  : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "sub{q}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
+                  [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)),
+                   (implicit EFLAGS)]>;
 
+// Register-Memory Subtraction
 def SUB64rm  : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "sub{q}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;
+                  [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))),
+                   (implicit EFLAGS)]>;
 
-def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
-                      "sub{q}\t{$src2, $dst|$dst, $src2}",
-                      [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
-def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+// Register-Integer Subtraction
+def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
+                    (ins GR64:$src1, i64i8imm:$src2),
                     "sub{q}\t{$src2, $dst|$dst, $src2}",
-                    [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
+                    [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2)),
+                     (implicit EFLAGS)]>;
+def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
+                      (ins GR64:$src1, i64i32imm:$src2),
+                      "sub{q}\t{$src2, $dst|$dst, $src2}",
+                      [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2)),
+                       (implicit EFLAGS)]>;
 } // isTwoAddress
 
+// Memory-Register Subtraction
 def SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   "sub{q}\t{$src2, $dst|$dst, $src2}",
-                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
-def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
-                      "sub{q}\t{$src2, $dst|$dst, $src2}",
-                      [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
+                  [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
+                   (implicit EFLAGS)]>;
+
+// Memory-Integer Subtraction
 def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                     "sub{q}\t{$src2, $dst|$dst, $src2}",
-                    [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
+                    [(store (sub (load addr:$dst), i64immSExt8:$src2),
+                            addr:$dst),
+                     (implicit EFLAGS)]>;
+def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
+                      "sub{q}\t{$src2, $dst|$dst, $src2}",
+                      [(store (sub (load addr:$dst), i64immSExt32:$src2),
+                              addr:$dst),
+                       (implicit EFLAGS)]>;
 
 let Uses = [EFLAGS] in {
 let isTwoAddress = 1 in {
@@ -365,23 +486,23 @@ def SBB64rm  : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$sr
                   "sbb{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
 
-def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
-                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
-                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
 def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                     "sbb{q}\t{$src2, $dst|$dst, $src2}",
                     [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
+def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
+                      [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
 } // isTwoAddress
 
 def SBB64mr  : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   "sbb{q}\t{$src2, $dst|$dst, $src2}",
                   [(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
-def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
-                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
-                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
 def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
                     "sbb{q}\t{$src2, $dst|$dst, $src2}",
                     [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
+def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
+                      "sbb{q}\t{$src2, $dst|$dst, $src2}",
+                      [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
 } // Uses = [EFLAGS]
 } // Defs = [EFLAGS]
 
@@ -404,36 +525,51 @@ def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
 
 let Defs = [EFLAGS] in {
 let isTwoAddress = 1 in {
 let isCommutable = 1 in
-def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+// Register-Register Signed Integer Multiplication
+def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
+                  (ins GR64:$src1, GR64:$src2),
                   "imul{q}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;
+                  [(set GR64:$dst, (mul GR64:$src1, GR64:$src2)),
+                   (implicit EFLAGS)]>, TB;
 
-def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+// Register-Memory Signed Integer Multiplication
+def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
+                  (ins GR64:$src1, i64mem:$src2),
                   "imul{q}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
+                  [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2))),
+                   (implicit EFLAGS)]>, TB;
 } // isTwoAddress
 
 // Surprisingly enough, these are not two address instructions!
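For illustration, a C sketch of what the three-operand form buys (the function
name and the assembly shown in the comment are hypothetical lowerings): the
destination is distinct from the source, so no operand is tied.

    #include <stdint.h>

    /* imul r64, r/m64, imm writes a separate destination register, so the
       source stays live: unlike two-address add/sub, nothing is clobbered. */
    int64_t mul_by_37(int64_t src) {
      return src * 37;   /* may lower to: imulq $37, %rdi, %rax */
    }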
-def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32 - (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), - "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", - [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>; + +// Register-Integer Signed Integer Multiplication def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8 (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", - [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>; -def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32 - (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2), + [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2)), + (implicit EFLAGS)]>; +def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32 + (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", - [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>; + [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2)), + (implicit EFLAGS)]>; + +// Memory-Integer Signed Integer Multiplication def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8 (outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2), "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", - [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>; + [(set GR64:$dst, (mul (load addr:$src1), + i64immSExt8:$src2)), + (implicit EFLAGS)]>; +def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32 + (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2), + "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}", + [(set GR64:$dst, (mul (load addr:$src1), + i64immSExt32:$src2)), + (implicit EFLAGS)]>; } // Defs = [EFLAGS] // Unsigned division / remainder -let neverHasSideEffects = 1 in { let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in { def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX "div{q}\t$src", []>; @@ -447,42 +583,51 @@ def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] "idiv{q}\t$src", []>; } } -} // Unary instructions let Defs = [EFLAGS], CodeSize = 2 in { let isTwoAddress = 1 in def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst", - [(set GR64:$dst, (ineg GR64:$src))]>; + [(set GR64:$dst, (ineg GR64:$src)), + (implicit EFLAGS)]>; def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst", - [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>; + [(store (ineg (loadi64 addr:$dst)), addr:$dst), + (implicit EFLAGS)]>; let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst", - [(set GR64:$dst, (add GR64:$src, 1))]>; + [(set GR64:$dst, (add GR64:$src, 1)), + (implicit EFLAGS)]>; def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst", - [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>; + [(store (add (loadi64 addr:$dst), 1), addr:$dst), + (implicit EFLAGS)]>; let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst", - [(set GR64:$dst, (add GR64:$src, -1))]>; + [(set GR64:$dst, (add GR64:$src, -1)), + (implicit EFLAGS)]>; def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst", - [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>; + [(store (add (loadi64 addr:$dst), -1), addr:$dst), + (implicit EFLAGS)]>; // In 64-bit mode, single byte INC and DEC cannot be encoded. let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in { // Can transform into LEA. 
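A minimal C sketch of the LEA transform mentioned above (the lowering in the
comment is one plausible output): lea computes the increment in three-address
form and never touches EFLAGS, which is what makes inc/dec convertible.

    #include <stdint.h>

    uint32_t inc_three_addr(uint32_t src) {
      return src + 1;    /* e.g. leal 1(%rdi), %eax: src stays live,
                            EFLAGS is left untouched */
    }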
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst", - [(set GR16:$dst, (add GR16:$src, 1))]>, + [(set GR16:$dst, (add GR16:$src, 1)), + (implicit EFLAGS)]>, OpSize, Requires<[In64BitMode]>; def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst", - [(set GR32:$dst, (add GR32:$src, 1))]>, + [(set GR32:$dst, (add GR32:$src, 1)), + (implicit EFLAGS)]>, Requires<[In64BitMode]>; def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst", - [(set GR16:$dst, (add GR16:$src, -1))]>, + [(set GR16:$dst, (add GR16:$src, -1)), + (implicit EFLAGS)]>, OpSize, Requires<[In64BitMode]>; def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst", - [(set GR32:$dst, (add GR32:$src, -1))]>, + [(set GR32:$dst, (add GR32:$src, -1)), + (implicit EFLAGS)]>, Requires<[In64BitMode]>; } // isConvertibleToThreeAddress @@ -490,16 +635,20 @@ def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst" // how to unfold them. let isTwoAddress = 0, CodeSize = 2 in { def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst", - [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>, + [(store (add (loadi16 addr:$dst), 1), addr:$dst), + (implicit EFLAGS)]>, OpSize, Requires<[In64BitMode]>; def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst", - [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>, + [(store (add (loadi32 addr:$dst), 1), addr:$dst), + (implicit EFLAGS)]>, Requires<[In64BitMode]>; def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst", - [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>, + [(store (add (loadi16 addr:$dst), -1), addr:$dst), + (implicit EFLAGS)]>, OpSize, Requires<[In64BitMode]>; def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst", - [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>, + [(store (add (loadi32 addr:$dst), -1), addr:$dst), + (implicit EFLAGS)]>, Requires<[In64BitMode]>; } } // Defs = [EFLAGS], CodeSize @@ -683,7 +832,7 @@ def SHRD64mri8 : RIi8<0xAC, MRMDestMem, // Logical Instructions... 
// -let isTwoAddress = 1 in +let isTwoAddress = 1 , AddedComplexity = 15 in def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst", [(set GR64:$dst, (not GR64:$src))]>; def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst", @@ -695,86 +844,107 @@ let isCommutable = 1 in def AND64rr : RI<0x21, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "and{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>; + [(set GR64:$dst, (and GR64:$src1, GR64:$src2)), + (implicit EFLAGS)]>; def AND64rm : RI<0x23, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "and{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>; -def AND64ri32 : RIi32<0x81, MRM4r, - (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), - "and{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>; + [(set GR64:$dst, (and GR64:$src1, (load addr:$src2))), + (implicit EFLAGS)]>; def AND64ri8 : RIi8<0x83, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), "and{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>; + [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2)), + (implicit EFLAGS)]>; +def AND64ri32 : RIi32<0x81, MRM4r, + (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), + "and{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2)), + (implicit EFLAGS)]>; } // isTwoAddress def AND64mr : RI<0x21, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), "and{q}\t{$src, $dst|$dst, $src}", - [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>; -def AND64mi32 : RIi32<0x81, MRM4m, - (outs), (ins i64mem:$dst, i64i32imm:$src), - "and{q}\t{$src, $dst|$dst, $src}", - [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>; + [(store (and (load addr:$dst), GR64:$src), addr:$dst), + (implicit EFLAGS)]>; def AND64mi8 : RIi8<0x83, MRM4m, (outs), (ins i64mem:$dst, i64i8imm :$src), "and{q}\t{$src, $dst|$dst, $src}", - [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>; + [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst), + (implicit EFLAGS)]>; +def AND64mi32 : RIi32<0x81, MRM4m, + (outs), (ins i64mem:$dst, i64i32imm:$src), + "and{q}\t{$src, $dst|$dst, $src}", + [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst), + (implicit EFLAGS)]>; let isTwoAddress = 1 in { let isCommutable = 1 in def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "or{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>; + [(set GR64:$dst, (or GR64:$src1, GR64:$src2)), + (implicit EFLAGS)]>; def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "or{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>; -def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), - "or{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>; + [(set GR64:$dst, (or GR64:$src1, (load addr:$src2))), + (implicit EFLAGS)]>; def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), "or{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>; + [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2)), + (implicit EFLAGS)]>; +def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), + "or{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (or 
GR64:$src1, i64immSExt32:$src2)), + (implicit EFLAGS)]>; } // isTwoAddress def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), "or{q}\t{$src, $dst|$dst, $src}", - [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>; -def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src), - "or{q}\t{$src, $dst|$dst, $src}", - [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>; + [(store (or (load addr:$dst), GR64:$src), addr:$dst), + (implicit EFLAGS)]>; def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src), "or{q}\t{$src, $dst|$dst, $src}", - [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>; + [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst), + (implicit EFLAGS)]>; +def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src), + "or{q}\t{$src, $dst|$dst, $src}", + [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst), + (implicit EFLAGS)]>; let isTwoAddress = 1 in { let isCommutable = 1 in def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "xor{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>; + [(set GR64:$dst, (xor GR64:$src1, GR64:$src2)), + (implicit EFLAGS)]>; def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "xor{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>; + [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2))), + (implicit EFLAGS)]>; +def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), + "xor{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2)), + (implicit EFLAGS)]>; def XOR64ri32 : RIi32<0x81, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), "xor{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>; -def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), - "xor{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>; + [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2)), + (implicit EFLAGS)]>; } // isTwoAddress def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), "xor{q}\t{$src, $dst|$dst, $src}", - [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>; -def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src), - "xor{q}\t{$src, $dst|$dst, $src}", - [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>; + [(store (xor (load addr:$dst), GR64:$src), addr:$dst), + (implicit EFLAGS)]>; def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src), "xor{q}\t{$src, $dst|$dst, $src}", - [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>; + [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst), + (implicit EFLAGS)]>; +def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src), + "xor{q}\t{$src, $dst|$dst, $src}", + [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst), + (implicit EFLAGS)]>; } // Defs = [EFLAGS] //===----------------------------------------------------------------------===// @@ -815,23 +985,52 @@ def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2), "cmp{q}\t{$src2, $src1|$src1, $src2}", [(X86cmp GR64:$src1, (loadi64 addr:$src2)), (implicit EFLAGS)]>; +def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2), + "cmp{q}\t{$src2, $src1|$src1, $src2}", + [(X86cmp GR64:$src1, 
i64immSExt8:$src2), + (implicit EFLAGS)]>; def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2), "cmp{q}\t{$src2, $src1|$src1, $src2}", [(X86cmp GR64:$src1, i64immSExt32:$src2), (implicit EFLAGS)]>; +def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2), + "cmp{q}\t{$src2, $src1|$src1, $src2}", + [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2), + (implicit EFLAGS)]>; def CMP64mi32 : RIi32<0x81, MRM7m, (outs), (ins i64mem:$src1, i64i32imm:$src2), "cmp{q}\t{$src2, $src1|$src1, $src2}", [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2), (implicit EFLAGS)]>; -def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2), - "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2), - (implicit EFLAGS)]>; -def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2), - "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR64:$src1, i64immSExt8:$src2), - (implicit EFLAGS)]>; +} // Defs = [EFLAGS] + +// Bit tests. +// TODO: BTC, BTR, and BTS +let Defs = [EFLAGS] in { +def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), + "bt{q}\t{$src2, $src1|$src1, $src2}", + [(X86bt GR64:$src1, GR64:$src2), + (implicit EFLAGS)]>, TB; + +// Unlike with the register+register form, the memory+register form of the +// bt instruction does not ignore the high bits of the index. From ISel's +// perspective, this is pretty bizarre. Disable these instructions for now. +//def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), +// "bt{q}\t{$src2, $src1|$src1, $src2}", +// [(X86bt (loadi64 addr:$src1), GR64:$src2), +// (implicit EFLAGS)]>, TB; + +def BT64ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2), + "bt{q}\t{$src2, $src1|$src1, $src2}", + [(X86bt GR64:$src1, i64immSExt8:$src2), + (implicit EFLAGS)]>, TB; +// Note that these instructions don't need FastBTMem because that +// only applies when the other operand is in a register. When it's +// an immediate, bt is still fast. 
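For reference, a C sketch of the two addressing behaviors discussed above
(assuming little-endian layout and flat bit numbering): the immediate form
stays inside the addressed 64-bit word, while the register form indexes
memory as one long bit string.

    #include <stdint.h>

    /* bt mem, imm8: the offset is taken modulo the operand size. */
    int bt_mem_imm(const uint64_t *p, unsigned imm) {
      return (int)((p[0] >> (imm & 63)) & 1);
    }

    /* bt mem, reg: the index may select a different word entirely,
       which is the "bizarre" behavior that keeps BT64mr disabled. */
    int bt_mem_reg(const uint64_t *p, int64_t bit) {
      return (int)((p[bit >> 6] >> (bit & 63)) & 1);
    }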
+def BT64mi8 : Ii8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
+                "bt{q}\t{$src2, $src1|$src1, $src2}",
+                [(X86bt (loadi64 addr:$src1), i64immSExt8:$src2),
+                 (implicit EFLAGS)]>, TB;
 } // Defs = [EFLAGS]
 
 // Conditional moves
@@ -907,6 +1106,16 @@ def CMOVNP64rr : RI<0x4B, MRMSrcReg,       // if !parity, GR64 = GR64
                    "cmovnp\t{$src2, $dst|$dst, $src2}",
                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                      X86_COND_NP, EFLAGS))]>, TB;
+def CMOVO64rr : RI<0x40, MRMSrcReg,       // if overflow, GR64 = GR64
+                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+                   "cmovo\t{$src2, $dst|$dst, $src2}",
+                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
+                                     X86_COND_O, EFLAGS))]>, TB;
+def CMOVNO64rr : RI<0x41, MRMSrcReg,       // if !overflow, GR64 = GR64
+                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+                   "cmovno\t{$src2, $dst|$dst, $src2}",
+                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
+                                     X86_COND_NO, EFLAGS))]>, TB;
 } // isCommutable = 1
 
 def CMOVB64rm : RI<0x42, MRMSrcMem,       // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
+def CMOVO64rm : RI<0x40, MRMSrcMem,       // if overflow, GR64 = [mem64]
+                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+                   "cmovo\t{$src2, $dst|$dst, $src2}",
+                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
+                                     X86_COND_O, EFLAGS))]>, TB;
+def CMOVNO64rm : RI<0x41, MRMSrcMem,       // if !overflow, GR64 = [mem64]
+                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+                   "cmovno\t{$src2, $dst|$dst, $src2}",
+                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
+                                     X86_COND_NO, EFLAGS))]>, TB;
 } // isTwoAddress
 
 //===----------------------------------------------------------------------===//
@@ -1090,47 +1309,101 @@ def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src)
 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
 // FIXME: AddedComplexity gives MOV64r0 a higher priority than MOV64ri32. Remove
 // when we have a better way to specify isel priority.
-let Defs = [EFLAGS], AddedComplexity = 1, isReMaterializable = 1 in
-def MOV64r0 : RI<0x31, MRMInitReg, (outs GR64:$dst), (ins),
-                 "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
-                 [(set GR64:$dst, 0)]>;
+let Defs = [EFLAGS], AddedComplexity = 1,
+    isReMaterializable = 1, isAsCheapAsAMove = 1 in
+def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins),
+                "xor{l}\t${dst:subreg32}, ${dst:subreg32}",
+                [(set GR64:$dst, 0)]>;
 
 // Materialize i64 constant where top 32-bits are zero.
-let AddedComplexity = 1, isReMaterializable = 1 in
+let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
 def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                         "mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
                         [(set GR64:$dst, i64immZExt32:$src)]>;
 
+//===----------------------------------------------------------------------===//
+// Thread Local Storage Instructions
+//===----------------------------------------------------------------------===//
+
+// All calls clobber the non-callee saved registers. RSP is marked as
+// a use to prevent stack-pointer assignments that appear immediately
+// before calls from potentially appearing dead.
+let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11, + FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1, + MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7, + XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, + XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS], + Uses = [RSP] in +def TLS_addr64 : I<0, Pseudo, (outs), (ins lea64mem:$sym), + ".byte\t0x66; " + "leaq\t$sym(%rip), %rdi; " + ".word\t0x6666; " + "rex64; " + "call\t__tls_get_addr@PLT", + [(X86tlsaddr tls64addr:$sym)]>, + Requires<[In64BitMode]>; + +let AddedComplexity = 5 in +def MOV64GSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), + "movq\t%gs:$src, $dst", + [(set GR64:$dst, (gsload addr:$src))]>, SegGS; + +let AddedComplexity = 5 in +def MOV64FSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), + "movq\t%fs:$src, $dst", + [(set GR64:$dst, (fsload addr:$src))]>, SegFS; //===----------------------------------------------------------------------===// // Atomic Instructions //===----------------------------------------------------------------------===// -//FIXME: Please check the format Pseudo is certainly wrong, but the opcode and -// prefixes should be correct - let Defs = [RAX, EFLAGS], Uses = [RAX] in { -def CMPXCHG64 : RI<0xB1, Pseudo, (outs), (ins i64mem:$ptr, GR64:$swap), - "cmpxchgq $swap,$ptr", []>, TB; -def LCMPXCHG64 : RI<0xB1, Pseudo, (outs), (ins i64mem:$ptr, GR64:$swap), - "lock cmpxchgq $swap,$ptr", +def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap), + "lock\n\t" + "cmpxchgq\t$swap,$ptr", [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK; } -let Constraints = "$val = $dst", Defs = [EFLAGS] in { -def LXADD64 : RI<0xC1, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val), - "lock xadd $val, $ptr", - [(set GR64:$dst, (atomic_las_64 addr:$ptr, GR64:$val))]>, +let Constraints = "$val = $dst" in { +let Defs = [EFLAGS] in +def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val), + "lock\n\t" + "xadd\t$val, $ptr", + [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>, TB, LOCK; -def XADD64 : RI<0xC1, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val), - "xadd $val, $ptr", []>, TB; -def LXCHG64 : RI<0x87, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val), - "lock xchg $val, $ptr", - [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>, LOCK; -def XCHG64 : RI<0x87, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val), - "xchg $val, $ptr", []>; +def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val), + "xchg\t$val, $ptr", + [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>; } +// Atomic exchange, and, or, xor +let Constraints = "$val = $dst", Defs = [EFLAGS], + usesCustomDAGSchedInserter = 1 in { +def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), + "#ATOMAND64 PSEUDO!", + [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>; +def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), + "#ATOMOR64 PSEUDO!", + [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>; +def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), + "#ATOMXOR64 PSEUDO!", + [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>; +def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), + "#ATOMNAND64 PSEUDO!", + [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>; +def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val), + "#ATOMMIN64 PSEUDO!", + [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, 
GR64:$val))]>; +def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), + "#ATOMMAX64 PSEUDO!", + [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>; +def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), + "#ATOMUMIN64 PSEUDO!", + [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>; +def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), + "#ATOMUMAX64 PSEUDO!", + [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>; +} //===----------------------------------------------------------------------===// // Non-Instruction Patterns @@ -1146,6 +1419,9 @@ def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)), def : Pat<(i64 (X86Wrapper texternalsym:$dst)), (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>; +// If we have small model and -static mode, it is safe to store global addresses +// directly as immediates. FIXME: This is really a hack, the 'imm' predicate +// should handle this sort of thing. def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst), (MOV64mi32 addr:$dst, tconstpool:$src)>, Requires<[SmallCode, IsStatic]>; @@ -1159,6 +1435,23 @@ def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst), (MOV64mi32 addr:$dst, texternalsym:$src)>, Requires<[SmallCode, IsStatic]>; +// If we have small model and -static mode, it is safe to store global addresses +// directly as immediates. FIXME: This is really a hack, the 'imm' predicate +// should handle this sort of thing. +def : Pat<(store (i64 (X86WrapperRIP tconstpool:$src)), addr:$dst), + (MOV64mi32 addr:$dst, tconstpool:$src)>, + Requires<[SmallCode, IsStatic]>; +def : Pat<(store (i64 (X86WrapperRIP tjumptable:$src)), addr:$dst), + (MOV64mi32 addr:$dst, tjumptable:$src)>, + Requires<[SmallCode, IsStatic]>; +def : Pat<(store (i64 (X86WrapperRIP tglobaladdr:$src)), addr:$dst), + (MOV64mi32 addr:$dst, tglobaladdr:$src)>, + Requires<[SmallCode, IsStatic]>; +def : Pat<(store (i64 (X86WrapperRIP texternalsym:$src)), addr:$dst), + (MOV64mi32 addr:$dst, texternalsym:$src)>, + Requires<[SmallCode, IsStatic]>; + + // Calls // Direct PC relative function call for small code model. 32-bit displacement // sign extended to 64-bit. @@ -1199,51 +1492,221 @@ def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off), def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)), (TEST64rr GR64:$src1, GR64:$src1)>; - - -// Zero-extension -def : Pat<(i64 (zext GR32:$src)), (INSERT_SUBREG tii_impl_val_zero, - GR32:$src, x86_subreg_32bit)>; +// Conditional moves with folded loads with operands swapped and conditions +// inverted. 
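A C sketch of why folding the load forces the swap and inversion (the names
are illustrative): cmovcc only replaces the destination when the condition
holds, so the memory operand must become the value taken on the inverted
condition.

    #include <stdint.h>

    /* (b ? reg : *m) is computed as: start with reg, then conditionally
       load *m when !b, i.e. one cmovae-style conditional load-move. */
    int64_t select_reg_or_mem(int b, int64_t reg, const int64_t *m) {
      int64_t dst = reg;
      if (!b)
        dst = *m;
      return dst;
    }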
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_B, EFLAGS), + (CMOVAE64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_AE, EFLAGS), + (CMOVB64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_E, EFLAGS), + (CMOVNE64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NE, EFLAGS), + (CMOVE64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_BE, EFLAGS), + (CMOVA64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_A, EFLAGS), + (CMOVBE64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_L, EFLAGS), + (CMOVGE64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_GE, EFLAGS), + (CMOVL64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_LE, EFLAGS), + (CMOVG64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_G, EFLAGS), + (CMOVLE64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_P, EFLAGS), + (CMOVNP64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NP, EFLAGS), + (CMOVP64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_S, EFLAGS), + (CMOVNS64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NS, EFLAGS), + (CMOVS64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_O, EFLAGS), + (CMOVNO64rm GR64:$src2, addr:$src1)>; +def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS), + (CMOVO64rm GR64:$src2, addr:$src1)>; // zextload bool -> zextload byte def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>; -def : Pat<(zextloadi64i32 addr:$src), (INSERT_SUBREG tii_impl_val_zero, - (MOV32rm addr:$src), x86_subreg_32bit)>; - // extload +// When extloading from 16-bit and smaller memory locations into 64-bit registers, +// use zero-extending loads so that the entire 64-bit register is defined, avoiding +// partial-register updates. def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>; def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>; def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>; -def : Pat<(extloadi64i32 addr:$src), (INSERT_SUBREG tii_impl_val_undef, - (MOV32rm addr:$src), x86_subreg_32bit)>; - -// anyext -> zext -def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>; -def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16:$src)>; -def : Pat<(i64 (anyext GR32:$src)), (INSERT_SUBREG tii_impl_val_undef, - GR32:$src, x86_subreg_32bit)>; - -def : Pat<(i64 (anyext (loadi8 addr:$src))), (MOVZX64rm8 addr:$src)>; -def : Pat<(i64 (anyext (loadi16 addr:$src))), (MOVZX64rm16 addr:$src)>; -def : Pat<(i64 (anyext (loadi32 addr:$src))), (INSERT_SUBREG tii_impl_val_undef, - (MOV32rm addr:$src), - x86_subreg_32bit)>; +// For other extloads, use subregs, since the high contents of the register are +// defined after an extload. 
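For illustration, a C sketch of the partial-register concern (hypothetical
helpers; the comments show typical lowerings): a narrow movb/movw load merges
with, and therefore depends on, the register's old contents, while
movzbl/movzwl and plain movl define the whole register.

    #include <stdint.h>

    uint64_t load_u8(const uint8_t *p) {
      return *p;   /* movzbl (%rdi), %eax: no merge with the old %rax */
    }
    uint64_t load_u32(const uint32_t *p) {
      return *p;   /* movl (%rdi), %eax: upper 32 bits implicitly zeroed */
    }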
+def : Pat<(extloadi64i32 addr:$src),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (MOV32rm addr:$src),
+                         x86_subreg_32bit)>;
+def : Pat<(extloadi16i1 addr:$src),
+          (INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
+                         x86_subreg_8bit)>,
+      Requires<[In64BitMode]>;
+def : Pat<(extloadi16i8 addr:$src),
+          (INSERT_SUBREG (i16 (IMPLICIT_DEF)), (MOV8rm addr:$src),
+                         x86_subreg_8bit)>,
+      Requires<[In64BitMode]>;
+
+// anyext
+def : Pat<(i64 (anyext GR8:$src)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>;
+def : Pat<(i64 (anyext GR16:$src)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR16:$src, x86_subreg_16bit)>;
+def : Pat<(i64 (anyext GR32:$src)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, x86_subreg_32bit)>;
+def : Pat<(i16 (anyext GR8:$src)),
+          (INSERT_SUBREG (i16 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>,
+      Requires<[In64BitMode]>;
+def : Pat<(i32 (anyext GR8:$src)),
+          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src, x86_subreg_8bit)>,
+      Requires<[In64BitMode]>;
 
 //===----------------------------------------------------------------------===//
 // Some peepholes
 //===----------------------------------------------------------------------===//
-
-// r & (2^32-1) ==> mov32 + implicit zext
-def : Pat<(and GR64:$src, i64immFFFFFFFF),
-          (INSERT_SUBREG tii_impl_val_zero,
-            (MOV32rr (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)),
-            x86_subreg_32bit)>;
+// Odd encoding trick: -128 fits into an 8-bit immediate field while
+// +128 doesn't, so in this special case use a sub instead of an add.
+def : Pat<(add GR64:$src1, 128),
+          (SUB64ri8 GR64:$src1, -128)>;
+def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
+          (SUB64mi8 addr:$dst, -128)>;
+
+// The same trick applies for 32-bit immediate fields in 64-bit
+// instructions.
+def : Pat<(add GR64:$src1, 0x0000000080000000),
+          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
+def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
+          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
+
+// r & (2^32-1) ==> movz
+def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
+          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
+// r & (2^16-1) ==> movz
+def : Pat<(and GR64:$src, 0xffff),
+          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR64:$src, 0xff),
+          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR32:$src1, 0xff),
+          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, x86_subreg_8bit))>,
+      Requires<[In64BitMode]>;
+// r & (2^8-1) ==> movz
+def : Pat<(and GR16:$src1, 0xff),
+          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, x86_subreg_8bit)))>,
+      Requires<[In64BitMode]>;
+
+// sext_inreg patterns
+def : Pat<(sext_inreg GR64:$src, i32),
+          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
+def : Pat<(sext_inreg GR64:$src, i16),
+          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
+def : Pat<(sext_inreg GR64:$src, i8),
+          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit))>;
+def : Pat<(sext_inreg GR32:$src, i8),
+          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit))>,
+      Requires<[In64BitMode]>;
+def : Pat<(sext_inreg GR16:$src, i8),
+          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)))>,
+      Requires<[In64BitMode]>;
+
+// trunc patterns
+def : Pat<(i32 (trunc GR64:$src)),
+          (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)>;
+def : Pat<(i16 (trunc GR64:$src)),
+          (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)>;
+def : Pat<(i8 (trunc GR64:$src)),
+          (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)>;
+def : Pat<(i8 (trunc
GR32:$src)), + (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit)>, + Requires<[In64BitMode]>; +def : Pat<(i8 (trunc GR16:$src)), + (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)>, + Requires<[In64BitMode]>; + +// h-register tricks. +// For now, be conservative on x86-64 and use an h-register extract only if the +// value is immediately zero-extended or stored, which are somewhat common +// cases. This uses a bunch of code to prevent a register requiring a REX prefix +// from being allocated in the same instruction as the h register, as there's +// currently no way to describe this requirement to the register allocator. + +// h-register extract and zero-extend. +def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)), + (SUBREG_TO_REG + (i64 0), + (MOVZX32_NOREXrr8 + (EXTRACT_SUBREG (COPY_TO_REGCLASS GR64:$src, GR64_ABCD), + x86_subreg_8bit_hi)), + x86_subreg_32bit)>; +def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)), + (MOVZX32_NOREXrr8 + (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_ABCD), + x86_subreg_8bit_hi))>, + Requires<[In64BitMode]>; +def : Pat<(srl_su GR16:$src, (i8 8)), + (EXTRACT_SUBREG + (MOVZX32_NOREXrr8 + (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD), + x86_subreg_8bit_hi)), + x86_subreg_16bit)>, + Requires<[In64BitMode]>; +def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))), + (MOVZX32_NOREXrr8 + (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD), + x86_subreg_8bit_hi))>, + Requires<[In64BitMode]>; +def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))), + (SUBREG_TO_REG + (i64 0), + (MOVZX32_NOREXrr8 + (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD), + x86_subreg_8bit_hi)), x86_subreg_32bit)>; +// h-register extract and store. +def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst), + (MOV8mr_NOREX + addr:$dst, + (EXTRACT_SUBREG (COPY_TO_REGCLASS GR64:$src, GR64_ABCD), + x86_subreg_8bit_hi))>; +def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst), + (MOV8mr_NOREX + addr:$dst, + (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_ABCD), + x86_subreg_8bit_hi))>, + Requires<[In64BitMode]>; +def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst), + (MOV8mr_NOREX + addr:$dst, + (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD), + x86_subreg_8bit_hi))>, + Requires<[In64BitMode]>; + // (shl x, 1) ==> (add x, x) def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>; +// (shl x (and y, 63)) ==> (shl x, y) +def : Pat<(shl GR64:$src1, (and CL:$amt, 63)), + (SHL64rCL GR64:$src1)>; +def : Pat<(store (shl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst), + (SHL64mCL addr:$dst)>; + +def : Pat<(srl GR64:$src1, (and CL:$amt, 63)), + (SHR64rCL GR64:$src1)>; +def : Pat<(store (srl (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst), + (SHR64mCL addr:$dst)>; + +def : Pat<(sra GR64:$src1, (and CL:$amt, 63)), + (SAR64rCL GR64:$src1)>; +def : Pat<(store (sra (loadi64 addr:$dst), (and CL:$amt, 63)), addr:$dst), + (SAR64mCL addr:$dst)>; + // (or (x >> c) | (y << (64 - c))) ==> (shrd64 x, y, c) def : Pat<(or (srl GR64:$src1, CL:$amt), (shl GR64:$src2, (sub 64, CL:$amt))), @@ -1253,6 +1716,22 @@ def : Pat<(store (or (srl (loadi64 addr:$dst), CL:$amt), (shl GR64:$src2, (sub 64, CL:$amt))), addr:$dst), (SHRD64mrCL addr:$dst, GR64:$src2)>; +def : Pat<(or (srl GR64:$src1, (i8 (trunc RCX:$amt))), + (shl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))), + (SHRD64rrCL GR64:$src1, GR64:$src2)>; + +def : Pat<(store (or (srl (loadi64 addr:$dst), (i8 (trunc RCX:$amt))), + (shl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))), + 
addr:$dst), + (SHRD64mrCL addr:$dst, GR64:$src2)>; + +def : Pat<(shrd GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)), + (SHRD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>; + +def : Pat<(store (shrd (loadi64 addr:$dst), (i8 imm:$amt1), + GR64:$src2, (i8 imm:$amt2)), addr:$dst), + (SHRD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>; + // (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) def : Pat<(or (shl GR64:$src1, CL:$amt), (srl GR64:$src2, (sub 64, CL:$amt))), @@ -1262,25 +1741,170 @@ def : Pat<(store (or (shl (loadi64 addr:$dst), CL:$amt), (srl GR64:$src2, (sub 64, CL:$amt))), addr:$dst), (SHLD64mrCL addr:$dst, GR64:$src2)>; +def : Pat<(or (shl GR64:$src1, (i8 (trunc RCX:$amt))), + (srl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))), + (SHLD64rrCL GR64:$src1, GR64:$src2)>; + +def : Pat<(store (or (shl (loadi64 addr:$dst), (i8 (trunc RCX:$amt))), + (srl GR64:$src2, (i8 (trunc (sub 64, RCX:$amt))))), + addr:$dst), + (SHLD64mrCL addr:$dst, GR64:$src2)>; + +def : Pat<(shld GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm:$amt2)), + (SHLD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>; + +def : Pat<(store (shld (loadi64 addr:$dst), (i8 imm:$amt1), + GR64:$src2, (i8 imm:$amt2)), addr:$dst), + (SHLD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>; + // X86 specific add which produces a flag. def : Pat<(addc GR64:$src1, GR64:$src2), (ADD64rr GR64:$src1, GR64:$src2)>; def : Pat<(addc GR64:$src1, (load addr:$src2)), (ADD64rm GR64:$src1, addr:$src2)>; -def : Pat<(addc GR64:$src1, i64immSExt32:$src2), - (ADD64ri32 GR64:$src1, imm:$src2)>; def : Pat<(addc GR64:$src1, i64immSExt8:$src2), (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>; +def : Pat<(addc GR64:$src1, i64immSExt32:$src2), + (ADD64ri32 GR64:$src1, imm:$src2)>; def : Pat<(subc GR64:$src1, GR64:$src2), (SUB64rr GR64:$src1, GR64:$src2)>; def : Pat<(subc GR64:$src1, (load addr:$src2)), (SUB64rm GR64:$src1, addr:$src2)>; +def : Pat<(subc GR64:$src1, i64immSExt8:$src2), + (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>; def : Pat<(subc GR64:$src1, imm:$src2), (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>; -def : Pat<(subc GR64:$src1, i64immSExt8:$src2), + +//===----------------------------------------------------------------------===// +// EFLAGS-defining Patterns +//===----------------------------------------------------------------------===// + +// Register-Register Addition with EFLAGS result +def : Pat<(parallel (X86add_flag GR64:$src1, GR64:$src2), + (implicit EFLAGS)), + (ADD64rr GR64:$src1, GR64:$src2)>; + +// Register-Integer Addition with EFLAGS result +def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt8:$src2), + (implicit EFLAGS)), + (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>; +def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt32:$src2), + (implicit EFLAGS)), + (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>; + +// Register-Memory Addition with EFLAGS result +def : Pat<(parallel (X86add_flag GR64:$src1, (loadi64 addr:$src2)), + (implicit EFLAGS)), + (ADD64rm GR64:$src1, addr:$src2)>; + +// Memory-Register Addition with EFLAGS result +def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), GR64:$src2), + addr:$dst), + (implicit EFLAGS)), + (ADD64mr addr:$dst, GR64:$src2)>; +def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt8:$src2), + addr:$dst), + (implicit EFLAGS)), + (ADD64mi8 addr:$dst, i64immSExt8:$src2)>; +def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt32:$src2), + addr:$dst), + (implicit EFLAGS)), + (ADD64mi32 addr:$dst, i64immSExt32:$src2)>; + +// Register-Register Subtraction 
with EFLAGS result
+def : Pat<(parallel (X86sub_flag GR64:$src1, GR64:$src2),
+                    (implicit EFLAGS)),
+          (SUB64rr GR64:$src1, GR64:$src2)>;
+
+// Register-Memory Subtraction with EFLAGS result
+def : Pat<(parallel (X86sub_flag GR64:$src1, (loadi64 addr:$src2)),
+                    (implicit EFLAGS)),
+          (SUB64rm GR64:$src1, addr:$src2)>;
+
+// Register-Integer Subtraction with EFLAGS result
+def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt8:$src2),
+                    (implicit EFLAGS)),
           (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt32:$src2),
+                    (implicit EFLAGS)),
+          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
+// Memory-Register Subtraction with EFLAGS result
+def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), GR64:$src2),
+                           addr:$dst),
+                    (implicit EFLAGS)),
+          (SUB64mr addr:$dst, GR64:$src2)>;
+
+// Memory-Integer Subtraction with EFLAGS result
+def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt8:$src2),
+                           addr:$dst),
+                    (implicit EFLAGS)),
+          (SUB64mi8 addr:$dst, i64immSExt8:$src2)>;
+def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt32:$src2),
+                           addr:$dst),
+                    (implicit EFLAGS)),
+          (SUB64mi32 addr:$dst, i64immSExt32:$src2)>;
+
+// Register-Register Signed Integer Multiplication with EFLAGS result
+def : Pat<(parallel (X86smul_flag GR64:$src1, GR64:$src2),
+                    (implicit EFLAGS)),
+          (IMUL64rr GR64:$src1, GR64:$src2)>;
+
+// Register-Memory Signed Integer Multiplication with EFLAGS result
+def : Pat<(parallel (X86smul_flag GR64:$src1, (loadi64 addr:$src2)),
+                    (implicit EFLAGS)),
+          (IMUL64rm GR64:$src1, addr:$src2)>;
+
+// Register-Integer Signed Integer Multiplication with EFLAGS result
+def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt8:$src2),
+                    (implicit EFLAGS)),
+          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt32:$src2),
+                    (implicit EFLAGS)),
+          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// Memory-Integer Signed Integer Multiplication with EFLAGS result
+def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt8:$src2),
+                    (implicit EFLAGS)),
+          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
+def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt32:$src2),
+                    (implicit EFLAGS)),
+          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
+
+// INC and DEC with EFLAGS result. Note that these do not set CF.
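+//
+// As an illustration of the CF behavior (an assembly sketch only, not the
+// output of any single pattern below):
+//
+//   addq %rsi, %rax    // sets CF on unsigned carry-out
+//   incq %rcx          // updates OF/SF/ZF/AF/PF; CF is left untouched
+//   adcq %rdi, %rdx    // still consumes the CF produced by the addq
+//
+// Rewriting an add-by-one as INC is therefore only sound when no user of
+// EFLAGS reads CF.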
+def : Pat<(parallel (X86inc_flag GR16:$src), (implicit EFLAGS)),
+          (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (store (i16 (X86inc_flag (loadi16 addr:$dst))), addr:$dst),
+                    (implicit EFLAGS)),
+          (INC64_16m addr:$dst)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (X86dec_flag GR16:$src), (implicit EFLAGS)),
+          (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (store (i16 (X86dec_flag (loadi16 addr:$dst))), addr:$dst),
+                    (implicit EFLAGS)),
+          (DEC64_16m addr:$dst)>, Requires<[In64BitMode]>;
+
+def : Pat<(parallel (X86inc_flag GR32:$src), (implicit EFLAGS)),
+          (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (store (i32 (X86inc_flag (loadi32 addr:$dst))), addr:$dst),
+                    (implicit EFLAGS)),
+          (INC64_32m addr:$dst)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (X86dec_flag GR32:$src), (implicit EFLAGS)),
+          (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (store (i32 (X86dec_flag (loadi32 addr:$dst))), addr:$dst),
+                    (implicit EFLAGS)),
+          (DEC64_32m addr:$dst)>, Requires<[In64BitMode]>;
+
+def : Pat<(parallel (X86inc_flag GR64:$src), (implicit EFLAGS)),
+          (INC64r GR64:$src)>;
+def : Pat<(parallel (store (i64 (X86inc_flag (loadi64 addr:$dst))), addr:$dst),
+                    (implicit EFLAGS)),
+          (INC64m addr:$dst)>;
+def : Pat<(parallel (X86dec_flag GR64:$src), (implicit EFLAGS)),
+          (DEC64r GR64:$src)>;
+def : Pat<(parallel (store (i64 (X86dec_flag (loadi64 addr:$dst))), addr:$dst),
+                    (implicit EFLAGS)),
+          (DEC64m addr:$dst)>;
 
 //===----------------------------------------------------------------------===//
 // X86-64 SSE Instructions
@@ -1301,14 +1925,14 @@ def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                         "mov{d|q}\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (bitconvert GR64:$src))]>;
 def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
-                       "mov{d|q}\t{$src, $dst|$dst, $src}",
+                       "movq\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                         "mov{d|q}\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst, (bitconvert FR64:$src))]>;
 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
-                       "mov{d|q}\t{$src, $dst|$dst, $src}",
+                       "movq\t{$src, $dst|$dst, $src}",
                         [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
 
 //===----------------------------------------------------------------------===//
@@ -1317,7 +1941,7 @@ def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
-  def rr : SS4AIi8