// Pattern fragments.
//
+def i64immSExt8 : PatLeaf<(i64 imm), [{
+ // i64immSExt8 predicate - True if the 64-bit immediate fits in an 8-bit
+ // sign extended field.
+ return (int64_t)N->getZExtValue() == (int8_t)N->getZExtValue();
+}]>;
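+// For example, 127 and -128 satisfy i64immSExt8, but 128 does not: its
+// low byte sign-extends back to -128.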
+
def i64immSExt32 : PatLeaf<(i64 imm), [{
// i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
// sign extended field.
  return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
}]>;
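+// Likewise, i64immSExt32 accepts exactly the immediates in
+// [-2147483648, 2147483647], the values encodable in a sign-extended
+// imm32 field.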
-def i64immSExt8 : PatLeaf<(i64 imm), [{
- // i64immSExt8 predicate - True if the 64-bit immediate fits in a 8-bit
- // sign extended field.
- return (int64_t)N->getZExtValue() == (int8_t)N->getZExtValue();
-}]>;
-
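+
+// Sign-extending loads from i8/i16/i32 memory into an i64 result.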
def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset, variable_ops),
+def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset,
+ variable_ops),
"#TC_RETURN $dst $offset",
[]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset, variable_ops),
+def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset,
+ variable_ops),
"#TC_RETURN $dst $offset",
[]>;
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
- def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst # TAILCALL",
- []>;
+ def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst),
+ "jmp{q}\t{*}$dst # TAILCALL",
+ []>;
// Branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
let isTwoAddress = 1 in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
-def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
- (implicit EFLAGS)]>;
+// Register-Register Addition
+def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
+ (implicit EFLAGS)]>;
+// Register-Integer Addition
+def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)),
(implicit EFLAGS)]>;
-def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)),
- (implicit EFLAGS)]>;
} // isConvertibleToThreeAddress
-def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+// Register-Memory Addition
+def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
} // isTwoAddress
+// Memory-Register Addition
def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(store (add (load addr:$dst), GR64:$src2), addr:$dst),
(implicit EFLAGS)]>;
-def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
- "add{q}\t{$src2, $dst|$dst, $src2}",
- [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
- (implicit EFLAGS)]>;
def ADD64mi8 : RIi8<0x83, MRM0m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
[(store (add (load addr:$dst), i64immSExt8:$src2), addr:$dst),
(implicit EFLAGS)]>;
+def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
+ "add{q}\t{$src2, $dst|$dst, $src2}",
+ [(store (add (load addr:$dst), i64immSExt32:$src2), addr:$dst),
+ (implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
"adc{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>;
-def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
- "adc{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"adc{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>;
+def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+ "adc{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
} // isTwoAddress
def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"adc{q}\t{$src2, $dst|$dst, $src2}",
[(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>;
-def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
- "adc{q}\t{$src2, $dst|$dst, $src2}",
- [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"adc{q}\t{$src2, $dst|$dst, $src2}",
[(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
+def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
+ "adc{q}\t{$src2, $dst|$dst, $src2}",
+                      [(store (adde (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
let isTwoAddress = 1 in {
+// Register-Register Subtraction
def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
+ [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)),
+ (implicit EFLAGS)]>;
+// Register-Memory Subtraction
def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;
+ [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
-def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
- "sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
-def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+// Register-Integer Subtraction
+def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
+ (ins GR64:$src1, i64i8imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
+ [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
+def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
+ (ins GR64:$src1, i64i32imm:$src2),
+ "sub{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
} // isTwoAddress
+// Memory-Register Subtraction
def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
-def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
- "sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
+ [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
+ (implicit EFLAGS)]>;
+
+// Memory-Integer Subtraction
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
+ [(store (sub (load addr:$dst), i64immSExt8:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
+def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
+ "sub{q}\t{$src2, $dst|$dst, $src2}",
+ [(store (sub (load addr:$dst), i64immSExt32:$src2),
+ addr:$dst),
+ (implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
"sbb{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>;
-def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
- "sbb{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>;
+def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+ "sbb{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
} // isTwoAddress
def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}",
[(store (sube (load addr:$dst), GR64:$src2), addr:$dst)]>;
-def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
- "sbb{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}",
[(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
+def SBB64mi32 : RIi32<0x81, MRM3m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
+ "sbb{q}\t{$src2, $dst|$dst, $src2}",
+ [(store (sube (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
} // Uses = [EFLAGS]
} // Defs = [EFLAGS]
let Defs = [EFLAGS] in {
let isTwoAddress = 1 in {
let isCommutable = 1 in
-def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+// Register-Register Signed Integer Multiplication
+def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
+ (ins GR64:$src1, GR64:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;
+ [(set GR64:$dst, (mul GR64:$src1, GR64:$src2)),
+ (implicit EFLAGS)]>, TB;
-def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+// Register-Memory Signed Integer Multiplication
+def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
+ (ins GR64:$src1, i64mem:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
+ [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>, TB;
} // isTwoAddress
// Surprisingly enough, these are not two-address instructions!
-def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
- (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
- "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
+
+// Register-Integer Signed Integer Multiplication
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
(outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
-def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
- (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
+ [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
+def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
+ (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
+ [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
+
+// Memory-Integer Signed Integer Multiplication
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
(outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
+ [(set GR64:$dst, (mul (load addr:$src1),
+ i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
+def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
+ (outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
+ "imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR64:$dst, (mul (load addr:$src1),
+ i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Unsigned division / remainder
// Logical Instructions...
//
-let isTwoAddress = 1 in
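+// Bumping AddedComplexity is presumably intended to favor selecting
+// (not x) over patterns that would first materialize an all-ones
+// immediate.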
+let isTwoAddress = 1, AddedComplexity = 15 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
[(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
(outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"and{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
-def AND64ri32 : RIi32<0x81, MRM4r,
- (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
- "and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
(outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"and{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
+def AND64ri32 : RIi32<0x81, MRM4r,
+ (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+ "and{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
} // isTwoAddress
def AND64mr : RI<0x21, MRMDestMem,
(outs), (ins i64mem:$dst, GR64:$src),
"and{q}\t{$src, $dst|$dst, $src}",
[(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
-def AND64mi32 : RIi32<0x81, MRM4m,
- (outs), (ins i64mem:$dst, i64i32imm:$src),
- "and{q}\t{$src, $dst|$dst, $src}",
- [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
(outs), (ins i64mem:$dst, i64i8imm :$src),
"and{q}\t{$src, $dst|$dst, $src}",
[(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
+def AND64mi32 : RIi32<0x81, MRM4m,
+ (outs), (ins i64mem:$dst, i64i32imm:$src),
+ "and{q}\t{$src, $dst|$dst, $src}",
+ [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
-def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
- "or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
+def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
+ "or{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
} // isTwoAddress
def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"or{q}\t{$src, $dst|$dst, $src}",
[(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
-def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
- "or{q}\t{$src, $dst|$dst, $src}",
- [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
"or{q}\t{$src, $dst|$dst, $src}",
[(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
+def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
+ "or{q}\t{$src, $dst|$dst, $src}",
+ [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
+def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
+ "xor{q}\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
-def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
- "xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
} // isTwoAddress
def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"xor{q}\t{$src, $dst|$dst, $src}",
[(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
-def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
- "xor{q}\t{$src, $dst|$dst, $src}",
- [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
"xor{q}\t{$src, $dst|$dst, $src}",
[(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
+def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
+ "xor{q}\t{$src, $dst|$dst, $src}",
+ [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
"cmp{q}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR64:$src1, (loadi64 addr:$src2)),
(implicit EFLAGS)]>;
+def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
+ "cmp{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp GR64:$src1, i64immSExt8:$src2),
+ (implicit EFLAGS)]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
"cmp{q}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR64:$src1, i64immSExt32:$src2),
(implicit EFLAGS)]>;
+def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
+ "cmp{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
+ (implicit EFLAGS)]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
(ins i64mem:$src1, i64i32imm:$src2),
"cmp{q}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
(implicit EFLAGS)]>;
-def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
- "cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
- (implicit EFLAGS)]>;
-def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
- "cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR64:$src1, i64immSExt8:$src2),
- (implicit EFLAGS)]>;
+} // Defs = [EFLAGS]
+
+// Bit tests.
+// TODO: BTC, BTR, and BTS
+let Defs = [EFLAGS] in {
+def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
+ "bt{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86bt GR64:$src1, GR64:$src2),
+ (implicit EFLAGS)]>, TB;
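+// In the register+register form the bit offset is taken modulo 64, so
+// "bt %rcx, %rax" sets CF to bit (%rcx & 63) of %rax.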
+
+// Unlike with the register+register form, the memory+register form of the
+// bt instruction does not ignore the high bits of the index. From ISel's
+// perspective, this is pretty bizarre. Disable these instructions for now.
+//def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
+// "bt{q}\t{$src2, $src1|$src1, $src2}",
+// [(X86bt (loadi64 addr:$src1), GR64:$src2),
+// (implicit EFLAGS)]>, TB;
+
+def BT64ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
+ "bt{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86bt GR64:$src1, i64immSExt8:$src2),
+ (implicit EFLAGS)]>, TB;
+// Note that these instructions don't need FastBTMem because that
+// only applies when the other operand is in a register. When it's
+// an immediate, bt is still fast.
+def BT64mi8 : Ii8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
+ "bt{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86bt (loadi64 addr:$src1), i64immSExt8:$src2),
+ (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]
// Conditional moves
"cmovnp\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
X86_COND_NP, EFLAGS))]>, TB;
+def CMOVO64rr : RI<0x40, MRMSrcReg, // if overflow, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovo\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
+ X86_COND_O, EFLAGS))]>, TB;
+def CMOVNO64rr : RI<0x41, MRMSrcReg, // if !overflow, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovno\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
+ X86_COND_NO, EFLAGS))]>, TB;
} // isCommutable = 1
def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
"cmovnp\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
X86_COND_NP, EFLAGS))]>, TB;
+def CMOVO64rm : RI<0x40, MRMSrcMem, // if overflow, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovo\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_O, EFLAGS))]>, TB;
+def CMOVNO64rm : RI<0x41, MRMSrcMem, // if !overflow, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovno\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_NO, EFLAGS))]>, TB;
} // isTwoAddress
//===----------------------------------------------------------------------===//
[(set GR64:$dst, 0)]>;
// Materialize i64 constant where top 32-bits are zero.
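+// Writing a 32-bit register implicitly zeroes the upper 32 bits of the
+// full 64-bit register, so a 5-byte "movl" can materialize such constants
+// where a 10-byte movabs would otherwise be needed.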
-let AddedComplexity = 1, isReMaterializable = 1 in
+let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
"mov{l}\t{$src, ${dst:subreg32}|${dst:subreg32}, $src}",
[(set GR64:$dst, i64immZExt32:$src)]>;
".byte\t0x66; leaq\t${sym:mem}(%rip), $dst; .word\t0x6666; rex64",
[(set GR64:$dst, (X86tlsaddr tglobaltlsaddr:$sym))]>;
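+
+// The raised AddedComplexity makes this pattern win over the plain
+// MOV64rm pattern for loads matched by the gsload fragment.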
+let AddedComplexity = 5 in
+def MOV64GSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+ "movq\t%gs:$src, $dst",
+ [(set GR64:$dst, (gsload addr:$src))]>, SegGS;
+
//===----------------------------------------------------------------------===//
// Atomic Instructions
//===----------------------------------------------------------------------===//
def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
(TEST64rr GR64:$src1, GR64:$src1)>;
-
+// Conditional moves with folded loads, with operands swapped and conditions
+// inverted.
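+// For example, (X86cmov (load m), r, COND_B) selects r when "below" holds
+// and the loaded value otherwise, which is exactly (CMOVAE64rm r, m) with
+// the operands commuted.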
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_B, EFLAGS),
+ (CMOVAE64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_AE, EFLAGS),
+ (CMOVB64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_E, EFLAGS),
+ (CMOVNE64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NE, EFLAGS),
+ (CMOVE64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_BE, EFLAGS),
+ (CMOVA64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_A, EFLAGS),
+ (CMOVBE64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_L, EFLAGS),
+ (CMOVGE64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_GE, EFLAGS),
+ (CMOVL64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_LE, EFLAGS),
+ (CMOVG64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_G, EFLAGS),
+ (CMOVLE64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_P, EFLAGS),
+ (CMOVNP64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NP, EFLAGS),
+ (CMOVP64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_S, EFLAGS),
+ (CMOVNS64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NS, EFLAGS),
+ (CMOVS64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_O, EFLAGS),
+ (CMOVNO64rm GR64:$src2, addr:$src1)>;
+def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS),
+ (CMOVO64rm GR64:$src2, addr:$src1)>;
// Zero-extension
def : Pat<(i64 (zext GR32:$src)),
(ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
(ADD64rm GR64:$src1, addr:$src2)>;
-def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
- (ADD64ri32 GR64:$src1, imm:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
(ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
+ (ADD64ri32 GR64:$src1, imm:$src2)>;
def : Pat<(subc GR64:$src1, GR64:$src2),
(SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
(SUB64rm GR64:$src1, addr:$src2)>;
+def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
+ (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(subc GR64:$src1, imm:$src2),
(SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
-def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
+
+//===----------------------------------------------------------------------===//
+// Overflow Patterns
+//===----------------------------------------------------------------------===//
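+// The arithmetic instructions above already set OF and CF, so the *_ovf
+// nodes can be selected as the ordinary add/sub/imul forms; the
+// (implicit EFLAGS) in each pattern exposes the flag result to the DAG.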
+
+// Register-Register Addition with Overflow
+def : Pat<(parallel (X86add_ovf GR64:$src1, GR64:$src2),
+ (implicit EFLAGS)),
+ (ADD64rr GR64:$src1, GR64:$src2)>;
+
+// Register-Integer Addition with Overflow
+def : Pat<(parallel (X86add_ovf GR64:$src1, i64immSExt8:$src2),
+ (implicit EFLAGS)),
+ (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(parallel (X86add_ovf GR64:$src1, i64immSExt32:$src2),
+ (implicit EFLAGS)),
+ (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// Register-Memory Addition with Overflow
+def : Pat<(parallel (X86add_ovf GR64:$src1, (load addr:$src2)),
+ (implicit EFLAGS)),
+ (ADD64rm GR64:$src1, addr:$src2)>;
+
+// Memory-Register Addition with Overflow
+def : Pat<(parallel (store (X86add_ovf (load addr:$dst), GR64:$src2),
+ addr:$dst),
+ (implicit EFLAGS)),
+ (ADD64mr addr:$dst, GR64:$src2)>;
+def : Pat<(parallel (store (X86add_ovf (load addr:$dst), i64immSExt8:$src2),
+ addr:$dst),
+ (implicit EFLAGS)),
+ (ADD64mi8 addr:$dst, i64immSExt8:$src2)>;
+def : Pat<(parallel (store (X86add_ovf (load addr:$dst), i64immSExt32:$src2),
+ addr:$dst),
+ (implicit EFLAGS)),
+ (ADD64mi32 addr:$dst, i64immSExt32:$src2)>;
+
+// Register-Register Subtraction with Overflow
+def : Pat<(parallel (X86sub_ovf GR64:$src1, GR64:$src2),
+ (implicit EFLAGS)),
+ (SUB64rr GR64:$src1, GR64:$src2)>;
+
+// Register-Memory Subtraction with Overflow
+def : Pat<(parallel (X86sub_ovf GR64:$src1, (load addr:$src2)),
+ (implicit EFLAGS)),
+ (SUB64rm GR64:$src1, addr:$src2)>;
+
+// Register-Integer Subtraction with Overflow
+def : Pat<(parallel (X86sub_ovf GR64:$src1, i64immSExt8:$src2),
+ (implicit EFLAGS)),
(SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(parallel (X86sub_ovf GR64:$src1, i64immSExt32:$src2),
+ (implicit EFLAGS)),
+ (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// Memory-Register Subtraction with Overflow
+def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), GR64:$src2),
+ addr:$dst),
+ (implicit EFLAGS)),
+ (SUB64mr addr:$dst, GR64:$src2)>;
+
+// Memory-Integer Subtraction with Overflow
+def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), i64immSExt8:$src2),
+ addr:$dst),
+ (implicit EFLAGS)),
+ (SUB64mi8 addr:$dst, i64immSExt8:$src2)>;
+def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), i64immSExt32:$src2),
+ addr:$dst),
+ (implicit EFLAGS)),
+ (SUB64mi32 addr:$dst, i64immSExt32:$src2)>;
+
+// Register-Register Signed Integer Multiplication with Overflow
+def : Pat<(parallel (X86smul_ovf GR64:$src1, GR64:$src2),
+ (implicit EFLAGS)),
+ (IMUL64rr GR64:$src1, GR64:$src2)>;
+
+// Register-Memory Signed Integer Multiplication with Overflow
+def : Pat<(parallel (X86smul_ovf GR64:$src1, (load addr:$src2)),
+ (implicit EFLAGS)),
+ (IMUL64rm GR64:$src1, addr:$src2)>;
+
+// Register-Integer Signed Integer Multiplication with Overflow
+def : Pat<(parallel (X86smul_ovf GR64:$src1, i64immSExt8:$src2),
+ (implicit EFLAGS)),
+ (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
+def : Pat<(parallel (X86smul_ovf GR64:$src1, i64immSExt32:$src2),
+ (implicit EFLAGS)),
+ (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// Memory-Integer Signed Integer Multiplication with Overflow
+def : Pat<(parallel (X86smul_ovf (load addr:$src1), i64immSExt8:$src2),
+ (implicit EFLAGS)),
+ (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
+def : Pat<(parallel (X86smul_ovf (load addr:$src1), i64immSExt32:$src2),
+ (implicit EFLAGS)),
+ (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions