[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
return false;
}]>;
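+// Match an atomic_load_sub only when the in-memory value has the given
+// width, by inspecting the value type on the AtomicSDNode.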
+def atomic_load_sub_8 : PatFrag<(ops node:$ptr, node:$dec),
+ (atomic_load_sub node:$ptr, node:$dec), [{
+ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+ return V->getValueType(0) == MVT::i8;
+ return false;
+}]>;
+def atomic_load_sub_16 : PatFrag<(ops node:$ptr, node:$dec),
+ (atomic_load_sub node:$ptr, node:$dec), [{
+ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+ return V->getValueType(0) == MVT::i16;
+ return false;
+}]>;
+def atomic_load_sub_32 : PatFrag<(ops node:$ptr, node:$dec),
+ (atomic_load_sub node:$ptr, node:$dec), [{
+ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+ return V->getValueType(0) == MVT::i32;
+ return false;
+}]>;
+def atomic_load_sub_64 : PatFrag<(ops node:$ptr, node:$dec),
+ (atomic_load_sub node:$ptr, node:$dec), [{
+ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+ return V->getValueType(0) == MVT::i64;
+ return false;
+}]>;
+
def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_swap node:$ptr, node:$inc), [{
  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
    return V->getValueType(0) == MVT::i8;
  return false;
}]>;
-
// setcc convenience fragments.
def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
(setcc node:$lhs, node:$rhs, SETOEQ)>;
let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
- "lock\n\tcmpxchgq $swap,$ptr",
+ "lock\n\tcmpxchgq\t$swap,$ptr",
[(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
let Constraints = "$val = $dst" in {
let Defs = [EFLAGS] in
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
- "lock\n\txadd $val, $ptr",
+ "lock\n\txadd\t$val, $ptr",
[(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
TB, LOCK;
+
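+// Atomic exchange and subtract.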
+let Defs = [EFLAGS] in
+def LXSUB64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
+ "lock\n\txadd\t$val, $ptr",
+ [(set GR64:$dst, (atomic_load_sub_64 addr:$ptr, GR64:$val))]>,
+ TB, LOCK;
+
def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
- "xchg $val, $ptr",
+ "xchg\t$val, $ptr",
[(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
}
TB, LOCK;
}
+// Atomic exchange and subtract
+let Constraints = "$val = $dst", Defs = [EFLAGS] in {
+def LXSUB32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
+ "lock\n\txadd{l}\t{$val, $ptr|$ptr, $val}",
+ [(set GR32:$dst, (atomic_load_sub_32 addr:$ptr, GR32:$val))]>,
+ TB, LOCK;
+def LXSUB16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
+ "lock\n\txadd{w}\t{$val, $ptr|$ptr, $val}",
+ [(set GR16:$dst, (atomic_load_sub_16 addr:$ptr, GR16:$val))]>,
+ TB, OpSize, LOCK;
+def LXSUB8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
+ "lock\n\txadd{b}\t{$val, $ptr|$ptr, $val}",
+ [(set GR8:$dst, (atomic_load_sub_8 addr:$ptr, GR8:$val))]>,
+ TB, LOCK;
+}
+
// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
usesCustomDAGSchedInserter = 1 in {
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86-64 | grep xadd
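+; Make sure a 64-bit atomic subtract is selected to the lock/xadd pattern
+; added above (LXSUB64).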
+
+@var = external global i64 ; <i64*> [#uses=1]
+
+define i32 @main() nounwind {
+entry:
+ tail call i64 @llvm.atomic.load.sub.i64.p0i64( i64* @var, i64 1 ) ; <i64>:0 [#uses=0]
+ unreachable
+}
+
+declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
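+
+; The narrower widths go through the analogous overloads; an illustrative
+; declaration, following the naming pattern above but not exercised here:
+;   declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind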