Add remaining 64-bit atomic patterns for x86-64.
author    Dale Johannesen <dalej@apple.com>
          Wed, 20 Aug 2008 00:48:50 +0000 (00:48 +0000)
committer Dale Johannesen <dalej@apple.com>
          Wed, 20 Aug 2008 00:48:50 +0000 (00:48 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55029 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/X86/X86ISelLowering.cpp
lib/Target/X86/X86Instr64bit.td

index 2fb9a2e651e51e3a9038ef6f8bbe261c13dd7f10..c43ce33c8c6ba69e2e9d02865945e33e9e93c426 100644
@@ -6568,6 +6568,38 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                X86::NOT8r, X86::AL,
                                                X86::GR8RegisterClass, true);
   // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
+  case X86::ATOMAND64:
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
+                                               X86::AND64ri32, X86::MOV64rm, 
+                                               X86::LCMPXCHG64, X86::MOV64rr,
+                                               X86::NOT64r, X86::RAX,
+                                               X86::GR64RegisterClass);
+  case X86::ATOMOR64:
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr, 
+                                               X86::OR64ri32, X86::MOV64rm, 
+                                               X86::LCMPXCHG64, X86::MOV64rr,
+                                               X86::NOT64r, X86::RAX,
+                                               X86::GR64RegisterClass);
+  case X86::ATOMXOR64:
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
+                                               X86::XOR64ri32, X86::MOV64rm, 
+                                               X86::LCMPXCHG64, X86::MOV64rr,
+                                               X86::NOT64r, X86::RAX,
+                                               X86::GR64RegisterClass);
+  case X86::ATOMNAND64:
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
+                                               X86::AND64ri32, X86::MOV64rm,
+                                               X86::LCMPXCHG64, X86::MOV64rr,
+                                               X86::NOT64r, X86::RAX,
+                                               X86::GR64RegisterClass, true);
+  case X86::ATOMMIN64:
+    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr);
+  case X86::ATOMMAX64:
+    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr);
+  case X86::ATOMUMIN64:
+    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr);
+  case X86::ATOMUMAX64:
+    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr);
   }
 }
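
For reference, each bitwise case above expands into a compare-and-swap
loop around LCMPXCHG64. A minimal C++ sketch of the semantics (the
function name atomic_and64 is illustrative, and __sync_bool_compare_and_swap
is the GCC builtin standing in for the LOCK CMPXCHG the inserter emits;
the inserter builds MachineInstrs, it does not call anything like this):

    // Semantic sketch only: what EmitAtomicBitwiseWithCustomInserter
    // generates for ATOMAND64, expressed as ordinary C++.
    long long atomic_and64(long long *ptr, long long val) {
      long long oldval, newval;
      do {
        oldval = *ptr;           // MOV64rm: load the current value
        newval = oldval & val;   // AND64rr / AND64ri32: apply the operation
        // for ATOMNAND64 (the trailing 'true' argument): newval = ~newval;  // NOT64r
      } while (!__sync_bool_compare_and_swap(ptr, oldval, newval)); // LCMPXCHG64, old value via RAX
      return oldval;             // previous memory value, copied to $dst
    }

The MIN/MAX cases use the same loop shape, but compute newval with a
compare followed by the given conditional move (e.g. CMOVL64rr for
signed min) instead of a bitwise operation.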
 
index e49a548b766afdee44da71398f1e92cd749f59bd..da981c361a354ca809d8f2bd63b3659ee134a6d4 100644
@@ -1148,6 +1148,34 @@ def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val)
                   [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
 }
 
+// Atomic and, or, xor, nand, min, max, umin, umax
+let Constraints = "$val = $dst", Defs = [EFLAGS],
+                  usesCustomDAGSchedInserter = 1 in {
+def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMAND64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_and addr:$ptr, GR64:$val))]>;
+def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMOR64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_or addr:$ptr, GR64:$val))]>;
+def ATOMXOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMXOR64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_xor addr:$ptr, GR64:$val))]>;
+def ATOMNAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMNAND64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_nand addr:$ptr, GR64:$val))]>;
+def ATOMMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMMIN64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_min addr:$ptr, GR64:$val))]>;
+def ATOMMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMMAX64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_max addr:$ptr, GR64:$val))]>;
+def ATOMUMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMUMIN64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_umin addr:$ptr, GR64:$val))]>;
+def ATOMUMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
+               "#ATOMUMAX64 PSEUDO!",
+               [(set GR64:$dst, (atomic_load_umax addr:$ptr, GR64:$val))]>;
+}
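
With these pseudo-instructions defined, the 64-bit atomic
read-modify-write nodes (atomic_load_and, atomic_load_min, etc.) have
matching patterns on x86-64 and get expanded by the custom inserter
shown above. As a hedged example, C++ source like the following should
now select through these pseudos when compiled with a GCC-compatible
front end (assuming the __sync builtins lower to the corresponding
atomic SelectionDAG nodes):

    // Illustrative only: each builtin is assumed to reach the matching
    // ATOM*64 pseudo, which is then expanded into a LOCK CMPXCHG loop.
    long long fetch_and(long long *p, long long v) {
      return __sync_fetch_and_and(p, v);    // atomic_load_and  -> ATOMAND64
    }
    long long fetch_xor(long long *p, long long v) {
      return __sync_fetch_and_xor(p, v);    // atomic_load_xor  -> ATOMXOR64
    }
    long long fetch_nand(long long *p, long long v) {
      return __sync_fetch_and_nand(p, v);   // atomic_load_nand -> ATOMNAND64
    }

The min/max variants have no direct __sync builtin; they are reached
through the target-independent atomic min/max nodes instead.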
 
 //===----------------------------------------------------------------------===//
 // Non-Instruction Patterns