author    Evan Cheng <evan.cheng@apple.com>
          Tue, 11 Apr 2006 22:28:25 +0000 (22:28 +0000)
committer Evan Cheng <evan.cheng@apple.com>
          Tue, 11 Apr 2006 22:28:25 +0000 (22:28 +0000)

Added __builtin_ia32_storelv4si, __builtin_ia32_movqv4si,
__builtin_ia32_loadlv4si, __builtin_ia32_loaddqu, __builtin_ia32_storedqu.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27599 91177308-0d34-0410-b5e6-96231b3b80d8

include/llvm/IntrinsicsX86.td
lib/Target/X86/X86InstrSSE.td

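For context (not part of the commit): these builtin names are what a C front end such as llvm-gcc resolves to the new intrinsics through the GCCBuiltin mappings below. A minimal sketch of the two unaligned-access builtins, assuming GCC's vector-extension typedef and the (char const *) / (char *, __v16qi) signatures that <emmintrin.h> wraps as _mm_loadu_si128 / _mm_storeu_si128; the storelv4si/movqv4si forms are omitted since their front-end signatures were less uniform:

/* Illustrative C usage, assuming GCC-style builtin signatures. */
typedef char __v16qi __attribute__((vector_size(16)));

__v16qi load_unaligned(const char *p) {
  /* lowers to int_x86_sse2_loadu_dq -> MOVDQUrm */
  return __builtin_ia32_loaddqu(p);
}

void store_unaligned(char *p, __v16qi v) {
  /* lowers to int_x86_sse2_storeu_dq -> MOVDQUmr */
  __builtin_ia32_storedqu(p, v);
}
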
diff --git a/include/llvm/IntrinsicsX86.td b/include/llvm/IntrinsicsX86.td
index 48f79d25a242ce30debc0b724cc874f085898dfd..77f787c63bb858ef70dc2fa36b69326d483501cd 100644
--- a/include/llvm/IntrinsicsX86.td
+++ b/include/llvm/IntrinsicsX86.td
@@ -269,6 +269,8 @@ let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
   def int_x86_sse2_loadu_pd : GCCBuiltin<"__builtin_ia32_loadupd">,
               Intrinsic<[llvm_v2f64_ty, llvm_ptr_ty], [IntrReadMem]>;
+  def int_x86_sse2_loadu_dq : GCCBuiltin<"__builtin_ia32_loaddqu">,
+              Intrinsic<[llvm_v16i8_ty, llvm_ptr_ty], [IntrReadMem]>;
 }
 
 // SIMD store ops
@@ -276,6 +278,12 @@ let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
   def int_x86_sse2_storeu_pd : GCCBuiltin<"__builtin_ia32_storeupd">,
               Intrinsic<[llvm_void_ty, llvm_ptr_ty,
                          llvm_v2f64_ty], [IntrWriteMem]>;
+  def int_x86_sse2_storeu_dq : GCCBuiltin<"__builtin_ia32_storedqu">,
+              Intrinsic<[llvm_void_ty, llvm_ptr_ty,
+                         llvm_v16i8_ty], [IntrWriteMem]>;
+  def int_x86_sse2_storel_dq : GCCBuiltin<"__builtin_ia32_storelv4si">,
+              Intrinsic<[llvm_void_ty, llvm_ptr_ty,
+                         llvm_v4i32_ty], [IntrWriteMem]>;
 }
 
 // Cacheability support ops
@@ -302,6 +310,9 @@ let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
   def int_x86_sse2_packuswb_128 : GCCBuiltin<"__builtin_ia32_packuswb128">,
               Intrinsic<[llvm_v8i16_ty, llvm_v8i16_ty,
                          llvm_v8i16_ty], [IntrNoMem]>;
+  // FIXME: Temporary workaround since 2-wide shuffle is broken.
+  def int_x86_sse2_movl_dq : GCCBuiltin<"__builtin_ia32_movqv4si">,
+              Intrinsic<[llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
   def int_x86_sse2_movmskpd : GCCBuiltin<"__builtin_ia32_movmskpd">,
               Intrinsic<[llvm_int_ty, llvm_v2f64_ty], [IntrNoMem]>;
   def int_x86_sse2_pmovmskb_128 : GCCBuiltin<"__builtin_ia32_pmovmskb128">,
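The IntrReadMem / IntrWriteMem properties above model nothing more than a 16-byte memory access with no alignment requirement. A plain-C sketch of that contract, with memcpy standing in for the unaligned access (the helper names are illustrative, not from the commit):

#include <string.h>

typedef char v16i8 __attribute__((vector_size(16)));

/* like int_x86_sse2_loadu_dq: 16-byte load, any alignment */
static v16i8 loadu_dq(const void *p) {
  v16i8 v;
  memcpy(&v, p, sizeof v);
  return v;
}

/* like int_x86_sse2_storeu_dq: 16-byte store, any alignment */
static void storeu_dq(void *p, v16i8 v) {
  memcpy(p, &v, sizeof v);
}
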
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 10f4d56276639b0a23eccf7e32d1a4cd95217c23..65e8aaeffa569f2bdb4cf70971b19610f9adc515 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -724,6 +724,14 @@ def MOVUPDrm : PDI<0x10, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
 def MOVUPDmr : PDI<0x11, MRMDestMem, (ops f128mem:$dst, VR128:$src),
                    "movupd {$src, $dst|$dst, $src}",
                    [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
+def MOVDQUrm :   I<0x6F, MRMSrcMem, (ops VR128:$dst, i128mem:$src),
+                   "movdqu {$src, $dst|$dst, $src}",
+                   [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
+                 XS, Requires<[HasSSE2]>;
+def MOVDQUmr :   I<0x7F, MRMDestMem, (ops i128mem:$dst, VR128:$src),
+                   "movdqu {$src, $dst|$dst, $src}",
+                   [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
+                 XS, Requires<[HasSSE2]>;
 
 let isTwoAddress = 1 in {
 def MOVLPSrm : PSI<0x12, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
@@ -1657,6 +1665,16 @@ def MOVLPDrr : SDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
                              MOVS_shuffle_mask)))]>;
 }
 
+// Store / copy lower 64-bits of a XMM register.
+def MOVLQ128mr : PDI<0xD6, MRMDestMem, (ops i64mem:$dst, VR128:$src),
+                     "movq {$src, $dst|$dst, $src}",
+                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
+
+// FIXME: Temporary workaround since 2-wide shuffle is broken.
+def MOVLQ128rr : PDI<0xD6, MRMSrcReg, (ops VR128:$dst, VR128:$src),
+                     "movq {$src, $dst|$dst, $src}",
+                     [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>;
+
 // Move to lower bits of a VR128 and zeroing upper bits.
 // Loading from memory automatically zeroing upper bits.
 def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (ops VR128:$dst, f32mem:$src),
@@ -1672,9 +1690,10 @@ def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (ops VR128:$dst, i32mem:$src),
                        [(set VR128:$dst,
                          (v4i32 (X86zexts2vec (loadi32 addr:$src))))]>;
 def MOVZQI2PQIrm : PDI<0x7E, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
-                       "movd {$src, $dst|$dst, $src}",
+                       "movq {$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
-                         (v2i64 (X86zexts2vec (loadi64 addr:$src))))]>;
+                         (bc_v2i64 (v2f64 (X86zexts2vec
+                                           (loadf64 addr:$src)))))]>;
 
 //===----------------------------------------------------------------------===//
 // Non-Instruction Patterns
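
To make the MOVQ semantics concrete: the register-to-register form added as the FIXME workaround (int_x86_sse2_movl_dq / MOVLQ128rr) copies the low quadword and zeroes the high one, while the store form (int_x86_sse2_storel_dq / MOVLQ128mr) writes only the low 64 bits to memory. A plain-C sketch of those semantics; the struct and helper names are illustrative:

#include <stdint.h>
#include <string.h>

typedef struct { uint64_t lo, hi; } xmm128;

/* MOVQ xmm, xmm: low quadword copied, high quadword cleared */
static xmm128 movl_dq(xmm128 v) {
  xmm128 r = { v.lo, 0 };
  return r;
}

/* MOVQ m64, xmm: only the low 64 bits reach memory */
static void storel_dq(void *p, xmm128 v) {
  memcpy(p, &v.lo, sizeof v.lo);
}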