ARM NEON two-operand aliases for VSHL(register).
author: Jim Grosbach <grosbach@apple.com>
Thu, 8 Dec 2011 01:12:35 +0000 (01:12 +0000)
committer: Jim Grosbach <grosbach@apple.com>
Thu, 8 Dec 2011 01:12:35 +0000 (01:12 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@146123 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/ARM/ARMInstrNEON.td
test/MC/ARM/neon-shift-encoding.s

index b63ecc386f3432ae8f495c58a02e037b47aee23c..743ce22aaf21134ac7d96aebd54cf50618b8c25c 100644 (file)
@@ -5402,6 +5402,41 @@ def : NEONInstAlias<"vmul${p}.f32 $Qdn, $Dm$lane",
                     (VMULslfq QPR:$Qdn, QPR:$Qdn, DPR_VFP2:$Dm,
                               VectorIndex32:$lane, pred:$p)>;
 
+// VSHL (register) two-operand aliases.
+def : NEONInstAlias<"vshl${p}.s8 $Vdn, $Vm",
+                    (VSHLsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s16 $Vdn, $Vm",
+                    (VSHLsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s32 $Vdn, $Vm",
+                    (VSHLsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s64 $Vdn, $Vm",
+                    (VSHLsv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u8 $Vdn, $Vm",
+                    (VSHLuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u16 $Vdn, $Vm",
+                    (VSHLuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u32 $Vdn, $Vm",
+                    (VSHLuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u64 $Vdn, $Vm",
+                    (VSHLuv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vshl${p}.s8 $Vdn, $Vm",
+                    (VSHLsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s16 $Vdn, $Vm",
+                    (VSHLsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s32 $Vdn, $Vm",
+                    (VSHLsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s64 $Vdn, $Vm",
+                    (VSHLsv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u8 $Vdn, $Vm",
+                    (VSHLuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u16 $Vdn, $Vm",
+                    (VSHLuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u32 $Vdn, $Vm",
+                    (VSHLuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u64 $Vdn, $Vm",
+                    (VSHLuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
 // VLD1 single-lane pseudo-instructions. These need special handling for
 // the lane index that an InstAlias can't handle, so we use these instead.
 defm VLD1LNdAsm : NEONDT8AsmPseudoInst<"vld1${p}", "$list, $addr",
index a7a1b83860440212a3ee7b8b6597fbc371244b10..af37dd9a7c82d141320af02fd6e88e531ce41f7b 100644 (file)
@@ -235,3 +235,44 @@ _foo:
        vqrshrn.u32     d16, q8, #13
 @ CHECK: vqrshrn.u64   d16, q8, #13  @ encoding: [0x70,0x09,0xf3,0xf3]
        vqrshrn.u64     d16, q8, #13
+
+@ Optional destination operand variants.
+        vshl.s8 q4, q5
+        vshl.s16 q4, q5
+        vshl.s32 q4, q5
+        vshl.s64 q4, q5
+
+        vshl.u8 q4, q5
+        vshl.u16 q4, q5
+        vshl.u32 q4, q5
+        vshl.u64 q4, q5
+
+        vshl.s8 d4, d5
+        vshl.s16 d4, d5
+        vshl.s32 d4, d5
+        vshl.s64 d4, d5
+
+        vshl.u8 d4, d5
+        vshl.u16 d4, d5
+        vshl.u32 d4, d5
+        vshl.u64 d4, d5
+
+@ CHECK: vshl.s8       q4, q4, q5      @ encoding: [0x48,0x84,0x0a,0xf2]
+@ CHECK: vshl.s16      q4, q4, q5      @ encoding: [0x48,0x84,0x1a,0xf2]
+@ CHECK: vshl.s32      q4, q4, q5      @ encoding: [0x48,0x84,0x2a,0xf2]
+@ CHECK: vshl.s64      q4, q4, q5      @ encoding: [0x48,0x84,0x3a,0xf2]
+
+@ CHECK: vshl.u8       q4, q4, q5      @ encoding: [0x48,0x84,0x0a,0xf3]
+@ CHECK: vshl.u16      q4, q4, q5      @ encoding: [0x48,0x84,0x1a,0xf3]
+@ CHECK: vshl.u32      q4, q4, q5      @ encoding: [0x48,0x84,0x2a,0xf3]
+@ CHECK: vshl.u64      q4, q4, q5      @ encoding: [0x48,0x84,0x3a,0xf3]
+
+@ CHECK: vshl.s8       d4, d4, d5      @ encoding: [0x04,0x44,0x05,0xf2]
+@ CHECK: vshl.s16      d4, d4, d5      @ encoding: [0x04,0x44,0x15,0xf2]
+@ CHECK: vshl.s32      d4, d4, d5      @ encoding: [0x04,0x44,0x25,0xf2]
+@ CHECK: vshl.s64      d4, d4, d5      @ encoding: [0x04,0x44,0x35,0xf2]
+
+@ CHECK: vshl.u8       d4, d4, d5      @ encoding: [0x04,0x44,0x05,0xf3]
+@ CHECK: vshl.u16      d4, d4, d5      @ encoding: [0x04,0x44,0x15,0xf3]
+@ CHECK: vshl.u32      d4, d4, d5      @ encoding: [0x04,0x44,0x25,0xf3]
+@ CHECK: vshl.u64      d4, d4, d5      @ encoding: [0x04,0x44,0x35,0xf3]