ARM optional destination operand variants for VEXT instructions.
author: Jim Grosbach <grosbach@apple.com>
Thu, 8 Dec 2011 00:43:47 +0000 (00:43 +0000)
committer: Jim Grosbach <grosbach@apple.com>
Thu, 8 Dec 2011 00:43:47 +0000 (00:43 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@146114 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/ARM/ARMInstrNEON.td
test/MC/ARM/neon-shuffle-encoding.s

index 35630c5a7e661df548c36b8d884ec833c21db167..18c9de22b7e8a94b5a7e29fc40a54c2ea077f5d0 100644 (file)
@@ -5491,3 +5491,20 @@ def : NEONInstAlias<"vclt${p}.u32 $Qd, $Qn, $Qm",
                     (VCGTuv4i32 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
 def : NEONInstAlias<"vclt${p}.f32 $Qd, $Qn, $Qm",
                     (VCGTfq QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+
+// Two-operand variants for VEXT
+def : NEONInstAlias<"vext${p}.8 $Vdn, $Vm, $imm",
+                  (VEXTd8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_7:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.16 $Vdn, $Vm, $imm",
+                  (VEXTd16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_3:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.32 $Vdn, $Vm, $imm",
+                  (VEXTd32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_1:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vext${p}.8 $Vdn, $Vm, $imm",
+                  (VEXTq8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_15:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.16 $Vdn, $Vm, $imm",
+                  (VEXTq16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_7:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.32 $Vdn, $Vm, $imm",
+                  (VEXTq32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_3:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.64 $Vdn, $Vm, $imm",
+                  (VEXTq64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_1:$imm, pred:$p)>;
index b40904c04072b40ad67882ed3b54d1efff47c1f5..d62f79a7016348c6c5cffc6c524285c6272ceae8 100644 (file)
@@ -7,6 +7,14 @@
        vext.16 d16, d17, d16, #3
        vext.32 q8, q9, q8, #3
 
+       vext.8  d17, d16, #3
+       vext.8  d7, d11, #5
+       vext.8  q3, q8, #3
+       vext.8  q9, q4, #7
+       vext.16 d1, d26, #3
+       vext.32 q5, q8, #3
+
+
 @ CHECK: vext.8        d16, d17, d16, #3       @ encoding: [0xa0,0x03,0xf1,0xf2]
 @ CHECK: vext.8        d16, d17, d16, #5       @ encoding: [0xa0,0x05,0xf1,0xf2]
 @ CHECK: vext.8        q8, q9, q8, #3          @ encoding: [0xe0,0x03,0xf2,0xf2]
 @ CHECK: vext.16 d16, d17, d16, #3      @ encoding: [0xa0,0x06,0xf1,0xf2]
 @ CHECK: vext.32 q8, q9, q8, #3         @ encoding: [0xe0,0x0c,0xf2,0xf2]
 
+@ CHECK: vext.8        d17, d17, d16, #3       @ encoding: [0xa0,0x13,0xf1,0xf2]
+@ CHECK: vext.8        d7, d7, d11, #5         @ encoding: [0x0b,0x75,0xb7,0xf2]
+@ CHECK: vext.8        q3, q3, q8, #3          @ encoding: [0x60,0x63,0xb6,0xf2]
+@ CHECK: vext.8        q9, q9, q4, #7          @ encoding: [0xc8,0x27,0xf2,0xf2]
+@ CHECK: vext.16 d1, d1, d26, #3        @ encoding: [0x2a,0x16,0xb1,0xf2]
+@ CHECK: vext.32 q5, q5, q8, #3         @ encoding: [0x60,0xac,0xba,0xf2]
+
 
        vtrn.8  d17, d16
        vtrn.16 d17, d16