[AArch64] Add support for NEON scalar floating-point reciprocal estimate,
[oota-llvm.git] / lib / Target / AArch64 / AArch64InstrNEON.td
index b8840aa18c1fa5a380583427aa60f1ee7d5567b3..a9f60619a2168bdb545c18f39cf48891cd91a83a 100644 (file)
@@ -49,6 +49,8 @@ def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
 def Neon_sqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLs", SDTARMVSH>;
 def Neon_uqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLu", SDTARMVSH>;
 
+def Neon_vduplane : SDNode<"AArch64ISD::NEON_VDUPLANE", SDTypeProfile<1, 2,
+                           [SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i64>]>>;
 
 //===----------------------------------------------------------------------===//
 // Multiclasses
@@ -215,8 +217,8 @@ defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
 // class NeonI_3VSame_Constraint_impl: NeonI_3VSame with no data type and
 // two operands constraints.
 class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
-  RegisterClass VPRC, ValueType OpTy, bit q, bit u, bits<2> size, bits<5> opcode,
-  SDPatternOperator opnode>
+  RegisterOperand VPRC, ValueType OpTy, bit q, bit u, bits<2> size, 
+  bits<5> opcode, SDPatternOperator opnode>
   : NeonI_3VSame<q, u, size, opcode,
     (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
     asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
@@ -321,11 +323,13 @@ defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
 // ORR disassembled as MOV if Vn==Vm
 
 // Vector Move - register
-// Alias for ORR if Vn=Vm and it is the preferred syntax
+// Alias for ORR if Vn=Vm.
+// FIXME: This is actually the preferred syntax but TableGen can't deal with
+// custom printing of aliases.
 def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
-                    (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn)>;
+                    (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn), 0>;
 def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
-                    (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn)>;
+                    (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn), 0>;
 
 def Neon_immAllOnes: PatLeaf<(Neon_movi (i32 timm), (i32 imm)), [{
   ConstantSDNode *ImmConstVal = cast<ConstantSDNode>(N->getOperand(0));
@@ -571,7 +575,7 @@ def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs),
 // NeonI_compare_aliases class: swaps register operands to implement
 // comparison aliases, e.g., CMLE is alias for CMGE with operands reversed.
 class NeonI_compare_aliases<string asmop, string asmlane,
-                            Instruction inst, RegisterClass VPRC>
+                            Instruction inst, RegisterOperand VPRC>
   : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane #
                     ", $Rm" # asmlane,
                   (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
@@ -1032,6 +1036,20 @@ defm neon_mov_imm_LSLH  : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
   return (HasShift && !ShiftOnesIn);
 }]>;
 
+def neon_uimm1_asmoperand : AsmOperandClass
+{
+  let Name = "UImm1";
+  let PredicateMethod = "isUImm<1>";
+  let RenderMethod = "addImmOperands";
+}
+
+def neon_uimm2_asmoperand : AsmOperandClass
+{
+  let Name = "UImm2";
+  let PredicateMethod = "isUImm<2>";
+  let RenderMethod = "addImmOperands";
+}
+
 def neon_uimm8_asmoperand : AsmOperandClass
 {
   let Name = "UImm8";
@@ -1324,7 +1342,7 @@ defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
 }
 
 class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
-                                Instruction inst, RegisterClass VPRC>
+                                Instruction inst, RegisterOperand VPRC>
   : NeonInstAlias<!strconcat(asmop, " $Rd," # asmlane # ", $Imm"),
                         (inst VPRC:$Rd, neon_uimm8:$Imm,  0), 0b0>;
 
@@ -1401,7 +1419,7 @@ def MOVIdi : NeonI_1VModImm<0b0, 0b1,
 
 // Vector Floating Point Move Immediate
 
-class NeonI_FMOV_impl<string asmlane, RegisterClass VPRC, ValueType OpTy,
+class NeonI_FMOV_impl<string asmlane, RegisterOperand VPRC, ValueType OpTy,
                       Operand immOpType, bit q, bit op>
   : NeonI_1VModImm<q, op,
                    (outs VPRC:$Rd), (ins immOpType:$Imm),
@@ -1456,7 +1474,7 @@ def shr_imm32 : shr_imm<"32">;
 def shr_imm64 : shr_imm<"64">;
 
 class N2VShift<bit q, bit u, bits<5> opcode, string asmop, string T,
-               RegisterClass VPRC, ValueType Ty, Operand ImmTy, SDNode OpNode>
+               RegisterOperand VPRC, ValueType Ty, Operand ImmTy, SDNode OpNode>
   : NeonI_2VShiftImm<q, u, opcode,
                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
@@ -1541,12 +1559,22 @@ defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;
 defm SSHRvvi : NeonI_N2VShR<0b0, 0b00000, "sshr", sra>;
 defm USHRvvi : NeonI_N2VShR<0b1, 0b00000, "ushr", srl>;
 
-def Neon_top16B : PatFrag<(ops node:$in),
-                          (extract_subvector (v16i8 node:$in), (iPTR 8))>;
-def Neon_top8H : PatFrag<(ops node:$in),
-                         (extract_subvector (v8i16 node:$in), (iPTR 4))>;
-def Neon_top4S : PatFrag<(ops node:$in),
-                         (extract_subvector (v4i32 node:$in), (iPTR 2))>;
+def Neon_High16B : PatFrag<(ops node:$in),
+                           (extract_subvector (v16i8 node:$in), (iPTR 8))>;
+def Neon_High8H  : PatFrag<(ops node:$in),
+                           (extract_subvector (v8i16 node:$in), (iPTR 4))>;
+def Neon_High4S  : PatFrag<(ops node:$in),
+                           (extract_subvector (v4i32 node:$in), (iPTR 2))>;
+
+def Neon_low8H : PatFrag<(ops node:$in),
+                         (v4i16 (extract_subvector (v8i16 node:$in),
+                                                   (iPTR 0)))>;
+def Neon_low4S : PatFrag<(ops node:$in),
+                         (v2i32 (extract_subvector (v4i32 node:$in),
+                                                   (iPTR 0)))>;
+def Neon_low4f : PatFrag<(ops node:$in),
+                         (v2f32 (extract_subvector (v4f32 node:$in),
+                                                   (iPTR 0)))>;
 
 class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
                    string SrcT, ValueType DestTy, ValueType SrcTy,
@@ -1594,17 +1622,17 @@ multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
 
   // 128-bit vector types
   def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b",
-                              v8i16, v8i8, 8, uimm3, ExtOp, Neon_top16B> {
+                              v8i16, v8i8, 8, uimm3, ExtOp, Neon_High16B> {
     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
   }
 
   def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h",
-                             v4i32, v4i16, 4, uimm4, ExtOp, Neon_top8H> {
+                             v4i32, v4i16, 4, uimm4, ExtOp, Neon_High8H> {
     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
   }
 
   def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s",
-                             v2i64, v2i32, 2, uimm5, ExtOp, Neon_top4S> {
+                             v2i64, v2i32, 2, uimm5, ExtOp, Neon_High4S> {
     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
   }
 
@@ -1618,13 +1646,13 @@ multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
   def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
             (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;
 
-  def : Pat<(v8i16 (ExtOp (v8i8 (Neon_top16B VPR128:$Rn)))),
+  def : Pat<(v8i16 (ExtOp (v8i8 (Neon_High16B VPR128:$Rn)))),
             (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;
 
-  def : Pat<(v4i32 (ExtOp (v4i16 (Neon_top8H VPR128:$Rn)))),
+  def : Pat<(v4i32 (ExtOp (v4i16 (Neon_High8H VPR128:$Rn)))),
             (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;
 
-  def : Pat<(v2i64 (ExtOp (v2i32 (Neon_top4S VPR128:$Rn)))),
+  def : Pat<(v2i64 (ExtOp (v2i32 (Neon_High4S VPR128:$Rn)))),
             (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
 }
 
@@ -1634,7 +1662,7 @@ defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
 
 // Rounding/Saturating shift
 class N2VShift_RQ<bit q, bit u, bits<5> opcode, string asmop, string T,
-                  RegisterClass VPRC, ValueType Ty, Operand ImmTy,
+                  RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
                   SDPatternOperator OpNode>
   : NeonI_2VShiftImm<q, u, opcode,
                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
@@ -1736,7 +1764,7 @@ defm SQSHLvvi : NeonI_N2VShL_Q<0b0, 0b01110, "sqshl", Neon_sqrshlImm>;
 defm UQSHLvvi : NeonI_N2VShL_Q<0b1, 0b01110, "uqshl", Neon_uqrshlImm>;
 
 class N2VShiftAdd<bit q, bit u, bits<5> opcode, string asmop, string T,
-                  RegisterClass VPRC, ValueType Ty, Operand ImmTy,
+                  RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
                   SDNode OpNode>
   : NeonI_2VShiftImm<q, u, opcode,
            (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
@@ -1792,7 +1820,7 @@ defm USRAvvi    : NeonI_N2VShRAdd<1, 0b00010, "usra", srl>;
 
 // Rounding shift accumulate
 class N2VShiftAdd_R<bit q, bit u, bits<5> opcode, string asmop, string T,
-                    RegisterClass VPRC, ValueType Ty, Operand ImmTy,
+                    RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
                     SDPatternOperator OpNode>
   : NeonI_2VShiftImm<q, u, opcode,
                      (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
@@ -1847,7 +1875,7 @@ defm URSRAvvi : NeonI_N2VShRAdd_R<1, 0b00110, "ursra", int_aarch64_neon_vurshr>;
 
 // Shift insert by immediate
 class N2VShiftIns<bit q, bit u, bits<5> opcode, string asmop, string T,
-                  RegisterClass VPRC, ValueType Ty, Operand ImmTy,
+                  RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
                   SDPatternOperator OpNode>
     : NeonI_2VShiftImm<q, u, opcode,
            (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
@@ -1953,7 +1981,7 @@ class N2VShR_Narrow<bit q, bit u, bits<5> opcode, string asmop, string DestT,
 class N2VShR_Narrow_Hi<bit q, bit u, bits<5> opcode, string asmop, string DestT,
                        string SrcT, Operand ImmTy>
   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
-                     (ins VPR64:$src, VPR128:$Rn, ImmTy:$Imm),
+                     (ins VPR128:$src, VPR128:$Rn, ImmTy:$Imm),
                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
                      [], NoItinerary> {
   let Constraints = "$src = $Rd";
@@ -2002,9 +2030,21 @@ defm UQSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10010, "uqshrn">;
 defm SQRSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10011, "sqrshrn">;
 defm UQRSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10011, "uqrshrn">;
 
-def Neon_combine : PatFrag<(ops node:$Rm, node:$Rn),
-                           (v2i64 (concat_vectors (v1i64 node:$Rm),
-                                                  (v1i64 node:$Rn)))>;
+def Neon_combine_2D : PatFrag<(ops node:$Rm, node:$Rn),
+                              (v2i64 (concat_vectors (v1i64 node:$Rm),
+                                                     (v1i64 node:$Rn)))>;
+def Neon_combine_8H : PatFrag<(ops node:$Rm, node:$Rn),
+                              (v8i16 (concat_vectors (v4i16 node:$Rm),
+                                                     (v4i16 node:$Rn)))>;
+def Neon_combine_4S : PatFrag<(ops node:$Rm, node:$Rn),
+                              (v4i32 (concat_vectors (v2i32 node:$Rm),
+                                                     (v2i32 node:$Rn)))>;
+def Neon_combine_4f : PatFrag<(ops node:$Rm, node:$Rn),
+                              (v4f32 (concat_vectors (v2f32 node:$Rm),
+                                                     (v2f32 node:$Rn)))>;
+def Neon_combine_2d : PatFrag<(ops node:$Rm, node:$Rn),
+                              (v2f64 (concat_vectors (v1f64 node:$Rm),
+                                                     (v1f64 node:$Rn)))>;
 
 def Neon_lshrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
                              (v8i16 (srl (v8i16 node:$lhs),
@@ -2037,18 +2077,21 @@ multiclass Neon_shiftNarrow_patterns<string shr> {
               imm:$Imm))),
             (SHRNvvi_2S VPR128:$Rn, imm:$Imm)>;
 
-  def : Pat<(Neon_combine (v1i64 VPR64:$src), (v1i64 (bitconvert
+  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
               (v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H")
                 VPR128:$Rn, imm:$Imm)))))),
-            (SHRNvvi_16B VPR64:$src, VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(Neon_combine (v1i64 VPR64:$src), (v1i64 (bitconvert
+            (SHRNvvi_16B (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+                         VPR128:$Rn, imm:$Imm)>;
+  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
               (v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S")
                 VPR128:$Rn, imm:$Imm)))))),
-            (SHRNvvi_8H VPR64:$src, VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(Neon_combine (v1i64 VPR64:$src), (v1i64 (bitconvert
+            (SHRNvvi_8H (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+                        VPR128:$Rn, imm:$Imm)>;
+  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
               (v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D")
                 VPR128:$Rn, imm:$Imm)))))),
-            (SHRNvvi_4S VPR64:$src, VPR128:$Rn, imm:$Imm)>;
+            (SHRNvvi_4S (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+                        VPR128:$Rn, imm:$Imm)>;
 }
 
 multiclass Neon_shiftNarrow_QR_patterns<SDPatternOperator op, string prefix> {
@@ -2059,18 +2102,21 @@ multiclass Neon_shiftNarrow_QR_patterns<SDPatternOperator op, string prefix> {
   def : Pat<(v2i32 (op (v2i64 VPR128:$Rn), imm:$Imm)),
             (!cast<Instruction>(prefix # "_2S") VPR128:$Rn, imm:$Imm)>;
 
-  def : Pat<(Neon_combine (v1i64 VPR64:$src),
-              (v1i64 (bitconvert (v8i8 (op (v8i16 VPR128:$Rn), imm:$Imm))))),
+  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
+                (v1i64 (bitconvert (v8i8 (op (v8i16 VPR128:$Rn), imm:$Imm))))),
             (!cast<Instruction>(prefix # "_16B")
-              VPR64:$src, VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(Neon_combine (v1i64 VPR64:$src),
-              (v1i64 (bitconvert (v4i16 (op (v4i32 VPR128:$Rn), imm:$Imm))))),
+                (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+                VPR128:$Rn, imm:$Imm)>;
+  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
+                (v1i64 (bitconvert (v4i16 (op (v4i32 VPR128:$Rn), imm:$Imm))))),
             (!cast<Instruction>(prefix # "_8H")
-              VPR64:$src, VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(Neon_combine (v1i64 VPR64:$src),
-              (v1i64 (bitconvert (v2i32 (op (v2i64 VPR128:$Rn), imm:$Imm))))),
+                (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+                VPR128:$Rn, imm:$Imm)>;
+  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
+                (v1i64 (bitconvert (v2i32 (op (v2i64 VPR128:$Rn), imm:$Imm))))),
             (!cast<Instruction>(prefix # "_4S")
-              VPR64:$src, VPR128:$Rn, imm:$Imm)>;
+                  (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+                  VPR128:$Rn, imm:$Imm)>;
 }
 
 defm : Neon_shiftNarrow_patterns<"lshr">;
@@ -2086,7 +2132,7 @@ defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqrshrn, "UQRSHRNvvi">;
 
 // Convert fix-point and float-pointing
 class N2VCvt_Fx<bit q, bit u, bits<5> opcode, string asmop, string T,
-                RegisterClass VPRC, ValueType DestTy, ValueType SrcTy,
+                RegisterOperand VPRC, ValueType DestTy, ValueType SrcTy,
                 Operand ImmTy, SDPatternOperator IntOp>
   : NeonI_2VShiftImm<q, u, opcode,
                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
@@ -2146,23 +2192,148 @@ defm VCVTf2xu : NeonI_N2VCvt_Fp2fx<1, 0b11111, "fcvtzu",
 multiclass Neon_sshll2_0<SDNode ext>
 {
   def _v8i8  : PatFrag<(ops node:$Rn),
-                       (v8i16 (ext (v8i8 (Neon_top16B node:$Rn))))>;
+                       (v8i16 (ext (v8i8 (Neon_High16B node:$Rn))))>;
   def _v4i16 : PatFrag<(ops node:$Rn),
-                       (v4i32 (ext (v4i16 (Neon_top8H node:$Rn))))>;
+                       (v4i32 (ext (v4i16 (Neon_High8H node:$Rn))))>;
   def _v2i32 : PatFrag<(ops node:$Rn),
-                       (v2i64 (ext (v2i32 (Neon_top4S node:$Rn))))>;
+                       (v2i64 (ext (v2i32 (Neon_High4S node:$Rn))))>;
 }
 
 defm NI_sext_high : Neon_sshll2_0<sext>;
 defm NI_zext_high : Neon_sshll2_0<zext>;
 
+
+//===----------------------------------------------------------------------===//
+// Multiclasses for NeonI_Across
+//===----------------------------------------------------------------------===//
+
+// Variant 1
+
+multiclass NeonI_2VAcross_1<bit u, bits<5> opcode,
+                            string asmop, SDPatternOperator opnode>
+{
+    def _1h8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
+                (outs FPR16:$Rd), (ins VPR64:$Rn),
+                asmop # "\t$Rd, $Rn.8b",
+                [(set (v1i16 FPR16:$Rd),
+                    (v1i16 (opnode (v8i8 VPR64:$Rn))))],
+                NoItinerary>;
+
+    def _1h16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
+                (outs FPR16:$Rd), (ins VPR128:$Rn),
+                asmop # "\t$Rd, $Rn.16b",
+                [(set (v1i16 FPR16:$Rd),
+                    (v1i16 (opnode (v16i8 VPR128:$Rn))))],
+                NoItinerary>;
+
+    def _1s4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
+                (outs FPR32:$Rd), (ins VPR64:$Rn),
+                asmop # "\t$Rd, $Rn.4h",
+                [(set (v1i32 FPR32:$Rd),
+                    (v1i32 (opnode (v4i16 VPR64:$Rn))))],
+                NoItinerary>;
+
+    def _1s8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
+                (outs FPR32:$Rd), (ins VPR128:$Rn),
+                asmop # "\t$Rd, $Rn.8h",
+                [(set (v1i32 FPR32:$Rd),
+                    (v1i32 (opnode (v8i16 VPR128:$Rn))))],
+                NoItinerary>;
+
+    // _1d2s doesn't exist!
+
+    def _1d4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
+                (outs FPR64:$Rd), (ins VPR128:$Rn),
+                asmop # "\t$Rd, $Rn.4s",
+                [(set (v1i64 FPR64:$Rd),
+                    (v1i64 (opnode (v4i32 VPR128:$Rn))))],
+                NoItinerary>;
+}
+
+defm SADDLV : NeonI_2VAcross_1<0b0, 0b00011, "saddlv", int_aarch64_neon_saddlv>;
+defm UADDLV : NeonI_2VAcross_1<0b1, 0b00011, "uaddlv", int_aarch64_neon_uaddlv>;
+
+// Variant 2
+
+multiclass NeonI_2VAcross_2<bit u, bits<5> opcode,
+                            string asmop, SDPatternOperator opnode>
+{
+    def _1b8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
+                (outs FPR8:$Rd), (ins VPR64:$Rn),
+                asmop # "\t$Rd, $Rn.8b",
+                [(set (v1i8 FPR8:$Rd),
+                    (v1i8 (opnode (v8i8 VPR64:$Rn))))],
+                NoItinerary>;
+
+    def _1b16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
+                (outs FPR8:$Rd), (ins VPR128:$Rn),
+                asmop # "\t$Rd, $Rn.16b",
+                [(set (v1i8 FPR8:$Rd),
+                    (v1i8 (opnode (v16i8 VPR128:$Rn))))],
+                NoItinerary>;
+
+    def _1h4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
+                (outs FPR16:$Rd), (ins VPR64:$Rn),
+                asmop # "\t$Rd, $Rn.4h",
+                [(set (v1i16 FPR16:$Rd),
+                    (v1i16 (opnode (v4i16 VPR64:$Rn))))],
+                NoItinerary>;
+
+    def _1h8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
+                (outs FPR16:$Rd), (ins VPR128:$Rn),
+                asmop # "\t$Rd, $Rn.8h",
+                [(set (v1i16 FPR16:$Rd),
+                    (v1i16 (opnode (v8i16 VPR128:$Rn))))],
+                NoItinerary>;
+
+    // _1s2s doesn't exist!
+
+    def _1s4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
+                (outs FPR32:$Rd), (ins VPR128:$Rn),
+                asmop # "\t$Rd, $Rn.4s",
+                [(set (v1i32 FPR32:$Rd),
+                    (v1i32 (opnode (v4i32 VPR128:$Rn))))],
+                NoItinerary>;
+}
+
+defm SMAXV : NeonI_2VAcross_2<0b0, 0b01010, "smaxv", int_aarch64_neon_smaxv>;
+defm UMAXV : NeonI_2VAcross_2<0b1, 0b01010, "umaxv", int_aarch64_neon_umaxv>;
+
+defm SMINV : NeonI_2VAcross_2<0b0, 0b11010, "sminv", int_aarch64_neon_sminv>;
+defm UMINV : NeonI_2VAcross_2<0b1, 0b11010, "uminv", int_aarch64_neon_uminv>;
+
+defm ADDV : NeonI_2VAcross_2<0b0, 0b11011, "addv", int_aarch64_neon_vaddv>;
+
+// Variant 3
+
+multiclass NeonI_2VAcross_3<bit u, bits<5> opcode, bits<2> size,
+                            string asmop, SDPatternOperator opnode>
+{
+    def _1s4s:  NeonI_2VAcross<0b1, u, size, opcode,
+                (outs FPR32:$Rd), (ins VPR128:$Rn),
+                asmop # "\t$Rd, $Rn.4s",
+                [(set (v1f32 FPR32:$Rd),
+                    (v1f32 (opnode (v4f32 VPR128:$Rn))))],
+                NoItinerary>;
+}
+
+defm FMAXNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b00, "fmaxnmv",
+                                int_aarch64_neon_vmaxnmv>;
+defm FMINNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b10, "fminnmv",
+                                int_aarch64_neon_vminnmv>;
+
+defm FMAXV : NeonI_2VAcross_3<0b1, 0b01111, 0b00, "fmaxv",
+                              int_aarch64_neon_vmaxv>;
+defm FMINV : NeonI_2VAcross_3<0b1, 0b01111, 0b10, "fminv",
+                              int_aarch64_neon_vminv>;
+
 // The followings are for instruction class (3V Diff)
 
 // normal long/long2 pattern
 class NeonI_3VDL<bit q, bit u, bits<2> size, bits<4> opcode,
                  string asmop, string ResS, string OpS,
                  SDPatternOperator opnode, SDPatternOperator ext,
-                 RegisterClass OpVPR,
+                 RegisterOperand OpVPR,
                  ValueType ResTy, ValueType OpTy>
   : NeonI_3VDiff<q, u, size, opcode,
                  (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
@@ -2244,7 +2415,7 @@ defm USUBL2vvv :  NeonI_3VDL2_u<0b1, 0b0010, "usubl2", sub, 0>;
 class NeonI_3VDW<bit q, bit u, bits<2> size, bits<4> opcode,
                  string asmop, string ResS, string OpS,
                  SDPatternOperator opnode, SDPatternOperator ext,
-                 RegisterClass OpVPR,
+                 RegisterOperand OpVPR,
                  ValueType ResTy, ValueType OpTy>
   : NeonI_3VDiff<q, u, size, opcode,
                  (outs VPR128:$Rd), (ins VPR128:$Rn, OpVPR:$Rm),
@@ -2325,7 +2496,7 @@ multiclass NeonI_get_high
 }
 
 defm NI_get_hi : NeonI_get_high;
-                                 
+
 // pattern for addhn/subhn with 2 operands
 class NeonI_3VDN_addhn_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
                            string asmop, string ResS, string OpS,
@@ -2361,7 +2532,7 @@ defm SUBHNvvv  : NeonI_3VDN_addhn_2Op<0b0, 0b0110, "subhn", sub, 0>;
 class NeonI_3VD_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
                     string asmop, string ResS, string OpS,
                     SDPatternOperator opnode,
-                    RegisterClass ResVPR, RegisterClass OpVPR,
+                    RegisterOperand ResVPR, RegisterOperand OpVPR,
                     ValueType ResTy, ValueType OpTy>
   : NeonI_3VDiff<q, u, size, opcode,
                  (outs ResVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
@@ -2388,79 +2559,71 @@ multiclass NeonI_3VDN_2Op<bit u, bits<4> opcode,
 defm RADDHNvvv : NeonI_3VDN_2Op<0b1, 0b0100, "raddhn", int_arm_neon_vraddhn, 1>;
 defm RSUBHNvvv : NeonI_3VDN_2Op<0b1, 0b0110, "rsubhn", int_arm_neon_vrsubhn, 0>;
 
-// pattern for acle intrinsic with 3 operands
-class NeonI_3VDN_addhn2_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
-                            string asmop, string ResS, string OpS,
-                            SDPatternOperator opnode, SDPatternOperator get_hi,
-                            ValueType OpTy, ValueType OpSTy>
-  : NeonI_3VDiff<q, u, size, opcode,
-                 (outs VPR128:$Rd), (ins VPR64:$src, VPR128:$Rn, VPR128:$Rm),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
-                 [(set (v2i64 VPR128:$Rd),
-                    (Neon_combine
-                      (v1i64 VPR64:$src),
-                      (v1i64 (bitconvert
-                        (OpSTy (get_hi
-                          (OpTy (opnode (OpTy VPR128:$Rn),
-                                        (OpTy VPR128:$Rm)))))))))],
-                 NoItinerary> {
-  let Constraints = "$src = $Rd";
-}
-
-multiclass NeonI_3VDN_addhn2_3Op_v1<bit u, bits<4> opcode,
-                                    string asmop, 
-                                    SDPatternOperator opnode>
-{
-  def _16b8h : NeonI_3VDN_addhn2_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h",
-                                     opnode, NI_get_hi_8h, v8i16, v8i8>;
-  def _8h4s : NeonI_3VDN_addhn2_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s",
-                                    opnode, NI_get_hi_4s, v4i32, v4i16>;
-  def _4s2d : NeonI_3VDN_addhn2_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d",
-                                    opnode, NI_get_hi_2d, v2i64, v2i32>;
-}
-
-defm ADDHN2vvv  : NeonI_3VDN_addhn2_3Op_v1<0b0, 0b0100, "addhn2", add>;
-defm SUBHN2vvv  : NeonI_3VDN_addhn2_3Op_v1<0b0, 0b0110, "subhn2", sub>;
-
 // pattern for acle intrinsic with 3 operands
 class NeonI_3VDN_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
-                     string asmop, string ResS, string OpS,
-                     SDPatternOperator opnode,
-                     ValueType OpTy, ValueType OpSTy>
+                     string asmop, string ResS, string OpS>
   : NeonI_3VDiff<q, u, size, opcode,
-                 (outs VPR128:$Rd), (ins VPR64:$src, VPR128:$Rn, VPR128:$Rm),
+                 (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
-                 [(set (v2i64 VPR128:$Rd),
-                    (Neon_combine (v1i64 VPR64:$src),
-                                  (v1i64 (bitconvert 
-                                     (OpSTy (opnode (OpTy VPR128:$Rn),
-                                                    (OpTy VPR128:$Rm)))))))],
-                 NoItinerary> {
+                 [], NoItinerary> {
   let Constraints = "$src = $Rd";
+  let neverHasSideEffects = 1;
 }
 
 multiclass NeonI_3VDN_3Op_v1<bit u, bits<4> opcode,
-                             string asmop, 
-                             SDPatternOperator opnode>
-{
-  def _16b8h : NeonI_3VDN_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h",
-                              opnode, v8i16, v8i8>;
-  def _8h4s : NeonI_3VDN_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s",
-                             opnode, v4i32, v4i16>;
-  def _4s2d : NeonI_3VDN_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d",
-                             opnode, v2i64, v2i32>;
+                             string asmop> {
+  def _16b8h : NeonI_3VDN_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h">;
+  def _8h4s : NeonI_3VDN_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s">;
+  def _4s2d : NeonI_3VDN_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d">;
 }
 
-defm RADDHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0100, "raddhn2",
-                                    int_arm_neon_vraddhn>;
-defm RSUBHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0110, "rsubhn2",
-                                    int_arm_neon_vrsubhn>;
+defm ADDHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0100, "addhn2">;
+defm SUBHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0110, "subhn2">;
+
+defm RADDHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0100, "raddhn2">;
+defm RSUBHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0110, "rsubhn2">;
+
+// Patterns have to be separate because there's a SUBREG_TO_REG in the output
+// part.
+class NarrowHighHalfPat<Instruction INST, ValueType DstTy, ValueType SrcTy,
+                        SDPatternOperator coreop>
+  : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
+                      (v1i64 (bitconvert (DstTy (coreop (SrcTy VPR128:$Rn),
+                                                        (SrcTy VPR128:$Rm)))))),
+        (INST (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
+              VPR128:$Rn, VPR128:$Rm)>;
+
+// addhn2 patterns
+def : NarrowHighHalfPat<ADDHN2vvv_16b8h, v8i8,  v8i16,
+          BinOpFrag<(NI_get_hi_8h (add node:$LHS, node:$RHS))>>;
+def : NarrowHighHalfPat<ADDHN2vvv_8h4s,  v4i16, v4i32,
+          BinOpFrag<(NI_get_hi_4s (add node:$LHS, node:$RHS))>>;
+def : NarrowHighHalfPat<ADDHN2vvv_4s2d,  v2i32, v2i64,
+          BinOpFrag<(NI_get_hi_2d (add node:$LHS, node:$RHS))>>;
+
+// subhn2 patterns
+def : NarrowHighHalfPat<SUBHN2vvv_16b8h, v8i8,  v8i16,
+          BinOpFrag<(NI_get_hi_8h (sub node:$LHS, node:$RHS))>>;
+def : NarrowHighHalfPat<SUBHN2vvv_8h4s,  v4i16, v4i32,
+          BinOpFrag<(NI_get_hi_4s (sub node:$LHS, node:$RHS))>>;
+def : NarrowHighHalfPat<SUBHN2vvv_4s2d,  v2i32, v2i64,
+          BinOpFrag<(NI_get_hi_2d (sub node:$LHS, node:$RHS))>>;
+
+// raddhn2 patterns
+def : NarrowHighHalfPat<RADDHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vraddhn>;
+def : NarrowHighHalfPat<RADDHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vraddhn>;
+def : NarrowHighHalfPat<RADDHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vraddhn>;
+
+// rsubhn2 patterns
+def : NarrowHighHalfPat<RSUBHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vrsubhn>;
+def : NarrowHighHalfPat<RSUBHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vrsubhn>;
+def : NarrowHighHalfPat<RSUBHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vrsubhn>;
 
 // pattern that need to extend result
 class NeonI_3VDL_Ext<bit q, bit u, bits<2> size, bits<4> opcode,
                      string asmop, string ResS, string OpS,
                      SDPatternOperator opnode,
-                     RegisterClass OpVPR,
+                     RegisterOperand OpVPR,
                      ValueType ResTy, ValueType OpTy, ValueType OpSTy>
   : NeonI_3VDiff<q, u, size, opcode,
                  (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
@@ -2490,11 +2653,12 @@ defm UABDLvvv : NeonI_3VDL_zext<0b1, 0b0111, "uabdl", int_arm_neon_vabdu, 1>;
 multiclass NeonI_Op_High<SDPatternOperator op>
 {
   def _16B : PatFrag<(ops node:$Rn, node:$Rm),
-                     (op (Neon_top16B node:$Rn), (Neon_top16B node:$Rm))>;
+                     (op (v8i8 (Neon_High16B node:$Rn)), (v8i8 (Neon_High16B node:$Rm)))>;
   def _8H  : PatFrag<(ops node:$Rn, node:$Rm),
-                     (op (Neon_top8H node:$Rn), (Neon_top8H node:$Rm))>;
+                     (op (v4i16 (Neon_High8H node:$Rn)), (v4i16 (Neon_High8H node:$Rm)))>;
   def _4S  : PatFrag<(ops node:$Rn, node:$Rm),
-                     (op (Neon_top4S node:$Rn), (Neon_top4S node:$Rm))>;
+                     (op (v2i32 (Neon_High4S node:$Rn)), (v2i32 (Neon_High4S node:$Rm)))>;
+
 }
 
 defm NI_sabdl_hi : NeonI_Op_High<int_arm_neon_vabds>;
@@ -2528,7 +2692,7 @@ defm UABDL2vvv : NeonI_3VDL_Abd_u<0b1, 0b0111, "uabdl2", "NI_uabdl_hi", 1>;
 class NeonI_3VDL_Aba<bit q, bit u, bits<2> size, bits<4> opcode,
                      string asmop, string ResS, string OpS, 
                      SDPatternOperator opnode, SDPatternOperator subop,
-                     RegisterClass OpVPR,
+                     RegisterOperand OpVPR,
                      ValueType ResTy, ValueType OpTy, ValueType OpSTy>
   : NeonI_3VDiff<q, u, size, opcode,
                  (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
@@ -2659,19 +2823,19 @@ multiclass NeonI_3VDL_3Op_v1<bit u, bits<4> opcode,
                              opnode, v2i64, v2i32>;
 }
 
-def Neon_smlal : PatFrag<(ops node:$Rd, node:$Rm, node:$Rn),
+def Neon_smlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
                          (add node:$Rd,
                             (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
 
-def Neon_umlal : PatFrag<(ops node:$Rd, node:$Rm, node:$Rn),
+def Neon_umlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
                          (add node:$Rd,
                             (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
 
-def Neon_smlsl : PatFrag<(ops node:$Rd, node:$Rm, node:$Rn),
+def Neon_smlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
                          (sub node:$Rd,
                             (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
 
-def Neon_umlsl : PatFrag<(ops node:$Rd, node:$Rm, node:$Rn),
+def Neon_umlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
                          (sub node:$Rd,
                             (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
 
@@ -2684,7 +2848,7 @@ defm UMLSLvvv :  NeonI_3VDL_3Op_v1<0b1, 0b1010, "umlsl", Neon_umlsl>;
 class NeonI_3VDL2_3Op_mlas<bit q, bit u, bits<2> size, bits<4> opcode,
                            string asmop, string ResS, string OpS,
                            SDPatternOperator subop, SDPatternOperator opnode,
-                           RegisterClass OpVPR,
+                           RegisterOperand OpVPR,
                            ValueType ResTy, ValueType OpTy>
   : NeonI_3VDiff<q, u, size, opcode,
                (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
@@ -2827,6 +2991,40 @@ class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
                 [],
                 NoItinerary>;
 
+// Scalar three-same instructions with only H (FPR16) and S (FPR32) variants.
+// `u` is the U bit and `opcode` the 5-bit opcode field; the size field is
+// fixed to 0b01 for the H form and 0b10 for the S form.  No selection
+// patterns are attached here; matching is done by separate Pat multiclasses.
+multiclass NeonI_Scalar3Same_HS_sizes<bit u, bits<5> opcode,
+                                      string asmop, bit Commutable = 0>
+{
+  let isCommutable = Commutable in {
+    def hhh : NeonI_Scalar3Same<u, 0b01, opcode,
+                                (outs FPR16:$Rd), (ins FPR16:$Rn, FPR16:$Rm),
+                                !strconcat(asmop, " $Rd, $Rn, $Rm"),
+                                [],
+                                NoItinerary>;
+    def sss : NeonI_Scalar3Same<u, 0b10, opcode,
+                                (outs FPR32:$Rd), (ins FPR32:$Rn, FPR32:$Rm),
+                                !strconcat(asmop, " $Rd, $Rn, $Rm"),
+                                [],
+                                NoItinerary>;
+  }
+}
+
+// Scalar three-same instructions with S (FPR32) and D (FPR64) variants,
+// typically floating-point ops.  `size_high` supplies the high size bit;
+// the low size bit selects S ({size_high, 0}) versus D ({size_high, 1}).
+multiclass NeonI_Scalar3Same_SD_sizes<bit u, bit size_high, bits<5> opcode,
+                                      string asmop, bit Commutable = 0>
+{
+  let isCommutable = Commutable in {
+    def sss : NeonI_Scalar3Same<u, {size_high, 0b0}, opcode,
+                                (outs FPR32:$Rd), (ins FPR32:$Rn, FPR32:$Rm),
+                                !strconcat(asmop, " $Rd, $Rn, $Rm"),
+                                [],
+                                NoItinerary>;
+    def ddd : NeonI_Scalar3Same<u, {size_high, 0b1}, opcode,
+                                (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm),
+                                !strconcat(asmop, " $Rd, $Rn, $Rm"),
+                                [],
+                                NoItinerary>;
+  }
+}
+
 multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
                                         string asmop, bit Commutable = 0>
 {
@@ -2854,13 +3052,78 @@ multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
   }
 }
 
-class Neon_Scalar_D_size_patterns<SDPatternOperator opnode, Instruction INSTD>
-  : Pat<(v1i64 (opnode (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
-        (SUBREG_TO_REG (i64 0),
-              (INSTD (EXTRACT_SUBREG VPR64:$Rn, sub_64),
-             (EXTRACT_SUBREG VPR64:$Rm, sub_64)),
-          sub_64)>;
+// Match a v1i64 operation directly on FPR64 registers (replaces the old
+// VPR64-based pattern that needed EXTRACT_SUBREG/SUBREG_TO_REG shuffling).
+multiclass Neon_Scalar3Same_D_size_patterns<SDPatternOperator opnode,
+                                            Instruction INSTD> {
+  def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
+            (INSTD FPR64:$Rn, FPR64:$Rm)>;        
+}
+
+// Patterns for all four scalar element sizes (B/H/S/D).  The D pattern is
+// inherited from Neon_Scalar3Same_D_size_patterns; B, H and S are added here.
+multiclass Neon_Scalar3Same_BHSD_size_patterns<SDPatternOperator opnode,
+                                               Instruction INSTB,
+                                               Instruction INSTH,
+                                               Instruction INSTS,
+                                               Instruction INSTD>
+  : Neon_Scalar3Same_D_size_patterns<opnode, INSTD> {
+  def: Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
+           (INSTB FPR8:$Rn, FPR8:$Rm)>;
+
+  def: Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
+           (INSTH FPR16:$Rn, FPR16:$Rm)>;
+
+  def: Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
+           (INSTS FPR32:$Rn, FPR32:$Rm)>;
+}
 
+// Patterns for the H/S-only scalar integer ops (e.g. sqdmulh, sqrdmulh).
+multiclass Neon_Scalar3Same_HS_size_patterns<SDPatternOperator opnode,
+                                             Instruction INSTH,
+                                             Instruction INSTS> {
+  def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
+            (INSTH FPR16:$Rn, FPR16:$Rm)>;
+  def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
+            (INSTS FPR32:$Rn, FPR32:$Rm)>;
+}
+
+// Patterns for scalar floating-point three-same ops on the v1f32/v1f64
+// "scalar vector" types (e.g. fmulx, frecps, frsqrts).
+multiclass Neon_Scalar3Same_SD_size_patterns<SDPatternOperator opnode,
+                                             Instruction INSTS,
+                                             Instruction INSTD> {
+  def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn), (v1f32 FPR32:$Rm))),
+            (INSTS FPR32:$Rn, FPR32:$Rm)>;
+  def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+            (INSTD FPR64:$Rn, FPR64:$Rm)>;
+}
+
+// Scalar Two Registers Miscellaneous: one-source scalar ops (e.g. scvtf,
+// frecpe) with S and D variants.  The low size bit selects S ({size_high, 0})
+// versus D ({size_high, 1}).
+multiclass NeonI_Scalar2SameMisc_SD_size<bit u, bit size_high, bits<5> opcode,
+                                         string asmop> {
+  def ss : NeonI_Scalar2SameMisc<u, {size_high, 0b0}, opcode,
+                          (outs FPR32:$Rd), (ins FPR32:$Rn),
+                          !strconcat(asmop, " $Rd, $Rn"),
+                          [], NoItinerary>;
+  def dd : NeonI_Scalar2SameMisc<u, {size_high, 0b1}, opcode,
+                          (outs FPR64:$Rd), (ins FPR64:$Rn),
+                          !strconcat(asmop, " $Rd, $Rn"),
+                          [], NoItinerary>;
+}
+
+// int-to-FP conversion patterns: input type differs from output type
+// (v1i32 -> v1f32, v1i64 -> v1f64), so separate S and D intrinsics are taken.
+multiclass Neon_Scalar2SameMisc_cvt_SD_size_patterns<SDPatternOperator Sopnode,
+                                                     SDPatternOperator Dopnode,
+                                                     Instruction INSTS,
+                                                     Instruction INSTD> {
+  def : Pat<(v1f32 (Sopnode (v1i32 FPR32:$Rn))),
+            (INSTS FPR32:$Rn)>;
+  def : Pat<(v1f64 (Dopnode (v1i64 FPR64:$Rn))),
+            (INSTD FPR64:$Rn)>;
+}
+
+// FP-to-FP one-source patterns (same input and output type), used for
+// frecpe/frecpx/frsqrte; a single opnode covers both element sizes.
+multiclass Neon_Scalar2SameMisc_SD_size_patterns<SDPatternOperator opnode,
+                                                 Instruction INSTS,
+                                                 Instruction INSTD> {
+  def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn))),
+            (INSTS FPR32:$Rn)>;
+  def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
+            (INSTD FPR64:$Rn)>;
+}
 
 // Scalar Integer Add
 let isCommutable = 1 in {
@@ -2870,9 +3133,15 @@ def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
 // Scalar Integer Sub
 def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
 
-// Pattern for Scalar Integer Add and Sub with D register
-def : Neon_Scalar_D_size_patterns<add, ADDddd>;
-def : Neon_Scalar_D_size_patterns<sub, SUBddd>;
+// Pattern for Scalar Integer Add and Sub with D register only
+defm : Neon_Scalar3Same_D_size_patterns<add, ADDddd>;
+defm : Neon_Scalar3Same_D_size_patterns<sub, SUBddd>;
+
+// Patterns to match llvm.aarch64.* intrinsic for Scalar Add, Sub
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vaddds, ADDddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vadddu, ADDddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubds, SUBddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubdu, SUBddd>;
 
 // Scalar Integer Saturating Add (Signed, Unsigned)
 defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
@@ -2882,40 +3151,223 @@ defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
 defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
 defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
 
-// Patterns for Scalar Integer Saturating Add, Sub with D register only
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vqadds, SQADDddd>;
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vqaddu, UQADDddd>;
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubs, SQSUBddd>;
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubu, UQSUBddd>;
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Saturating Add, Sub  (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqadds, SQADDddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqaddu, UQADDddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqsubs, SQSUBddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqsubu, UQSUBddd>;
+
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Integer Saturating Add, Sub  (Signed, Unsigned)
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqadds, SQADDbbb,
+                                           SQADDhhh, SQADDsss, SQADDddd>;
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqaddu, UQADDbbb,
+                                           UQADDhhh, UQADDsss, UQADDddd>;
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqsubs, SQSUBbbb,
+                                           SQSUBhhh, SQSUBsss, SQSUBddd>;
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqsubu, UQSUBbbb,
+                                           UQSUBhhh, UQSUBsss, UQSUBddd>;
+
+// Scalar Integer Saturating Doubling Multiply Half High
+defm SQDMULH : NeonI_Scalar3Same_HS_sizes<0b0, 0b10110, "sqdmulh", 1>;
+
+// Scalar Integer Saturating Rounding Doubling Multiply Half High
+defm SQRDMULH : NeonI_Scalar3Same_HS_sizes<0b1, 0b10110, "sqrdmulh", 1>;
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Saturating Doubling Multiply Half High and
+// Scalar Integer Saturating Rounding Doubling Multiply Half High
+defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqdmulh, SQDMULHhhh,
+                                                               SQDMULHsss>;
+defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqrdmulh, SQRDMULHhhh,
+                                                                SQRDMULHsss>;
+
+// Scalar Floating-point Multiply Extended
+defm FMULX : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11011, "fmulx", 1>;
+
+// Scalar Floating-point Reciprocal Step
+defm FRECPS : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11111, "frecps", 0>;
+
+// Scalar Floating-point Reciprocal Square Root Step
+defm FRSQRTS : NeonI_Scalar3Same_SD_sizes<0b0, 0b1, 0b11111, "frsqrts", 0>;
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Floating-point Reciprocal Step and
+// Scalar Floating-point Reciprocal Square Root Step
+defm : Neon_Scalar3Same_SD_size_patterns<int_arm_neon_vrecps, FRECPSsss,
+                                                              FRECPSddd>;
+defm : Neon_Scalar3Same_SD_size_patterns<int_arm_neon_vrsqrts, FRSQRTSsss,
+                                                               FRSQRTSddd>;
+
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Floating-point Multiply Extended,
+defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vmulx, FMULXsss,
+                                         FMULXddd>;
 
 // Scalar Integer Shift Left (Signed, Unsigned)
 def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
 def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
 
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
+
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Integer Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshlds, SSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshldu, USHLddd>;
+
 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
 defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
 defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
 
-// Scalar Integer Rouding Shift Left (Signed, Unsigned)
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Integer Saturating Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshls, SQSHLbbb,
+                                           SQSHLhhh, SQSHLsss, SQSHLddd>;
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshlu, UQSHLbbb,
+                                           UQSHLhhh, UQSHLsss, UQSHLddd>;
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Saturating Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
+
+// Scalar Integer Rounding Shift Left (Signed, Unsigned)
 def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
 def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
 
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Integer Rounding Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshlds, SRSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshldu, URSHLddd>;
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Rounding Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
+
 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
 defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
 defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
 
-// Patterns for Scalar Integer Shift Lef, Saturating Shift Left,
-// Rounding Shift Left, Rounding Saturating Shift Left with D register only
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
-def : Neon_Scalar_D_size_patterns<shl, SSHLddd>;
-def : Neon_Scalar_D_size_patterns<shl, USHLddd>;
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
-def : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshls, SQRSHLbbb,
+                                           SQRSHLhhh, SQRSHLsss, SQRSHLddd>;
+defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshlu, UQRSHLbbb,
+                                           UQRSHLhhh, UQRSHLsss, UQRSHLddd>;
+
+// Patterns to match llvm.arm.* intrinsic for
+// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
+defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
+
+// Scalar Signed Integer Convert To Floating-point
+defm SCVTF  : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11101, "scvtf">;
+defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtf32_s32,
+                                                 int_aarch64_neon_vcvtf64_s64,
+                                                 SCVTFss, SCVTFdd>;
+
+// Scalar Unsigned Integer Convert To Floating-point
+defm UCVTF  : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11101, "ucvtf">;
+defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtf32_u32,
+                                                 int_aarch64_neon_vcvtf64_u64,
+                                                 UCVTFss, UCVTFdd>;
+
+// Scalar Floating-point Reciprocal Estimate
+defm FRECPE : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11101, "frecpe">;
+defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrecpe,
+                                             FRECPEss, FRECPEdd>;
+
+// Scalar Floating-point Reciprocal Exponent
+defm FRECPX : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11111, "frecpx">;
+defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpx,
+                                             FRECPXss, FRECPXdd>;
+
+// Scalar Floating-point Reciprocal Square Root Estimate
+defm FRSQRTE: NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11101, "frsqrte">;
+defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrsqrte,
+                                             FRSQRTEss, FRSQRTEdd>;
+
+// Scalar Reduce Pairwise: reduce a 2-element vector to a scalar result.
+// Note the source is a full vector register (VPR128/VPR64) while the
+// destination is a scalar FPR.
+
+// D-only variant: FPR64 result from a .2d source.
+multiclass NeonI_ScalarPair_D_sizes<bit u, bit size, bits<5> opcode,
+                                     string asmop, bit Commutable = 0> {
+  let isCommutable = Commutable in {
+    def _D_2D : NeonI_ScalarPair<u, {size, 0b1}, opcode,
+                                (outs FPR64:$Rd), (ins VPR128:$Rn),
+                                !strconcat(asmop, " $Rd, $Rn.2d"),
+                                [],
+                                NoItinerary>;
+  }
+}
+
+// S+D variant: adds an FPR32 result from a .2s source on top of the D form.
+multiclass NeonI_ScalarPair_SD_sizes<bit u, bit size, bits<5> opcode,
+                                     string asmop, bit Commutable = 0>
+  : NeonI_ScalarPair_D_sizes<u, size, opcode, asmop, Commutable> {
+  let isCommutable = Commutable in {
+    def _S_2S : NeonI_ScalarPair<u, {size, 0b0}, opcode,
+                                (outs FPR32:$Rd), (ins VPR64:$Rn),
+                                !strconcat(asmop, " $Rd, $Rn.2s"),
+                                [],
+                                NoItinerary>;
+  }
+}
+
+// Scalar Reduce Addition Pairwise (Integer) with
+// Pattern to match llvm.arm.* intrinsic
+defm ADDPvv : NeonI_ScalarPair_D_sizes<0b0, 0b1, 0b11011, "addp", 0>;
+
+// Pattern to match llvm.aarch64.* intrinsic for
+// Scalar Reduce Addition Pairwise (Integer)
+def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
+          (ADDPvv_D_2D VPR128:$Rn)>;
+
+// Scalar Reduce Addition Pairwise (Floating Point)
+defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
+
+// Scalar Reduce Maximum Pairwise (Floating Point)
+defm FMAXPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01111, "fmaxp", 0>;
+
+// Scalar Reduce Minimum Pairwise (Floating Point)
+defm FMINPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01111, "fminp", 0>;
+
+// Scalar Reduce maxNum Pairwise (Floating Point)
+defm FMAXNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01100, "fmaxnmp", 0>;
+
+// Scalar Reduce minNum Pairwise (Floating Point)
+defm FMINNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01100, "fminnmp", 0>;
+
+// Pairwise-reduce patterns: distinct intrinsics for the .2s (opnodeS) and
+// .2d (opnodeD) forms, since the source vector types differ.
+multiclass Neon_ScalarPair_SD_size_patterns<SDPatternOperator opnodeS,
+                                            SDPatternOperator opnodeD,
+                                            Instruction INSTS,
+                                            Instruction INSTD> {
+  def : Pat<(v1f32 (opnodeS (v2f32 VPR64:$Rn))),
+            (INSTS VPR64:$Rn)>;
+  def : Pat<(v1f64 (opnodeD (v2f64 VPR128:$Rn))),
+            (INSTD VPR128:$Rn)>;
+}
+
+// Patterns to match llvm.aarch64.* intrinsic for
+// Scalar Reduce Add, Max, Min, MaxNum, MinNum Pairwise (Floating Point)
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfadd,
+  int_aarch64_neon_vpfaddq, FADDPvv_S_2S, FADDPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmax,
+  int_aarch64_neon_vpmaxq, FMAXPvv_S_2S, FMAXPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmin,
+  int_aarch64_neon_vpminq, FMINPvv_S_2S, FMINPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfmaxnm,
+  int_aarch64_neon_vpfmaxnmq, FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
+
+defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm, 
+  int_aarch64_neon_vpfminnmq, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
+
 
 
 //===----------------------------------------------------------------------===//
@@ -2989,59 +3441,1223 @@ def : Pat<(v16i8 (bitconvert (v2f64  VPR128:$src))), (v16i8 VPR128:$src)>;
 
 
 // ...and scalar bitcasts...
+def : Pat<(f16 (bitconvert (v1i16  FPR16:$src))), (f16 FPR16:$src)>;
+def : Pat<(f32 (bitconvert (v1i32  FPR32:$src))), (f32 FPR32:$src)>;
+def : Pat<(f64 (bitconvert (v1i64  FPR64:$src))), (f64 FPR64:$src)>;
+def : Pat<(f32 (bitconvert (v1f32  FPR32:$src))), (f32 FPR32:$src)>;
+def : Pat<(f64 (bitconvert (v1f64  FPR64:$src))), (f64 FPR64:$src)>;
+
+def : Pat<(i64 (bitconvert (v1i64  FPR64:$src))), (FMOVxd $src)>;
+def : Pat<(i32 (bitconvert (v1i32  FPR32:$src))), (FMOVws $src)>;
 
 def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
 def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
 def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
 
-def : Pat<(f64   (bitconvert (v8i8  VPR64:$src))),
-                 (f64 (EXTRACT_SUBREG (v8i8  VPR64:$src), sub_64))>;
-def : Pat<(f64   (bitconvert (v4i16  VPR64:$src))),
-                 (f64 (EXTRACT_SUBREG (v4i16  VPR64:$src), sub_64))>;
-def : Pat<(f64   (bitconvert (v2i32  VPR64:$src))),
-                 (f64 (EXTRACT_SUBREG (v2i32  VPR64:$src), sub_64))>;
-def : Pat<(f64   (bitconvert (v2f32  VPR64:$src))),
-                 (f64 (EXTRACT_SUBREG (v2f32  VPR64:$src), sub_64))>;
-def : Pat<(f64   (bitconvert (v1i64  VPR64:$src))),
-                 (f64 (EXTRACT_SUBREG (v1i64  VPR64:$src), sub_64))>;
-def : Pat<(f128  (bitconvert (v16i8  VPR128:$src))),
-                 (f128 (EXTRACT_SUBREG (v16i8  VPR128:$src), sub_alias))>;
-def : Pat<(f128  (bitconvert (v8i16  VPR128:$src))),
-                 (f128 (EXTRACT_SUBREG (v8i16  VPR128:$src), sub_alias))>;
-def : Pat<(f128  (bitconvert (v4i32  VPR128:$src))),
-                 (f128 (EXTRACT_SUBREG (v4i32  VPR128:$src), sub_alias))>;
-def : Pat<(f128  (bitconvert (v2i64  VPR128:$src))),
-                 (f128 (EXTRACT_SUBREG (v2i64  VPR128:$src), sub_alias))>;
-def : Pat<(f128  (bitconvert (v4f32  VPR128:$src))),
-                 (f128 (EXTRACT_SUBREG (v4f32  VPR128:$src), sub_alias))>;
-def : Pat<(f128  (bitconvert (v2f64  VPR128:$src))),
-                 (f128 (EXTRACT_SUBREG (v2f64  VPR128:$src), sub_alias))>;
-
-def : Pat<(v8i8   (bitconvert (f64   FPR64:$src))),
-                  (v8i8 (SUBREG_TO_REG (i64 0), (f64  FPR64:$src), sub_64))>;
-def : Pat<(v4i16  (bitconvert (f64   FPR64:$src))),
-                  (v4i16 (SUBREG_TO_REG (i64 0), (f64  FPR64:$src), sub_64))>;
-def : Pat<(v2i32  (bitconvert (f64   FPR64:$src))),
-                  (v2i32 (SUBREG_TO_REG (i64 0), (f64  FPR64:$src), sub_64))>;
-def : Pat<(v2f32  (bitconvert (f64   FPR64:$src))),
-                  (v2f32 (SUBREG_TO_REG (i64 0), (f64  FPR64:$src), sub_64))>;
-def : Pat<(v1i64  (bitconvert (f64   FPR64:$src))),
-                  (v1i64 (SUBREG_TO_REG (i64 0), (f64  FPR64:$src), sub_64))>;
-def : Pat<(v16i8  (bitconvert (f128   FPR128:$src))),
-                  (v16i8 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
-                  sub_alias))>;
-def : Pat<(v8i16  (bitconvert (f128   FPR128:$src))),
-                  (v8i16 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
-                  sub_alias))>;
-def : Pat<(v4i32  (bitconvert (f128   FPR128:$src))),
-                  (v4i32 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
-                  sub_alias))>;
-def : Pat<(v2i64  (bitconvert (f128   FPR128:$src))),
-                  (v2i64 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
-                  sub_alias))>;
-def : Pat<(v4f32  (bitconvert (f128   FPR128:$src))),
-                  (v4f32 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
-                  sub_alias))>;
-def : Pat<(v2f64  (bitconvert (f128   FPR128:$src))),
-                  (v2f64 (SUBREG_TO_REG (i128 0), (f128  FPR128:$src),
-                  sub_alias))>;
+def : Pat<(f64   (bitconvert (v8i8  VPR64:$src))), (f64 VPR64:$src)>;
+def : Pat<(f64   (bitconvert (v4i16  VPR64:$src))), (f64 VPR64:$src)>;
+def : Pat<(f64   (bitconvert (v2i32  VPR64:$src))), (f64 VPR64:$src)>;
+def : Pat<(f64   (bitconvert (v2f32  VPR64:$src))), (f64 VPR64:$src)>;
+def : Pat<(f64   (bitconvert (v1i64  VPR64:$src))), (f64 VPR64:$src)>;
+
+def : Pat<(f128  (bitconvert (v16i8  VPR128:$src))), (f128 VPR128:$src)>;
+def : Pat<(f128  (bitconvert (v8i16  VPR128:$src))), (f128 VPR128:$src)>;
+def : Pat<(f128  (bitconvert (v4i32  VPR128:$src))), (f128 VPR128:$src)>;
+def : Pat<(f128  (bitconvert (v2i64  VPR128:$src))), (f128 VPR128:$src)>;
+def : Pat<(f128  (bitconvert (v4f32  VPR128:$src))), (f128 VPR128:$src)>;
+def : Pat<(f128  (bitconvert (v2f64  VPR128:$src))), (f128 VPR128:$src)>;
+
+def : Pat<(v1i16 (bitconvert (f16  FPR16:$src))), (v1i16 FPR16:$src)>;
+def : Pat<(v1i32 (bitconvert (f32  FPR32:$src))), (v1i32 FPR32:$src)>;
+def : Pat<(v1i64 (bitconvert (f64  FPR64:$src))), (v1i64 FPR64:$src)>;
+def : Pat<(v1f32 (bitconvert (f32  FPR32:$src))), (v1f32 FPR32:$src)>;
+def : Pat<(v1f64 (bitconvert (f64  FPR64:$src))), (v1f64 FPR64:$src)>;
+
+def : Pat<(v1i64 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
+def : Pat<(v1i32 (bitconvert (i32  GPR32:$src))), (FMOVsw $src)>;
+
+def : Pat<(v8i8   (bitconvert (f64   FPR64:$src))), (v8i8 FPR64:$src)>;
+def : Pat<(v4i16  (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
+def : Pat<(v2i32  (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
+def : Pat<(v2f32  (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
+def : Pat<(v1i64  (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
+
+def : Pat<(v16i8  (bitconvert (f128   FPR128:$src))), (v16i8 FPR128:$src)>;
+def : Pat<(v8i16  (bitconvert (f128   FPR128:$src))), (v8i16 FPR128:$src)>;
+def : Pat<(v4i32  (bitconvert (f128   FPR128:$src))), (v4i32 FPR128:$src)>;
+def : Pat<(v2i64  (bitconvert (f128   FPR128:$src))), (v2i64 FPR128:$src)>;
+def : Pat<(v4f32  (bitconvert (f128   FPR128:$src))), (v4f32 FPR128:$src)>;
+def : Pat<(v2f64  (bitconvert (f128   FPR128:$src))), (v2f64 FPR128:$src)>;
+
+// Bare (un-#-prefixed) unsigned immediate operands used for lane indices.
+// Only uimm0 constrains its ImmLeaf predicate; uimm1..uimm4 accept any
+// immediate here.  NOTE(review): range enforcement presumably comes from the
+// ParserMatchClass and the pattern context that instantiates these — the
+// predicates themselves do not check bounds; confirm against the asm parser.
+def neon_uimm0_bare : Operand<i64>,
+                        ImmLeaf<i64, [{return Imm == 0;}]> {
+  let ParserMatchClass = neon_uimm0_asmoperand;
+  let PrintMethod = "printNeonUImm8OperandBare";
+}
+
+def neon_uimm1_bare : Operand<i64>,
+                        ImmLeaf<i64, [{(void)Imm; return true;}]> {
+  let ParserMatchClass = neon_uimm1_asmoperand;
+  let PrintMethod = "printNeonUImm8OperandBare";
+}
+
+def neon_uimm2_bare : Operand<i64>,
+                        ImmLeaf<i64, [{(void)Imm; return true;}]> {
+  let ParserMatchClass = neon_uimm2_asmoperand;
+  let PrintMethod = "printNeonUImm8OperandBare";
+}
+
+def neon_uimm3_bare : Operand<i64>,
+                        ImmLeaf<i64, [{(void)Imm; return true;}]> {
+  let ParserMatchClass = uimm3_asmoperand;
+  let PrintMethod = "printNeonUImm8OperandBare";
+}
+
+def neon_uimm4_bare : Operand<i64>,
+                        ImmLeaf<i64, [{(void)Imm; return true;}]> {
+  let ParserMatchClass = uimm4_asmoperand;
+  let PrintMethod = "printNeonUImm8OperandBare";
+}
+
+// INS (general): insert a GPR value into one lane of a 128-bit vector.
+// $src/$Rd are tied so the untouched lanes pass through; the pattern matches
+// a vector_insert of a GPR element at immediate index $Imm.
+class NeonI_INS_main<string asmop, string Res, ValueType ResTy,
+                     RegisterClass OpGPR, ValueType OpTy, Operand OpImm>
+  : NeonI_copy<0b1, 0b0, 0b0011,
+               (outs VPR128:$Rd), (ins VPR128:$src, OpGPR:$Rn, OpImm:$Imm),
+               asmop # "\t$Rd." # Res # "[$Imm], $Rn",
+               [(set (ResTy VPR128:$Rd),
+                 (ResTy (vector_insert
+                   (ResTy VPR128:$src),
+                   (OpTy OpGPR:$Rn),
+                   (OpImm:$Imm))))],
+               NoItinerary> {
+  bits<4> Imm;
+  let Constraints = "$src = $Rd";
+}
+
+// The following definitions are for the instruction class (3V Elem):
+// three-operand vector instructions taking one by-element (lane) operand.
+
+// Variant 1
+
+// Accumulating by-element base class: $src is tied to $Rd, and the lane
+// index/element-register encodings (Inst{11}, Inst{21}, ...) are filled in
+// per-size by the instantiating multiclasses below.
+class NI_2VE<bit q, bit u, bits<2> size, bits<4> opcode,
+             string asmop, string ResS, string OpS, string EleOpS,
+             Operand OpImm, RegisterOperand ResVPR,
+             RegisterOperand OpVPR, RegisterOperand EleOpVPR>
+  : NeonI_2VElem<q, u, size, opcode, 
+                 (outs ResVPR:$Rd), (ins ResVPR:$src, OpVPR:$Rn,
+                                         EleOpVPR:$Re, OpImm:$Index),
+                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
+                 ", $Re." # EleOpS # "[$Index]",
+                 [],
+                 NoItinerary> {
+  bits<3> Index;
+  bits<5> Re;
+
+  let Constraints = "$src = $Rd";
+}
+
+// By-element forms for the S and H element sizes.  Each def places the lane
+// index bits and the element register number into the instruction encoding.
+multiclass NI_2VE_v1<bit u, bits<4> opcode, string asmop>
+{
+  // vector register class for element is always 128-bit to cover the max index
+  def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
+                     neon_uimm2_bare, VPR64, VPR64, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+
+  def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
+                     neon_uimm2_bare, VPR128, VPR128, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+
+  // Index operations on 16-bit(H) elements are restricted to using v0-v15.
+  def _4h8h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
+                     neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
+    let Inst{11} = {Index{2}};
+    let Inst{21} = {Index{1}};
+    let Inst{20} = {Index{0}};
+    let Inst{19-16} = Re{3-0};
+  }
+
+  def _8h8h : NI_2VE<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
+                     neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
+    let Inst{11} = {Index{2}};
+    let Inst{21} = {Index{1}};
+    let Inst{20} = {Index{0}};
+    let Inst{19-16} = Re{3-0};
+  }
+}
+
+// By-element multiply-accumulate / multiply-subtract.
+defm MLAvve : NI_2VE_v1<0b1, 0b0000, "mla">;
+defm MLSvve : NI_2VE_v1<0b1, 0b0100, "mls">;
+
+// Pattern for lane in 128-bit vector: the element register is already
+// 128-bit, so it is passed to the instruction unchanged.  `coreop` builds
+// the duplicated-lane operand from ($Re, $Index).
+class NI_2VE_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
+                   RegisterOperand ResVPR, RegisterOperand OpVPR,
+                   RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
+                   ValueType EleOpTy, SDPatternOperator coreop>
+  : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
+          (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+        (INST ResVPR:$src, OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
+
+// Pattern for lane in 64-bit vector: the 64-bit element register must be
+// widened with SUBREG_TO_REG before it can feed the 128-bit Re encoding.
+class NI_2VE_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
+                  RegisterOperand ResVPR, RegisterOperand OpVPR,
+                  RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
+                  ValueType EleOpTy, SDPatternOperator coreop>
+  : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
+          (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+        (INST ResVPR:$src, OpVPR:$Rn, 
+          (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
+
+// Selection patterns for the NI_2VE_v1 instructions: the laneq patterns take
+// the element from a 128-bit vector, the lane patterns from a 64-bit vector.
+multiclass NI_2VE_v1_pat<string subop, SDPatternOperator op>
+{
+  def : NI_2VE_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
+                     op, VPR64, VPR64, VPR128, v2i32, v2i32, v4i32,
+                     BinOpFrag<(Neon_vduplane
+                                 (Neon_low4S node:$LHS), node:$RHS)>>;
+
+  def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
+                     op, VPR128, VPR128, VPR128, v4i32, v4i32, v4i32,
+                     BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VE_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
+                     op, VPR64, VPR64, VPR128Lo, v4i16, v4i16, v8i16,
+                     BinOpFrag<(Neon_vduplane
+                                 (Neon_low8H node:$LHS), node:$RHS)>>;
+
+  def : NI_2VE_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
+                     op, VPR128, VPR128, VPR128Lo, v8i16, v8i16, v8i16,
+                     BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  // Index can only be half of the max value for lane in 64-bit vector
+
+  def : NI_2VE_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
+                    op, VPR64, VPR64, VPR64, v2i32, v2i32, v2i32,
+                    BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
+                    op, VPR128, VPR128, VPR64, v4i32, v4i32, v2i32,
+                    BinOpFrag<(Neon_vduplane
+                                (Neon_combine_4S node:$LHS, undef),
+                                 node:$RHS)>>;
+
+  def : NI_2VE_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
+                    op, VPR64, VPR64, VPR64Lo, v4i16, v4i16, v4i16,
+                    BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VE_lane<!cast<Instruction>(subop # "_8h8h"), neon_uimm2_bare,
+                    op, VPR128, VPR128, VPR64Lo, v8i16, v8i16, v4i16,
+                    BinOpFrag<(Neon_vduplane
+                                (Neon_combine_8H node:$LHS, undef),
+                                node:$RHS)>>;
+}
+
+defm MLA_lane_v1 : NI_2VE_v1_pat<"MLAvve", Neon_mla>;
+defm MLS_lane_v1 : NI_2VE_v1_pat<"MLSvve", Neon_mls>;
+
+// Two-operand (no accumulator) by-element instruction format:
+//   Rd = Rn <op> Re[Index].
+// The Index and Re fields are declared here; the concrete Inst{} bit
+// positions for them depend on the element size and are assigned by the
+// instantiating multiclasses below.
+class NI_2VE_2op<bit q, bit u, bits<2> size, bits<4> opcode,
+                 string asmop, string ResS, string OpS, string EleOpS,
+                 Operand OpImm, RegisterOperand ResVPR,
+                 RegisterOperand OpVPR, RegisterOperand EleOpVPR>
+  : NeonI_2VElem<q, u, size, opcode, 
+                 (outs ResVPR:$Rd), (ins OpVPR:$Rn,
+                                         EleOpVPR:$Re, OpImm:$Index),
+                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
+                 ", $Re." # EleOpS # "[$Index]",
+                 [],
+                 NoItinerary> {
+  bits<3> Index;
+  bits<5> Re;
+}
+
+// Integer two-operand by-element instructions (MUL/SQDMULH/SQRDMULH).
+// For S-sized elements the 2-bit lane index is split across Inst{11}
+// (high bit) and Inst{21} (low bit); for H-sized elements the 3-bit index
+// spans Inst{11,21,20} and Re shrinks to 4 bits, restricting the element
+// register to v0-v15 (hence VPR128Lo).
+multiclass NI_2VE_v1_2op<bit u, bits<4> opcode, string asmop>
+{
+  // vector register class for element is always 128-bit to cover the max index
+  def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
+                         neon_uimm2_bare, VPR64, VPR64, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+
+  def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
+                         neon_uimm2_bare, VPR128, VPR128, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+
+  // Index operations on 16-bit(H) elements are restricted to using v0-v15.
+  def _4h8h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
+                         neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
+    let Inst{11} = {Index{2}};
+    let Inst{21} = {Index{1}};
+    let Inst{20} = {Index{0}};
+    let Inst{19-16} = Re{3-0};
+  }
+
+  def _8h8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
+                         neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
+    let Inst{11} = {Index{2}};
+    let Inst{21} = {Index{1}};
+    let Inst{20} = {Index{0}};
+    let Inst{19-16} = Re{3-0};
+  }
+}
+
+defm MULve : NI_2VE_v1_2op<0b0, 0b1000, "mul">;
+defm SQDMULHve : NI_2VE_v1_2op<0b0, 0b1100, "sqdmulh">;
+defm SQRDMULHve : NI_2VE_v1_2op<0b0, 0b1101, "sqrdmulh">;
+
+// Pattern for lane in 128-bit vector
+// Like the accumulating pattern classes above, but for two-operand
+// (no $src accumulator) by-element instructions.
+class NI_2VE_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
+                       RegisterOperand OpVPR, RegisterOperand EleOpVPR,
+                       ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
+                       SDPatternOperator coreop>
+  : Pat<(ResTy (op (OpTy OpVPR:$Rn),
+          (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+        (INST OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
+
+// Pattern for lane in 64-bit vector
+// $Re is widened with SUBREG_TO_REG since the instruction encodes a
+// 128-bit element register.
+class NI_2VE_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
+                      RegisterOperand OpVPR, RegisterOperand EleOpVPR,
+                      ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
+                      SDPatternOperator coreop>
+  : Pat<(ResTy (op (OpTy OpVPR:$Rn),
+          (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+        (INST OpVPR:$Rn, 
+          (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
+
+// ISel patterns for the integer two-operand by-element ops
+// (MUL/SQDMULH/SQRDMULH), mirroring NI_2VE_v1_pat but without the
+// accumulator operand.
+multiclass NI_2VE_mul_v1_pat<string subop, SDPatternOperator op>
+{
+  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
+                         op, VPR64, VPR128, v2i32, v2i32, v4i32,
+                         BinOpFrag<(Neon_vduplane
+                                     (Neon_low4S node:$LHS), node:$RHS)>>;
+
+  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
+                         op, VPR128, VPR128, v4i32, v4i32, v4i32,
+                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
+                         op, VPR64, VPR128Lo, v4i16, v4i16, v8i16,
+                         BinOpFrag<(Neon_vduplane
+                                    (Neon_low8H node:$LHS), node:$RHS)>>;
+
+  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
+                         op, VPR128, VPR128Lo, v8i16, v8i16, v8i16,
+                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  // Index can only be half of the max value for lane in 64-bit vector
+
+  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
+                        op, VPR64, VPR64, v2i32, v2i32, v2i32,
+                        BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
+                        op, VPR128, VPR64, v4i32, v4i32, v2i32,
+                        BinOpFrag<(Neon_vduplane
+                                    (Neon_combine_4S node:$LHS, undef),
+                                     node:$RHS)>>;
+
+  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
+                        op, VPR64, VPR64Lo, v4i16, v4i16, v4i16,
+                        BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_8h8h"), neon_uimm2_bare,
+                        op, VPR128, VPR64Lo, v8i16, v8i16, v4i16,
+                        BinOpFrag<(Neon_vduplane
+                                    (Neon_combine_8H node:$LHS, undef),
+                                    node:$RHS)>>;
+}
+
+defm MUL_lane_v1 : NI_2VE_mul_v1_pat<"MULve", mul>;
+defm SQDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQDMULHve", int_arm_neon_vqdmulh>;
+defm SQRDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQRDMULHve", int_arm_neon_vqrdmulh>;
+
+// Variant 2
+
+// Floating-point two-operand by-element instructions (FMUL/FMULX) with
+// 2S/4S/2D shapes.  S-sized elements split the 2-bit index over
+// Inst{11}/Inst{21}; D-sized elements have a 1-bit index in Inst{11}
+// with Inst{21} fixed to 0.
+multiclass NI_2VE_v2_2op<bit u, bits<4> opcode, string asmop>
+{
+  // vector register class for element is always 128-bit to cover the max index
+  def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
+                         neon_uimm2_bare, VPR64, VPR64, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+
+  def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
+                         neon_uimm2_bare, VPR128, VPR128, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+
+  // _1d2d doesn't exist!
+
+  def _2d2d : NI_2VE_2op<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
+                         neon_uimm1_bare, VPR128, VPR128, VPR128> {
+    let Inst{11} = {Index{0}};
+    let Inst{21} = 0b0;
+    let Inst{20-16} = Re;
+  }
+}
+
+defm FMULve : NI_2VE_v2_2op<0b0, 0b1001, "fmul">;
+defm FMULXve : NI_2VE_v2_2op<0b1, 0b1001, "fmulx">;
+
+// 2D-by-element pattern where the element comes from a 64-bit (1D)
+// vector: the only possible lane is 0, so the coreop combines $Re with
+// itself and the instruction is emitted with a hard-coded index of 0,
+// after widening $Re with SUBREG_TO_REG.
+class NI_2VE_mul_lane_2d<Instruction INST, Operand OpImm, SDPatternOperator op,
+                         RegisterOperand OpVPR, RegisterOperand EleOpVPR,
+                         ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
+                         SDPatternOperator coreop>
+  : Pat<(ResTy (op (OpTy OpVPR:$Rn),
+          (OpTy (coreop (EleOpTy EleOpVPR:$Re), (EleOpTy EleOpVPR:$Re))))),
+        (INST OpVPR:$Rn, 
+          (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), 0)>;
+
+// ISel patterns for the floating-point two-operand by-element ops
+// (FMUL/FMULX) over the 2S/4S/2D shapes.
+multiclass NI_2VE_mul_v2_pat<string subop, SDPatternOperator op>
+{
+  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
+                         op, VPR64, VPR128, v2f32, v2f32, v4f32,
+                         BinOpFrag<(Neon_vduplane
+                                     (Neon_low4f node:$LHS), node:$RHS)>>;
+
+  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
+                         op, VPR128, VPR128, v4f32, v4f32, v4f32,
+                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
+                         op, VPR128, VPR128, v2f64, v2f64, v2f64,
+                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  // Index can only be half of the max value for lane in 64-bit vector
+
+  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
+                        op, VPR64, VPR64, v2f32, v2f32, v2f32,
+                        BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
+                        op, VPR128, VPR64, v4f32, v4f32, v2f32,
+                        BinOpFrag<(Neon_vduplane
+                                    (Neon_combine_4f node:$LHS, undef),
+                                    node:$RHS)>>;
+
+  // 1D element vector: only lane 0 exists, handled by the _2d variant.
+  def : NI_2VE_mul_lane_2d<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
+                           op, VPR128, VPR64, v2f64, v2f64, v1f64,
+                           BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
+}
+
+defm FMUL_lane_v2 : NI_2VE_mul_v2_pat<"FMULve", fmul>;
+defm FMULX_lane_v2 : NI_2VE_mul_v2_pat<"FMULXve", int_aarch64_neon_vmulx>;
+
+// The followings are patterns using fma
+// -ffp-contract=fast generates fma
+
+// Accumulating floating-point by-element instructions (FMLA/FMLS) with
+// 2S/4S/2D shapes; index bit layout matches NI_2VE_v2_2op above.
+multiclass NI_2VE_v2<bit u, bits<4> opcode, string asmop>
+{
+  // vector register class for element is always 128-bit to cover the max index
+  def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
+                     neon_uimm2_bare, VPR64, VPR64, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+
+  def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
+                     neon_uimm2_bare, VPR128, VPR128, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+
+  // _1d2d doesn't exist!
+  
+  def _2d2d : NI_2VE<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
+                     neon_uimm1_bare, VPR128, VPR128, VPR128> {
+    let Inst{11} = {Index{0}};
+    let Inst{21} = 0b0;
+    let Inst{20-16} = Re;
+  }
+}
+
+defm FMLAvve : NI_2VE_v2<0b0, 0b0001, "fmla">;
+defm FMLSvve : NI_2VE_v2<0b0, 0b0101, "fmls">;
+
+// "swap" pattern classes: unlike NI_2VE_* above, the duplicated lane
+// appears as the FIRST operand of op (fma's multiplicand position).
+
+// Pattern for lane in 128-bit vector
+class NI_2VEswap_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
+                       RegisterOperand ResVPR, RegisterOperand OpVPR,
+                       ValueType ResTy, ValueType OpTy,
+                       SDPatternOperator coreop>
+  : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
+                   (ResTy ResVPR:$src), (ResTy ResVPR:$Rn))),
+        (INST ResVPR:$src, ResVPR:$Rn, OpVPR:$Re, OpImm:$Index)>;
+
+// Pattern for lane in 64-bit vector
+// $Re is widened with SUBREG_TO_REG for the 128-bit element register field.
+class NI_2VEswap_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
+                      RegisterOperand ResVPR, RegisterOperand OpVPR,
+                      ValueType ResTy, ValueType OpTy,
+                      SDPatternOperator coreop>
+  : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
+                   (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
+        (INST ResVPR:$src, ResVPR:$Rn, 
+          (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), OpImm:$Index)>;
+
+// Pattern for lane in 64-bit vector
+// 1D element vector: only lane 0 exists, so the index is hard-coded to 0.
+class NI_2VEswap_lane_2d2d<Instruction INST, Operand OpImm,
+                           SDPatternOperator op,
+                           RegisterOperand ResVPR, RegisterOperand OpVPR,
+                           ValueType ResTy, ValueType OpTy,
+                           SDPatternOperator coreop>
+  : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (OpTy OpVPR:$Re))),
+                   (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
+        (INST ResVPR:$src, ResVPR:$Rn, 
+          (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), 0)>;
+
+
+// FMLA by-element patterns: match (fma dup(Re[i]), src, Rn) onto FMLAvve.
+multiclass NI_2VE_fma_v2_pat<string subop, SDPatternOperator op>
+{
+  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
+                         neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
+                         BinOpFrag<(Neon_vduplane
+                                     (Neon_low4f node:$LHS), node:$RHS)>>;
+
+  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
+                         neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
+                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
+                         neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
+                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  // Index can only be half of the max value for lane in 64-bit vector
+
+  def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
+                        neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
+                        BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
+                        neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
+                        BinOpFrag<(Neon_vduplane
+                                    (Neon_combine_4f node:$LHS, undef),
+                                    node:$RHS)>>;
+
+  def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
+                             neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
+                             BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
+}
+
+defm FMLA_lane_v2_s : NI_2VE_fma_v2_pat<"FMLAvve", fma>;
+
+// FMLS by-element patterns: fms is matched as fma with a negated
+// multiplicand.  Each size has two patterns because the fneg may appear
+// either outside the lane-duplicate (negating the splatted result) or
+// inside it (negating the source vector before duplication) — both DAG
+// shapes select the same FMLSvve instruction.
+multiclass NI_2VE_fms_v2_pat<string subop, SDPatternOperator op>
+{
+  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
+                         neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
+                         BinOpFrag<(fneg (Neon_vduplane
+                                     (Neon_low4f node:$LHS), node:$RHS))>>;
+
+  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
+                         neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
+                         BinOpFrag<(Neon_vduplane
+                                     (Neon_low4f (fneg node:$LHS)),
+                                     node:$RHS)>>;
+
+  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
+                         neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
+                         BinOpFrag<(fneg (Neon_vduplane
+                                     node:$LHS, node:$RHS))>>;
+
+  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
+                         neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
+                         BinOpFrag<(Neon_vduplane
+                                     (fneg node:$LHS), node:$RHS)>>;
+
+  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
+                         neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
+                         BinOpFrag<(fneg (Neon_vduplane
+                                     node:$LHS, node:$RHS))>>;
+
+  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
+                         neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
+                         BinOpFrag<(Neon_vduplane
+                                     (fneg node:$LHS), node:$RHS)>>;
+
+  // Index can only be half of the max value for lane in 64-bit vector
+
+  def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
+                        neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
+                        BinOpFrag<(fneg (Neon_vduplane
+                                    node:$LHS, node:$RHS))>>;
+
+  def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
+                        neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
+                        BinOpFrag<(Neon_vduplane
+                                    (fneg node:$LHS), node:$RHS)>>;
+
+  def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
+                        neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
+                        BinOpFrag<(fneg (Neon_vduplane
+                                    (Neon_combine_4f node:$LHS, undef),
+                                    node:$RHS))>>;
+
+  def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
+                        neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
+                        BinOpFrag<(Neon_vduplane
+                                    (Neon_combine_4f (fneg node:$LHS), undef),
+                                    node:$RHS)>>;
+
+  def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
+                             neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
+                             BinOpFrag<(fneg (Neon_combine_2d
+                                         node:$LHS, node:$RHS))>>;
+
+  def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
+                             neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
+                             BinOpFrag<(Neon_combine_2d
+                                         (fneg node:$LHS), (fneg node:$RHS))>>;
+}
+
+defm FMLS_lane_v2_s : NI_2VE_fms_v2_pat<"FMLSvve", fma>;
+
+// Variant 3: Long type
+// E.g. SMLAL : 4S/4H/H (v0-v15), 2D/2S/S
+//      SMLAL2: 4S/8H/H (v0-v15), 2D/4S/S
+
+// Accumulating long by-element instructions: the result element is twice
+// the width of the source elements.  The "2" (second-part) forms read the
+// high half of a 128-bit Rn and get a "2" suffix appended to the mnemonic.
+multiclass NI_2VE_v3<bit u, bits<4> opcode, string asmop>
+{
+  // vector register class for element is always 128-bit to cover the max index
+  def _2d2s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
+                     neon_uimm2_bare, VPR128, VPR64, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+  
+  def _2d4s : NI_2VE<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
+                     neon_uimm2_bare, VPR128, VPR128, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+
+  // Index operations on 16-bit(H) elements are restricted to using v0-v15.
+  def _4s8h : NI_2VE<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
+                     neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
+    let Inst{11} = {Index{2}};
+    let Inst{21} = {Index{1}};
+    let Inst{20} = {Index{0}};
+    let Inst{19-16} = Re{3-0};
+  }
+  
+  def _4s4h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
+                     neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
+    let Inst{11} = {Index{2}};
+    let Inst{21} = {Index{1}};
+    let Inst{20} = {Index{0}};
+    let Inst{19-16} = Re{3-0};
+  }
+}
+
+defm SMLALvve : NI_2VE_v3<0b0, 0b0010, "smlal">;
+defm UMLALvve : NI_2VE_v3<0b1, 0b0010, "umlal">;
+defm SMLSLvve : NI_2VE_v3<0b0, 0b0110, "smlsl">;
+defm UMLSLvve : NI_2VE_v3<0b1, 0b0110, "umlsl">;
+defm SQDMLALvve : NI_2VE_v3<0b0, 0b0011, "sqdmlal">;
+defm SQDMLSLvve : NI_2VE_v3<0b0, 0b0111, "sqdmlsl">;
+
+// Two-operand long by-element instructions (SMULL/UMULL/SQDMULL); same
+// shape and index encoding scheme as NI_2VE_v3 but without the
+// accumulator.
+multiclass NI_2VE_v3_2op<bit u, bits<4> opcode, string asmop>
+{
+  // vector register class for element is always 128-bit to cover the max index
+  def _2d2s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
+                         neon_uimm2_bare, VPR128, VPR64, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+  
+  def _2d4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
+                         neon_uimm2_bare, VPR128, VPR128, VPR128> {
+    let Inst{11} = {Index{1}};
+    let Inst{21} = {Index{0}};
+    let Inst{20-16} = Re;
+  }
+
+  // Index operations on 16-bit(H) elements are restricted to using v0-v15.
+  def _4s8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
+                         neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
+    let Inst{11} = {Index{2}};
+    let Inst{21} = {Index{1}};
+    let Inst{20} = {Index{0}};
+    let Inst{19-16} = Re{3-0};
+  }
+  
+  def _4s4h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
+                         neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
+    let Inst{11} = {Index{2}};
+    let Inst{21} = {Index{1}};
+    let Inst{20} = {Index{0}};
+    let Inst{19-16} = Re{3-0};
+  }
+}
+
+defm SMULLve : NI_2VE_v3_2op<0b0, 0b1010, "smull">;
+defm UMULLve : NI_2VE_v3_2op<0b1, 0b1010, "umull">;
+defm SQDMULLve : NI_2VE_v3_2op<0b0, 0b1011, "sqdmull">;
+
+// Pattern classes for the second-part ("2") long accumulating ops: hiop
+// extracts the high half of the 128-bit $Rn before the widening op.
+
+// Pattern for lane in 128-bit vector
+class NI_2VEL2_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
+                     RegisterOperand EleOpVPR, ValueType ResTy,
+                     ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
+                     SDPatternOperator hiop, SDPatternOperator coreop>
+  : Pat<(ResTy (op (ResTy VPR128:$src),
+          (HalfOpTy (hiop (OpTy VPR128:$Rn))),
+          (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+        (INST VPR128:$src, VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
+
+// Pattern for lane in 64-bit vector
+// $Re is widened with SUBREG_TO_REG for the 128-bit element register field.
+class NI_2VEL2_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
+                    RegisterOperand EleOpVPR, ValueType ResTy,
+                    ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
+                    SDPatternOperator hiop, SDPatternOperator coreop>
+  : Pat<(ResTy (op (ResTy VPR128:$src),
+          (HalfOpTy (hiop (OpTy VPR128:$Rn))),
+          (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+        (INST VPR128:$src, VPR128:$Rn, 
+          (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
+
+// ISel patterns for accumulating long by-element ops
+// (SMLAL/UMLAL/SMLSL/UMLSL): first-part forms use NI_2VE_laneq/lane,
+// second-part ("2") forms use the NI_2VEL2 classes with Neon_High* as the
+// high-half extractor.
+multiclass NI_2VEL_v3_pat<string subop, SDPatternOperator op>
+{
+  def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
+                     op, VPR128, VPR64, VPR128Lo, v4i32, v4i16, v8i16,
+                     BinOpFrag<(Neon_vduplane
+                                 (Neon_low8H node:$LHS), node:$RHS)>>;
+  
+  def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
+                     op, VPR128, VPR64, VPR128, v2i64, v2i32, v4i32,
+                     BinOpFrag<(Neon_vduplane
+                                 (Neon_low4S node:$LHS), node:$RHS)>>;
+  
+  def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
+                       op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H,
+                       BinOpFrag<(Neon_vduplane
+                                   (Neon_low8H node:$LHS), node:$RHS)>>;
+  
+  def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
+                       op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S,
+                       BinOpFrag<(Neon_vduplane
+                                   (Neon_low4S node:$LHS), node:$RHS)>>;
+  
+  // Index can only be half of the max value for lane in 64-bit vector
+
+  def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
+                    op, VPR128, VPR64, VPR64Lo, v4i32, v4i16, v4i16,
+                    BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+  
+  def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
+                    op, VPR128, VPR64, VPR64, v2i64, v2i32, v2i32,
+                    BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
+                      op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H,
+                      BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+  
+  def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
+                      op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S,
+                      BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+}
+
+defm SMLAL_lane_v3 : NI_2VEL_v3_pat<"SMLALvve", Neon_smlal>;
+defm UMLAL_lane_v3 : NI_2VEL_v3_pat<"UMLALvve", Neon_umlal>;
+defm SMLSL_lane_v3 : NI_2VEL_v3_pat<"SMLSLvve", Neon_smlsl>;
+defm UMLSL_lane_v3 : NI_2VEL_v3_pat<"UMLSLvve", Neon_umlsl>;
+
+// Second-part ("2") long pattern classes without an accumulator, for the
+// two-operand long ops.
+
+// Pattern for lane in 128-bit vector
+class NI_2VEL2_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
+                         RegisterOperand EleOpVPR, ValueType ResTy,
+                         ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
+                         SDPatternOperator hiop, SDPatternOperator coreop>
+  : Pat<(ResTy (op 
+          (HalfOpTy (hiop (OpTy VPR128:$Rn))),
+          (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+        (INST VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
+
+// Pattern for lane in 64-bit vector
+// $Re is widened with SUBREG_TO_REG for the 128-bit element register field.
+class NI_2VEL2_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
+                        RegisterOperand EleOpVPR, ValueType ResTy,
+                        ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
+                        SDPatternOperator hiop, SDPatternOperator coreop>
+  : Pat<(ResTy (op
+          (HalfOpTy (hiop (OpTy VPR128:$Rn))),
+          (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
+        (INST VPR128:$Rn, 
+          (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
+
+// ISel patterns for two-operand long by-element ops
+// (SMULL/UMULL/SQDMULL), mirroring NI_2VEL_v3_pat without the accumulator.
+multiclass NI_2VEL_mul_v3_pat<string subop, SDPatternOperator op>
+{
+  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
+                         op, VPR64, VPR128Lo, v4i32, v4i16, v8i16,
+                         BinOpFrag<(Neon_vduplane
+                                     (Neon_low8H node:$LHS), node:$RHS)>>;
+
+  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
+                         op, VPR64, VPR128, v2i64, v2i32, v4i32,
+                         BinOpFrag<(Neon_vduplane
+                                     (Neon_low4S node:$LHS), node:$RHS)>>;
+
+  def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
+                           op, VPR128Lo, v4i32, v8i16, v8i16, v4i16,
+                           Neon_High8H,
+                           BinOpFrag<(Neon_vduplane
+                                       (Neon_low8H node:$LHS), node:$RHS)>>;
+  
+  def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
+                           op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S,
+                           BinOpFrag<(Neon_vduplane
+                                       (Neon_low4S node:$LHS), node:$RHS)>>;
+  
+  // Index can only be half of the max value for lane in 64-bit vector
+
+  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
+                        op, VPR64, VPR64Lo, v4i32, v4i16, v4i16,
+                        BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
+                        op, VPR64, VPR64, v2i64, v2i32, v2i32,
+                        BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
+                          op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H,
+                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+  
+  def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
+                          op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S,
+                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+}
+
+defm SMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SMULLve", int_arm_neon_vmulls>;
+defm UMULL_lane_v3 : NI_2VEL_mul_v3_pat<"UMULLve", int_arm_neon_vmullu>;
+defm SQDMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SQDMULLve", int_arm_neon_vqdmull>;
+
+// Builds the sqdmlal/sqdmlsl PatFrags: a saturating add/sub (op) of the
+// accumulator with a vqdmull result, for 4S and 2D result types.
+multiclass NI_qdma<SDPatternOperator op>
+{
+  def _4s : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
+                    (op node:$Ra,
+                      (v4i32 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
+
+  def _2d : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
+                    (op node:$Ra,
+                      (v2i64 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
+}
+
+defm Neon_qdmlal : NI_qdma<int_arm_neon_vqadds>;
+defm Neon_qdmlsl : NI_qdma<int_arm_neon_vqsubs>;
+
+// ISel patterns for the saturating doubling multiply-accumulate long
+// by-element ops (SQDMLAL/SQDMLSL), built on the Neon_qdmlal/Neon_qdmlsl
+// PatFrags above (op names are resolved per result type via !cast).
+multiclass NI_2VEL_v3_qdma_pat<string subop, string op>
+{
+  def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
+                     !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR128Lo,
+                     v4i32, v4i16, v8i16,
+                     BinOpFrag<(Neon_vduplane
+                                 (Neon_low8H node:$LHS), node:$RHS)>>;
+  
+  def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
+                     !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR128,
+                     v2i64, v2i32, v4i32,
+                     BinOpFrag<(Neon_vduplane
+                                 (Neon_low4S node:$LHS), node:$RHS)>>;
+  
+  def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
+                       !cast<PatFrag>(op # "_4s"), VPR128Lo,
+                       v4i32, v8i16, v8i16, v4i16, Neon_High8H,
+                       BinOpFrag<(Neon_vduplane
+                                   (Neon_low8H node:$LHS), node:$RHS)>>;
+  
+  def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
+                       !cast<PatFrag>(op # "_2d"), VPR128,
+                       v2i64, v4i32, v4i32, v2i32, Neon_High4S,
+                       BinOpFrag<(Neon_vduplane
+                                   (Neon_low4S node:$LHS), node:$RHS)>>;
+  
+  // Index can only be half of the max value for lane in 64-bit vector
+
+  def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
+                    !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR64Lo,
+                    v4i32, v4i16, v4i16,
+                    BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+  
+  def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
+                    !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR64,
+                    v2i64, v2i32, v2i32,
+                    BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+
+  def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
+                      !cast<PatFrag>(op # "_4s"), VPR64Lo,
+                      v4i32, v8i16, v4i16, v4i16, Neon_High8H,
+                      BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+  
+  def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
+                      !cast<PatFrag>(op # "_2d"), VPR64,
+                      v2i64, v4i32, v2i32, v2i32, Neon_High4S,
+                      BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
+}
+
+defm SQDMLAL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLALvve", "Neon_qdmlal">;
+defm SQDMLSL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLSLvve", "Neon_qdmlsl">;
+
+// End of implementation for instruction class (3V Elem)
+
+//Insert element (vector, from main)
+// The imm5 field (Inst{20-16}) encodes both element size and index: a
+// trailing 1 bit marks B elements, 10 marks H, 100 marks S, 1000 marks D,
+// with the index packed in the bits above the marker.
+def INSbw : NeonI_INS_main<"ins", "b", v16i8, GPR32, i32,
+                           neon_uimm4_bare> {
+  let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
+}
+def INShw : NeonI_INS_main<"ins", "h", v8i16, GPR32, i32,
+                           neon_uimm3_bare> {
+  let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
+}
+def INSsw : NeonI_INS_main<"ins", "s", v4i32, GPR32, i32,
+                           neon_uimm2_bare> {
+  let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
+}
+def INSdx : NeonI_INS_main<"ins", "d", v2i64, GPR64, i64,
+                           neon_uimm1_bare> {
+  let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
+}
+
+// Select vector_insert on a 64-bit vector: widen the 64-bit source into the
+// 128-bit register file with SUBREG_TO_REG, perform the 128-bit INS, then
+// take the low 64 bits back with EXTRACT_SUBREG.
+class Neon_INS_main_pattern <ValueType ResTy,ValueType ExtResTy,
+                             RegisterClass OpGPR, ValueType OpTy, 
+                             Operand OpImm, Instruction INS> 
+  : Pat<(ResTy (vector_insert
+              (ResTy VPR64:$src),
+              (OpTy OpGPR:$Rn),
+              (OpImm:$Imm))),
+        (ResTy (EXTRACT_SUBREG 
+          (ExtResTy (INS (ExtResTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
+            OpGPR:$Rn, OpImm:$Imm)), sub_64))>;
+
+// 64-bit-vector insert patterns; the lane-count halves versus the 128-bit
+// form, so each uses the next-smaller immediate operand class.
+def INSbw_pattern : Neon_INS_main_pattern<v8i8, v16i8, GPR32, i32,
+                                          neon_uimm3_bare, INSbw>;
+def INShw_pattern : Neon_INS_main_pattern<v4i16, v8i16, GPR32, i32,
+                                          neon_uimm2_bare, INShw>;
+def INSsw_pattern : Neon_INS_main_pattern<v2i32, v4i32, GPR32, i32,
+                                          neon_uimm1_bare, INSsw>;
+def INSdx_pattern : Neon_INS_main_pattern<v1i64, v2i64, GPR64, i64,
+                                          neon_uimm0_bare, INSdx>;
+
+// INS (element): copy lane $Immn of $Rn into lane $Immd of $Rd, selected from
+// a vector_insert of a vector_extract. $Rd is tied to $src since all other
+// lanes pass through unchanged.
+class NeonI_INS_element<string asmop, string Res, ValueType ResTy,
+                        Operand ResImm, ValueType MidTy>
+  : NeonI_insert<0b1, 0b1,
+                 (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, 
+                 ResImm:$Immd, ResImm:$Immn),
+                 asmop # "\t$Rd." # Res # "[$Immd], $Rn." # Res # "[$Immn]",
+                 [(set (ResTy VPR128:$Rd),
+                    (ResTy (vector_insert
+                      (ResTy VPR128:$src),
+                      (MidTy (vector_extract
+                        (ResTy VPR128:$Rn),
+                        (ResImm:$Immn))),
+                      (ResImm:$Immd))))],
+                 NoItinerary> {
+  let Constraints = "$src = $Rd";
+  // Widest index needed is 4 bits (byte lanes); narrower sizes use a subset.
+  bits<4> Immd;
+  bits<4> Immn;
+}
+
+// Insert element (vector, from element). The destination index shares the
+// imm5 field with the element size, like the main-register INS forms; the
+// source index occupies the upper bits of imm4 (Inst{14-11}).
+def INSELb : NeonI_INS_element<"ins", "b", v16i8, neon_uimm4_bare, i32> {
+  let Inst{20-16} = {Immd{3}, Immd{2}, Immd{1}, Immd{0}, 0b1};
+  let Inst{14-11} = {Immn{3}, Immn{2}, Immn{1}, Immn{0}};
+}
+def INSELh : NeonI_INS_element<"ins", "h", v8i16, neon_uimm3_bare, i32> {
+  let Inst{20-16} = {Immd{2}, Immd{1}, Immd{0}, 0b1, 0b0};
+  let Inst{14-12} = {Immn{2}, Immn{1}, Immn{0}};
+  // bit 11 is unspecified.
+}
+def INSELs : NeonI_INS_element<"ins", "s", v4i32, neon_uimm2_bare, i32> {
+  let Inst{20-16} = {Immd{1}, Immd{0}, 0b1, 0b0, 0b0};
+  let Inst{14-13} = {Immn{1}, Immn{0}};
+  // bits 11-12 are unspecified.
+}
+def INSELd : NeonI_INS_element<"ins", "d", v2i64, neon_uimm1_bare, i64> {
+  let Inst{20-16} = {Immd, 0b1, 0b0, 0b0, 0b0};
+  let Inst{14} = Immn{0};
+  // bits 11-13 are unspecified.
+}
+
+// Element-to-element insert patterns for the three mixed-width combinations
+// not covered by the instruction's own 128-bit pattern. "Na" is the narrow
+// 64-bit type, "St" the full 128-bit type. Each 64-bit operand is widened
+// with SUBREG_TO_REG so the 128-bit INSEL* can be used, and a 64-bit result
+// is recovered with EXTRACT_SUBREG.
+multiclass Neon_INS_elt_pattern <ValueType NaTy, Operand NaImm,
+                                ValueType MidTy, ValueType StTy,
+                                Operand StImm, Instruction INS> { 
+// 64-bit destination, 128-bit source.
+def : Pat<(NaTy (vector_insert
+            (NaTy VPR64:$src),
+            (MidTy (vector_extract
+              (StTy VPR128:$Rn),
+              (StImm:$Immn))),
+            (NaImm:$Immd))),
+          (NaTy (EXTRACT_SUBREG
+            (StTy (INS 
+              (StTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
+              (StTy VPR128:$Rn),
+              NaImm:$Immd,
+              StImm:$Immn)),
+          sub_64))>;
+
+// 128-bit destination, 64-bit source.
+def : Pat<(StTy (vector_insert
+            (StTy VPR128:$src),
+            (MidTy (vector_extract
+              (NaTy VPR64:$Rn),
+              (NaImm:$Immn))),
+            (StImm:$Immd))),
+          (StTy (INS 
+            (StTy VPR128:$src),
+            (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+            StImm:$Immd,
+            NaImm:$Immn))>;
+
+// 64-bit destination and source.
+def : Pat<(NaTy (vector_insert
+            (NaTy VPR64:$src),
+            (MidTy (vector_extract
+              (NaTy VPR64:$Rn),
+              (NaImm:$Immn))),
+            (NaImm:$Immd))),
+          (NaTy (EXTRACT_SUBREG
+            (StTy (INS 
+              (StTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
+              (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+              NaImm:$Immd,
+              NaImm:$Immn)),
+          sub_64))>;
+}
+
+// Instantiate the mixed-width element-insert patterns for each element size.
+defm INSb_pattern : Neon_INS_elt_pattern<v8i8, neon_uimm3_bare, i32,
+                                         v16i8, neon_uimm4_bare, INSELb>;
+defm INSh_pattern : Neon_INS_elt_pattern<v4i16, neon_uimm2_bare, i32,
+                                         v8i16, neon_uimm3_bare, INSELh>;
+defm INSs_pattern : Neon_INS_elt_pattern<v2i32, neon_uimm1_bare, i32,
+                                         v4i32, neon_uimm2_bare, INSELs>;
+defm INSd_pattern : Neon_INS_elt_pattern<v1i64, neon_uimm0_bare, i64,
+                                         v2i64, neon_uimm1_bare, INSELd>;
+
+// SMOV: signed move of vector lane $Imm of $Rn into a GPR, selected from a
+// sign-extend-in-reg of a vector_extract. Q distinguishes the 32-bit (Q=0)
+// and 64-bit (Q=1) destination forms.
+class NeonI_SMOV<string asmop, string Res, bit Q,
+                 ValueType OpTy, ValueType eleTy,
+                 Operand OpImm, RegisterClass ResGPR, ValueType ResTy>
+  : NeonI_copy<Q, 0b0, 0b0101,
+               (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
+               asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
+               [(set (ResTy ResGPR:$Rd),
+                 (ResTy (sext_inreg
+                   (ResTy (vector_extract
+                     (OpTy VPR128:$Rn), (OpImm:$Imm))),
+                   eleTy)))],
+               NoItinerary> {
+  // Widest index needed is 4 bits (byte lanes).
+  bits<4> Imm;
+}
+
+// Signed integer move (main, from element). The lane index shares imm5
+// (Inst{20-16}) with the element size, as for INS. No 32-bit "s" form is
+// defined here: a sign-extended s lane to a 32-bit GPR is just a plain move.
+def SMOVwb : NeonI_SMOV<"smov", "b", 0b0, v16i8, i8, neon_uimm4_bare,
+                        GPR32, i32> {
+  let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
+}
+def SMOVwh : NeonI_SMOV<"smov", "h", 0b0, v8i16, i16, neon_uimm3_bare,
+                        GPR32, i32> {
+  let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
+}
+def SMOVxb : NeonI_SMOV<"smov", "b", 0b1, v16i8, i8, neon_uimm4_bare,
+                        GPR64, i64> {
+  let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
+}
+def SMOVxh : NeonI_SMOV<"smov", "h", 0b1, v8i16, i16, neon_uimm3_bare,
+                        GPR64, i64> {
+  let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
+}
+def SMOVxs : NeonI_SMOV<"smov", "s", 0b1, v4i32, i32, neon_uimm2_bare,
+                        GPR64, i64> {
+  let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
+}
+
+// Additional selection patterns for the 64-bit SMOV forms, covering the
+// various DAG shapes a signed lane extract to i64 can take: sext_inreg of an
+// anyext, or a direct sext, on either the 128-bit (St) or 64-bit (Na)
+// vector. 64-bit sources are widened with SUBREG_TO_REG first.
+multiclass Neon_SMOVx_pattern <ValueType StTy, ValueType NaTy,
+                               ValueType eleTy, Operand StImm,  Operand NaImm,
+                               Instruction SMOVI> {
+  def : Pat<(i64 (sext_inreg
+              (i64 (anyext
+                (i32 (vector_extract
+                  (StTy VPR128:$Rn), (StImm:$Imm))))),
+              eleTy)),
+            (SMOVI VPR128:$Rn, StImm:$Imm)>;
+  
+  def : Pat<(i64 (sext
+              (i32 (vector_extract
+                (StTy VPR128:$Rn), (StImm:$Imm))))),
+            (SMOVI VPR128:$Rn, StImm:$Imm)>;
+  
+  def : Pat<(i64 (sext_inreg
+              (i64 (vector_extract
+                (NaTy VPR64:$Rn), (NaImm:$Imm))),
+              eleTy)),
+            (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+              NaImm:$Imm)>;
+  
+  def : Pat<(i64 (sext_inreg
+              (i64 (anyext
+                (i32 (vector_extract
+                  (NaTy VPR64:$Rn), (NaImm:$Imm))))),
+              eleTy)),
+            (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+              NaImm:$Imm)>;
+  
+  def : Pat<(i64 (sext
+              (i32 (vector_extract
+                (NaTy VPR64:$Rn), (NaImm:$Imm))))),
+            (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+              NaImm:$Imm)>; 
+}
+
+// 64-bit SMOV patterns for byte, halfword and word lanes.
+defm SMOVxb_pattern : Neon_SMOVx_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
+                                          neon_uimm3_bare, SMOVxb>;
+defm SMOVxh_pattern : Neon_SMOVx_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
+                                          neon_uimm2_bare, SMOVxh>;
+defm SMOVxs_pattern : Neon_SMOVx_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
+                                          neon_uimm1_bare, SMOVxs>;
+
+// 32-bit SMOV from a lane of a 64-bit vector: widen the source into the
+// 128-bit register with SUBREG_TO_REG, then use the 128-bit SMOV with the
+// same lane index.
+class Neon_SMOVw_pattern <ValueType StTy, ValueType NaTy,
+                          ValueType eleTy, Operand StImm,  Operand NaImm,
+                          Instruction SMOVI>
+  : Pat<(i32 (sext_inreg
+          (i32 (vector_extract
+            (NaTy VPR64:$Rn), (NaImm:$Imm))),
+          eleTy)),
+        (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+          NaImm:$Imm)>;
+
+// 32-bit SMOV patterns for byte and halfword lanes of 64-bit vectors.
+def SMOVwb_pattern : Neon_SMOVw_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
+                                          neon_uimm3_bare, SMOVwb>;
+def SMOVwh_pattern : Neon_SMOVw_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
+                                          neon_uimm2_bare, SMOVwh>;
+
+
+// UMOV: unsigned move of vector lane $Imm of $Rn into a GPR, selected from a
+// plain vector_extract (the upper destination bits are zeroed by the
+// instruction, so no extension node appears in the pattern).
+class NeonI_UMOV<string asmop, string Res, bit Q,
+                 ValueType OpTy, Operand OpImm,
+                 RegisterClass ResGPR, ValueType ResTy>
+  : NeonI_copy<Q, 0b0, 0b0111,
+               (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
+               asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
+               [(set (ResTy ResGPR:$Rd),
+                  (ResTy (vector_extract
+                    (OpTy VPR128:$Rn), (OpImm:$Imm))))],
+               NoItinerary> {
+  // Widest index needed is 4 bits (byte lanes).
+  bits<4> Imm;
+}
+
+// Unsigned integer move (main, from element). The lane index shares imm5
+// (Inst{20-16}) with the element size, as for INS/SMOV. Only b/h/s go to a
+// 32-bit GPR; the d lane needs the 64-bit (Q=1) form.
+def UMOVwb : NeonI_UMOV<"umov", "b", 0b0, v16i8, neon_uimm4_bare,
+                         GPR32, i32> {
+  let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
+}
+def UMOVwh : NeonI_UMOV<"umov", "h", 0b0, v8i16, neon_uimm3_bare,
+                         GPR32, i32> {
+  let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
+}
+def UMOVws : NeonI_UMOV<"umov", "s", 0b0, v4i32, neon_uimm2_bare,
+                         GPR32, i32> {
+  let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
+}
+def UMOVxd : NeonI_UMOV<"umov", "d", 0b1, v2i64, neon_uimm1_bare,
+                         GPR64, i64> {
+  let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
+}
+
+// UMOV from a lane of a 64-bit ("narrow") vector: widen the source into the
+// 128-bit register with SUBREG_TO_REG, then use the 128-bit UMOV with the
+// same lane index. (Parameter renamed from SMOVI: this class is instantiated
+// only with UMOV instructions; the old name was a copy-paste from
+// Neon_SMOVw_pattern. Template arguments bind positionally, so callers are
+// unaffected.)
+class Neon_UMOV_pattern <ValueType StTy, ValueType NaTy, ValueType ResTy,
+                         Operand StImm,  Operand NaImm,
+                         Instruction UMOVI>
+  : Pat<(ResTy (vector_extract
+          (NaTy VPR64:$Rn), NaImm:$Imm)),
+        (UMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+          NaImm:$Imm)>;
+
+// 32-bit UMOV patterns for lanes of 64-bit vectors.
+def UMOVwb_pattern : Neon_UMOV_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
+                                       neon_uimm3_bare, UMOVwb>;
+def UMOVwh_pattern : Neon_UMOV_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
+                                       neon_uimm2_bare, UMOVwh>; 
+def UMOVws_pattern : Neon_UMOV_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
+                                       neon_uimm1_bare, UMOVws>;
+
+// Fold explicit zero-extension idioms into UMOV: an 'and' with the element
+// mask (255 / 65535) after a b/h lane extract, and a zext after a d lane
+// extract, are already satisfied by UMOV's zeroing of the high bits.
+def : Pat<(i32 (and
+            (i32 (vector_extract
+              (v16i8 VPR128:$Rn), (neon_uimm4_bare:$Imm))),
+            255)),
+          (UMOVwb VPR128:$Rn, neon_uimm4_bare:$Imm)>;
+
+def : Pat<(i32 (and
+            (i32 (vector_extract
+              (v8i16 VPR128:$Rn), (neon_uimm3_bare:$Imm))),
+            65535)),
+          (UMOVwh VPR128:$Rn, neon_uimm3_bare:$Imm)>;
+
+// NOTE(review): this types the extract of a v2i64 (64-bit elements) as i32,
+// presumably relying on extract_vector_elt's implicit truncation before the
+// zext — confirm this DAG shape is actually formed and that UMOVxd's full
+// 64-bit lane move is the intended result here.
+def : Pat<(i64 (zext
+            (i32 (vector_extract
+              (v2i64 VPR128:$Rn), (neon_uimm1_bare:$Imm))))),
+          (UMOVxd VPR128:$Rn, neon_uimm1_bare:$Imm)>;
+
+// Same folds for 64-bit source vectors, widened via SUBREG_TO_REG first.
+def : Pat<(i32 (and
+            (i32 (vector_extract
+              (v8i8 VPR64:$Rn), (neon_uimm3_bare:$Imm))),
+            255)),
+          (UMOVwb (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
+            neon_uimm3_bare:$Imm)>;
+
+def : Pat<(i32 (and
+            (i32 (vector_extract
+              (v4i16 VPR64:$Rn), (neon_uimm2_bare:$Imm))),
+            65535)),
+          (UMOVwh (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
+            neon_uimm2_bare:$Imm)>;
+
+// NOTE(review): as above, an i32-typed extract of a v1i64 — verify.
+def : Pat<(i64 (zext
+            (i32 (vector_extract
+              (v1i64 VPR64:$Rn), (neon_uimm0_bare:$Imm))))),
+          (UMOVxd (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
+            neon_uimm0_bare:$Imm)>;
+
+// Additional copy patterns for scalar types: extracting element 0 of a
+// one-element vector. Sub-word elements go through UMOV after widening the
+// FPR into the 128-bit register; word/doubleword integers use FMOV to a GPR;
+// floating-point element 0 is the FPR itself, so no instruction is needed.
+def : Pat<(i32 (vector_extract (v1i8 FPR8:$Rn), (i64 0))),
+          (UMOVwb (v16i8
+            (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8)), (i64 0))>;
+
+def : Pat<(i32 (vector_extract (v1i16 FPR16:$Rn), (i64 0))),
+          (UMOVwh (v8i16
+            (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16)), (i64 0))>;
+
+def : Pat<(i32 (vector_extract (v1i32 FPR32:$Rn), (i64 0))),
+          (FMOVws FPR32:$Rn)>;
+
+def : Pat<(i64 (vector_extract (v1i64 FPR64:$Rn), (i64 0))),
+          (FMOVxd FPR64:$Rn)>;
+               
+def : Pat<(f64 (vector_extract (v1f64 FPR64:$Rn), (i64 0))),
+          (f64 FPR64:$Rn)>;
+
+def : Pat<(f32 (vector_extract (v1f32 FPR32:$Rn), (i64 0))),
+          (f32 FPR32:$Rn)>;
+
+// scalar_to_vector for one-element vectors. Sub-word GPR sources are
+// inserted into lane 0 of an undef 128-bit register via INS, then the scalar
+// subregister is extracted; word/doubleword GPRs use FMOV into an FPR; an
+// FPR source already is the one-element vector.
+def : Pat<(v1i8 (scalar_to_vector GPR32:$Rn)),
+          (v1i8 (EXTRACT_SUBREG (v16i8
+            (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
+            sub_8))>;
+
+def : Pat<(v1i16 (scalar_to_vector GPR32:$Rn)),
+          (v1i16 (EXTRACT_SUBREG (v8i16
+            (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
+            sub_16))>;
+
+def : Pat<(v1i32 (scalar_to_vector GPR32:$src)),
+          (FMOVsw $src)>;
+
+def : Pat<(v1i64 (scalar_to_vector GPR64:$src)),
+          (FMOVdx $src)>;
+
+def : Pat<(v1f32 (scalar_to_vector (f32 FPR32:$Rn))),
+          (v1f32 FPR32:$Rn)>;
+def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Rn))),
+          (v1f64 FPR64:$Rn)>;
\ No newline at end of file